From 2db017af37dfedd49b854fb431ef629b8157eefc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 30 Aug 2024 00:26:23 +0000 Subject: [PATCH 0001/1248] simplify service trait bounds and lifetimes Signed-off-by: Jason Volk --- src/service/service.rs | 23 +++++++++-------------- src/service/services.rs | 8 ++++---- 2 files changed, 13 insertions(+), 18 deletions(-) diff --git a/src/service/service.rs b/src/service/service.rs index 635f782e..065f78a0 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -51,7 +51,7 @@ pub(crate) struct Args<'a> { /// Dep is a reference to a service used within another service. /// Circular-dependencies between services require this indirection. -pub(crate) struct Dep { +pub(crate) struct Dep { dep: OnceLock>, service: Weak, name: &'static str, @@ -62,7 +62,7 @@ pub(crate) type MapType = BTreeMap; pub(crate) type MapVal = (Weak, Weak); pub(crate) type MapKey = String; -impl Deref for Dep { +impl Deref for Dep { type Target = Arc; /// Dereference a dependency. The dependency must be ready or panics. @@ -80,7 +80,7 @@ impl Deref for Dep { impl<'a> Args<'a> { /// Create a lazy-reference to a service when constructing another Service. - pub(crate) fn depend(&'a self, name: &'static str) -> Dep { + pub(crate) fn depend(&'a self, name: &'static str) -> Dep { Dep:: { dep: OnceLock::new(), service: Arc::downgrade(self.service), @@ -90,17 +90,12 @@ impl<'a> Args<'a> { /// Create a reference immediately to a service when constructing another /// Service. The other service must be constructed. - pub(crate) fn require(&'a self, name: &'static str) -> Arc { - require::(self.service, name) - } + pub(crate) fn require(&'a self, name: &str) -> Arc { require::(self.service, name) } } /// Reference a Service by name. Panics if the Service does not exist or was /// incorrectly cast. 
-pub(crate) fn require<'a, 'b, T>(map: &'b Map, name: &'a str) -> Arc -where - T: Send + Sync + 'a + 'b + 'static, -{ +pub(crate) fn require(map: &Map, name: &str) -> Arc { try_get::(map, name) .inspect_err(inspect_log) .expect("Failure to reference service required by another service.") @@ -112,9 +107,9 @@ where /// # Panics /// Incorrect type is not a silent failure (None) as the type never has a reason /// to be incorrect. -pub(crate) fn get<'a, 'b, T>(map: &'b Map, name: &'a str) -> Option> +pub(crate) fn get(map: &Map, name: &str) -> Option> where - T: Send + Sync + 'a + 'b + 'static, + T: Any + Send + Sync + Sized, { map.read() .expect("locked for reading") @@ -129,9 +124,9 @@ where /// Reference a Service by name. Returns Err if the Service does not exist or /// was incorrectly cast. -pub(crate) fn try_get<'a, 'b, T>(map: &'b Map, name: &'a str) -> Result> +pub(crate) fn try_get(map: &Map, name: &str) -> Result> where - T: Send + Sync + 'a + 'b + 'static, + T: Any + Send + Sync + Sized, { map.read() .expect("locked for reading") diff --git a/src/service/services.rs b/src/service/services.rs index 8e69cdbb..3aa095b8 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -193,16 +193,16 @@ impl Services { } } - pub fn try_get<'a, 'b, T>(&'b self, name: &'a str) -> Result> + pub fn try_get(&self, name: &str) -> Result> where - T: Send + Sync + 'a + 'b + 'static, + T: Any + Send + Sync + Sized, { service::try_get::(&self.service, name) } - pub fn get<'a, 'b, T>(&'b self, name: &'a str) -> Option> + pub fn get(&self, name: &str) -> Option> where - T: Send + Sync + 'a + 'b + 'static, + T: Any + Send + Sync + Sized, { service::get::(&self.service, name) } From 99ad404ea9f72b3a4d7aabb55a17127d82f39d12 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 31 Aug 2024 02:13:23 +0000 Subject: [PATCH 0002/1248] add str traits for split, between, unquote; consolidate tests Signed-off-by: Jason Volk --- src/core/utils/string.rs | 13 +++--- 
src/core/utils/string/between.rs | 26 ++++++++++++ src/core/utils/string/split.rs | 22 ++++++++++ src/core/utils/string/tests.rs | 70 +++++++++++++++++++++++++++++++ src/core/utils/string/unquote.rs | 33 +++++++++++++++ src/core/utils/string/unquoted.rs | 52 +++++++++++++++++++++++ src/core/utils/tests.rs | 43 ------------------- src/service/service.rs | 4 +- 8 files changed, 212 insertions(+), 51 deletions(-) create mode 100644 src/core/utils/string/between.rs create mode 100644 src/core/utils/string/split.rs create mode 100644 src/core/utils/string/tests.rs create mode 100644 src/core/utils/string/unquote.rs create mode 100644 src/core/utils/string/unquoted.rs diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index 85282b30..e65a3369 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -1,3 +1,10 @@ +mod between; +mod split; +mod tests; +mod unquote; +mod unquoted; + +pub use self::{between::Between, split::SplitInfallible, unquote::Unquote, unquoted::Unquoted}; use crate::{utils::exchange, Result}; pub const EMPTY: &str = ""; @@ -95,12 +102,6 @@ pub fn common_prefix<'a>(choice: &'a [&str]) -> &'a str { }) } -#[inline] -#[must_use] -pub fn split_once_infallible<'a>(input: &'a str, delim: &'_ str) -> (&'a str, &'a str) { - input.split_once(delim).unwrap_or((input, EMPTY)) -} - /// Parses the bytes into a string. pub fn string_from_bytes(bytes: &[u8]) -> Result { let str: &str = str_from_bytes(bytes)?; diff --git a/src/core/utils/string/between.rs b/src/core/utils/string/between.rs new file mode 100644 index 00000000..209a9dab --- /dev/null +++ b/src/core/utils/string/between.rs @@ -0,0 +1,26 @@ +type Delim<'a> = (&'a str, &'a str); + +/// Slice a string between a pair of delimeters. +pub trait Between<'a> { + /// Extract a string between the delimeters. If the delimeters were not + /// found None is returned, otherwise the first extraction is returned. 
+ fn between(&self, delim: Delim<'_>) -> Option<&'a str>; + + /// Extract a string between the delimeters. If the delimeters were not + /// found the original string is returned; take note of this behavior, + /// if an empty slice is desired for this case use the fallible version and + /// unwrap to EMPTY. + fn between_infallible(&self, delim: Delim<'_>) -> &'a str; +} + +impl<'a> Between<'a> for &'a str { + #[inline] + fn between_infallible(&self, delim: Delim<'_>) -> &'a str { self.between(delim).unwrap_or(self) } + + #[inline] + fn between(&self, delim: Delim<'_>) -> Option<&'a str> { + self.split_once(delim.0) + .and_then(|(_, b)| b.rsplit_once(delim.1)) + .map(|(a, _)| a) + } +} diff --git a/src/core/utils/string/split.rs b/src/core/utils/string/split.rs new file mode 100644 index 00000000..96de28df --- /dev/null +++ b/src/core/utils/string/split.rs @@ -0,0 +1,22 @@ +use super::EMPTY; + +type Pair<'a> = (&'a str, &'a str); + +/// Split a string with default behaviors on non-match. +pub trait SplitInfallible<'a> { + /// Split a string at the first occurrence of delim. If not found, the + /// entire string is returned in \[0\], while \[1\] is empty. + fn split_once_infallible(&self, delim: &str) -> Pair<'a>; + + /// Split a string from the last occurrence of delim. If not found, the + /// entire string is returned in \[0\], while \[1\] is empty. 
+ fn rsplit_once_infallible(&self, delim: &str) -> Pair<'a>; +} + +impl<'a> SplitInfallible<'a> for &'a str { + #[inline] + fn rsplit_once_infallible(&self, delim: &str) -> Pair<'a> { self.rsplit_once(delim).unwrap_or((self, EMPTY)) } + + #[inline] + fn split_once_infallible(&self, delim: &str) -> Pair<'a> { self.split_once(delim).unwrap_or((self, EMPTY)) } +} diff --git a/src/core/utils/string/tests.rs b/src/core/utils/string/tests.rs new file mode 100644 index 00000000..e8c17de6 --- /dev/null +++ b/src/core/utils/string/tests.rs @@ -0,0 +1,70 @@ +#![cfg(test)] + +#[test] +fn common_prefix() { + let input = ["conduwuit", "conduit", "construct"]; + let output = super::common_prefix(&input); + assert_eq!(output, "con"); +} + +#[test] +fn common_prefix_empty() { + let input = ["abcdefg", "hijklmn", "opqrstu"]; + let output = super::common_prefix(&input); + assert_eq!(output, ""); +} + +#[test] +fn common_prefix_none() { + let input = []; + let output = super::common_prefix(&input); + assert_eq!(output, ""); +} + +#[test] +fn camel_to_snake_case_0() { + let res = super::camel_to_snake_string("CamelToSnakeCase"); + assert_eq!(res, "camel_to_snake_case"); +} + +#[test] +fn camel_to_snake_case_1() { + let res = super::camel_to_snake_string("CAmelTOSnakeCase"); + assert_eq!(res, "camel_tosnake_case"); +} + +#[test] +fn unquote() { + use super::Unquote; + + assert_eq!("\"foo\"".unquote(), Some("foo")); + assert_eq!("\"foo".unquote(), None); + assert_eq!("foo".unquote(), None); +} + +#[test] +fn unquote_infallible() { + use super::Unquote; + + assert_eq!("\"foo\"".unquote_infallible(), "foo"); + assert_eq!("\"foo".unquote_infallible(), "\"foo"); + assert_eq!("foo".unquote_infallible(), "foo"); +} + +#[test] +fn between() { + use super::Between; + + assert_eq!("\"foo\"".between(("\"", "\"")), Some("foo")); + assert_eq!("\"foo".between(("\"", "\"")), None); + assert_eq!("foo".between(("\"", "\"")), None); +} + +#[test] +fn between_infallible() { + use super::Between; + + 
assert_eq!("\"foo\"".between_infallible(("\"", "\"")), "foo"); + assert_eq!("\"foo".between_infallible(("\"", "\"")), "\"foo"); + assert_eq!("foo".between_infallible(("\"", "\"")), "foo"); +} diff --git a/src/core/utils/string/unquote.rs b/src/core/utils/string/unquote.rs new file mode 100644 index 00000000..eeded610 --- /dev/null +++ b/src/core/utils/string/unquote.rs @@ -0,0 +1,33 @@ +const QUOTE: char = '"'; + +/// Slice a string between quotes +pub trait Unquote<'a> { + /// Whether the input is quoted. If this is false the fallible methods of + /// this interface will fail. + fn is_quoted(&self) -> bool; + + /// Unquotes a string. If the input is not quoted it is simply returned + /// as-is. If the input is partially quoted on either end that quote is not + /// removed. + fn unquote(&self) -> Option<&'a str>; + + /// Unquotes a string. The input must be quoted on each side for Some to be + /// returned + fn unquote_infallible(&self) -> &'a str; +} + +impl<'a> Unquote<'a> for &'a str { + #[inline] + fn unquote_infallible(&self) -> &'a str { + self.strip_prefix(QUOTE) + .unwrap_or(self) + .strip_suffix(QUOTE) + .unwrap_or(self) + } + + #[inline] + fn unquote(&self) -> Option<&'a str> { self.strip_prefix(QUOTE).and_then(|s| s.strip_suffix(QUOTE)) } + + #[inline] + fn is_quoted(&self) -> bool { self.starts_with(QUOTE) && self.ends_with(QUOTE) } +} diff --git a/src/core/utils/string/unquoted.rs b/src/core/utils/string/unquoted.rs new file mode 100644 index 00000000..5b002d99 --- /dev/null +++ b/src/core/utils/string/unquoted.rs @@ -0,0 +1,52 @@ +use std::ops::Deref; + +use serde::{de, Deserialize, Deserializer}; + +use super::Unquote; +use crate::{err, Result}; + +/// Unquoted string which deserialized from a quoted string. Construction from a +/// &str is infallible such that the input can already be unquoted. Construction +/// from serde deserialization is fallible and the input must be quoted. 
+#[repr(transparent)] +pub struct Unquoted(str); + +impl<'a> Unquoted { + #[inline] + #[must_use] + pub fn as_str(&'a self) -> &'a str { &self.0 } +} + +impl<'a, 'de: 'a> Deserialize<'de> for &'a Unquoted { + fn deserialize>(deserializer: D) -> Result { + let s = <&'a str>::deserialize(deserializer)?; + s.is_quoted() + .then_some(s) + .ok_or(err!(SerdeDe("expected quoted string"))) + .map_err(de::Error::custom) + .map(Into::into) + } +} + +impl<'a> From<&'a str> for &'a Unquoted { + fn from(s: &'a str) -> &'a Unquoted { + let s: &'a str = s.unquote_infallible(); + + //SAFETY: This is a pattern I lifted from ruma-identifiers for strong-type strs + // by wrapping in a tuple-struct. + #[allow(clippy::transmute_ptr_to_ptr)] + unsafe { + std::mem::transmute(s) + } + } +} + +impl Deref for Unquoted { + type Target = str; + + fn deref(&self) -> &Self::Target { &self.0 } +} + +impl<'a> AsRef for &'a Unquoted { + fn as_ref(&self) -> &'a str { &self.0 } +} diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index e91accdf..5880470a 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -36,33 +36,6 @@ fn increment_wrap() { assert_eq!(res, 0); } -#[test] -fn common_prefix() { - use utils::string; - - let input = ["conduwuit", "conduit", "construct"]; - let output = string::common_prefix(&input); - assert_eq!(output, "con"); -} - -#[test] -fn common_prefix_empty() { - use utils::string; - - let input = ["abcdefg", "hijklmn", "opqrstu"]; - let output = string::common_prefix(&input); - assert_eq!(output, ""); -} - -#[test] -fn common_prefix_none() { - use utils::string; - - let input = []; - let output = string::common_prefix(&input); - assert_eq!(output, ""); -} - #[test] fn checked_add() { use crate::checked; @@ -134,19 +107,3 @@ async fn mutex_map_contend() { tokio::try_join!(join_b, join_a).expect("joined"); assert!(map.is_empty(), "Must be empty"); } - -#[test] -fn camel_to_snake_case_0() { - use utils::string::camel_to_snake_string; - - let res = 
camel_to_snake_string("CamelToSnakeCase"); - assert_eq!(res, "camel_to_snake_case"); -} - -#[test] -fn camel_to_snake_case_1() { - use utils::string::camel_to_snake_string; - - let res = camel_to_snake_string("CAmelTOSnakeCase"); - assert_eq!(res, "camel_tosnake_case"); -} diff --git a/src/service/service.rs b/src/service/service.rs index 065f78a0..03165050 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -7,7 +7,7 @@ use std::{ }; use async_trait::async_trait; -use conduit::{err, error::inspect_log, utils::string::split_once_infallible, Err, Result, Server}; +use conduit::{err, error::inspect_log, utils::string::SplitInfallible, Err, Result, Server}; use database::Database; /// Abstract interface for a Service @@ -147,4 +147,4 @@ where /// Utility for service implementations; see Service::name() in the trait. #[inline] -pub(crate) fn make_name(module_path: &str) -> &str { split_once_infallible(module_path, "::").1 } +pub(crate) fn make_name(module_path: &str) -> &str { module_path.split_once_infallible("::").1 } From 2709995f84cfa9dcaab14ac9ee856aafe06b22c0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 1 Sep 2024 01:53:22 +0000 Subject: [PATCH 0003/1248] add MapExpect to Result add DebugInspect to Result move Result typedef into unit Signed-off-by: Jason Volk --- src/core/debug.rs | 6 ++-- src/core/mod.rs | 4 +-- src/core/result.rs | 6 ++++ src/core/result/debug_inspect.rs | 52 ++++++++++++++++++++++++++++++++ src/core/result/map_expect.rs | 15 +++++++++ 5 files changed, 78 insertions(+), 5 deletions(-) create mode 100644 src/core/result.rs create mode 100644 src/core/result/debug_inspect.rs create mode 100644 src/core/result/map_expect.rs diff --git a/src/core/debug.rs b/src/core/debug.rs index 844445d5..1e36ca8e 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -1,10 +1,10 @@ use std::{any::Any, panic}; -/// Export debug proc_macros +// Export debug proc_macros pub use conduit_macros::recursion_depth; -/// Export all of the 
ancillary tools from here as well. -pub use crate::utils::debug::*; +// Export all of the ancillary tools from here as well. +pub use crate::{result::DebugInspect, utils::debug::*}; /// Log event at given level in debug-mode (when debug-assertions are enabled). /// In release-mode it becomes DEBUG level, and possibly subject to elision. diff --git a/src/core/mod.rs b/src/core/mod.rs index 9898243b..31851f4f 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -7,6 +7,7 @@ pub mod log; pub mod metrics; pub mod mods; pub mod pdu; +pub mod result; pub mod server; pub mod utils; @@ -15,13 +16,12 @@ pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; pub use pdu::{PduBuilder, PduCount, PduEvent}; +pub use result::Result; pub use server::Server; pub use utils::{ctor, dtor, implement}; pub use crate as conduit_core; -pub type Result = std::result::Result; - rustc_flags_capture! {} #[cfg(not(conduit_mods))] diff --git a/src/core/result.rs b/src/core/result.rs new file mode 100644 index 00000000..d58467cf --- /dev/null +++ b/src/core/result.rs @@ -0,0 +1,6 @@ +mod debug_inspect; +mod map_expect; + +pub use self::{debug_inspect::DebugInspect, map_expect::MapExpect}; + +pub type Result = std::result::Result; diff --git a/src/core/result/debug_inspect.rs b/src/core/result/debug_inspect.rs new file mode 100644 index 00000000..ef80979d --- /dev/null +++ b/src/core/result/debug_inspect.rs @@ -0,0 +1,52 @@ +use super::Result; + +/// Inspect Result values with release-mode elision. +pub trait DebugInspect { + /// Inspects an Err contained value in debug-mode. In release-mode closure F + /// is elided. + #[must_use] + fn debug_inspect_err(self, f: F) -> Self; + + /// Inspects an Ok contained value in debug-mode. In release-mode closure F + /// is elided. 
+ #[must_use] + fn debug_inspect(self, f: F) -> Self; +} + +#[cfg(debug_assertions)] +impl DebugInspect for Result { + #[inline] + fn debug_inspect(self, f: F) -> Self + where + F: FnOnce(&T), + { + self.inspect(f) + } + + #[inline] + fn debug_inspect_err(self, f: F) -> Self + where + F: FnOnce(&E), + { + self.inspect_err(f) + } +} + +#[cfg(not(debug_assertions))] +impl DebugInspect for Result { + #[inline] + fn debug_inspect(self, _: F) -> Self + where + F: FnOnce(&T), + { + self + } + + #[inline] + fn debug_inspect_err(self, _: F) -> Self + where + F: FnOnce(&E), + { + self + } +} diff --git a/src/core/result/map_expect.rs b/src/core/result/map_expect.rs new file mode 100644 index 00000000..8ce9195f --- /dev/null +++ b/src/core/result/map_expect.rs @@ -0,0 +1,15 @@ +use std::fmt::Debug; + +use super::Result; + +pub trait MapExpect { + /// Calls expect(msg) on the mapped Result value. This is similar to + /// map(Result::unwrap) but composes an expect call and message without + /// requiring a closure. + fn map_expect(self, msg: &str) -> Option; +} + +impl MapExpect for Option> { + #[inline] + fn map_expect(self, msg: &str) -> Option { self.map(|result| result.expect(msg)) } +} From 3d4b0f10a59008a5cc785fd299937cdbee0dacd1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 7 Sep 2024 22:04:28 +0000 Subject: [PATCH 0004/1248] add expected! macro to checked math expression suite Signed-off-by: Jason Volk --- src/core/utils/math.rs | 36 +++++++++++++++++++++-------- src/service/rooms/auth_chain/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index f9d0de30..8c4b01be 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -7,32 +7,50 @@ use crate::{debug::type_name, err, Err, Error, Result}; /// Checked arithmetic expression. Returns a Result #[macro_export] macro_rules! 
checked { - ($($input:tt)*) => { - $crate::utils::math::checked_ops!($($input)*) + ($($input:tt)+) => { + $crate::utils::math::checked_ops!($($input)+) .ok_or_else(|| $crate::err!(Arithmetic("operation overflowed or result invalid"))) - } + }; } -/// in release-mode. Use for performance when the expression is obviously safe. -/// The check remains in debug-mode for regression analysis. +/// Checked arithmetic expression which panics on failure. This is for +/// expressions which do not meet the threshold for validated! but the caller +/// has no realistic expectation for error and no interest in cluttering the +/// callsite with result handling from checked!. +#[macro_export] +macro_rules! expected { + ($msg:literal, $($input:tt)+) => { + $crate::checked!($($input)+).expect($msg) + }; + + ($($input:tt)+) => { + $crate::expected!("arithmetic expression expectation failure", $($input)+) + }; +} + +/// Unchecked arithmetic expression in release-mode. Use for performance when +/// the expression is obviously safe. The check remains in debug-mode for +/// regression analysis. #[cfg(not(debug_assertions))] #[macro_export] macro_rules! validated { - ($($input:tt)*) => { + ($($input:tt)+) => { //#[allow(clippy::arithmetic_side_effects)] { //Some($($input)*) // .ok_or_else(|| $crate::err!(Arithmetic("this error should never been seen"))) //} //NOTE: remove me when stmt_expr_attributes is stable - $crate::checked!($($input)*) - } + $crate::expected!("validated arithmetic expression failed", $($input)+) + }; } +/// Checked arithmetic expression in debug-mode. Use for performance when +/// the expression is obviously safe. The check is elided in release-mode. #[cfg(debug_assertions)] #[macro_export] macro_rules! 
validated { - ($($input:tt)*) => { $crate::checked!($($input)*) } + ($($input:tt)+) => { $crate::expected!($($input)+) } } /// Returns false if the exponential backoff has expired based on the inputs diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 9a1e7e67..d0bc425f 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -66,7 +66,7 @@ impl Service { .enumerate() { let bucket: usize = short.try_into()?; - let bucket: usize = validated!(bucket % NUM_BUCKETS)?; + let bucket: usize = validated!(bucket % NUM_BUCKETS); buckets[bucket].insert((short, starting_events[i])); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 4f2352f8..04d9559d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1205,7 +1205,7 @@ impl Service { let count = self.services.globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&0_u64.to_be_bytes()); - pdu_id.extend_from_slice(&(validated!(max - count)?).to_be_bytes()); + pdu_id.extend_from_slice(&(validated!(max - count)).to_be_bytes()); // Insert pdu self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?; From aa265f7ca4ee5f6b15cce83a235cf5f9c4317cfc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Sep 2024 04:39:27 +0000 Subject: [PATCH 0005/1248] add err log trait to Result Signed-off-by: Jason Volk --- src/core/error/log.rs | 33 +++++++++++++++--- src/core/result.rs | 4 ++- src/core/result/inspect_log.rs | 60 ++++++++++++++++++++++++++++++++ src/core/result/log_debug_err.rs | 36 +++++++++++++++++++ src/core/result/log_err.rs | 36 +++++++++++++++++++ 5 files changed, 163 insertions(+), 6 deletions(-) create mode 100644 src/core/result/inspect_log.rs create mode 100644 src/core/result/log_debug_err.rs create mode 100644 src/core/result/log_err.rs diff --git a/src/core/error/log.rs b/src/core/error/log.rs index c272bf73..60bd7014 
100644 --- a/src/core/error/log.rs +++ b/src/core/error/log.rs @@ -1,7 +1,8 @@ use std::{convert::Infallible, fmt}; +use tracing::Level; + use super::Error; -use crate::{debug_error, error}; #[inline] pub fn else_log(error: E) -> Result @@ -64,11 +65,33 @@ where } #[inline] -pub fn inspect_log(error: &E) { - error!("{error}"); +pub fn inspect_log(error: &E) { inspect_log_level(error, Level::ERROR); } + +#[inline] +pub fn inspect_debug_log(error: &E) { inspect_debug_log_level(error, Level::ERROR); } + +#[inline] +pub fn inspect_log_level(error: &E, level: Level) { + use crate::{debug, error, info, trace, warn}; + + match level { + Level::ERROR => error!("{error}"), + Level::WARN => warn!("{error}"), + Level::INFO => info!("{error}"), + Level::DEBUG => debug!("{error}"), + Level::TRACE => trace!("{error}"), + } } #[inline] -pub fn inspect_debug_log(error: &E) { - debug_error!("{error:?}"); +pub fn inspect_debug_log_level(error: &E, level: Level) { + use crate::{debug, debug_error, debug_info, debug_warn, trace}; + + match level { + Level::ERROR => debug_error!("{error:?}"), + Level::WARN => debug_warn!("{error:?}"), + Level::INFO => debug_info!("{error:?}"), + Level::DEBUG => debug!("{error:?}"), + Level::TRACE => trace!("{error:?}"), + } } diff --git a/src/core/result.rs b/src/core/result.rs index d58467cf..c3eaf95b 100644 --- a/src/core/result.rs +++ b/src/core/result.rs @@ -1,6 +1,8 @@ mod debug_inspect; +mod log_debug_err; +mod log_err; mod map_expect; -pub use self::{debug_inspect::DebugInspect, map_expect::MapExpect}; +pub use self::{debug_inspect::DebugInspect, log_debug_err::LogDebugErr, log_err::LogErr, map_expect::MapExpect}; pub type Result = std::result::Result; diff --git a/src/core/result/inspect_log.rs b/src/core/result/inspect_log.rs new file mode 100644 index 00000000..577761c5 --- /dev/null +++ b/src/core/result/inspect_log.rs @@ -0,0 +1,60 @@ +use std::fmt; + +use tracing::Level; + +use super::Result; +use crate::error; + +pub trait ErrLog +where + 
E: fmt::Display, +{ + fn log_err(self, level: Level) -> Self; + + fn err_log(self) -> Self + where + Self: Sized, + { + self.log_err(Level::ERROR) + } +} + +pub trait ErrDebugLog +where + E: fmt::Debug, +{ + fn log_err_debug(self, level: Level) -> Self; + + fn err_debug_log(self) -> Self + where + Self: Sized, + { + self.log_err_debug(Level::ERROR) + } +} + +impl ErrLog for Result +where + E: fmt::Display, +{ + #[inline] + fn log_err(self, level: Level) -> Self + where + Self: Sized, + { + self.inspect_err(|error| error::inspect_log_level(&error, level)) + } +} + +impl ErrDebugLog for Result +where + E: fmt::Debug, +{ + #[inline] + fn log_err_debug(self, level: Level) -> Self + where + Self: Sized, + { + self.inspect_err(|error| error::inspect_debug_log_level(&error, level)) + } +} diff --git a/src/core/result/log_debug_err.rs b/src/core/result/log_debug_err.rs new file mode 100644 index 00000000..be2000ae --- /dev/null +++ b/src/core/result/log_debug_err.rs @@ -0,0 +1,36 @@ +use std::fmt; + +use tracing::Level; + +use super::{DebugInspect, Result}; +use crate::error; + +pub trait LogDebugErr +where + E: fmt::Debug, +{ + #[must_use] + fn err_debug_log(self, level: Level) -> Self; + + #[inline] + #[must_use] + fn log_debug_err(self) -> Self + where + Self: Sized, + { + self.err_debug_log(Level::ERROR) + } +} + +impl LogDebugErr for Result +where + E: fmt::Debug, +{ + #[inline] + fn err_debug_log(self, level: Level) -> Self + where + Self: Sized, + { + self.debug_inspect_err(|error| error::inspect_debug_log_level(&error, level)) + } +} diff --git a/src/core/result/log_err.rs b/src/core/result/log_err.rs new file mode 100644 index 00000000..079571f5 --- /dev/null +++ b/src/core/result/log_err.rs @@ -0,0 +1,36 @@ +use std::fmt; + +use tracing::Level; + +use super::Result; +use crate::error; + +pub trait LogErr +where + E: fmt::Display, +{ + #[must_use] + fn err_log(self, level: Level) -> Self; + + #[inline] + #[must_use] + fn log_err(self) -> Self + where + Self: 
Sized, + { + self.err_log(Level::ERROR) + } +} + +impl LogErr for Result +where + E: fmt::Display, +{ + #[inline] + fn err_log(self, level: Level) -> Self + where + Self: Sized, + { + self.inspect_err(|error| error::inspect_log_level(&error, level)) + } +} From bd75ff65c96427429a0334b6d899f2b630fe5f8a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Sep 2024 06:53:15 +0000 Subject: [PATCH 0006/1248] move common_elements util into unit Signed-off-by: Jason Volk --- src/core/utils/algorithm.rs | 25 +++++++++++++++++++++++++ src/core/utils/mod.rs | 28 +++------------------------- 2 files changed, 28 insertions(+), 25 deletions(-) create mode 100644 src/core/utils/algorithm.rs diff --git a/src/core/utils/algorithm.rs b/src/core/utils/algorithm.rs new file mode 100644 index 00000000..9bc1bc8a --- /dev/null +++ b/src/core/utils/algorithm.rs @@ -0,0 +1,25 @@ +use std::cmp::Ordering; + +#[allow(clippy::impl_trait_in_params)] +pub fn common_elements( + mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, +) -> Option>> { + let first_iterator = iterators.next()?; + let mut other_iterators = iterators.map(Iterator::peekable).collect::>(); + + Some(first_iterator.filter(move |target| { + other_iterators.iter_mut().all(|it| { + while let Some(element) = it.peek() { + match check_order(element, target) { + Ordering::Greater => return false, // We went too far + Ordering::Equal => return true, // Element is in both iters + Ordering::Less => { + // Keep searching + it.next(); + }, + } + } + false + }) + })) +} diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 1556646e..29d0b87b 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -1,3 +1,4 @@ +pub mod algorithm; pub mod bytes; pub mod content_disposition; pub mod debug; @@ -13,9 +14,10 @@ pub mod sys; mod tests; pub mod time; -use std::cmp::{self, Ordering}; +use std::cmp; pub use ::ctor::{ctor, dtor}; +pub use algorithm::common_elements; pub use bytes::{increment, 
u64_from_bytes, u64_from_u8, u64_from_u8x8}; pub use conduit_macros::implement; pub use debug::slice_truncated as debug_slice_truncated; @@ -47,27 +49,3 @@ pub fn generate_keypair() -> Vec { ); value } - -#[allow(clippy::impl_trait_in_params)] -pub fn common_elements( - mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, -) -> Option>> { - let first_iterator = iterators.next()?; - let mut other_iterators = iterators.map(Iterator::peekable).collect::>(); - - Some(first_iterator.filter(move |target| { - other_iterators.iter_mut().all(|it| { - while let Some(element) = it.peek() { - match check_order(element, target) { - Ordering::Greater => return false, // We went too far - Ordering::Equal => return true, // Element is in both iters - Ordering::Less => { - // Keep searching - it.next(); - }, - } - } - false - }) - })) -} From 63053640f1e5789719b3f88f92b3006ae3caecf4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 13 Sep 2024 18:24:33 +0000 Subject: [PATCH 0007/1248] add util functors for is_zero/is_equal; move clamp to math utils Signed-off-by: Jason Volk --- src/core/utils/math.rs | 35 +++++++++++++++++++++++++++++++++++ src/core/utils/mod.rs | 12 ++---------- src/macros/utils.rs | 7 ++----- 3 files changed, 39 insertions(+), 15 deletions(-) diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index 8c4b01be..215de339 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -53,6 +53,38 @@ macro_rules! validated { ($($input:tt)+) => { $crate::expected!($($input)+) } } +/// Functor for equality to zero +#[macro_export] +macro_rules! is_zero { + () => { + $crate::is_matching!(0) + }; +} + +/// Functor for equality i.e. .is_some_and(is_equal!(2)) +#[macro_export] +macro_rules! is_equal_to { + ($val:expr) => { + |x| (x == $val) + }; +} + +/// Functor for less i.e. .is_some_and(is_less_than!(2)) +#[macro_export] +macro_rules! is_less_than { + ($val:expr) => { + |x| (x < $val) + }; +} + +/// Functor for matches! i.e. 
.is_some_and(is_matching!('A'..='Z')) +#[macro_export] +macro_rules! is_matching { + ($val:expr) => { + |x| matches!(x, $val) + }; +} + /// Returns false if the exponential backoff has expired based on the inputs #[inline] #[must_use] @@ -118,3 +150,6 @@ fn try_into_err, Src>(e: >::Error) -> Erro type_name::() )) } + +#[inline] +pub fn clamp(val: T, min: T, max: T) -> T { cmp::min(cmp::max(val, min), max) } diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 29d0b87b..03b755e9 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -14,8 +14,6 @@ pub mod sys; mod tests; pub mod time; -use std::cmp; - pub use ::ctor::{ctor, dtor}; pub use algorithm::common_elements; pub use bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}; @@ -24,6 +22,7 @@ pub use debug::slice_truncated as debug_slice_truncated; pub use hash::calculate_hash; pub use html::Escape as HtmlEscape; pub use json::{deserialize_from_str, to_canonical_object}; +pub use math::clamp; pub use mutex_map::{Guard as MutexMapGuard, MutexMap}; pub use rand::string as random_string; pub use string::{str_from_bytes, string_from_bytes}; @@ -31,14 +30,7 @@ pub use sys::available_parallelism; pub use time::now_millis as millis_since_unix_epoch; #[inline] -pub fn clamp(val: T, min: T, max: T) -> T { cmp::min(cmp::max(val, min), max) } - -#[inline] -pub fn exchange(state: &mut T, source: T) -> T { - let ret = state.clone(); - *state = source; - ret -} +pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, source) } #[must_use] pub fn generate_keypair() -> Vec { diff --git a/src/macros/utils.rs b/src/macros/utils.rs index 58074e3a..197dd90e 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -41,8 +41,5 @@ pub(crate) fn camel_to_snake_string(s: &str) -> String { output } -pub(crate) fn exchange(state: &mut T, source: T) -> T { - let ret = state.clone(); - *state = source; - ret -} +#[inline] +pub(crate) fn exchange(state: &mut T, source: T) -> T { 
std::mem::replace(state, source) } From a5822ebc274ddf92ba75f4f62d4e527078447baf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 13 Sep 2024 18:55:56 +0000 Subject: [PATCH 0008/1248] add missing err! case Signed-off-by: Jason Volk --- src/core/error/err.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/core/error/err.rs b/src/core/error/err.rs index b3d0240e..4972e92a 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -85,6 +85,10 @@ macro_rules! err { $crate::error::Error::$variant($crate::err_log!(buf, $level, $($args)+)) }}; + ($variant:ident($($args:ident),+)) => { + $crate::error::Error::$variant($($args),+) + }; + ($variant:ident($($args:tt)+)) => { $crate::error::Error::$variant($crate::format_maybe!($($args)+)) }; From f7ce4db0b00bc24be6c127895e16ba29a248c8a4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 12 Sep 2024 00:01:25 +0000 Subject: [PATCH 0009/1248] add is_not_found functor to error; tweak status code matcher Signed-off-by: Jason Volk --- src/core/error/mod.rs | 19 +++++++++++++------ src/core/result.rs | 6 +++++- src/core/result/not_found.rs | 12 ++++++++++++ 3 files changed, 30 insertions(+), 7 deletions(-) create mode 100644 src/core/result/not_found.rs diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 92dbdfe3..48b9b58f 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -141,19 +141,22 @@ impl Error { use ruma::api::client::error::ErrorKind::Unknown; match self { - Self::Federation(_, error) => response::ruma_error_kind(error).clone(), + Self::Federation(_, error) | Self::Ruma(error) => response::ruma_error_kind(error).clone(), Self::BadRequest(kind, ..) | Self::Request(kind, ..) 
=> kind.clone(), _ => Unknown, } } pub fn status_code(&self) -> http::StatusCode { + use http::StatusCode; + match self { - Self::Federation(_, ref error) | Self::Ruma(ref error) => error.status_code, - Self::Request(ref kind, _, code) => response::status_code(kind, *code), - Self::BadRequest(ref kind, ..) => response::bad_request_code(kind), - Self::Conflict(_) => http::StatusCode::CONFLICT, - _ => http::StatusCode::INTERNAL_SERVER_ERROR, + Self::Federation(_, error) | Self::Ruma(error) => error.status_code, + Self::Request(kind, _, code) => response::status_code(kind, *code), + Self::BadRequest(kind, ..) => response::bad_request_code(kind), + Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), + Self::Conflict(_) => StatusCode::CONFLICT, + _ => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -176,3 +179,7 @@ impl From for Error { pub fn infallible(_e: &Infallible) { panic!("infallible error should never exist"); } + +#[inline] +#[must_use] +pub fn is_not_found(e: &Error) -> bool { e.status_code() == http::StatusCode::NOT_FOUND } diff --git a/src/core/result.rs b/src/core/result.rs index c3eaf95b..41d1d66c 100644 --- a/src/core/result.rs +++ b/src/core/result.rs @@ -2,7 +2,11 @@ mod debug_inspect; mod log_debug_err; mod log_err; mod map_expect; +mod not_found; -pub use self::{debug_inspect::DebugInspect, log_debug_err::LogDebugErr, log_err::LogErr, map_expect::MapExpect}; +pub use self::{ + debug_inspect::DebugInspect, log_debug_err::LogDebugErr, log_err::LogErr, map_expect::MapExpect, + not_found::NotFound, +}; pub type Result = std::result::Result; diff --git a/src/core/result/not_found.rs b/src/core/result/not_found.rs new file mode 100644 index 00000000..69ce821b --- /dev/null +++ b/src/core/result/not_found.rs @@ -0,0 +1,12 @@ +use super::Result; +use crate::{error, Error}; + +pub trait NotFound { + #[must_use] + fn is_not_found(&self) -> bool; +} + +impl NotFound for Result { + #[inline] + fn is_not_found(&self) -> bool { 
self.as_ref().is_err_and(error::is_not_found) } +} From a5de27442a0c68a3ff2b86d15e82e5065884b787 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 12 Sep 2024 00:59:08 +0000 Subject: [PATCH 0010/1248] re-export crates used by error macros Signed-off-by: Jason Volk --- src/core/error/err.rs | 30 +++++++++++++++--------------- src/core/mod.rs | 3 +++ 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 4972e92a..82bb40b0 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -44,34 +44,34 @@ macro_rules! err { (Request(Forbidden($level:ident!($($args:tt)+)))) => {{ let mut buf = String::new(); $crate::error::Error::Request( - ::ruma::api::client::error::ErrorKind::forbidden(), + $crate::ruma::api::client::error::ErrorKind::forbidden(), $crate::err_log!(buf, $level, $($args)+), - ::http::StatusCode::BAD_REQUEST + $crate::http::StatusCode::BAD_REQUEST ) }}; (Request(Forbidden($($args:tt)+))) => { $crate::error::Error::Request( - ::ruma::api::client::error::ErrorKind::forbidden(), + $crate::ruma::api::client::error::ErrorKind::forbidden(), $crate::format_maybe!($($args)+), - ::http::StatusCode::BAD_REQUEST + $crate::http::StatusCode::BAD_REQUEST ) }; (Request($variant:ident($level:ident!($($args:tt)+)))) => {{ let mut buf = String::new(); $crate::error::Error::Request( - ::ruma::api::client::error::ErrorKind::$variant, + $crate::ruma::api::client::error::ErrorKind::$variant, $crate::err_log!(buf, $level, $($args)+), - ::http::StatusCode::BAD_REQUEST + $crate::http::StatusCode::BAD_REQUEST ) }}; (Request($variant:ident($($args:tt)+))) => { $crate::error::Error::Request( - ::ruma::api::client::error::ErrorKind::$variant, + $crate::ruma::api::client::error::ErrorKind::$variant, $crate::format_maybe!($($args)+), - ::http::StatusCode::BAD_REQUEST + $crate::http::StatusCode::BAD_REQUEST ) }; @@ -113,7 +113,7 @@ macro_rules! 
err_log { ($out:ident, $level:ident, $($fields:tt)+) => {{ use std::{fmt, fmt::Write}; - use ::tracing::{ + use $crate::tracing::{ callsite, callsite2, level_enabled, metadata, valueset, Callsite, Event, __macro_support, __tracing_log, field::{Field, ValueSet, Visit}, @@ -169,25 +169,25 @@ macro_rules! err_log { macro_rules! err_lev { (debug_warn) => { if $crate::debug::logging() { - ::tracing::Level::WARN + $crate::tracing::Level::WARN } else { - ::tracing::Level::DEBUG + $crate::tracing::Level::DEBUG } }; (debug_error) => { if $crate::debug::logging() { - ::tracing::Level::ERROR + $crate::tracing::Level::ERROR } else { - ::tracing::Level::DEBUG + $crate::tracing::Level::DEBUG } }; (warn) => { - ::tracing::Level::WARN + $crate::tracing::Level::WARN }; (error) => { - ::tracing::Level::ERROR + $crate::tracing::Level::ERROR }; } diff --git a/src/core/mod.rs b/src/core/mod.rs index 31851f4f..e4553186 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -11,7 +11,10 @@ pub mod result; pub mod server; pub mod utils; +pub use ::http; +pub use ::ruma; pub use ::toml; +pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; From 60010140784a286e53aa560cad6604caded4257e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 13 Sep 2024 07:40:22 +0000 Subject: [PATCH 0011/1248] add UnwrapInfallible to Result Signed-off-by: Jason Volk --- src/core/result.rs | 3 ++- src/core/result/unwrap_infallible.rs | 17 +++++++++++++++++ src/router/serve/unix.rs | 8 ++------ 3 files changed, 21 insertions(+), 7 deletions(-) create mode 100644 src/core/result/unwrap_infallible.rs diff --git a/src/core/result.rs b/src/core/result.rs index 41d1d66c..96a34b8a 100644 --- a/src/core/result.rs +++ b/src/core/result.rs @@ -3,10 +3,11 @@ mod log_debug_err; mod log_err; mod map_expect; mod not_found; +mod unwrap_infallible; pub use self::{ debug_inspect::DebugInspect, log_debug_err::LogDebugErr, log_err::LogErr, 
map_expect::MapExpect, - not_found::NotFound, + not_found::NotFound, unwrap_infallible::UnwrapInfallible, }; pub type Result = std::result::Result; diff --git a/src/core/result/unwrap_infallible.rs b/src/core/result/unwrap_infallible.rs new file mode 100644 index 00000000..99309e02 --- /dev/null +++ b/src/core/result/unwrap_infallible.rs @@ -0,0 +1,17 @@ +use std::convert::Infallible; + +use super::{DebugInspect, Result}; +use crate::error; + +pub trait UnwrapInfallible { + fn unwrap_infallible(self) -> T; +} + +impl UnwrapInfallible for Result { + #[inline] + fn unwrap_infallible(self) -> T { + // SAFETY: Branchless unwrap for errors that can never happen. In debug + // mode this is asserted. + unsafe { self.debug_inspect_err(error::infallible).unwrap_unchecked() } + } +} diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index fb011f18..5df41b61 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -10,7 +10,7 @@ use axum::{ extract::{connect_info::IntoMakeServiceWithConnectInfo, Request}, Router, }; -use conduit::{debug, debug_error, error::infallible, info, trace, warn, Err, Result, Server}; +use conduit::{debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server}; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ rt::{TokioExecutor, TokioIo}, @@ -62,11 +62,7 @@ async fn accept( let socket = TokioIo::new(socket); trace!(?listener, ?socket, ?remote, "accepted"); - let called = app - .call(NULL_ADDR) - .await - .inspect_err(infallible) - .expect("infallible"); + let called = app.call(NULL_ADDR).await.unwrap_infallible(); let service = move |req: Request| called.clone().oneshot(req); let handler = service_fn(service); From 946ca364e032be8ca2529099b415990262c977fd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 8 Aug 2024 17:18:30 +0000 Subject: [PATCH 0012/1248] Database Refactor combine service/users data w/ mod unit split sliding sync related out of service/users instrument 
database entry points remove increment crap from database interface de-wrap all database get() calls de-wrap all database insert() calls de-wrap all database remove() calls refactor database interface for async streaming add query key serializer for database implement Debug for result handle add query deserializer for database add deserialization trait for option handle start a stream utils suite de-wrap/asyncify/type-query count_one_time_keys() de-wrap/asyncify users count add admin query users command suite de-wrap/asyncify users exists de-wrap/partially asyncify user filter related asyncify/de-wrap users device/keys related asyncify/de-wrap user auth/misc related asyncify/de-wrap users blurhash asyncify/de-wrap account_data get; merge Data into Service partial asyncify/de-wrap uiaa; merge Data into Service partially asyncify/de-wrap transaction_ids get; merge Data into Service partially asyncify/de-wrap key_backups; merge Data into Service asyncify/de-wrap pusher service getters; merge Data into Service asyncify/de-wrap rooms alias getters/some iterators asyncify/de-wrap rooms directory getters/iterator partially asyncify/de-wrap rooms lazy-loading partially asyncify/de-wrap rooms metadata asyncify/dewrap rooms outlier asyncify/dewrap rooms pdu_metadata dewrap/partially asyncify rooms read receipt de-wrap rooms search service de-wrap/partially asyncify rooms user service partial de-wrap rooms state_compressor de-wrap rooms state_cache de-wrap room state et al de-wrap rooms timeline service additional users device/keys related de-wrap/asyncify sender asyncify services refactor database to TryFuture/TryStream refactor services for TryFuture/TryStream asyncify api handlers additional asyncification for admin module abstract stream related; support reverse streams additional stream conversions asyncify state-res related Signed-off-by: Jason Volk --- Cargo.lock | 53 +- Cargo.toml | 7 +- clippy.toml | 2 +- src/admin/Cargo.toml | 3 +- src/admin/check/commands.rs | 9 +- 
src/admin/debug/commands.rs | 100 +- src/admin/federation/commands.rs | 13 +- src/admin/media/commands.rs | 4 +- src/admin/processor.rs | 2 +- src/admin/query/account_data.rs | 6 +- src/admin/query/appservice.rs | 6 +- src/admin/query/globals.rs | 6 +- src/admin/query/presence.rs | 5 +- src/admin/query/pusher.rs | 2 +- src/admin/query/room_alias.rs | 13 +- src/admin/query/room_state_cache.rs | 93 +- src/admin/query/sending.rs | 9 +- src/admin/query/users.rs | 351 +++- src/admin/room/alias.rs | 118 +- src/admin/room/commands.rs | 47 +- src/admin/room/directory.rs | 22 +- src/admin/room/info.rs | 48 +- src/admin/room/mod.rs | 6 + src/admin/room/moderation.rs | 343 ++-- src/admin/user/commands.rs | 192 ++- src/admin/utils.rs | 22 +- src/api/Cargo.toml | 2 +- src/api/client/account.rs | 182 ++- src/api/client/alias.rs | 28 +- src/api/client/backup.rs | 230 +-- src/api/client/config.rs | 40 +- src/api/client/context.rs | 108 +- src/api/client/device.rs | 46 +- src/api/client/directory.rs | 188 +-- src/api/client/filter.rs | 25 +- src/api/client/keys.rs | 186 ++- src/api/client/membership.rs | 486 +++--- src/api/client/message.rs | 196 +-- src/api/client/presence.rs | 14 +- src/api/client/profile.rs | 200 ++- src/api/client/push.rs | 182 ++- src/api/client/read_marker.rs | 90 +- src/api/client/relations.rs | 90 +- src/api/client/report.rs | 20 +- src/api/client/room.rs | 105 +- src/api/client/search.rs | 108 +- src/api/client/session.rs | 75 +- src/api/client/state.rs | 63 +- src/api/client/sync.rs | 1091 +++++++------ src/api/client/tag.rs | 45 +- src/api/client/threads.rs | 19 +- src/api/client/to_device.rs | 52 +- src/api/client/typing.rs | 3 +- src/api/client/unstable.rs | 168 +- src/api/client/unversioned.rs | 3 +- src/api/client/user_directory.rs | 47 +- src/api/router.rs | 322 ++-- src/api/router/args.rs | 26 +- src/api/router/auth.rs | 10 +- src/api/router/handler.rs | 38 +- src/api/router/response.rs | 9 +- src/api/server/backfill.rs | 84 +- 
src/api/server/event.rs | 39 +- src/api/server/event_auth.rs | 33 +- src/api/server/get_missing_events.rs | 31 +- src/api/server/hierarchy.rs | 2 +- src/api/server/invite.rs | 38 +- src/api/server/make_join.rs | 87 +- src/api/server/make_leave.rs | 37 +- src/api/server/openid.rs | 5 +- src/api/server/query.rs | 36 +- src/api/server/send.rs | 196 +-- src/api/server/send_join.rs | 71 +- src/api/server/send_leave.rs | 20 +- src/api/server/state.rs | 65 +- src/api/server/state_ids.rs | 37 +- src/api/server/user.rs | 51 +- src/core/Cargo.toml | 1 + src/core/error/mod.rs | 4 +- src/core/pdu/mod.rs | 43 +- src/core/result/log_debug_err.rs | 18 +- src/core/result/log_err.rs | 20 +- src/core/utils/algorithm.rs | 25 - src/core/utils/mod.rs | 32 +- src/core/utils/set.rs | 47 + src/core/utils/stream/cloned.rs | 20 + src/core/utils/stream/expect.rs | 17 + src/core/utils/stream/ignore.rs | 21 + src/core/utils/stream/iter_stream.rs | 27 + src/core/utils/stream/mod.rs | 13 + src/core/utils/stream/ready.rs | 109 ++ src/core/utils/stream/try_ready.rs | 35 + src/core/utils/tests.rs | 130 ++ src/database/Cargo.toml | 3 + src/database/database.rs | 2 +- src/database/de.rs | 261 +++ src/database/deserialized.rs | 34 + src/database/engine.rs | 2 +- src/database/handle.rs | 89 +- src/database/iter.rs | 110 -- src/database/keyval.rs | 83 + src/database/map.rs | 304 ++-- src/database/map/count.rs | 36 + src/database/map/keys.rs | 21 + src/database/map/keys_from.rs | 49 + src/database/map/keys_prefix.rs | 54 + src/database/map/rev_keys.rs | 21 + src/database/map/rev_keys_from.rs | 49 + src/database/map/rev_keys_prefix.rs | 54 + src/database/map/rev_stream.rs | 29 + src/database/map/rev_stream_from.rs | 68 + src/database/map/rev_stream_prefix.rs | 74 + src/database/map/stream.rs | 28 + src/database/map/stream_from.rs | 68 + src/database/map/stream_prefix.rs | 74 + src/database/mod.rs | 28 +- src/database/ser.rs | 315 ++++ src/database/slice.rs | 57 - src/database/stream.rs | 122 ++ 
src/database/stream/items.rs | 44 + src/database/stream/items_rev.rs | 44 + src/database/stream/keys.rs | 44 + src/database/stream/keys_rev.rs | 44 + src/database/util.rs | 12 + src/service/Cargo.toml | 2 +- src/service/account_data/data.rs | 152 -- src/service/account_data/mod.rs | 164 +- src/service/admin/console.rs | 2 +- src/service/admin/create.rs | 2 +- src/service/admin/grant.rs | 216 +-- src/service/admin/mod.rs | 104 +- src/service/appservice/data.rs | 28 +- src/service/appservice/mod.rs | 49 +- src/service/emergency/mod.rs | 30 +- src/service/globals/data.rs | 121 +- src/service/globals/migrations.rs | 741 ++------- src/service/globals/mod.rs | 8 +- src/service/key_backups/data.rs | 346 ---- src/service/key_backups/mod.rs | 360 ++++- src/service/manager.rs | 2 +- src/service/media/data.rs | 100 +- src/service/media/migrations.rs | 33 +- src/service/media/mod.rs | 15 +- src/service/media/preview.rs | 8 +- src/service/media/thumbnail.rs | 4 +- src/service/mod.rs | 1 + src/service/presence/data.rs | 111 +- src/service/presence/mod.rs | 63 +- src/service/presence/presence.rs | 12 +- src/service/pusher/data.rs | 77 - src/service/pusher/mod.rs | 124 +- src/service/resolver/actual.rs | 6 +- src/service/rooms/alias/data.rs | 125 -- src/service/rooms/alias/mod.rs | 147 +- src/service/rooms/auth_chain/data.rs | 21 +- src/service/rooms/auth_chain/mod.rs | 45 +- src/service/rooms/directory/data.rs | 39 - src/service/rooms/directory/mod.rs | 40 +- src/service/rooms/event_handler/mod.rs | 1220 +++++++------- .../rooms/event_handler/parse_incoming_pdu.rs | 6 +- src/service/rooms/lazy_loading/data.rs | 65 - src/service/rooms/lazy_loading/mod.rs | 112 +- src/service/rooms/metadata/data.rs | 110 -- src/service/rooms/metadata/mod.rs | 99 +- src/service/rooms/outlier/data.rs | 42 - src/service/rooms/outlier/mod.rs | 59 +- src/service/rooms/pdu_metadata/data.rs | 76 +- src/service/rooms/pdu_metadata/mod.rs | 171 +- src/service/rooms/read_receipt/data.rs | 148 +- 
src/service/rooms/read_receipt/mod.rs | 49 +- src/service/rooms/search/data.rs | 73 +- src/service/rooms/search/mod.rs | 17 +- src/service/rooms/short/data.rs | 212 ++- src/service/rooms/short/mod.rs | 36 +- src/service/rooms/spaces/mod.rs | 174 +- src/service/rooms/state/data.rs | 71 +- src/service/rooms/state/mod.rs | 276 ++-- src/service/rooms/state_accessor/data.rs | 156 +- src/service/rooms/state_accessor/mod.rs | 356 +++-- src/service/rooms/state_cache/data.rs | 646 +------- src/service/rooms/state_cache/mod.rs | 475 ++++-- src/service/rooms/state_compressor/data.rs | 20 +- src/service/rooms/state_compressor/mod.rs | 85 +- src/service/rooms/threads/data.rs | 78 +- src/service/rooms/threads/mod.rs | 38 +- src/service/rooms/timeline/data.rs | 329 ++-- src/service/rooms/timeline/mod.rs | 656 ++++---- src/service/rooms/typing/mod.rs | 33 +- src/service/rooms/user/data.rs | 146 +- src/service/rooms/user/mod.rs | 46 +- src/service/sending/data.rs | 194 +-- src/service/sending/mod.rs | 113 +- src/service/sending/sender.rs | 278 ++-- src/service/server_keys/mod.rs | 26 +- src/service/services.rs | 4 +- src/service/sync/mod.rs | 233 +++ src/service/transaction_ids/data.rs | 44 - src/service/transaction_ids/mod.rs | 44 +- src/service/uiaa/data.rs | 87 - src/service/uiaa/mod.rs | 313 ++-- src/service/updates/mod.rs | 90 +- src/service/users/data.rs | 1098 ------------- src/service/users/mod.rs | 1413 +++++++++++------ 203 files changed, 12202 insertions(+), 10709 deletions(-) delete mode 100644 src/core/utils/algorithm.rs create mode 100644 src/core/utils/set.rs create mode 100644 src/core/utils/stream/cloned.rs create mode 100644 src/core/utils/stream/expect.rs create mode 100644 src/core/utils/stream/ignore.rs create mode 100644 src/core/utils/stream/iter_stream.rs create mode 100644 src/core/utils/stream/mod.rs create mode 100644 src/core/utils/stream/ready.rs create mode 100644 src/core/utils/stream/try_ready.rs create mode 100644 src/database/de.rs create mode 
100644 src/database/deserialized.rs delete mode 100644 src/database/iter.rs create mode 100644 src/database/keyval.rs create mode 100644 src/database/map/count.rs create mode 100644 src/database/map/keys.rs create mode 100644 src/database/map/keys_from.rs create mode 100644 src/database/map/keys_prefix.rs create mode 100644 src/database/map/rev_keys.rs create mode 100644 src/database/map/rev_keys_from.rs create mode 100644 src/database/map/rev_keys_prefix.rs create mode 100644 src/database/map/rev_stream.rs create mode 100644 src/database/map/rev_stream_from.rs create mode 100644 src/database/map/rev_stream_prefix.rs create mode 100644 src/database/map/stream.rs create mode 100644 src/database/map/stream_from.rs create mode 100644 src/database/map/stream_prefix.rs create mode 100644 src/database/ser.rs delete mode 100644 src/database/slice.rs create mode 100644 src/database/stream.rs create mode 100644 src/database/stream/items.rs create mode 100644 src/database/stream/items_rev.rs create mode 100644 src/database/stream/keys.rs create mode 100644 src/database/stream/keys_rev.rs delete mode 100644 src/service/account_data/data.rs delete mode 100644 src/service/key_backups/data.rs delete mode 100644 src/service/pusher/data.rs delete mode 100644 src/service/rooms/alias/data.rs delete mode 100644 src/service/rooms/directory/data.rs delete mode 100644 src/service/rooms/lazy_loading/data.rs delete mode 100644 src/service/rooms/metadata/data.rs delete mode 100644 src/service/rooms/outlier/data.rs create mode 100644 src/service/sync/mod.rs delete mode 100644 src/service/transaction_ids/data.rs delete mode 100644 src/service/uiaa/data.rs delete mode 100644 src/service/users/data.rs diff --git a/Cargo.lock b/Cargo.lock index 6386f968..08e0498a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -626,10 +626,11 @@ dependencies = [ "clap", "conduit_api", "conduit_core", + "conduit_database", "conduit_macros", "conduit_service", "const-str", - "futures-util", + "futures", "log", 
"ruma", "serde_json", @@ -652,7 +653,7 @@ dependencies = [ "conduit_database", "conduit_service", "const-str", - "futures-util", + "futures", "hmac", "http", "http-body-util", @@ -689,6 +690,7 @@ dependencies = [ "cyborgtime", "either", "figment", + "futures", "hardened_malloc-rs", "http", "http-body-util", @@ -726,8 +728,11 @@ version = "0.4.7" dependencies = [ "conduit_core", "const-str", + "futures", "log", "rust-rocksdb-uwu", + "serde", + "serde_json", "tokio", "tracing", ] @@ -784,7 +789,7 @@ dependencies = [ "conduit_core", "conduit_database", "const-str", - "futures-util", + "futures", "hickory-resolver", "http", "image", @@ -1283,6 +1288,20 @@ dependencies = [ "new_debug_unreachable", ] +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -1345,6 +1364,7 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ + "futures-channel", "futures-core", "futures-io", "futures-macro", @@ -2953,7 +2973,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "assign", "js_int", @@ -2975,7 +2995,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" 
+source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "js_int", "ruma-common", @@ -2987,7 +3007,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "as_variant", "assign", @@ -3010,7 +3030,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "as_variant", "base64 0.22.1", @@ -3040,7 +3060,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3064,7 +3084,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "bytes", "http", @@ -3082,7 +3102,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "js_int", "thiserror", @@ -3091,7 +3111,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "js_int", "ruma-common", @@ -3101,7 +3121,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "cfg-if", "once_cell", @@ -3117,7 +3137,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "js_int", "ruma-common", @@ -3129,7 +3149,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "headers", "http", @@ -3142,7 +3162,7 
@@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3158,8 +3178,9 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=9900d0676564883cfade556d6e8da2a2c9061efd#9900d0676564883cfade556d6e8da2a2c9061efd" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" dependencies = [ + "futures-util", "itertools 0.12.1", "js_int", "ruma-common", diff --git a/Cargo.toml b/Cargo.toml index b75c4975..3bfb3bc8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -210,9 +210,10 @@ features = [ "string", ] -[workspace.dependencies.futures-util] +[workspace.dependencies.futures] version = "0.3.30" default-features = false +features = ["std"] [workspace.dependencies.tokio] version = "1.40.0" @@ -314,7 +315,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "9900d0676564883cfade556d6e8da2a2c9061efd" +rev = "e7db44989d68406393270d3a91815597385d3acb" features = [ "compat", "rand", @@ -463,7 +464,6 @@ version = "1.0.36" [workspace.dependencies.proc-macro2] version = "1.0.89" - # # Patches # @@ -828,6 +828,7 @@ missing_panics_doc = { level = "allow", priority = 1 } module_name_repetitions = { level = "allow", priority = 1 } no_effect_underscore_binding = { level = "allow", priority = 1 } similar_names = { level = "allow", priority = 1 } +single_match_else = { level = "allow", priority = 1 } struct_field_names = { level = "allow", priority = 1 } unnecessary_wraps = { level = "allow", priority = 1 } 
unused_async = { level = "allow", priority = 1 } diff --git a/clippy.toml b/clippy.toml index c942b93c..08641fcc 100644 --- a/clippy.toml +++ b/clippy.toml @@ -2,6 +2,6 @@ array-size-threshold = 4096 cognitive-complexity-threshold = 94 # TODO reduce me ALARA excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5 future-size-threshold = 7745 # TODO reduce me ALARA -stack-size-threshold = 144000 # reduce me ALARA +stack-size-threshold = 196608 # reduce me ALARA too-many-lines-threshold = 700 # TODO reduce me to <= 100 type-complexity-threshold = 250 # reduce me to ~200 diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml index d756b3cb..f5cab449 100644 --- a/src/admin/Cargo.toml +++ b/src/admin/Cargo.toml @@ -29,10 +29,11 @@ release_max_log_level = [ clap.workspace = true conduit-api.workspace = true conduit-core.workspace = true +conduit-database.workspace = true conduit-macros.workspace = true conduit-service.workspace = true const-str.workspace = true -futures-util.workspace = true +futures.workspace = true log.workspace = true ruma.workspace = true serde_json.workspace = true diff --git a/src/admin/check/commands.rs b/src/admin/check/commands.rs index 0a983046..88fca462 100644 --- a/src/admin/check/commands.rs +++ b/src/admin/check/commands.rs @@ -1,5 +1,6 @@ use conduit::Result; use conduit_macros::implement; +use futures::StreamExt; use ruma::events::room::message::RoomMessageEventContent; use crate::Command; @@ -10,14 +11,12 @@ use crate::Command; #[implement(Command, params = "<'_>")] pub(super) async fn check_all_users(&self) -> Result { let timer = tokio::time::Instant::now(); - let results = self.services.users.db.iter(); + let users = self.services.users.iter().collect::>().await; let query_time = timer.elapsed(); - let users = results.collect::>(); - let total = users.len(); - let err_count = users.iter().filter(|user| user.is_err()).count(); - let ok_count = users.iter().filter(|user| user.is_ok()).count(); + let err_count = 
users.iter().filter(|_user| false).count(); + let ok_count = users.iter().filter(|_user| true).count(); let message = format!( "Database query completed in {query_time:?}:\n\n```\nTotal entries: {total:?}\nFailure/Invalid user count: \ diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 2d967006..65c9bc71 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -7,6 +7,7 @@ use std::{ use api::client::validate_and_add_event_id; use conduit::{debug, debug_error, err, info, trace, utils, warn, Error, PduEvent, Result}; +use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::event::get_room_state}, events::room::message::RoomMessageEventContent, @@ -27,7 +28,7 @@ pub(super) async fn echo(&self, message: Vec) -> Result) -> Result { let event_id = Arc::::from(event_id); - if let Some(event) = self.services.rooms.timeline.get_pdu_json(&event_id)? { + if let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await { let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) @@ -43,7 +44,8 @@ pub(super) async fn get_auth_chain(&self, event_id: Box) -> Result) -> Result { + Ok(json) => { let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); Ok(RoomMessageEventContent::notice_markdown(format!( "{}\n```json\n{}\n```", @@ -109,7 +114,7 @@ pub(super) async fn get_pdu(&self, event_id: Box) -> Result Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), + Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } } @@ -157,7 +162,8 @@ pub(super) async fn get_remote_pdu_list( .send_message(RoomMessageEventContent::text_plain(format!( "Failed to get remote PDU, ignoring error: {e}" ))) - .await; + .await + .ok(); warn!("Failed to get remote PDU, ignoring error: {e}"); } else { success_count = success_count.saturating_add(1); @@ -215,7 +221,9 @@ pub(super) async fn get_remote_pdu( .services .rooms .event_handler - 
.parse_incoming_pdu(&response.pdu); + .parse_incoming_pdu(&response.pdu) + .await; + let (event_id, value, room_id) = match parsed_result { Ok(t) => t, Err(e) => { @@ -333,9 +341,12 @@ pub(super) async fn ping(&self, server: Box) -> Result Result { // Force E2EE device list updates for all users - for user_id in self.services.users.iter().filter_map(Result::ok) { - self.services.users.mark_device_key_update(&user_id)?; - } + self.services + .users + .stream() + .for_each(|user_id| self.services.users.mark_device_key_update(user_id)) + .await; + Ok(RoomMessageEventContent::text_plain( "Marked all devices for all users as having new keys to update", )) @@ -470,7 +481,8 @@ pub(super) async fn first_pdu_in_room(&self, room_id: Box) -> Result) -> Result) -> Result) -> Result> = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); @@ -554,13 +571,21 @@ pub(super) async fn force_set_room_state_from_server( let mut events = Vec::with_capacity(remote_state_response.pdus.len()); for pdu in remote_state_response.pdus.clone() { - events.push(match self.services.rooms.event_handler.parse_incoming_pdu(&pdu) { - Ok(t) => t, - Err(e) => { - warn!("Could not parse PDU, ignoring: {e}"); - continue; + events.push( + match self + .services + .rooms + .event_handler + .parse_incoming_pdu(&pdu) + .await + { + Ok(t) => t, + Err(e) => { + warn!("Could not parse PDU, ignoring: {e}"); + continue; + }, }, - }); + ); } info!("Fetching required signing keys for all the state events we got"); @@ -587,13 +612,16 @@ pub(super) async fn force_set_room_state_from_server( self.services .rooms .outlier - .add_pdu_outlier(&event_id, &value)?; + .add_pdu_outlier(&event_id, &value); + if let Some(state_key) = &pdu.state_key { let shortstatekey = self .services .rooms .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)?; + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) + .await; + state.insert(shortstatekey, pdu.event_id.clone()); } } @@ -611,7 
+639,7 @@ pub(super) async fn force_set_room_state_from_server( self.services .rooms .outlier - .add_pdu_outlier(&event_id, &value)?; + .add_pdu_outlier(&event_id, &value); } let new_room_state = self @@ -626,7 +654,8 @@ pub(super) async fn force_set_room_state_from_server( .services .rooms .state_compressor - .save_state(room_id.clone().as_ref(), new_room_state)?; + .save_state(room_id.clone().as_ref(), new_room_state) + .await?; let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; self.services @@ -642,7 +671,8 @@ pub(super) async fn force_set_room_state_from_server( self.services .rooms .state_cache - .update_joined_count(&room_id)?; + .update_joined_count(&room_id) + .await; drop(state_lock); @@ -656,7 +686,7 @@ pub(super) async fn get_signing_keys( &self, server_name: Option>, _cached: bool, ) -> Result { let server_name = server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); - let signing_keys = self.services.globals.signing_keys_for(&server_name)?; + let signing_keys = self.services.globals.signing_keys_for(&server_name).await?; Ok(RoomMessageEventContent::notice_markdown(format!( "```rs\n{signing_keys:#?}\n```" @@ -674,7 +704,7 @@ pub(super) async fn get_verify_keys( if cached { writeln!(out, "| Key ID | VerifyKey |")?; writeln!(out, "| --- | --- |")?; - for (key_id, verify_key) in self.services.globals.verify_keys_for(&server_name)? { + for (key_id, verify_key) in self.services.globals.verify_keys_for(&server_name).await? 
{ writeln!(out, "| {key_id} | {verify_key:?} |")?; } diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 8917a46b..ce95ac01 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -1,19 +1,20 @@ use std::fmt::Write; use conduit::Result; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId}; use crate::{admin_command, escape_html, get_room_info}; #[admin_command] pub(super) async fn disable_room(&self, room_id: Box) -> Result { - self.services.rooms.metadata.disable_room(&room_id, true)?; + self.services.rooms.metadata.disable_room(&room_id, true); Ok(RoomMessageEventContent::text_plain("Room disabled.")) } #[admin_command] pub(super) async fn enable_room(&self, room_id: Box) -> Result { - self.services.rooms.metadata.disable_room(&room_id, false)?; + self.services.rooms.metadata.disable_room(&room_id, false); Ok(RoomMessageEventContent::text_plain("Room enabled.")) } @@ -85,7 +86,7 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: Box) -> Result< )); } - if !self.services.users.exists(&user_id)? 
{ + if !self.services.users.exists(&user_id).await { return Ok(RoomMessageEventContent::text_plain( "Remote user does not exist in our database.", )); @@ -96,9 +97,9 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: Box) -> Result< .rooms .state_cache .rooms_joined(&user_id) - .filter_map(Result::ok) - .map(|room_id| get_room_info(self.services, &room_id)) - .collect(); + .then(|room_id| get_room_info(self.services, room_id)) + .collect() + .await; if rooms.is_empty() { return Ok(RoomMessageEventContent::text_plain("User is not in any rooms.")); diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index 3c4bf2ef..82ac162e 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -36,7 +36,7 @@ pub(super) async fn delete( let mut mxc_urls = Vec::with_capacity(4); // parsing the PDU for any MXC URLs begins here - if let Some(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id)? { + if let Ok(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id).await { if let Some(content_key) = event_json.get("content") { debug!("Event ID has \"content\"."); let content_obj = content_key.as_object(); @@ -300,7 +300,7 @@ pub(super) async fn delete_all_from_server( #[admin_command] pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; - let metadata = self.services.media.get_metadata(&mxc); + let metadata = self.services.media.get_metadata(&mxc).await; Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```"))) } diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 4f60f56e..3c1895ff 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -17,7 +17,7 @@ use conduit::{ utils::string::{collect_stream, common_prefix}, warn, Error, Result, }; -use futures_util::future::FutureExt; +use futures::future::FutureExt; use ruma::{ events::{ relation::InReplyTo, diff --git a/src/admin/query/account_data.rs 
b/src/admin/query/account_data.rs index e18c298a..896bf95c 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -44,7 +44,8 @@ pub(super) async fn process(subcommand: AccountDataCommand, context: &Command<'_ let timer = tokio::time::Instant::now(); let results = services .account_data - .changes_since(room_id.as_deref(), &user_id, since)?; + .changes_since(room_id.as_deref(), &user_id, since) + .await?; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -59,7 +60,8 @@ pub(super) async fn process(subcommand: AccountDataCommand, context: &Command<'_ let timer = tokio::time::Instant::now(); let results = services .account_data - .get(room_id.as_deref(), &user_id, kind)?; + .get(room_id.as_deref(), &user_id, kind) + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 683c228f..4b97ef4e 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -29,7 +29,9 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_> let results = services .appservice .db - .get_registration(appservice_id.as_ref()); + .get_registration(appservice_id.as_ref()) + .await; + let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -38,7 +40,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_> }, AppserviceCommand::All => { let timer = tokio::time::Instant::now(); - let results = services.appservice.all(); + let results = services.appservice.all().await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 5f271c2c..150a213c 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -29,7 +29,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: 
&Command<'_>) - match subcommand { GlobalsCommand::DatabaseVersion => { let timer = tokio::time::Instant::now(); - let results = services.globals.db.database_version(); + let results = services.globals.db.database_version().await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -47,7 +47,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) - }, GlobalsCommand::LastCheckForUpdatesId => { let timer = tokio::time::Instant::now(); - let results = services.updates.last_check_for_updates_id(); + let results = services.updates.last_check_for_updates_id().await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -67,7 +67,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) - origin, } => { let timer = tokio::time::Instant::now(); - let results = services.globals.db.verify_keys_for(&origin); + let results = services.globals.db.verify_keys_for(&origin).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 145ecd9b..6189270c 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -1,5 +1,6 @@ use clap::Subcommand; use conduit::Result; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, UserId}; use crate::Command; @@ -30,7 +31,7 @@ pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) user_id, } => { let timer = tokio::time::Instant::now(); - let results = services.presence.db.get_presence(&user_id)?; + let results = services.presence.db.get_presence(&user_id).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -42,7 +43,7 @@ pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) } => { let timer = tokio::time::Instant::now(); let results = 
services.presence.db.presence_since(since); - let presence_since: Vec<(_, _, _)> = results.collect(); + let presence_since: Vec<(_, _, _)> = results.collect().await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 637c57b6..a1bd32f9 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -21,7 +21,7 @@ pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> user_id, } => { let timer = tokio::time::Instant::now(); - let results = services.pusher.get_pushers(&user_id)?; + let results = services.pusher.get_pushers(&user_id).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs index 1809e26a..05fac42c 100644 --- a/src/admin/query/room_alias.rs +++ b/src/admin/query/room_alias.rs @@ -1,5 +1,6 @@ use clap::Subcommand; use conduit::Result; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId}; use crate::Command; @@ -31,7 +32,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) alias, } => { let timer = tokio::time::Instant::now(); - let results = services.rooms.alias.resolve_local_alias(&alias); + let results = services.rooms.alias.resolve_local_alias(&alias).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -43,7 +44,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) } => { let timer = tokio::time::Instant::now(); let results = services.rooms.alias.local_aliases_for_room(&room_id); - let aliases: Vec<_> = results.collect(); + let aliases: Vec<_> = results.collect().await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -52,8 +53,12 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: 
&Command<'_>) }, RoomAliasCommand::AllLocalAliases => { let timer = tokio::time::Instant::now(); - let results = services.rooms.alias.all_local_aliases(); - let aliases: Vec<_> = results.collect(); + let aliases = services + .rooms + .alias + .all_local_aliases() + .collect::>() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 4215cf8d..e32517fb 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,5 +1,6 @@ use clap::Subcommand; use conduit::Result; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId}; use crate::Command; @@ -86,7 +87,11 @@ pub(super) async fn process( room_id, } => { let timer = tokio::time::Instant::now(); - let result = services.rooms.state_cache.server_in_room(&server, &room_id); + let result = services + .rooms + .state_cache + .server_in_room(&server, &room_id) + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -97,7 +102,13 @@ pub(super) async fn process( room_id, } => { let timer = tokio::time::Instant::now(); - let results: Result> = services.rooms.state_cache.room_servers(&room_id).collect(); + let results: Vec<_> = services + .rooms + .state_cache + .room_servers(&room_id) + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -108,7 +119,13 @@ pub(super) async fn process( server, } => { let timer = tokio::time::Instant::now(); - let results: Result> = services.rooms.state_cache.server_rooms(&server).collect(); + let results: Vec<_> = services + .rooms + .state_cache + .server_rooms(&server) + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -119,7 +136,13 @@ pub(super) async fn 
process( room_id, } => { let timer = tokio::time::Instant::now(); - let results: Result> = services.rooms.state_cache.room_members(&room_id).collect(); + let results: Vec<_> = services + .rooms + .state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -134,7 +157,9 @@ pub(super) async fn process( .rooms .state_cache .local_users_in_room(&room_id) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -149,7 +174,9 @@ pub(super) async fn process( .rooms .state_cache .active_local_users_in_room(&room_id) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -160,7 +187,7 @@ pub(super) async fn process( room_id, } => { let timer = tokio::time::Instant::now(); - let results = services.rooms.state_cache.room_joined_count(&room_id); + let results = services.rooms.state_cache.room_joined_count(&room_id).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -171,7 +198,11 @@ pub(super) async fn process( room_id, } => { let timer = tokio::time::Instant::now(); - let results = services.rooms.state_cache.room_invited_count(&room_id); + let results = services + .rooms + .state_cache + .room_invited_count(&room_id) + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -182,11 +213,13 @@ pub(super) async fn process( room_id, } => { let timer = tokio::time::Instant::now(); - let results: Result> = services + let results: Vec<_> = services .rooms .state_cache .room_useroncejoined(&room_id) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -197,11 +230,13 @@ pub(super) async fn 
process( room_id, } => { let timer = tokio::time::Instant::now(); - let results: Result> = services + let results: Vec<_> = services .rooms .state_cache .room_members_invited(&room_id) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -216,7 +251,8 @@ pub(super) async fn process( let results = services .rooms .state_cache - .get_invite_count(&room_id, &user_id); + .get_invite_count(&room_id, &user_id) + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -231,7 +267,8 @@ pub(super) async fn process( let results = services .rooms .state_cache - .get_left_count(&room_id, &user_id); + .get_left_count(&room_id, &user_id) + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -242,7 +279,13 @@ pub(super) async fn process( user_id, } => { let timer = tokio::time::Instant::now(); - let results: Result> = services.rooms.state_cache.rooms_joined(&user_id).collect(); + let results: Vec<_> = services + .rooms + .state_cache + .rooms_joined(&user_id) + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -253,7 +296,12 @@ pub(super) async fn process( user_id, } => { let timer = tokio::time::Instant::now(); - let results: Result> = services.rooms.state_cache.rooms_invited(&user_id).collect(); + let results: Vec<_> = services + .rooms + .state_cache + .rooms_invited(&user_id) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -264,7 +312,12 @@ pub(super) async fn process( user_id, } => { let timer = tokio::time::Instant::now(); - let results: Result> = services.rooms.state_cache.rooms_left(&user_id).collect(); + let results: Vec<_> = services + .rooms + .state_cache + .rooms_left(&user_id) + .collect() + .await; let query_time = timer.elapsed(); 
Ok(RoomMessageEventContent::notice_markdown(format!( @@ -276,7 +329,11 @@ pub(super) async fn process( room_id, } => { let timer = tokio::time::Instant::now(); - let results = services.rooms.state_cache.invite_state(&user_id, &room_id); + let results = services + .rooms + .state_cache + .invite_state(&user_id, &room_id) + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 6d54bddf..eaab1f5e 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,5 +1,6 @@ use clap::Subcommand; use conduit::Result; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, ServerName, UserId}; use service::sending::Destination; @@ -68,7 +69,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - SendingCommand::ActiveRequests => { let timer = tokio::time::Instant::now(); let results = services.sending.db.active_requests(); - let active_requests: Result> = results.collect(); + let active_requests = results.collect::>().await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -133,7 +134,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - }, }; - let queued_requests = results.collect::>>(); + let queued_requests = results.collect::>().await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -199,7 +200,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - }, }; - let active_requests = results.collect::>>(); + let active_requests = results.collect::>().await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -210,7 +211,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - server_name, } => { let timer = tokio::time::Instant::now(); - let results = 
services.sending.db.get_latest_educount(&server_name); + let results = services.sending.db.get_latest_educount(&server_name).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index fee12fbf..0792e484 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -1,29 +1,344 @@ use clap::Subcommand; use conduit::Result; -use ruma::events::room::message::RoomMessageEventContent; +use futures::stream::StreamExt; +use ruma::{events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId}; -use crate::Command; +use crate::{admin_command, admin_command_dispatch}; +#[admin_command_dispatch] #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/users.rs pub(crate) enum UsersCommand { - Iter, + CountUsers, + + IterUsers, + + PasswordHash { + user_id: OwnedUserId, + }, + + ListDevices { + user_id: OwnedUserId, + }, + + ListDevicesMetadata { + user_id: OwnedUserId, + }, + + GetDeviceMetadata { + user_id: OwnedUserId, + device_id: OwnedDeviceId, + }, + + GetDevicesVersion { + user_id: OwnedUserId, + }, + + CountOneTimeKeys { + user_id: OwnedUserId, + device_id: OwnedDeviceId, + }, + + GetDeviceKeys { + user_id: OwnedUserId, + device_id: OwnedDeviceId, + }, + + GetUserSigningKey { + user_id: OwnedUserId, + }, + + GetMasterKey { + user_id: OwnedUserId, + }, + + GetToDeviceEvents { + user_id: OwnedUserId, + device_id: OwnedDeviceId, + }, + + GetLatestBackup { + user_id: OwnedUserId, + }, + + GetLatestBackupVersion { + user_id: OwnedUserId, + }, + + GetBackupAlgorithm { + user_id: OwnedUserId, + version: String, + }, + + GetAllBackups { + user_id: OwnedUserId, + version: String, + }, + + GetRoomBackups { + user_id: OwnedUserId, + version: String, + room_id: OwnedRoomId, + }, + + GetBackupSession { + user_id: OwnedUserId, + version: String, + room_id: OwnedRoomId, + session_id: String, + }, } -/// All 
the getters and iterators in key_value/users.rs -pub(super) async fn process(subcommand: UsersCommand, context: &Command<'_>) -> Result { - let services = context.services; +#[admin_command] +async fn get_backup_session( + &self, user_id: OwnedUserId, version: String, room_id: OwnedRoomId, session_id: String, +) -> Result { + let timer = tokio::time::Instant::now(); + let result = self + .services + .key_backups + .get_session(&user_id, &version, &room_id, &session_id) + .await; + let query_time = timer.elapsed(); - match subcommand { - UsersCommand::Iter => { - let timer = tokio::time::Instant::now(); - let results = services.users.db.iter(); - let users = results.collect::>(); - let query_time = timer.elapsed(); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{users:#?}\n```" - ))) - }, - } + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_room_backups( + &self, user_id: OwnedUserId, version: String, room_id: OwnedRoomId, +) -> Result { + let timer = tokio::time::Instant::now(); + let result = self + .services + .key_backups + .get_room(&user_id, &version, &room_id) + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result { + let timer = tokio::time::Instant::now(); + let result = self.services.key_backups.get_all(&user_id, &version).await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result { + let timer = tokio::time::Instant::now(); + let result = self + 
.services + .key_backups + .get_backup(&user_id, &version) + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result { + let timer = tokio::time::Instant::now(); + let result = self + .services + .key_backups + .get_latest_backup_version(&user_id) + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { + let timer = tokio::time::Instant::now(); + let result = self.services.key_backups.get_latest_backup(&user_id).await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn iter_users(&self) -> Result { + let timer = tokio::time::Instant::now(); + let result: Vec = self.services.users.stream().map(Into::into).collect().await; + + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn count_users(&self) -> Result { + let timer = tokio::time::Instant::now(); + let result = self.services.users.count().await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn password_hash(&self, user_id: OwnedUserId) -> Result { + let timer = tokio::time::Instant::now(); + let result = self.services.users.password_hash(&user_id).await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed 
in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn list_devices(&self, user_id: OwnedUserId) -> Result { + let timer = tokio::time::Instant::now(); + let devices = self + .services + .users + .all_device_ids(&user_id) + .map(ToOwned::to_owned) + .collect::>() + .await; + + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```" + ))) +} + +#[admin_command] +async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result { + let timer = tokio::time::Instant::now(); + let devices = self + .services + .users + .all_devices_metadata(&user_id) + .collect::>() + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```" + ))) +} + +#[admin_command] +async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { + let timer = tokio::time::Instant::now(); + let device = self + .services + .users + .get_device_metadata(&user_id, &device_id) + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" + ))) +} + +#[admin_command] +async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { + let timer = tokio::time::Instant::now(); + let device = self.services.users.get_devicelist_version(&user_id).await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" + ))) +} + +#[admin_command] +async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { + let timer = tokio::time::Instant::now(); + let result = self + .services + .users + .count_one_time_keys(&user_id, &device_id) + .await; + let query_time = timer.elapsed(); + + 
Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { + let timer = tokio::time::Instant::now(); + let result = self + .services + .users + .get_device_keys(&user_id, &device_id) + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { + let timer = tokio::time::Instant::now(); + let result = self.services.users.get_user_signing_key(&user_id).await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_master_key(&self, user_id: OwnedUserId) -> Result { + let timer = tokio::time::Instant::now(); + let result = self + .services + .users + .get_master_key(None, &user_id, &|_| true) + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +async fn get_to_device_events( + &self, user_id: OwnedUserId, device_id: OwnedDeviceId, +) -> Result { + let timer = tokio::time::Instant::now(); + let result = self + .services + .users + .get_to_device_events(&user_id, &device_id) + .collect::>() + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) } diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 415b8a08..34b6c42e 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -2,7 +2,8 @@ use std::fmt::Write; use clap::Subcommand; use conduit::Result; -use 
ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId}; +use futures::StreamExt; +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; use crate::{escape_html, Command}; @@ -66,8 +67,8 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> force, room_id, .. - } => match (force, services.rooms.alias.resolve_local_alias(&room_alias)) { - (true, Ok(Some(id))) => match services + } => match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) { + (true, Ok(id)) => match services .rooms .alias .set_alias(&room_alias, &room_id, server_user) @@ -77,10 +78,10 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> ))), Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))), }, - (false, Ok(Some(id))) => Ok(RoomMessageEventContent::text_plain(format!( + (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!( "Refusing to overwrite in use alias for {id}, use -f or --force to overwrite" ))), - (_, Ok(None)) => match services + (_, Err(_)) => match services .rooms .alias .set_alias(&room_alias, &room_id, server_user) @@ -88,12 +89,11 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Ok(()) => Ok(RoomMessageEventContent::text_plain("Successfully set alias")), Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))), }, - (_, Err(err)) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))), }, RoomAliasCommand::Remove { .. 
- } => match services.rooms.alias.resolve_local_alias(&room_alias) { - Ok(Some(id)) => match services + } => match services.rooms.alias.resolve_local_alias(&room_alias).await { + Ok(id) => match services .rooms .alias .remove_alias(&room_alias, server_user) @@ -102,15 +102,13 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Ok(()) => Ok(RoomMessageEventContent::text_plain(format!("Removed alias from {id}"))), Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))), }, - Ok(None) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))), + Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), }, RoomAliasCommand::Which { .. - } => match services.rooms.alias.resolve_local_alias(&room_alias) { - Ok(Some(id)) => Ok(RoomMessageEventContent::text_plain(format!("Alias resolves to {id}"))), - Ok(None) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))), + } => match services.rooms.alias.resolve_local_alias(&room_alias).await { + Ok(id) => Ok(RoomMessageEventContent::text_plain(format!("Alias resolves to {id}"))), + Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), }, RoomAliasCommand::List { .. @@ -125,63 +123,59 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> .rooms .alias .local_aliases_for_room(&room_id) - .collect::, _>>(); - match aliases { - Ok(aliases) => { - let plain_list = aliases.iter().fold(String::new(), |mut output, alias| { - writeln!(output, "- {alias}").expect("should be able to write to string buffer"); - output - }); + .map(Into::into) + .collect::>() + .await; - let html_list = aliases.iter().fold(String::new(), |mut output, alias| { - writeln!(output, "
  • {}
  • ", escape_html(alias.as_ref())) - .expect("should be able to write to string buffer"); - output - }); + let plain_list = aliases.iter().fold(String::new(), |mut output, alias| { + writeln!(output, "- {alias}").expect("should be able to write to string buffer"); + output + }); - let plain = format!("Aliases for {room_id}:\n{plain_list}"); - let html = format!("Aliases for {room_id}:\n
      {html_list}
    "); - Ok(RoomMessageEventContent::text_html(plain, html)) - }, - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to list aliases: {err}"))), - } + let html_list = aliases.iter().fold(String::new(), |mut output, alias| { + writeln!(output, "
  • {}
  • ", escape_html(alias.as_ref())) + .expect("should be able to write to string buffer"); + output + }); + + let plain = format!("Aliases for {room_id}:\n{plain_list}"); + let html = format!("Aliases for {room_id}:\n
      {html_list}
    "); + Ok(RoomMessageEventContent::text_html(plain, html)) } else { let aliases = services .rooms .alias .all_local_aliases() - .collect::, _>>(); - match aliases { - Ok(aliases) => { - let server_name = services.globals.server_name(); - let plain_list = aliases - .iter() - .fold(String::new(), |mut output, (alias, id)| { - writeln!(output, "- `{alias}` -> #{id}:{server_name}") - .expect("should be able to write to string buffer"); - output - }); + .map(|(room_id, localpart)| (room_id.into(), localpart.into())) + .collect::>() + .await; - let html_list = aliases - .iter() - .fold(String::new(), |mut output, (alias, id)| { - writeln!( - output, - "
  • {} -> #{}:{}
  • ", - escape_html(alias.as_ref()), - escape_html(id.as_ref()), - server_name - ) - .expect("should be able to write to string buffer"); - output - }); + let server_name = services.globals.server_name(); + let plain_list = aliases + .iter() + .fold(String::new(), |mut output, (alias, id)| { + writeln!(output, "- `{alias}` -> #{id}:{server_name}") + .expect("should be able to write to string buffer"); + output + }); - let plain = format!("Aliases:\n{plain_list}"); - let html = format!("Aliases:\n
      {html_list}
    "); - Ok(RoomMessageEventContent::text_html(plain, html)) - }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Unable to list room aliases: {e}"))), - } + let html_list = aliases + .iter() + .fold(String::new(), |mut output, (alias, id)| { + writeln!( + output, + "
  • {} -> #{}:{}
  • ", + escape_html(alias.as_ref()), + escape_html(id), + server_name + ) + .expect("should be able to write to string buffer"); + output + }); + + let plain = format!("Aliases:\n{plain_list}"); + let html = format!("Aliases:\n
      {html_list}
    "); + Ok(RoomMessageEventContent::text_html(plain, html)) } }, } diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index 2adfa7d7..1c90a998 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,11 +1,12 @@ use conduit::Result; -use ruma::events::room::message::RoomMessageEventContent; +use futures::StreamExt; +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; use crate::{admin_command, get_room_info, PAGE_SIZE}; #[admin_command] pub(super) async fn list_rooms( - &self, page: Option, exclude_disabled: bool, exclude_banned: bool, no_details: bool, + &self, page: Option, _exclude_disabled: bool, _exclude_banned: bool, no_details: bool, ) -> Result { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); @@ -14,37 +15,12 @@ pub(super) async fn list_rooms( .rooms .metadata .iter_ids() - .filter_map(|room_id| { - room_id - .ok() - .filter(|room_id| { - if exclude_disabled - && self - .services - .rooms - .metadata - .is_disabled(room_id) - .unwrap_or(false) - { - return false; - } + //.filter(|room_id| async { !exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await }) + //.filter(|room_id| async { !exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await }) + .then(|room_id| get_room_info(self.services, room_id)) + .collect::>() + .await; - if exclude_banned - && self - .services - .rooms - .metadata - .is_banned(room_id) - .unwrap_or(false) - { - return false; - } - - true - }) - .map(|room_id| get_room_info(self.services, &room_id)) - }) - .collect::>(); rooms.sort_by_key(|r| r.1); rooms.reverse(); @@ -74,3 +50,10 @@ pub(super) async fn list_rooms( Ok(RoomMessageEventContent::notice_markdown(output_plain)) } + +#[admin_command] +pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { + let result = self.services.rooms.metadata.exists(&room_id).await; + + 
Ok(RoomMessageEventContent::notice_markdown(format!("{result}"))) +} diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 7bba2eb7..7ccdea6f 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -2,7 +2,8 @@ use std::fmt::Write; use clap::Subcommand; use conduit::Result; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId}; +use futures::StreamExt; +use ruma::{events::room::message::RoomMessageEventContent, RoomId}; use crate::{escape_html, get_room_info, Command, PAGE_SIZE}; @@ -31,15 +32,15 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_> match command { RoomDirectoryCommand::Publish { room_id, - } => match services.rooms.directory.set_public(&room_id) { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Room published")), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {err}"))), + } => { + services.rooms.directory.set_public(&room_id); + Ok(RoomMessageEventContent::notice_plain("Room published")) }, RoomDirectoryCommand::Unpublish { room_id, - } => match services.rooms.directory.set_not_public(&room_id) { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Room unpublished")), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {err}"))), + } => { + services.rooms.directory.set_not_public(&room_id); + Ok(RoomMessageEventContent::notice_plain("Room unpublished")) }, RoomDirectoryCommand::List { page, @@ -50,9 +51,10 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_> .rooms .directory .public_rooms() - .filter_map(Result::ok) - .map(|id: OwnedRoomId| get_room_info(services, &id)) - .collect::>(); + .then(|room_id| get_room_info(services, room_id)) + .collect::>() + .await; + rooms.sort_by_key(|r| r.1); rooms.reverse(); diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index d17a2924..fc0619e3 100644 --- a/src/admin/room/info.rs +++ 
b/src/admin/room/info.rs @@ -1,5 +1,6 @@ use clap::Subcommand; -use conduit::Result; +use conduit::{utils::ReadyExt, Result}; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId}; use crate::{admin_command, admin_command_dispatch}; @@ -32,46 +33,42 @@ async fn list_joined_members(&self, room_id: Box, local_only: bool) -> R .rooms .state_accessor .get_name(&room_id) - .ok() - .flatten() - .unwrap_or_else(|| room_id.to_string()); + .await + .unwrap_or_else(|_| room_id.to_string()); - let members = self + let member_info: Vec<_> = self .services .rooms .state_cache .room_members(&room_id) - .filter_map(|member| { + .ready_filter(|user_id| { if local_only { - member - .ok() - .filter(|user| self.services.globals.user_is_local(user)) + self.services.globals.user_is_local(user_id) } else { - member.ok() + true } - }); - - let member_info = members - .into_iter() - .map(|user_id| { - ( - user_id.clone(), + }) + .filter_map(|user_id| async move { + let user_id = user_id.to_owned(); + Some(( self.services .users .displayname(&user_id) - .unwrap_or(None) - .unwrap_or_else(|| user_id.to_string()), - ) + .await + .unwrap_or_else(|_| user_id.to_string()), + user_id, + )) }) - .collect::>(); + .collect() + .await; let output_plain = format!( "{} Members in Room \"{}\":\n```\n{}\n```", member_info.len(), room_name, member_info - .iter() - .map(|(mxid, displayname)| format!("{mxid} | {displayname}")) + .into_iter() + .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) .collect::>() .join("\n") ); @@ -81,11 +78,12 @@ async fn list_joined_members(&self, room_id: Box, local_only: bool) -> R #[admin_command] async fn view_room_topic(&self, room_id: Box) -> Result { - let Some(room_topic) = self + let Ok(room_topic) = self .services .rooms .state_accessor - .get_room_topic(&room_id)? 
+ .get_room_topic(&room_id) + .await else { return Ok(RoomMessageEventContent::text_plain("Room does not have a room topic set.")); }; diff --git a/src/admin/room/mod.rs b/src/admin/room/mod.rs index 64d2af45..8c6cbeaa 100644 --- a/src/admin/room/mod.rs +++ b/src/admin/room/mod.rs @@ -6,6 +6,7 @@ mod moderation; use clap::Subcommand; use conduit::Result; +use ruma::OwnedRoomId; use self::{ alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand, moderation::RoomModerationCommand, @@ -49,4 +50,9 @@ pub(super) enum RoomCommand { #[command(subcommand)] /// - Manage the room directory Directory(RoomDirectoryCommand), + + /// - Check if we know about a room + Exists { + room_id: OwnedRoomId, + }, } diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index 70d8486b..9a772da4 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,6 +1,11 @@ use api::client::leave_room; use clap::Subcommand; -use conduit::{debug, error, info, warn, Result}; +use conduit::{ + debug, error, info, + utils::{IterStream, ReadyExt}, + warn, Result, +}; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId}; use crate::{admin_command, admin_command_dispatch, get_room_info}; @@ -76,7 +81,7 @@ async fn ban_room( let admin_room_alias = &self.services.globals.admin_alias; - if let Some(admin_room_id) = self.services.admin.get_admin_room()? 
{ + if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) { return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room.")); } @@ -95,7 +100,7 @@ async fn ban_room( debug!("Room specified is a room ID, banning room ID"); - self.services.rooms.metadata.ban_room(&room_id, true)?; + self.services.rooms.metadata.ban_room(&room_id, true); room_id } else if room.is_room_alias_id() { @@ -114,7 +119,13 @@ async fn ban_room( get_alias_helper to fetch room ID remotely" ); - let room_id = if let Some(room_id) = self.services.rooms.alias.resolve_local_alias(&room_alias)? { + let room_id = if let Ok(room_id) = self + .services + .rooms + .alias + .resolve_local_alias(&room_alias) + .await + { room_id } else { debug!("We don't have this room alias to a room ID locally, attempting to fetch room ID over federation"); @@ -138,7 +149,7 @@ async fn ban_room( } }; - self.services.rooms.metadata.ban_room(&room_id, true)?; + self.services.rooms.metadata.ban_room(&room_id, true); room_id } else { @@ -150,56 +161,40 @@ async fn ban_room( debug!("Making all users leave the room {}", &room); if force { - for local_user in self + let mut users = self .services .rooms .state_cache .room_members(&room_id) - .filter_map(|user| { - user.ok().filter(|local_user| { - self.services.globals.user_is_local(local_user) - // additional wrapped check here is to avoid adding remote users - // who are in the admin room to the list of local users (would - // fail auth check) - && (self.services.globals.user_is_local(local_user) - // since this is a force operation, assume user is an admin - // if somehow this fails - && self.services - .users - .is_admin(local_user) - .unwrap_or(true)) - }) - }) { + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); + + while let Some(local_user) = users.next().await { debug!( - "Attempting leave for user {} in room {} (forced, 
ignoring all errors, evicting admins too)", - &local_user, &room_id + "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all errors, evicting \ + admins too)", ); - if let Err(e) = leave_room(self.services, &local_user, &room_id, None).await { + if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { warn!(%e, "Failed to leave room"); } } } else { - for local_user in self + let mut users = self .services .rooms .state_cache .room_members(&room_id) - .filter_map(|user| { - user.ok().filter(|local_user| { - local_user.server_name() == self.services.globals.server_name() - // additional wrapped check here is to avoid adding remote users - // who are in the admin room to the list of local users (would fail auth check) - && (local_user.server_name() - == self.services.globals.server_name() - && !self.services - .users - .is_admin(local_user) - .unwrap_or(false)) - }) - }) { + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); + + while let Some(local_user) = users.next().await { + if self.services.users.is_admin(local_user).await { + continue; + } + debug!("Attempting leave for user {} in room {}", &local_user, &room_id); - if let Err(e) = leave_room(self.services, &local_user, &room_id, None).await { + if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { error!( "Error attempting to make local user {} leave room {} during room banning: {}", &local_user, &room_id, e @@ -214,12 +209,14 @@ async fn ban_room( } // remove any local aliases, ignore errors - for ref local_alias in self + for local_alias in &self .services .rooms .alias .local_aliases_for_room(&room_id) - .filter_map(Result::ok) + .map(ToOwned::to_owned) + .collect::>() + .await { _ = self .services @@ -230,10 +227,10 @@ async fn ban_room( } // unpublish from room directory, ignore errors - _ = self.services.rooms.directory.set_not_public(&room_id); + self.services.rooms.directory.set_not_public(&room_id); if 
disable_federation { - self.services.rooms.metadata.disable_room(&room_id, true)?; + self.services.rooms.metadata.disable_room(&room_id, true); return Ok(RoomMessageEventContent::text_plain( "Room banned, removed all our local users, and disabled incoming federation with room.", )); @@ -268,7 +265,7 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu for &room in &rooms_s { match <&RoomOrAliasId>::try_from(room) { Ok(room_alias_or_id) => { - if let Some(admin_room_id) = self.services.admin.get_admin_room()? { + if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) { info!("User specified admin room in bulk ban list, ignoring"); continue; @@ -300,43 +297,48 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu if room_alias_or_id.is_room_alias_id() { match RoomAliasId::parse(room_alias_or_id) { Ok(room_alias) => { - let room_id = - if let Some(room_id) = self.services.rooms.alias.resolve_local_alias(&room_alias)? 
{ - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room \ - ID over federation" - ); + let room_id = if let Ok(room_id) = self + .services + .rooms + .alias + .resolve_local_alias(&room_alias) + .await + { + room_id + } else { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch room ID \ + over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(&room_alias, None) - .await - { - Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room}", - ); - room_id - }, - Err(e) => { - // don't fail if force blocking - if force { - warn!("Failed to resolve room alias {room} to a room ID: {e}"); - continue; - } + match self + .services + .rooms + .alias + .resolve_alias(&room_alias, None) + .await + { + Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for {room}", + ); + room_id + }, + Err(e) => { + // don't fail if force blocking + if force { + warn!("Failed to resolve room alias {room} to a room ID: {e}"); + continue; + } - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" - ))); - }, - } - }; + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to resolve room alias {room} to a room ID: {e}" + ))); + }, + } + }; room_ids.push(room_id); }, @@ -374,74 +376,52 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu } for room_id in room_ids { - if self - .services - .rooms - .metadata - .ban_room(&room_id, true) - .is_ok() - { - debug!("Banned {room_id} successfully"); - room_ban_count = room_ban_count.saturating_add(1); - } + self.services.rooms.metadata.ban_room(&room_id, true); + + debug!("Banned {room_id} successfully"); + room_ban_count = room_ban_count.saturating_add(1); debug!("Making all users leave the room {}", &room_id); if force { - for 
local_user in self + let mut users = self .services .rooms .state_cache .room_members(&room_id) - .filter_map(|user| { - user.ok().filter(|local_user| { - local_user.server_name() == self.services.globals.server_name() - // additional wrapped check here is to avoid adding remote - // users who are in the admin room to the list of local - // users (would fail auth check) - && (local_user.server_name() - == self.services.globals.server_name() - // since this is a force operation, assume user is an - // admin if somehow this fails - && self.services - .users - .is_admin(local_user) - .unwrap_or(true)) - }) - }) { + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); + + while let Some(local_user) = users.next().await { debug!( - "Attempting leave for user {} in room {} (forced, ignoring all errors, evicting admins too)", - &local_user, room_id + "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all errors, evicting \ + admins too)", ); - if let Err(e) = leave_room(self.services, &local_user, &room_id, None).await { + + if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { warn!(%e, "Failed to leave room"); } } } else { - for local_user in self + let mut users = self .services .rooms .state_cache .room_members(&room_id) - .filter_map(|user| { - user.ok().filter(|local_user| { - local_user.server_name() == self.services.globals.server_name() - // additional wrapped check here is to avoid adding remote - // users who are in the admin room to the list of local - // users (would fail auth check) - && (local_user.server_name() - == self.services.globals.server_name() - && !self.services - .users - .is_admin(local_user) - .unwrap_or(false)) - }) - }) { - debug!("Attempting leave for user {} in room {}", &local_user, &room_id); - if let Err(e) = leave_room(self.services, &local_user, &room_id, None).await { + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); + + while let 
Some(local_user) = users.next().await { + if self.services.users.is_admin(local_user).await { + continue; + } + + debug!("Attempting leave for user {local_user} in room {room_id}"); + if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { error!( - "Error attempting to make local user {} leave room {} during bulk room banning: {}", - &local_user, &room_id, e + "Error attempting to make local user {local_user} leave room {room_id} during bulk room \ + banning: {e}", ); + return Ok(RoomMessageEventContent::text_plain(format!( "Error attempting to make local user {} leave room {} during room banning (room is still \ banned but not removing any more users and not banning any more rooms): {}\nIf you would \ @@ -453,26 +433,26 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu } // remove any local aliases, ignore errors - for ref local_alias in self - .services + self.services .rooms .alias .local_aliases_for_room(&room_id) - .filter_map(Result::ok) - { - _ = self - .services - .rooms - .alias - .remove_alias(local_alias, &self.services.globals.server_user) - .await; - } + .map(ToOwned::to_owned) + .for_each(|local_alias| async move { + self.services + .rooms + .alias + .remove_alias(&local_alias, &self.services.globals.server_user) + .await + .ok(); + }) + .await; // unpublish from room directory, ignore errors - _ = self.services.rooms.directory.set_not_public(&room_id); + self.services.rooms.directory.set_not_public(&room_id); if disable_federation { - self.services.rooms.metadata.disable_room(&room_id, true)?; + self.services.rooms.metadata.disable_room(&room_id, true); } } @@ -503,7 +483,7 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> debug!("Room specified is a room ID, unbanning room ID"); - self.services.rooms.metadata.ban_room(&room_id, false)?; + self.services.rooms.metadata.ban_room(&room_id, false); room_id } else if room.is_room_alias_id() { @@ -522,7 +502,13 @@ async fn 
unban_room(&self, enable_federation: bool, room: Box) -> get_alias_helper to fetch room ID remotely" ); - let room_id = if let Some(room_id) = self.services.rooms.alias.resolve_local_alias(&room_alias)? { + let room_id = if let Ok(room_id) = self + .services + .rooms + .alias + .resolve_local_alias(&room_alias) + .await + { room_id } else { debug!("We don't have this room alias to a room ID locally, attempting to fetch room ID over federation"); @@ -546,7 +532,7 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> } }; - self.services.rooms.metadata.ban_room(&room_id, false)?; + self.services.rooms.metadata.ban_room(&room_id, false); room_id } else { @@ -557,7 +543,7 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> }; if enable_federation { - self.services.rooms.metadata.disable_room(&room_id, false)?; + self.services.rooms.metadata.disable_room(&room_id, false); return Ok(RoomMessageEventContent::text_plain("Room unbanned.")); } @@ -569,45 +555,42 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> #[admin_command] async fn list_banned_rooms(&self, no_details: bool) -> Result { - let rooms = self + let room_ids = self .services .rooms .metadata .list_banned_rooms() - .collect::, _>>(); + .map(Into::into) + .collect::>() + .await; - match rooms { - Ok(room_ids) => { - if room_ids.is_empty() { - return Ok(RoomMessageEventContent::text_plain("No rooms are banned.")); - } - - let mut rooms = room_ids - .into_iter() - .map(|room_id| get_room_info(self.services, &room_id)) - .collect::>(); - rooms.sort_by_key(|r| r.1); - rooms.reverse(); - - let output_plain = format!( - "Rooms Banned ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| if no_details { - format!("{id}") - } else { - format!("{id}\tMembers: {members}\tName: {name}") - }) - .collect::>() - .join("\n") - ); - - Ok(RoomMessageEventContent::notice_markdown(output_plain)) - }, - Err(e) => { - error!("Failed to list banned 
rooms: {e}"); - Ok(RoomMessageEventContent::text_plain(format!("Unable to list banned rooms: {e}"))) - }, + if room_ids.is_empty() { + return Ok(RoomMessageEventContent::text_plain("No rooms are banned.")); } + + let mut rooms = room_ids + .iter() + .stream() + .then(|room_id| get_room_info(self.services, room_id)) + .collect::>() + .await; + + rooms.sort_by_key(|r| r.1); + rooms.reverse(); + + let output_plain = format!( + "Rooms Banned ({}):\n```\n{}\n```", + rooms.len(), + rooms + .iter() + .map(|(id, members, name)| if no_details { + format!("{id}") + } else { + format!("{id}\tMembers: {members}\tName: {name}") + }) + .collect::>() + .join("\n") + ); + + Ok(RoomMessageEventContent::notice_markdown(output_plain)) } diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 20691f1a..1b086856 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -1,7 +1,9 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; -use conduit::{error, info, utils, warn, PduBuilder, Result}; +use conduit::{error, info, is_equal_to, utils, warn, PduBuilder, Result}; +use conduit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; +use futures::StreamExt; use ruma::{ events::{ room::{ @@ -25,16 +27,19 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25; #[admin_command] pub(super) async fn list_users(&self) -> Result { - match self.services.users.list_local_users() { - Ok(users) => { - let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len()); - plain_msg += users.join("\n").as_str(); - plain_msg += "\n```"; + let users = self + .services + .users + .list_local_users() + .map(ToString::to_string) + .collect::>() + .await; - Ok(RoomMessageEventContent::notice_markdown(plain_msg)) - }, - Err(e) => Ok(RoomMessageEventContent::text_plain(e.to_string())), - } + let mut plain_msg = format!("Found {} local user account(s):\n```\n", 
users.len()); + plain_msg += users.join("\n").as_str(); + plain_msg += "\n```"; + + Ok(RoomMessageEventContent::notice_markdown(plain_msg)) } #[admin_command] @@ -42,7 +47,7 @@ pub(super) async fn create_user(&self, username: String, password: Option )); } - self.services.users.deactivate_account(&user_id)?; + self.services.users.deactivate_account(&user_id).await?; if !no_leave_rooms { self.services @@ -175,17 +184,22 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> .send_message(RoomMessageEventContent::text_plain(format!( "Making {user_id} leave all rooms after deactivation..." ))) - .await; + .await + .ok(); let all_joined_rooms: Vec = self .services .rooms .state_cache .rooms_joined(&user_id) - .filter_map(Result::ok) - .collect(); + .map(Into::into) + .collect() + .await; - full_user_deactivate(self.services, &user_id, all_joined_rooms).await?; + full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?; + update_displayname(self.services, &user_id, None, &all_joined_rooms).await?; + update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await?; + leave_all_rooms(self.services, &user_id).await; } Ok(RoomMessageEventContent::text_plain(format!( @@ -238,15 +252,16 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> let mut admins = Vec::new(); for username in usernames { - match parse_active_local_user_id(self.services, username) { + match parse_active_local_user_id(self.services, username).await { Ok(user_id) => { - if self.services.users.is_admin(&user_id)? 
&& !force { + if self.services.users.is_admin(&user_id).await && !force { self.services .admin .send_message(RoomMessageEventContent::text_plain(format!( "{username} is an admin and --force is not set, skipping over" ))) - .await; + .await + .ok(); admins.push(username); continue; } @@ -258,7 +273,8 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> .send_message(RoomMessageEventContent::text_plain(format!( "{username} is the server service account, skipping over" ))) - .await; + .await + .ok(); continue; } @@ -270,7 +286,8 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> .send_message(RoomMessageEventContent::text_plain(format!( "{username} is not a valid username, skipping over: {e}" ))) - .await; + .await + .ok(); continue; }, } @@ -279,7 +296,7 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> let mut deactivation_count: usize = 0; for user_id in user_ids { - match self.services.users.deactivate_account(&user_id) { + match self.services.users.deactivate_account(&user_id).await { Ok(()) => { deactivation_count = deactivation_count.saturating_add(1); if !no_leave_rooms { @@ -289,16 +306,26 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> .rooms .state_cache .rooms_joined(&user_id) - .filter_map(Result::ok) - .collect(); - full_user_deactivate(self.services, &user_id, all_joined_rooms).await?; + .map(Into::into) + .collect() + .await; + + full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?; + update_displayname(self.services, &user_id, None, &all_joined_rooms) + .await + .ok(); + update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms) + .await + .ok(); + leave_all_rooms(self.services, &user_id).await; } }, Err(e) => { self.services .admin .send_message(RoomMessageEventContent::text_plain(format!("Failed deactivating user: {e}"))) - .await; + .await + .ok(); }, } } @@ -326,9 +353,9 @@ pub(super) 
async fn list_joined_rooms(&self, user_id: String) -> Result(&room_id, &StateEventType::RoomPowerLevels, "") + .await + .ok(); let user_can_demote_self = room_power_levels .as_ref() @@ -417,9 +443,9 @@ pub(super) async fn force_demote( .services .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "")? - .as_ref() - .is_some_and(|event| event.sender == user_id); + .room_state_get(&room_id, &StateEventType::RoomCreate, "") + .await + .is_ok_and(|event| event.sender == user_id); if !user_can_demote_self { return Ok(RoomMessageEventContent::notice_markdown( @@ -473,15 +499,16 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result, tag: String, ) -> Result { - let user_id = parse_active_local_user_id(self.services, &user_id)?; + let user_id = parse_active_local_user_id(self.services, &user_id).await?; let event = self .services .account_data - .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag)?; + .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag) + .await; let mut tags_event = event.map_or_else( - || TagEvent { + |_| TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, @@ -494,12 +521,15 @@ pub(super) async fn put_room_tag( .tags .insert(tag.clone().into(), TagInfo::new()); - self.services.account_data.update( - Some(&room_id), - &user_id, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), - )?; + self.services + .account_data + .update( + Some(&room_id), + &user_id, + RoomAccountDataEventType::Tag, + &serde_json::to_value(tags_event).expect("to json value always works"), + ) + .await?; Ok(RoomMessageEventContent::text_plain(format!( "Successfully updated room account data for {user_id} and room {room_id} with tag {tag}" @@ -510,15 +540,16 @@ pub(super) async fn put_room_tag( pub(super) async fn delete_room_tag( &self, user_id: String, room_id: Box, tag: String, ) -> Result { - let user_id = parse_active_local_user_id(self.services, 
&user_id)?; + let user_id = parse_active_local_user_id(self.services, &user_id).await?; let event = self .services .account_data - .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag)?; + .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag) + .await; let mut tags_event = event.map_or_else( - || TagEvent { + |_| TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, @@ -528,12 +559,15 @@ pub(super) async fn delete_room_tag( tags_event.content.tags.remove(&tag.clone().into()); - self.services.account_data.update( - Some(&room_id), - &user_id, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), - )?; + self.services + .account_data + .update( + Some(&room_id), + &user_id, + RoomAccountDataEventType::Tag, + &serde_json::to_value(tags_event).expect("to json value always works"), + ) + .await?; Ok(RoomMessageEventContent::text_plain(format!( "Successfully updated room account data for {user_id} and room {room_id}, deleting room tag {tag}" @@ -542,15 +576,16 @@ pub(super) async fn delete_room_tag( #[admin_command] pub(super) async fn get_room_tags(&self, user_id: String, room_id: Box) -> Result { - let user_id = parse_active_local_user_id(self.services, &user_id)?; + let user_id = parse_active_local_user_id(self.services, &user_id).await?; let event = self .services .account_data - .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag)?; + .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag) + .await; let tags_event = event.map_or_else( - || TagEvent { + |_| TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, @@ -566,11 +601,12 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: Box) #[admin_command] pub(super) async fn redact_event(&self, event_id: Box) -> Result { - let Some(event) = self + let Ok(event) = self .services .rooms .timeline - .get_non_outlier_pdu(&event_id)? 
+ .get_non_outlier_pdu(&event_id) + .await else { return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database.")); }; diff --git a/src/admin/utils.rs b/src/admin/utils.rs index 8d3d15ae..ba98bbea 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -8,23 +8,21 @@ pub(crate) fn escape_html(s: &str) -> String { .replace('>', ">") } -pub(crate) fn get_room_info(services: &Services, id: &RoomId) -> (OwnedRoomId, u64, String) { +pub(crate) async fn get_room_info(services: &Services, room_id: &RoomId) -> (OwnedRoomId, u64, String) { ( - id.into(), + room_id.into(), services .rooms .state_cache - .room_joined_count(id) - .ok() - .flatten() + .room_joined_count(room_id) + .await .unwrap_or(0), services .rooms .state_accessor - .get_name(id) - .ok() - .flatten() - .unwrap_or_else(|| id.to_string()), + .get_name(room_id) + .await + .unwrap_or_else(|_| room_id.to_string()), ) } @@ -46,14 +44,14 @@ pub(crate) fn parse_local_user_id(services: &Services, user_id: &str) -> Result< } /// Parses user ID that is an active (not guest or deactivated) local user -pub(crate) fn parse_active_local_user_id(services: &Services, user_id: &str) -> Result { +pub(crate) async fn parse_active_local_user_id(services: &Services, user_id: &str) -> Result { let user_id = parse_local_user_id(services, user_id)?; - if !services.users.exists(&user_id)? { + if !services.users.exists(&user_id).await { return Err!("User {user_id:?} does not exist on this server."); } - if services.users.is_deactivated(&user_id)? { + if services.users.is_deactivated(&user_id).await? 
{ return Err!("User {user_id:?} is deactivated."); } diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 2b89c3e8..6e37cb40 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -45,7 +45,7 @@ conduit-core.workspace = true conduit-database.workspace = true conduit-service.workspace = true const-str.workspace = true -futures-util.workspace = true +futures.workspace = true hmac.workspace = true http.workspace = true http-body-util.workspace = true diff --git a/src/api/client/account.rs b/src/api/client/account.rs index cee86f80..63d02f8f 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -2,7 +2,8 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{debug_info, error, info, utils, warn, Error, PduBuilder, Result}; +use conduit::{debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result}; +use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ api::client::{ @@ -55,7 +56,7 @@ pub(crate) async fn get_register_available_route( .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; // Check if username is creative enough - if services.users.exists(&user_id)? { + if services.users.exists(&user_id).await { return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); } @@ -125,7 +126,7 @@ pub(crate) async fn register_route( // forbid guests from registering if there is not a real admin user yet. give // generic user error. - if is_guest && services.users.count()? < 2 { + if is_guest && services.users.count().await < 2 { warn!( "Guest account attempted to register before a real admin user has been registered, rejecting \ registration. 
Guest's initial device name: {:?}", @@ -142,7 +143,7 @@ pub(crate) async fn register_route( .filter(|user_id| !user_id.is_historical() && services.globals.user_is_local(user_id)) .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; - if services.users.exists(&proposed_user_id)? { + if services.users.exists(&proposed_user_id).await { return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); } @@ -162,7 +163,7 @@ pub(crate) async fn register_route( services.globals.server_name(), ) .unwrap(); - if !services.users.exists(&proposed_user_id)? { + if !services.users.exists(&proposed_user_id).await { break proposed_user_id; } }, @@ -210,12 +211,15 @@ pub(crate) async fn register_route( if !skip_auth { if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services.uiaa.try_auth( - &UserId::parse_with_server_name("", services.globals.server_name()).expect("we know this is valid"), - "".into(), - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = services + .uiaa + .try_auth( + &UserId::parse_with_server_name("", services.globals.server_name()).expect("we know this is valid"), + "".into(), + auth, + &uiaainfo, + ) + .await?; if !worked { return Err(Error::Uiaa(uiaainfo)); } @@ -227,7 +231,7 @@ pub(crate) async fn register_route( "".into(), &uiaainfo, &json, - )?; + ); return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); @@ -255,21 +259,23 @@ pub(crate) async fn register_route( services .users - .set_displayname(&user_id, Some(displayname.clone())) - .await?; + .set_displayname(&user_id, Some(displayname.clone())); // Initial account data - services.account_data.update( - None, - &user_id, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: push::Ruleset::server_default(&user_id), - }, - }) - .expect("to json 
always works"), - )?; + services + .account_data + .update( + None, + &user_id, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: push::Ruleset::server_default(&user_id), + }, + }) + .expect("to json always works"), + ) + .await?; // Inhibit login does not work for guests if !is_guest && body.inhibit_login { @@ -294,13 +300,16 @@ pub(crate) async fn register_route( let token = utils::random_string(TOKEN_LENGTH); // Create device for this account - services.users.create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - Some(client.to_string()), - )?; + services + .users + .create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + Some(client.to_string()), + ) + .await?; debug_info!(%user_id, %device_id, "User account was created"); @@ -318,7 +327,8 @@ pub(crate) async fn register_route( "New user \"{user_id}\" registered on this server from IP {client} and device display name \ \"{device_display_name}\"" ))) - .await; + .await + .ok(); } } else { info!("New user \"{user_id}\" registered on this server."); @@ -329,7 +339,8 @@ pub(crate) async fn register_route( .send_message(RoomMessageEventContent::notice_plain(format!( "New user \"{user_id}\" registered on this server from IP {client}" ))) - .await; + .await + .ok(); } } } @@ -346,7 +357,8 @@ pub(crate) async fn register_route( "Guest user \"{user_id}\" with device display name \"{device_display_name}\" registered on \ this server from IP {client}" ))) - .await; + .await + .ok(); } } else { #[allow(clippy::collapsible_else_if)] @@ -357,7 +369,8 @@ pub(crate) async fn register_route( "Guest user \"{user_id}\" with no device display name registered on this server from IP \ {client}", ))) - .await; + .await + .ok(); } } } @@ -365,10 +378,15 @@ pub(crate) async fn register_route( // If this is 
the first real user, grant them admin privileges except for guest // users Note: the server user, @conduit:servername, is generated first if !is_guest { - if let Some(admin_room) = services.admin.get_admin_room()? { - if services.rooms.state_cache.room_joined_count(&admin_room)? == Some(1) { + if let Ok(admin_room) = services.admin.get_admin_room().await { + if services + .rooms + .state_cache + .room_joined_count(&admin_room) + .await + .is_ok_and(is_equal_to!(1)) + { services.admin.make_user_admin(&user_id).await?; - warn!("Granting {user_id} admin privileges as the first user"); } } @@ -382,7 +400,8 @@ pub(crate) async fn register_route( if !services .rooms .state_cache - .server_in_room(services.globals.server_name(), room)? + .server_in_room(services.globals.server_name(), room) + .await { warn!("Skipping room {room} to automatically join as we have never joined before."); continue; @@ -398,6 +417,7 @@ pub(crate) async fn register_route( None, &body.appservice_info, ) + .boxed() .await { // don't return this error so we don't fail registrations @@ -461,16 +481,20 @@ pub(crate) async fn change_password_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = services .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; + if !worked { return Err(Error::Uiaa(uiaainfo)); } - // Success! + + // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json); + return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); @@ -482,14 +506,12 @@ pub(crate) async fn change_password_route( if body.logout_devices { // Logout all devices except the current one - for id in services + services .users .all_device_ids(sender_user) - .filter_map(Result::ok) - .filter(|id| id != sender_device) - { - services.users.remove_device(sender_user, &id)?; - } + .ready_filter(|id| id != sender_device) + .for_each(|id| services.users.remove_device(sender_user, id)) + .await; } info!("User {sender_user} changed their password."); @@ -500,7 +522,8 @@ pub(crate) async fn change_password_route( .send_message(RoomMessageEventContent::notice_plain(format!( "User {sender_user} changed their password." ))) - .await; + .await + .ok(); } Ok(change_password::v3::Response {}) @@ -520,7 +543,7 @@ pub(crate) async fn whoami_route( Ok(whoami::v3::Response { user_id: sender_user.clone(), device_id, - is_guest: services.users.is_deactivated(sender_user)? && body.appservice_info.is_none(), + is_guest: services.users.is_deactivated(sender_user).await? 
&& body.appservice_info.is_none(), }) } @@ -561,7 +584,9 @@ pub(crate) async fn deactivate_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = services .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; + if !worked { return Err(Error::Uiaa(uiaainfo)); } @@ -570,7 +595,8 @@ pub(crate) async fn deactivate_route( uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json); + return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); @@ -581,10 +607,14 @@ pub(crate) async fn deactivate_route( .rooms .state_cache .rooms_joined(sender_user) - .filter_map(Result::ok) - .collect(); + .map(Into::into) + .collect() + .await; - full_user_deactivate(&services, sender_user, all_joined_rooms).await?; + super::update_displayname(&services, sender_user, None, &all_joined_rooms).await?; + super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await?; + + full_user_deactivate(&services, sender_user, &all_joined_rooms).await?; info!("User {sender_user} deactivated their account."); @@ -594,7 +624,8 @@ pub(crate) async fn deactivate_route( .send_message(RoomMessageEventContent::notice_plain(format!( "User {sender_user} deactivated their account." 
))) - .await; + .await + .ok(); } Ok(deactivate::v3::Response { @@ -674,34 +705,27 @@ pub(crate) async fn check_registration_token_validity( /// - Removing all profile data /// - Leaving all rooms (and forgets all of them) pub async fn full_user_deactivate( - services: &Services, user_id: &UserId, all_joined_rooms: Vec, + services: &Services, user_id: &UserId, all_joined_rooms: &[OwnedRoomId], ) -> Result<()> { - services.users.deactivate_account(user_id)?; + services.users.deactivate_account(user_id).await?; + super::update_displayname(services, user_id, None, all_joined_rooms).await?; + super::update_avatar_url(services, user_id, None, None, all_joined_rooms).await?; - super::update_displayname(services, user_id, None, all_joined_rooms.clone()).await?; - super::update_avatar_url(services, user_id, None, None, all_joined_rooms.clone()).await?; - - let all_profile_keys = services + services .users .all_profile_keys(user_id) - .filter_map(Result::ok); - - for (profile_key, _profile_value) in all_profile_keys { - if let Err(e) = services.users.set_profile_key(user_id, &profile_key, None) { - warn!("Failed removing {user_id} profile key {profile_key}: {e}"); - } - } + .ready_for_each(|(profile_key, _)| services.users.set_profile_key(user_id, &profile_key, None)) + .await; for room_id in all_joined_rooms { - let state_lock = services.rooms.state.mutex.lock(&room_id).await; + let state_lock = services.rooms.state.mutex.lock(room_id).await; let room_power_levels = services .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")? - .as_ref() - .and_then(|event| serde_json::from_str(event.content.get()).ok()?) 
- .and_then(|content: RoomPowerLevelsEventContent| content.into()); + .room_state_get_content::(room_id, &StateEventType::RoomPowerLevels, "") + .await + .ok(); let user_can_demote_self = room_power_levels .as_ref() @@ -710,9 +734,9 @@ pub async fn full_user_deactivate( }) || services .rooms .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "")? - .as_ref() - .is_some_and(|event| event.sender == user_id); + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + .is_ok_and(|event| event.sender == user_id); if user_can_demote_self { let mut power_levels_content = room_power_levels.unwrap_or_default(); @@ -732,7 +756,7 @@ pub async fn full_user_deactivate( timestamp: None, }, user_id, - &room_id, + room_id, &state_lock, ) .await diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 12d6352c..2399a355 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,11 +1,9 @@ use axum::extract::State; -use conduit::{debug, Error, Result}; +use conduit::{debug, Err, Result}; +use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ - api::client::{ - alias::{create_alias, delete_alias, get_alias}, - error::ErrorKind, - }, + api::client::alias::{create_alias, delete_alias, get_alias}, OwnedServerName, RoomAliasId, RoomId, }; use service::Services; @@ -33,16 +31,17 @@ pub(crate) async fn create_alias_route( .forbidden_alias_names() .is_match(body.room_alias.alias()) { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Room alias is forbidden.")); + return Err!(Request(Forbidden("Room alias is forbidden."))); } if services .rooms .alias - .resolve_local_alias(&body.room_alias)? 
- .is_some() + .resolve_local_alias(&body.room_alias) + .await + .is_ok() { - return Err(Error::Conflict("Alias already exists.")); + return Err!(Conflict("Alias already exists.")); } services @@ -95,16 +94,16 @@ pub(crate) async fn get_alias_route( .resolve_alias(&room_alias, servers.as_ref()) .await else { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room with alias not found.")); + return Err!(Request(NotFound("Room with alias not found."))); }; - let servers = room_available_servers(&services, &room_id, &room_alias, &pre_servers); + let servers = room_available_servers(&services, &room_id, &room_alias, &pre_servers).await; debug!(?room_alias, ?room_id, "available servers: {servers:?}"); Ok(get_alias::v3::Response::new(room_id, servers)) } -fn room_available_servers( +async fn room_available_servers( services: &Services, room_id: &RoomId, room_alias: &RoomAliasId, pre_servers: &Option>, ) -> Vec { // find active servers in room state cache to suggest @@ -112,8 +111,9 @@ fn room_available_servers( .rooms .state_cache .room_servers(room_id) - .filter_map(Result::ok) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; // push any servers we want in the list already (e.g. 
responded remote alias // servers, room alias server itself) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 4ead8777..d52da80a 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,18 +1,16 @@ use axum::extract::State; +use conduit::{err, Err}; use ruma::{ - api::client::{ - backup::{ - add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, - delete_backup_keys, delete_backup_keys_for_room, delete_backup_keys_for_session, delete_backup_version, - get_backup_info, get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session, - get_latest_backup_info, update_backup_version, - }, - error::ErrorKind, + api::client::backup::{ + add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, + delete_backup_keys, delete_backup_keys_for_room, delete_backup_keys_for_session, delete_backup_version, + get_backup_info, get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session, + get_latest_backup_info, update_backup_version, }, UInt, }; -use crate::{Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `POST /_matrix/client/r0/room_keys/version` /// @@ -40,7 +38,8 @@ pub(crate) async fn update_backup_version_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); services .key_backups - .update_backup(sender_user, &body.version, &body.algorithm)?; + .update_backup(sender_user, &body.version, &body.algorithm) + .await?; Ok(update_backup_version::v3::Response {}) } @@ -55,14 +54,15 @@ pub(crate) async fn get_latest_backup_info_route( let (version, algorithm) = services .key_backups - .get_latest_backup(sender_user)? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Key backup does not exist."))?; + .get_latest_backup(sender_user) + .await + .map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?; Ok(get_latest_backup_info::v3::Response { algorithm, - count: (UInt::try_from(services.key_backups.count_keys(sender_user, &version)?) + count: (UInt::try_from(services.key_backups.count_keys(sender_user, &version).await) .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &version)?, + etag: services.key_backups.get_etag(sender_user, &version).await, version, }) } @@ -76,18 +76,21 @@ pub(crate) async fn get_backup_info_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = services .key_backups - .get_backup(sender_user, &body.version)? - .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Key backup does not exist."))?; + .get_backup(sender_user, &body.version) + .await + .map_err(|_| err!(Request(NotFound("Key backup does not exist at version {:?}", body.version))))?; Ok(get_backup_info::v3::Response { algorithm, - count: (UInt::try_from( - services - .key_backups - .count_keys(sender_user, &body.version)?, - ) - .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &body.version)?, + count: services + .key_backups + .count_keys(sender_user, &body.version) + .await + .try_into()?, + etag: services + .key_backups + .get_etag(sender_user, &body.version) + .await, version: body.version.clone(), }) } @@ -105,7 +108,8 @@ pub(crate) async fn delete_backup_version_route( services .key_backups - .delete_backup(sender_user, &body.version)?; + .delete_backup(sender_user, &body.version) + .await; Ok(delete_backup_version::v3::Response {}) } @@ -123,34 +127,36 @@ pub(crate) async fn add_backup_keys_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if 
Some(&body.version) - != services - .key_backups - .get_latest_backup_version(sender_user)? - .as_ref() + if services + .key_backups + .get_latest_backup_version(sender_user) + .await + .is_ok_and(|version| version != body.version) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", - )); + return Err!(Request(InvalidParam( + "You may only manipulate the most recently created version of the backup." + ))); } for (room_id, room) in &body.rooms { for (session_id, key_data) in &room.sessions { services .key_backups - .add_key(sender_user, &body.version, room_id, session_id, key_data)?; + .add_key(sender_user, &body.version, room_id, session_id, key_data) + .await?; } } Ok(add_backup_keys::v3::Response { - count: (UInt::try_from( - services - .key_backups - .count_keys(sender_user, &body.version)?, - ) - .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &body.version)?, + count: services + .key_backups + .count_keys(sender_user, &body.version) + .await + .try_into()?, + etag: services + .key_backups + .get_etag(sender_user, &body.version) + .await, }) } @@ -167,32 +173,34 @@ pub(crate) async fn add_backup_keys_for_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if Some(&body.version) - != services - .key_backups - .get_latest_backup_version(sender_user)? - .as_ref() + if services + .key_backups + .get_latest_backup_version(sender_user) + .await + .is_ok_and(|version| version != body.version) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", - )); + return Err!(Request(InvalidParam( + "You may only manipulate the most recently created version of the backup." 
+ ))); } for (session_id, key_data) in &body.sessions { services .key_backups - .add_key(sender_user, &body.version, &body.room_id, session_id, key_data)?; + .add_key(sender_user, &body.version, &body.room_id, session_id, key_data) + .await?; } Ok(add_backup_keys_for_room::v3::Response { - count: (UInt::try_from( - services - .key_backups - .count_keys(sender_user, &body.version)?, - ) - .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &body.version)?, + count: services + .key_backups + .count_keys(sender_user, &body.version) + .await + .try_into()?, + etag: services + .key_backups + .get_etag(sender_user, &body.version) + .await, }) } @@ -209,30 +217,32 @@ pub(crate) async fn add_backup_keys_for_session_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if Some(&body.version) - != services - .key_backups - .get_latest_backup_version(sender_user)? - .as_ref() + if services + .key_backups + .get_latest_backup_version(sender_user) + .await + .is_ok_and(|version| version != body.version) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", - )); + return Err!(Request(InvalidParam( + "You may only manipulate the most recently created version of the backup." 
+ ))); } services .key_backups - .add_key(sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data)?; + .add_key(sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data) + .await?; Ok(add_backup_keys_for_session::v3::Response { - count: (UInt::try_from( - services - .key_backups - .count_keys(sender_user, &body.version)?, - ) - .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &body.version)?, + count: services + .key_backups + .count_keys(sender_user, &body.version) + .await + .try_into()?, + etag: services + .key_backups + .get_etag(sender_user, &body.version) + .await, }) } @@ -244,7 +254,10 @@ pub(crate) async fn get_backup_keys_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let rooms = services.key_backups.get_all(sender_user, &body.version)?; + let rooms = services + .key_backups + .get_all(sender_user, &body.version) + .await; Ok(get_backup_keys::v3::Response { rooms, @@ -261,7 +274,8 @@ pub(crate) async fn get_backup_keys_for_room_route( let sessions = services .key_backups - .get_room(sender_user, &body.version, &body.room_id)?; + .get_room(sender_user, &body.version, &body.room_id) + .await; Ok(get_backup_keys_for_room::v3::Response { sessions, @@ -278,8 +292,9 @@ pub(crate) async fn get_backup_keys_for_session_route( let key_data = services .key_backups - .get_session(sender_user, &body.version, &body.room_id, &body.session_id)? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Backup key not found for this user's session."))?; + .get_session(sender_user, &body.version, &body.room_id, &body.session_id) + .await + .map_err(|_| err!(Request(NotFound(debug_error!("Backup key not found for this user's session.")))))?; Ok(get_backup_keys_for_session::v3::Response { key_data, @@ -296,16 +311,19 @@ pub(crate) async fn delete_backup_keys_route( services .key_backups - .delete_all_keys(sender_user, &body.version)?; + .delete_all_keys(sender_user, &body.version) + .await; Ok(delete_backup_keys::v3::Response { - count: (UInt::try_from( - services - .key_backups - .count_keys(sender_user, &body.version)?, - ) - .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &body.version)?, + count: services + .key_backups + .count_keys(sender_user, &body.version) + .await + .try_into()?, + etag: services + .key_backups + .get_etag(sender_user, &body.version) + .await, }) } @@ -319,16 +337,19 @@ pub(crate) async fn delete_backup_keys_for_room_route( services .key_backups - .delete_room_keys(sender_user, &body.version, &body.room_id)?; + .delete_room_keys(sender_user, &body.version, &body.room_id) + .await; Ok(delete_backup_keys_for_room::v3::Response { - count: (UInt::try_from( - services - .key_backups - .count_keys(sender_user, &body.version)?, - ) - .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &body.version)?, + count: services + .key_backups + .count_keys(sender_user, &body.version) + .await + .try_into()?, + etag: services + .key_backups + .get_etag(sender_user, &body.version) + .await, }) } @@ -342,15 +363,18 @@ pub(crate) async fn delete_backup_keys_for_session_route( services .key_backups - .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?; + .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id) + .await; 
Ok(delete_backup_keys_for_session::v3::Response { - count: (UInt::try_from( - services - .key_backups - .count_keys(sender_user, &body.version)?, - ) - .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &body.version)?, + count: services + .key_backups + .count_keys(sender_user, &body.version) + .await + .try_into()?, + etag: services + .key_backups + .get_etag(sender_user, &body.version) + .await, }) } diff --git a/src/api/client/config.rs b/src/api/client/config.rs index 61cc97ff..33b85136 100644 --- a/src/api/client/config.rs +++ b/src/api/client/config.rs @@ -1,4 +1,5 @@ use axum::extract::State; +use conduit::err; use ruma::{ api::client::{ config::{get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data}, @@ -25,7 +26,8 @@ pub(crate) async fn set_global_account_data_route( &body.sender_user, &body.event_type.to_string(), body.data.json(), - )?; + ) + .await?; Ok(set_global_account_data::v3::Response {}) } @@ -42,7 +44,8 @@ pub(crate) async fn set_room_account_data_route( &body.sender_user, &body.event_type.to_string(), body.data.json(), - )?; + ) + .await?; Ok(set_room_account_data::v3::Response {}) } @@ -57,8 +60,9 @@ pub(crate) async fn get_global_account_data_route( let event: Box = services .account_data - .get(None, sender_user, body.event_type.to_string().into())? - .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + .get(None, sender_user, body.event_type.to_string().into()) + .await + .map_err(|_| err!(Request(NotFound("Data not found."))))?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? @@ -79,8 +83,9 @@ pub(crate) async fn get_room_account_data_route( let event: Box = services .account_data - .get(Some(&body.room_id), sender_user, body.event_type.clone())? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + .get(Some(&body.room_id), sender_user, body.event_type.clone()) + .await + .map_err(|_| err!(Request(NotFound("Data not found."))))?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? @@ -91,7 +96,7 @@ pub(crate) async fn get_room_account_data_route( }) } -fn set_account_data( +async fn set_account_data( services: &Services, room_id: Option<&RoomId>, sender_user: &Option, event_type: &str, data: &RawJsonValue, ) -> Result<()> { @@ -100,15 +105,18 @@ fn set_account_data( let data: serde_json::Value = serde_json::from_str(data.get()).map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; - services.account_data.update( - room_id, - sender_user, - event_type.into(), - &json!({ - "type": event_type, - "content": data, - }), - )?; + services + .account_data + .update( + room_id, + sender_user, + event_type.into(), + &json!({ + "type": event_type, + "content": data, + }), + ) + .await?; Ok(()) } diff --git a/src/api/client/context.rs b/src/api/client/context.rs index f223d488..cc49b763 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,13 +1,14 @@ use std::collections::HashSet; use axum::extract::State; +use conduit::{err, error, Err}; +use futures::StreamExt; use ruma::{ - api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, + api::client::{context::get_context, filter::LazyLoadOptions}, events::StateEventType, }; -use tracing::error; -use crate::{Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `GET /_matrix/client/r0/rooms/{roomId}/context` /// @@ -35,34 +36,33 @@ pub(crate) async fn get_context_route( let base_token = services .rooms .timeline - .get_pdu_count(&body.event_id)? 
- .ok_or(Error::BadRequest(ErrorKind::NotFound, "Base event id not found."))?; + .get_pdu_count(&body.event_id) + .await + .map_err(|_| err!(Request(NotFound("Base event id not found."))))?; let base_event = services .rooms .timeline - .get_pdu(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Base event not found."))?; + .get_pdu(&body.event_id) + .await + .map_err(|_| err!(Request(NotFound("Base event not found."))))?; - let room_id = base_event.room_id.clone(); + let room_id = &base_event.room_id; if !services .rooms .state_accessor - .user_can_see_event(sender_user, &room_id, &body.event_id)? + .user_can_see_event(sender_user, room_id, &body.event_id) + .await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this event.", - )); + return Err!(Request(Forbidden("You don't have permission to view this event."))); } - if !services.rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - &room_id, - &base_event.sender, - )? || lazy_load_send_redundant + if !services + .rooms + .lazy_loading + .lazy_load_was_sent_before(sender_user, sender_device, room_id, &base_event.sender) + .await || lazy_load_send_redundant { lazy_loaded.insert(base_event.sender.as_str().to_owned()); } @@ -75,25 +75,26 @@ pub(crate) async fn get_context_route( let events_before: Vec<_> = services .rooms .timeline - .pdus_until(sender_user, &room_id, base_token)? + .pdus_until(sender_user, room_id, base_token) + .await? 
.take(limit / 2) - .filter_map(Result::ok) // Remove buggy events - .filter(|(_, pdu)| { + .filter_map(|(count, pdu)| async move { services .rooms .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) - .unwrap_or(false) + .user_can_see_event(sender_user, room_id, &pdu.event_id) + .await + .then_some((count, pdu)) }) - .collect(); + .collect() + .await; for (_, event) in &events_before { - if !services.rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - &room_id, - &event.sender, - )? || lazy_load_send_redundant + if !services + .rooms + .lazy_loading + .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) + .await || lazy_load_send_redundant { lazy_loaded.insert(event.sender.as_str().to_owned()); } @@ -111,25 +112,26 @@ pub(crate) async fn get_context_route( let events_after: Vec<_> = services .rooms .timeline - .pdus_after(sender_user, &room_id, base_token)? + .pdus_after(sender_user, room_id, base_token) + .await? .take(limit / 2) - .filter_map(Result::ok) // Remove buggy events - .filter(|(_, pdu)| { + .filter_map(|(count, pdu)| async move { services .rooms .state_accessor - .user_can_see_event(sender_user, &room_id, &pdu.event_id) - .unwrap_or(false) + .user_can_see_event(sender_user, room_id, &pdu.event_id) + .await + .then_some((count, pdu)) }) - .collect(); + .collect() + .await; for (_, event) in &events_after { - if !services.rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - &room_id, - &event.sender, - )? || lazy_load_send_redundant + if !services + .rooms + .lazy_loading + .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) + .await || lazy_load_send_redundant { lazy_loaded.insert(event.sender.as_str().to_owned()); } @@ -142,12 +144,14 @@ pub(crate) async fn get_context_route( events_after .last() .map_or(&*body.event_id, |(_, e)| &*e.event_id), - )? 
+ ) + .await .map_or( services .rooms .state - .get_room_shortstatehash(&room_id)? + .get_room_shortstatehash(room_id) + .await .expect("All rooms have state"), |hash| hash, ); @@ -156,7 +160,8 @@ pub(crate) async fn get_context_route( .rooms .state_accessor .state_full_ids(shortstatehash) - .await?; + .await + .map_err(|e| err!(Database("State not found: {e}")))?; let end_token = events_after .last() @@ -173,18 +178,19 @@ pub(crate) async fn get_context_route( let (event_type, state_key) = services .rooms .short - .get_statekey_from_short(shortstatekey)?; + .get_statekey_from_short(shortstatekey) + .await?; if event_type != StateEventType::RoomMember { - let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else { - error!("Pdu in state not found: {}", id); + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); continue; }; state.push(pdu.to_state_event()); } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { - let Some(pdu) = services.rooms.timeline.get_pdu(&id)? 
else { - error!("Pdu in state not found: {}", id); + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); continue; }; diff --git a/src/api/client/device.rs b/src/api/client/device.rs index bad7f284..93eaa393 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,4 +1,6 @@ use axum::extract::State; +use conduit::{err, Err}; +use futures::StreamExt; use ruma::api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, @@ -19,8 +21,8 @@ pub(crate) async fn get_devices_route( let devices: Vec = services .users .all_devices_metadata(sender_user) - .filter_map(Result::ok) // Filter out buggy devices - .collect(); + .collect() + .await; Ok(get_devices::v3::Response { devices, @@ -37,8 +39,9 @@ pub(crate) async fn get_device_route( let device = services .users - .get_device_metadata(sender_user, &body.body.device_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; + .get_device_metadata(sender_user, &body.body.device_id) + .await + .map_err(|_| err!(Request(NotFound("Device not found."))))?; Ok(get_device::v3::Response { device, @@ -55,14 +58,16 @@ pub(crate) async fn update_device_route( let mut device = services .users - .get_device_metadata(sender_user, &body.device_id)? 
- .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; + .get_device_metadata(sender_user, &body.device_id) + .await + .map_err(|_| err!(Request(NotFound("Device not found."))))?; device.display_name.clone_from(&body.display_name); services .users - .update_device_metadata(sender_user, &body.device_id, &device)?; + .update_device_metadata(sender_user, &body.device_id, &device) + .await?; Ok(update_device::v3::Response {}) } @@ -97,22 +102,28 @@ pub(crate) async fn delete_device_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = services .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; + if !worked { - return Err(Error::Uiaa(uiaainfo)); + return Err!(Uiaa(uiaainfo)); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); + .create(sender_user, sender_device, &uiaainfo, &json); + + return Err!(Uiaa(uiaainfo)); } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("Not json."))); } - services.users.remove_device(sender_user, &body.device_id)?; + services + .users + .remove_device(sender_user, &body.device_id) + .await; Ok(delete_device::v3::Response {}) } @@ -149,7 +160,9 @@ pub(crate) async fn delete_devices_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = services .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; + if !worked { return Err(Error::Uiaa(uiaainfo)); } @@ -158,14 +171,15 @@ pub(crate) async fn delete_devices_route( uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json); + return 
Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } for device_id in &body.devices { - services.users.remove_device(sender_user, device_id)?; + services.users.remove_device(sender_user, device_id).await; } Ok(delete_devices::v3::Response {}) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 602f876a..ea499545 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,6 +1,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{err, info, warn, Err, Error, Result}; +use conduit::{info, warn, Err, Error, Result}; +use futures::{StreamExt, TryFutureExt}; use ruma::{ api::{ client::{ @@ -18,7 +19,7 @@ use ruma::{ }, StateEventType, }, - uint, RoomId, ServerName, UInt, UserId, + uint, OwnedRoomId, RoomId, ServerName, UInt, UserId, }; use service::Services; @@ -119,16 +120,22 @@ pub(crate) async fn set_room_visibility_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services.rooms.metadata.exists(&body.room_id)? { + if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); } - if services.users.is_deactivated(sender_user).unwrap_or(false) && body.appservice_info.is_none() { + if services + .users + .is_deactivated(sender_user) + .await + .unwrap_or(false) + && body.appservice_info.is_none() + { return Err!(Request(Forbidden("Guests cannot publish to room directories"))); } - if !user_can_publish_room(&services, sender_user, &body.room_id)? { + if !user_can_publish_room(&services, sender_user, &body.room_id).await? 
{ return Err(Error::BadRequest( ErrorKind::forbidden(), "User is not allowed to publish this room", @@ -138,7 +145,7 @@ pub(crate) async fn set_room_visibility_route( match &body.visibility { room::Visibility::Public => { if services.globals.config.lockdown_public_room_directory - && !services.users.is_admin(sender_user)? + && !services.users.is_admin(sender_user).await && body.appservice_info.is_none() { info!( @@ -164,7 +171,7 @@ pub(crate) async fn set_room_visibility_route( )); } - services.rooms.directory.set_public(&body.room_id)?; + services.rooms.directory.set_public(&body.room_id); if services.globals.config.admin_room_notices { services @@ -174,7 +181,7 @@ pub(crate) async fn set_room_visibility_route( } info!("{sender_user} made {0} public to the room directory", body.room_id); }, - room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id)?, + room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id), _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -192,13 +199,13 @@ pub(crate) async fn set_room_visibility_route( pub(crate) async fn get_room_visibility_route( State(services): State, body: Ruma, ) -> Result { - if !services.rooms.metadata.exists(&body.room_id)? { + if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); } Ok(get_room_visibility::v3::Response { - visibility: if services.rooms.directory.is_public_room(&body.room_id)? 
{ + visibility: if services.rooms.directory.is_public_room(&body.room_id).await { room::Visibility::Public } else { room::Visibility::Private @@ -257,101 +264,41 @@ pub(crate) async fn get_public_rooms_filtered_helper( } } - let mut all_rooms: Vec<_> = services + let mut all_rooms: Vec = services .rooms .directory .public_rooms() - .map(|room_id| { - let room_id = room_id?; - - let chunk = PublicRoomsChunk { - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id)?, - name: services.rooms.state_accessor.get_name(&room_id)?, - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id)? - .unwrap_or_else(|| { - warn!("Room {} has no member count", room_id); - 0 - }) - .try_into() - .expect("user count should not be that big"), - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .unwrap_or(None), - world_readable: services.rooms.state_accessor.is_world_readable(&room_id)?, - guest_can_join: services - .rooms - .state_accessor - .guest_can_join(&room_id)?, - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id)? - .into_option() - .unwrap_or_default() - .url, - join_rule: services - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? - .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomJoinRulesEventContent| match c.join_rule { - JoinRule::Public => Some(PublicRoomJoinRule::Public), - JoinRule::Knock => Some(PublicRoomJoinRule::Knock), - _ => None, - }) - .map_err(|e| { - err!(Database(error!("Invalid room join rule event in database: {e}"))) - }) - }) - .transpose()? 
- .flatten() - .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?, - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id)?, - room_id, - }; - Ok(chunk) - }) - .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms - .filter(|chunk| { + .map(ToOwned::to_owned) + .then(|room_id| public_rooms_chunk(services, room_id)) + .filter_map(|chunk| async move { if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { - return true; + return Some(chunk); } } if let Some(topic) = &chunk.topic { if topic.to_lowercase().contains(&query) { - return true; + return Some(chunk); } } if let Some(canonical_alias) = &chunk.canonical_alias { if canonical_alias.as_str().to_lowercase().contains(&query) { - return true; + return Some(chunk); } } - false - } else { - // No search term - true + return None; } + + // No search term + Some(chunk) }) // We need to collect all, so we can sort by member count - .collect(); + .collect() + .await; all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); @@ -394,22 +341,23 @@ pub(crate) async fn get_public_rooms_filtered_helper( /// Check whether the user can publish to the room directory via power levels of /// room history visibility event or room creator -fn user_can_publish_room(services: &Services, user_id: &UserId, room_id: &RoomId) -> Result { - if let Some(event) = services +async fn user_can_publish_room(services: &Services, user_id: &UserId, room_id: &RoomId) -> Result { + if let Ok(event) = services .rooms .state_accessor - .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? 
+ .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") + .await { serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content).user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) }) - } else if let Some(event) = - services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")? + } else if let Ok(event) = services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await { Ok(event.sender == user_id) } else { @@ -419,3 +367,61 @@ fn user_can_publish_room(services: &Services, user_id: &UserId, room_id: &RoomId )); } } + +async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk { + PublicRoomsChunk { + canonical_alias: services + .rooms + .state_accessor + .get_canonical_alias(&room_id) + .await + .ok(), + name: services.rooms.state_accessor.get_name(&room_id).await.ok(), + num_joined_members: services + .rooms + .state_cache + .room_joined_count(&room_id) + .await + .unwrap_or(0) + .try_into() + .expect("joined count overflows ruma UInt"), + topic: services + .rooms + .state_accessor + .get_room_topic(&room_id) + .await + .ok(), + world_readable: services + .rooms + .state_accessor + .is_world_readable(&room_id) + .await, + guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, + avatar_url: services + .rooms + .state_accessor + .get_avatar(&room_id) + .await + .into_option() + .unwrap_or_default() + .url, + join_rule: services + .rooms + .state_accessor + .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") + .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { + JoinRule::Public => PublicRoomJoinRule::Public, + JoinRule::Knock => PublicRoomJoinRule::Knock, + _ => "invite".into(), + }) + .await + .unwrap_or_default(), + room_type: services + .rooms + 
.state_accessor + .get_room_type(&room_id) + .await + .ok(), + room_id, + } +} diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs index 8b2690c6..2a8ebb9c 100644 --- a/src/api/client/filter.rs +++ b/src/api/client/filter.rs @@ -1,10 +1,8 @@ use axum::extract::State; -use ruma::api::client::{ - error::ErrorKind, - filter::{create_filter, get_filter}, -}; +use conduit::err; +use ruma::api::client::filter::{create_filter, get_filter}; -use crate::{Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// @@ -15,11 +13,13 @@ pub(crate) async fn get_filter_route( State(services): State, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let Some(filter) = services.users.get_filter(sender_user, &body.filter_id)? else { - return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")); - }; - Ok(get_filter::v3::Response::new(filter)) + services + .users + .get_filter(sender_user, &body.filter_id) + .await + .map(get_filter::v3::Response::new) + .map_err(|_| err!(Request(NotFound("Filter not found.")))) } /// # `PUT /_matrix/client/r0/user/{userId}/filter` @@ -29,7 +29,8 @@ pub(crate) async fn create_filter_route( State(services): State, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(create_filter::v3::Response::new( - services.users.create_filter(sender_user, &body.filter)?, - )) + + let filter_id = services.users.create_filter(sender_user, &body.filter); + + Ok(create_filter::v3::Response::new(filter_id)) } diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index a426364a..abf2a22f 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -4,8 +4,8 @@ use std::{ }; use axum::extract::State; -use conduit::{utils, utils::math::continue_exponential_backoff_secs, Err, Error, Result}; -use futures_util::{stream::FuturesUnordered, StreamExt}; +use conduit::{err, 
utils, utils::math::continue_exponential_backoff_secs, Err, Error, Result}; +use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ client::{ @@ -21,7 +21,10 @@ use ruma::{ use serde_json::json; use super::SESSION_ID_LENGTH; -use crate::{service::Services, Ruma}; +use crate::{ + service::{users::parse_master_key, Services}, + Ruma, +}; /// # `POST /_matrix/client/r0/keys/upload` /// @@ -39,7 +42,8 @@ pub(crate) async fn upload_keys_route( for (key_key, key_value) in &body.one_time_keys { services .users - .add_one_time_key(sender_user, sender_device, key_key, key_value)?; + .add_one_time_key(sender_user, sender_device, key_key, key_value) + .await?; } if let Some(device_keys) = &body.device_keys { @@ -47,19 +51,22 @@ pub(crate) async fn upload_keys_route( // This check is needed to assure that signatures are kept if services .users - .get_device_keys(sender_user, sender_device)? - .is_none() + .get_device_keys(sender_user, sender_device) + .await + .is_err() { services .users - .add_device_keys(sender_user, sender_device, device_keys)?; + .add_device_keys(sender_user, sender_device, device_keys) + .await; } } Ok(upload_keys::v3::Response { one_time_key_counts: services .users - .count_one_time_keys(sender_user, sender_device)?, + .count_one_time_keys(sender_user, sender_device) + .await, }) } @@ -120,7 +127,9 @@ pub(crate) async fn upload_signing_keys_route( if let Some(auth) = &body.auth { let (worked, uiaainfo) = services .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; + if !worked { return Err(Error::Uiaa(uiaainfo)); } @@ -129,20 +138,24 @@ pub(crate) async fn upload_signing_keys_route( uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; + .create(sender_user, sender_device, &uiaainfo, &json); + return Err(Error::Uiaa(uiaainfo)); } else { return 
Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } if let Some(master_key) = &body.master_key { - services.users.add_cross_signing_keys( - sender_user, - master_key, - &body.self_signing_key, - &body.user_signing_key, - true, // notify so that other users see the new keys - )?; + services + .users + .add_cross_signing_keys( + sender_user, + master_key, + &body.self_signing_key, + &body.user_signing_key, + true, // notify so that other users see the new keys + ) + .await?; } Ok(upload_signing_keys::v3::Response {}) @@ -179,9 +192,11 @@ pub(crate) async fn upload_signatures_route( .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature value."))? .to_owned(), ); + services .users - .sign_key(user_id, key_id, signature, sender_user)?; + .sign_key(user_id, key_id, signature, sender_user) + .await?; } } } @@ -204,56 +219,51 @@ pub(crate) async fn get_key_changes_route( let mut device_list_updates = HashSet::new(); + let from = body + .from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?; + + let to = body + .to + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?; + device_list_updates.extend( services .users - .keys_changed( - sender_user.as_str(), - body.from - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, - Some( - body.to - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?, - ), - ) - .filter_map(Result::ok), + .keys_changed(sender_user.as_str(), from, Some(to)) + .map(ToOwned::to_owned) + .collect::>() + .await, ); - for room_id in services - .rooms - .state_cache - .rooms_joined(sender_user) - .filter_map(Result::ok) - { + let mut rooms_joined = services.rooms.state_cache.rooms_joined(sender_user).boxed(); + + while let Some(room_id) = rooms_joined.next().await { device_list_updates.extend( services .users - .keys_changed( - room_id.as_ref(), - body.from - .parse() - .map_err(|_| 
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, - Some( - body.to - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?, - ), - ) - .filter_map(Result::ok), + .keys_changed(room_id.as_ref(), from, Some(to)) + .map(ToOwned::to_owned) + .collect::>() + .await, ); } + Ok(get_key_changes::v3::Response { changed: device_list_updates.into_iter().collect(), left: Vec::new(), // TODO }) } -pub(crate) async fn get_keys_helper bool + Send>( +pub(crate) async fn get_keys_helper( services: &Services, sender_user: Option<&UserId>, device_keys_input: &BTreeMap>, allowed_signatures: F, include_display_names: bool, -) -> Result { +) -> Result +where + F: Fn(&UserId) -> bool + Send + Sync, +{ let mut master_keys = BTreeMap::new(); let mut self_signing_keys = BTreeMap::new(); let mut user_signing_keys = BTreeMap::new(); @@ -274,56 +284,60 @@ pub(crate) async fn get_keys_helper bool + Send>( if device_ids.is_empty() { let mut container = BTreeMap::new(); - for device_id in services.users.all_device_ids(user_id) { - let device_id = device_id?; - if let Some(mut keys) = services.users.get_device_keys(user_id, &device_id)? { + let mut devices = services.users.all_device_ids(user_id).boxed(); + + while let Some(device_id) = devices.next().await { + if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await { let metadata = services .users - .get_device_metadata(user_id, &device_id)? 
- .ok_or_else(|| Error::bad_database("all_device_keys contained nonexistent device."))?; + .get_device_metadata(user_id, device_id) + .await + .map_err(|_| err!(Database("all_device_keys contained nonexistent device.")))?; add_unsigned_device_display_name(&mut keys, metadata, include_display_names) - .map_err(|_| Error::bad_database("invalid device keys in database"))?; + .map_err(|_| err!(Database("invalid device keys in database")))?; - container.insert(device_id, keys); + container.insert(device_id.to_owned(), keys); } } + device_keys.insert(user_id.to_owned(), container); } else { for device_id in device_ids { let mut container = BTreeMap::new(); - if let Some(mut keys) = services.users.get_device_keys(user_id, device_id)? { + if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await { let metadata = services .users - .get_device_metadata(user_id, device_id)? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to get keys for nonexistent device.", - ))?; + .get_device_metadata(user_id, device_id) + .await + .map_err(|_| err!(Request(InvalidParam("Tried to get keys for nonexistent device."))))?; add_unsigned_device_display_name(&mut keys, metadata, include_display_names) - .map_err(|_| Error::bad_database("invalid device keys in database"))?; + .map_err(|_| err!(Database("invalid device keys in database")))?; + container.insert(device_id.to_owned(), keys); } + device_keys.insert(user_id.to_owned(), container); } } - if let Some(master_key) = services + if let Ok(master_key) = services .users - .get_master_key(sender_user, user_id, &allowed_signatures)? + .get_master_key(sender_user, user_id, &allowed_signatures) + .await { master_keys.insert(user_id.to_owned(), master_key); } - if let Some(self_signing_key) = - services - .users - .get_self_signing_key(sender_user, user_id, &allowed_signatures)? 
+ if let Ok(self_signing_key) = services + .users + .get_self_signing_key(sender_user, user_id, &allowed_signatures) + .await { self_signing_keys.insert(user_id.to_owned(), self_signing_key); } if Some(user_id) == sender_user { - if let Some(user_signing_key) = services.users.get_user_signing_key(user_id)? { + if let Ok(user_signing_key) = services.users.get_user_signing_key(user_id).await { user_signing_keys.insert(user_id.to_owned(), user_signing_key); } } @@ -386,23 +400,26 @@ pub(crate) async fn get_keys_helper bool + Send>( while let Some((server, response)) = futures.next().await { if let Ok(Ok(response)) = response { for (user, masterkey) in response.master_keys { - let (master_key_id, mut master_key) = services.users.parse_master_key(&user, &masterkey)?; + let (master_key_id, mut master_key) = parse_master_key(&user, &masterkey)?; - if let Some(our_master_key) = - services - .users - .get_key(&master_key_id, sender_user, &user, &allowed_signatures)? + if let Ok(our_master_key) = services + .users + .get_key(&master_key_id, sender_user, &user, &allowed_signatures) + .await { - let (_, our_master_key) = services.users.parse_master_key(&user, &our_master_key)?; + let (_, our_master_key) = parse_master_key(&user, &our_master_key)?; master_key.signatures.extend(our_master_key.signatures); } let json = serde_json::to_value(master_key).expect("to_value always works"); let raw = serde_json::from_value(json).expect("Raw::from_value always works"); - services.users.add_cross_signing_keys( - &user, &raw, &None, &None, - false, /* Dont notify. A notification would trigger another key request resulting in an - * endless loop */ - )?; + services + .users + .add_cross_signing_keys( + &user, &raw, &None, &None, + false, /* Dont notify. 
A notification would trigger another key request resulting in an + * endless loop */ + ) + .await?; master_keys.insert(user.clone(), raw); } @@ -465,9 +482,10 @@ pub(crate) async fn claim_keys_helper( let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { - if let Some(one_time_keys) = services + if let Ok(one_time_keys) = services .users - .take_one_time_key(user_id, device_id, key_algorithm)? + .take_one_time_key(user_id, device_id, key_algorithm) + .await { let mut c = BTreeMap::new(); c.insert(one_time_keys.0, one_time_keys.1); diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 470db669..5a5d436f 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -11,9 +11,10 @@ use conduit::{ debug, debug_error, debug_warn, err, error, info, pdu::{gen_event_id_canonical_json, PduBuilder}, trace, utils, - utils::math::continue_exponential_backoff_secs, + utils::{math::continue_exponential_backoff_secs, IterStream, ReadyExt}, warn, Err, Error, PduEvent, Result, }; +use futures::{FutureExt, StreamExt}; use ruma::{ api::{ client::{ @@ -55,9 +56,9 @@ async fn banned_room_check( services: &Services, user_id: &UserId, room_id: Option<&RoomId>, server_name: Option<&ServerName>, client_ip: IpAddr, ) -> Result<()> { - if !services.users.is_admin(user_id)? { + if !services.users.is_admin(user_id).await { if let Some(room_id) = room_id { - if services.rooms.metadata.is_banned(room_id)? 
+ if services.rooms.metadata.is_banned(room_id).await || services .globals .config @@ -79,23 +80,22 @@ async fn banned_room_check( "Automatically deactivating user {user_id} due to attempted banned room join from IP \ {client_ip}" ))) - .await; + .await + .ok(); } let all_joined_rooms: Vec = services .rooms .state_cache .rooms_joined(user_id) - .filter_map(Result::ok) - .collect(); + .map(Into::into) + .collect() + .await; - full_user_deactivate(services, user_id, all_joined_rooms).await?; + full_user_deactivate(services, user_id, &all_joined_rooms).await?; } - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "This room is banned on this homeserver.", - )); + return Err!(Request(Forbidden("This room is banned on this homeserver."))); } } else if let Some(server_name) = server_name { if services @@ -119,23 +119,22 @@ async fn banned_room_check( "Automatically deactivating user {user_id} due to attempted banned room join from IP \ {client_ip}" ))) - .await; + .await + .ok(); } let all_joined_rooms: Vec = services .rooms .state_cache .rooms_joined(user_id) - .filter_map(Result::ok) - .collect(); + .map(Into::into) + .collect() + .await; - full_user_deactivate(services, user_id, all_joined_rooms).await?; + full_user_deactivate(services, user_id, &all_joined_rooms).await?; } - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "This remote server is banned on this homeserver.", - )); + return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); } } } @@ -172,14 +171,16 @@ pub(crate) async fn join_room_by_id_route( .rooms .state_cache .servers_invite_via(&body.room_id) - .filter_map(Result::ok) - .collect::>(); + .map(ToOwned::to_owned) + .collect::>() + .await; servers.extend( services .rooms .state_cache - .invite_state(sender_user, &body.room_id)? 
+ .invite_state(sender_user, &body.room_id) + .await .unwrap_or_default() .iter() .filter_map(|event| serde_json::from_str(event.json().get()).ok()) @@ -202,6 +203,7 @@ pub(crate) async fn join_room_by_id_route( body.third_party_signed.as_ref(), &body.appservice_info, ) + .boxed() .await } @@ -233,14 +235,17 @@ pub(crate) async fn join_room_by_id_or_alias_route( .rooms .state_cache .servers_invite_via(&room_id) - .filter_map(Result::ok), + .map(ToOwned::to_owned) + .collect::>() + .await, ); servers.extend( services .rooms .state_cache - .invite_state(sender_user, &room_id)? + .invite_state(sender_user, &room_id) + .await .unwrap_or_default() .iter() .filter_map(|event| serde_json::from_str(event.json().get()).ok()) @@ -270,19 +275,23 @@ pub(crate) async fn join_room_by_id_or_alias_route( if let Some(pre_servers) = &mut pre_servers { servers.append(pre_servers); } + servers.extend( services .rooms .state_cache .servers_invite_via(&room_id) - .filter_map(Result::ok), + .map(ToOwned::to_owned) + .collect::>() + .await, ); servers.extend( services .rooms .state_cache - .invite_state(sender_user, &room_id)? + .invite_state(sender_user, &room_id) + .await .unwrap_or_default() .iter() .filter_map(|event| serde_json::from_str(event.json().get()).ok()) @@ -305,6 +314,7 @@ pub(crate) async fn join_room_by_id_or_alias_route( body.third_party_signed.as_ref(), appservice_info, ) + .boxed() .await?; Ok(join_room_by_id_or_alias::v3::Response { @@ -337,7 +347,7 @@ pub(crate) async fn invite_user_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services.users.is_admin(sender_user)? 
&& services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { info!( "User {sender_user} is not an admin and attempted to send an invite to room {}", &body.room_id @@ -375,15 +385,13 @@ pub(crate) async fn kick_user_route( services .rooms .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot kick member that's not in the room.", - ))? + .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .await + .map_err(|_| err!(Request(BadState("Cannot kick member that's not in the room."))))? .content .get(), ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + .map_err(|_| err!(Database("Invalid member event in database.")))?; event.membership = MembershipState::Leave; event.reason.clone_from(&body.reason); @@ -421,10 +429,13 @@ pub(crate) async fn ban_user_route( let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + let blurhash = services.users.blurhash(&body.user_id).await.ok(); + let event = services .rooms .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref())? 
+ .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .await .map_or( Ok(RoomMemberEventContent { membership: MembershipState::Ban, @@ -432,7 +443,7 @@ pub(crate) async fn ban_user_route( avatar_url: None, is_direct: None, third_party_invite: None, - blurhash: services.users.blurhash(&body.user_id).unwrap_or_default(), + blurhash: blurhash.clone(), reason: body.reason.clone(), join_authorized_via_users_server: None, }), @@ -442,12 +453,12 @@ pub(crate) async fn ban_user_route( membership: MembershipState::Ban, displayname: None, avatar_url: None, - blurhash: services.users.blurhash(&body.user_id).unwrap_or_default(), + blurhash: blurhash.clone(), reason: body.reason.clone(), join_authorized_via_users_server: None, ..event }) - .map_err(|_| Error::bad_database("Invalid member event in database.")) + .map_err(|e| err!(Database("Invalid member event in database: {e:?}"))) }, )?; @@ -488,12 +499,13 @@ pub(crate) async fn unban_user_route( services .rooms .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref())? - .ok_or(Error::BadRequest(ErrorKind::BadState, "Cannot unban a user who is not banned."))? + .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .await + .map_err(|_| err!(Request(BadState("Cannot unban a user who is not banned."))))? .content .get(), ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + .map_err(|e| err!(Database("Invalid member event in database: {e:?}")))?; event.membership = MembershipState::Leave; event.reason.clone_from(&body.reason); @@ -539,18 +551,16 @@ pub(crate) async fn forget_room_route( if services .rooms .state_cache - .is_joined(sender_user, &body.room_id)? 
+ .is_joined(sender_user, &body.room_id) + .await { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "You must leave the room before forgetting it", - )); + return Err!(Request(Unknown("You must leave the room before forgetting it"))); } services .rooms .state_cache - .forget(&body.room_id, sender_user)?; + .forget(&body.room_id, sender_user); Ok(forget_room::v3::Response::new()) } @@ -568,8 +578,9 @@ pub(crate) async fn joined_rooms_route( .rooms .state_cache .rooms_joined(sender_user) - .filter_map(Result::ok) - .collect(), + .map(ToOwned::to_owned) + .collect() + .await, }) } @@ -587,12 +598,10 @@ pub(crate) async fn get_member_events_route( if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id) + .await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); + return Err!(Request(Forbidden("You don't have permission to view this room."))); } Ok(get_member_events::v3::Response { @@ -622,30 +631,27 @@ pub(crate) async fn joined_members_route( if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id)? 
+ .user_can_see_state_events(sender_user, &body.room_id) + .await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); + return Err!(Request(Forbidden("You don't have permission to view this room."))); } let joined: BTreeMap = services .rooms .state_cache .room_members(&body.room_id) - .filter_map(|user| { - let user = user.ok()?; - - Some(( - user.clone(), + .then(|user| async move { + ( + user.to_owned(), RoomMember { - display_name: services.users.displayname(&user).unwrap_or_default(), - avatar_url: services.users.avatar_url(&user).unwrap_or_default(), + display_name: services.users.displayname(user).await.ok(), + avatar_url: services.users.avatar_url(user).await.ok(), }, - )) + ) }) - .collect(); + .collect() + .await; Ok(joined_members::v3::Response { joined, @@ -658,13 +664,23 @@ pub async fn join_room_by_id_helper( ) -> Result { let state_lock = services.rooms.state.mutex.lock(room_id).await; - let user_is_guest = services.users.is_deactivated(sender_user).unwrap_or(false) && appservice_info.is_none(); + let user_is_guest = services + .users + .is_deactivated(sender_user) + .await + .unwrap_or(false) + && appservice_info.is_none(); - if matches!(services.rooms.state_accessor.guest_can_join(room_id), Ok(false)) && user_is_guest { + if user_is_guest && !services.rooms.state_accessor.guest_can_join(room_id).await { return Err!(Request(Forbidden("Guests are not allowed to join this room"))); } - if matches!(services.rooms.state_cache.is_joined(sender_user, room_id), Ok(true)) { + if services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { debug_warn!("{sender_user} is already joined in {room_id}"); return Ok(join_room_by_id::v3::Response { room_id: room_id.into(), @@ -674,15 +690,17 @@ pub async fn join_room_by_id_helper( if services .rooms .state_cache - .server_in_room(services.globals.server_name(), room_id)? 
- || servers.is_empty() + .server_in_room(services.globals.server_name(), room_id) + .await || servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])) { join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, third_party_signed, state_lock) + .boxed() .await } else { // Ask a remote server if we are not participating in this room join_room_by_id_helper_remote(services, sender_user, room_id, reason, servers, third_party_signed, state_lock) + .boxed() .await } } @@ -739,11 +757,11 @@ async fn join_room_by_id_helper_remote( "content".to_owned(), to_canonical_value(RoomMemberEventContent { membership: MembershipState::Join, - displayname: services.users.displayname(sender_user)?, - avatar_url: services.users.avatar_url(sender_user)?, + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), is_direct: None, third_party_invite: None, - blurhash: services.users.blurhash(sender_user)?, + blurhash: services.users.blurhash(sender_user).await.ok(), reason, join_authorized_via_users_server: join_authorized_via_users_server.clone(), }) @@ -791,10 +809,11 @@ async fn join_room_by_id_helper_remote( federation::membership::create_join_event::v2::Request { room_id: room_id.to_owned(), event_id: event_id.to_owned(), + omit_members: false, pdu: services .sending - .convert_to_outgoing_federation_event(join_event.clone()), - omit_members: false, + .convert_to_outgoing_federation_event(join_event.clone()) + .await, }, ) .await?; @@ -864,7 +883,11 @@ async fn join_room_by_id_helper_remote( } } - services.rooms.short.get_or_create_shortroomid(room_id)?; + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; info!("Parsing join event"); let parsed_join_pdu = PduEvent::from_id_val(event_id, join_event.clone()) @@ -895,12 +918,13 @@ async fn join_room_by_id_helper_remote( err!(BadServerResponse("Invalid PDU in send_join response: {e:?}")) 
})?; - services.rooms.outlier.add_pdu_outlier(&event_id, &value)?; + services.rooms.outlier.add_pdu_outlier(&event_id, &value); if let Some(state_key) = &pdu.state_key { let shortstatekey = services .rooms .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)?; + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) + .await; state.insert(shortstatekey, pdu.event_id.clone()); } } @@ -916,50 +940,53 @@ async fn join_room_by_id_helper_remote( continue; }; - services.rooms.outlier.add_pdu_outlier(&event_id, &value)?; + services.rooms.outlier.add_pdu_outlier(&event_id, &value); } debug!("Running send_join auth check"); + let fetch_state = &state; + let state_fetch = |k: &'static StateEventType, s: String| async move { + let shortstatekey = services.rooms.short.get_shortstatekey(k, &s).await.ok()?; + + let event_id = fetch_state.get(&shortstatekey)?; + services.rooms.timeline.get_pdu(event_id).await.ok() + }; let auth_check = state_res::event_auth::auth_check( &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), &parsed_join_pdu, - None::, // TODO: third party invite - |k, s| { - services - .rooms - .timeline - .get_pdu( - state.get( - &services - .rooms - .short - .get_or_create_shortstatekey(&k.to_string().into(), s) - .ok()?, - )?, - ) - .ok()? 
- }, + None, // TODO: third party invite + |k, s| state_fetch(k, s.to_owned()), ) - .map_err(|e| { - warn!("Auth check failed: {e}"); - Error::BadRequest(ErrorKind::forbidden(), "Auth check failed") - })?; + .await + .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; if !auth_check { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Auth check failed")); + return Err!(Request(Forbidden("Auth check failed"))); } info!("Saving state from send_join"); - let (statehash_before_join, new, removed) = services.rooms.state_compressor.save_state( - room_id, - Arc::new( - state - .into_iter() - .map(|(k, id)| services.rooms.state_compressor.compress_state_event(k, &id)) - .collect::>()?, - ), - )?; + let (statehash_before_join, new, removed) = services + .rooms + .state_compressor + .save_state( + room_id, + Arc::new( + state + .into_iter() + .stream() + .then(|(k, id)| async move { + services + .rooms + .state_compressor + .compress_state_event(k, &id) + .await + }) + .collect() + .await, + ), + ) + .await?; services .rooms @@ -968,12 +995,20 @@ async fn join_room_by_id_helper_remote( .await?; info!("Updating joined counts for new room"); - services.rooms.state_cache.update_joined_count(room_id)?; + services + .rooms + .state_cache + .update_joined_count(room_id) + .await; // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. 
- let statehash_after_join = services.rooms.state.append_to_state(&parsed_join_pdu)?; + let statehash_after_join = services + .rooms + .state + .append_to_state(&parsed_join_pdu) + .await?; info!("Appending new room join event"); services @@ -993,7 +1028,7 @@ async fn join_room_by_id_helper_remote( services .rooms .state - .set_room_state(room_id, statehash_after_join, &state_lock)?; + .set_room_state(room_id, statehash_after_join, &state_lock); Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } @@ -1005,23 +1040,15 @@ async fn join_room_by_id_helper_local( ) -> Result { debug!("We can join locally"); - let join_rules_event = services + let join_rules_event_content = services .rooms .state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; - - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") - }) - }) - .transpose()?; + .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") + .await + .map(|content: RoomJoinRulesEventContent| content); let restriction_rooms = match join_rules_event_content { - Some(RoomJoinRulesEventContent { + Ok(RoomJoinRulesEventContent { join_rule: JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted), }) => restricted .allow @@ -1034,29 +1061,34 @@ async fn join_room_by_id_helper_local( _ => Vec::new(), }; - let local_members = services + let local_members: Vec<_> = services .rooms .state_cache .room_members(room_id) - .filter_map(Result::ok) - .filter(|user| services.globals.user_is_local(user)) - .collect::>(); + .ready_filter(|user| services.globals.user_is_local(user)) + .map(ToOwned::to_owned) + .collect() + .await; let mut join_authorized_via_users_server: Option = None; - if restriction_rooms.iter().any(|restriction_room_id| { - services - .rooms - 
.state_cache - .is_joined(sender_user, restriction_room_id) - .unwrap_or(false) - }) { + if restriction_rooms + .iter() + .stream() + .any(|restriction_room_id| { + services + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + }) + .await + { for user in local_members { if services .rooms .state_accessor .user_can_invite(room_id, &user, sender_user, &state_lock) - .unwrap_or(false) + .await { join_authorized_via_users_server = Some(user); break; @@ -1066,11 +1098,11 @@ async fn join_room_by_id_helper_local( let event = RoomMemberEventContent { membership: MembershipState::Join, - displayname: services.users.displayname(sender_user)?, - avatar_url: services.users.avatar_url(sender_user)?, + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), is_direct: None, third_party_invite: None, - blurhash: services.users.blurhash(sender_user)?, + blurhash: services.users.blurhash(sender_user).await.ok(), reason: reason.clone(), join_authorized_via_users_server, }; @@ -1144,11 +1176,11 @@ async fn join_room_by_id_helper_local( "content".to_owned(), to_canonical_value(RoomMemberEventContent { membership: MembershipState::Join, - displayname: services.users.displayname(sender_user)?, - avatar_url: services.users.avatar_url(sender_user)?, + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), is_direct: None, third_party_invite: None, - blurhash: services.users.blurhash(sender_user)?, + blurhash: services.users.blurhash(sender_user).await.ok(), reason, join_authorized_via_users_server, }) @@ -1195,10 +1227,11 @@ async fn join_room_by_id_helper_local( federation::membership::create_join_event::v2::Request { room_id: room_id.to_owned(), event_id: event_id.to_owned(), + omit_members: false, pdu: services .sending - .convert_to_outgoing_federation_event(join_event.clone()), - omit_members: false, + 
.convert_to_outgoing_federation_event(join_event.clone()) + .await, }, ) .await?; @@ -1369,7 +1402,7 @@ pub(crate) async fn invite_helper( services: &Services, sender_user: &UserId, user_id: &UserId, room_id: &RoomId, reason: Option, is_direct: bool, ) -> Result<()> { - if !services.users.is_admin(user_id)? && services.globals.block_non_admin_invites() { + if !services.users.is_admin(user_id).await && services.globals.block_non_admin_invites() { info!("User {sender_user} is not an admin and attempted to send an invite to room {room_id}"); return Err(Error::BadRequest( ErrorKind::forbidden(), @@ -1381,7 +1414,7 @@ pub(crate) async fn invite_helper( let (pdu, pdu_json, invite_room_state) = { let state_lock = services.rooms.state.mutex.lock(room_id).await; let content = to_raw_value(&RoomMemberEventContent { - avatar_url: services.users.avatar_url(user_id)?, + avatar_url: services.users.avatar_url(user_id).await.ok(), displayname: None, is_direct: Some(is_direct), membership: MembershipState::Invite, @@ -1392,28 +1425,32 @@ pub(crate) async fn invite_helper( }) .expect("member event is valid value"); - let (pdu, pdu_json) = services.rooms.timeline.create_hash_and_sign_event( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }, - sender_user, - room_id, - &state_lock, - )?; + let (pdu, pdu_json) = services + .rooms + .timeline + .create_hash_and_sign_event( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + timestamp: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await?; - let invite_room_state = services.rooms.state.calculate_invite_state(&pdu)?; + let invite_room_state = services.rooms.state.calculate_invite_state(&pdu).await?; drop(state_lock); (pdu, pdu_json, invite_room_state) }; - let room_version_id = 
services.rooms.state.get_room_version(room_id)?; + let room_version_id = services.rooms.state.get_room_version(room_id).await?; let response = services .sending @@ -1425,9 +1462,15 @@ pub(crate) async fn invite_helper( room_version: room_version_id.clone(), event: services .sending - .convert_to_outgoing_federation_event(pdu_json.clone()), + .convert_to_outgoing_federation_event(pdu_json.clone()) + .await, invite_room_state, - via: services.rooms.state_cache.servers_route_via(room_id).ok(), + via: services + .rooms + .state_cache + .servers_route_via(room_id) + .await + .ok(), }, ) .await?; @@ -1478,11 +1521,16 @@ pub(crate) async fn invite_helper( "Could not accept incoming PDU as timeline event.", ))?; - services.sending.send_pdu_room(room_id, &pdu_id)?; + services.sending.send_pdu_room(room_id, &pdu_id).await?; return Ok(()); } - if !services.rooms.state_cache.is_joined(sender_user, room_id)? { + if !services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { return Err(Error::BadRequest( ErrorKind::forbidden(), "You don't have permission to view this room.", @@ -1499,11 +1547,11 @@ pub(crate) async fn invite_helper( event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, - displayname: services.users.displayname(user_id)?, - avatar_url: services.users.avatar_url(user_id)?, + displayname: services.users.displayname(user_id).await.ok(), + avatar_url: services.users.avatar_url(user_id).await.ok(), is_direct: Some(is_direct), third_party_invite: None, - blurhash: services.users.blurhash(user_id)?, + blurhash: services.users.blurhash(user_id).await.ok(), reason, join_authorized_via_users_server: None, }) @@ -1531,36 +1579,37 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { .rooms .state_cache .rooms_joined(user_id) + .map(ToOwned::to_owned) .chain( services .rooms .state_cache .rooms_invited(user_id) - .map(|t| t.map(|(r, _)| r)), + .map(|(r, _)| r), 
) - .collect::>(); + .collect::>() + .await; for room_id in all_rooms { - let Ok(room_id) = room_id else { - continue; - }; - // ignore errors if let Err(e) = leave_room(services, user_id, &room_id, None).await { warn!(%room_id, %user_id, %e, "Failed to leave room"); } - if let Err(e) = services.rooms.state_cache.forget(&room_id, user_id) { - warn!(%room_id, %user_id, %e, "Failed to forget room"); - } + + services.rooms.state_cache.forget(&room_id, user_id); } } pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { + //use conduit::utils::stream::OptionStream; + use futures::TryFutureExt; + // Ask a remote server if we don't have this room if !services .rooms .state_cache - .server_in_room(services.globals.server_name(), room_id)? + .server_in_room(services.globals.server_name(), room_id) + .await { if let Err(e) = remote_leave_room(services, user_id, room_id).await { warn!("Failed to leave room {} remotely: {}", user_id, e); @@ -1570,34 +1619,42 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, let last_state = services .rooms .state_cache - .invite_state(user_id, room_id)? 
- .map_or_else(|| services.rooms.state_cache.left_state(user_id, room_id), |s| Ok(Some(s)))?; + .invite_state(user_id, room_id) + .map_err(|_| services.rooms.state_cache.left_state(user_id, room_id)) + .await + .ok(); // We always drop the invite, we can't rely on other servers - services.rooms.state_cache.update_membership( - room_id, - user_id, - RoomMemberEventContent::new(MembershipState::Leave), - user_id, - last_state, - None, - true, - )?; + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + RoomMemberEventContent::new(MembershipState::Leave), + user_id, + last_state, + None, + true, + ) + .await?; } else { let state_lock = services.rooms.state.mutex.lock(room_id).await; - let member_event = - services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?; + let member_event = services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) + .await; // Fix for broken rooms - let member_event = match member_event { - None => { - error!("Trying to leave a room you are not a member of."); + let Ok(member_event) = member_event else { + error!("Trying to leave a room you are not a member of."); - services.rooms.state_cache.update_membership( + services + .rooms + .state_cache + .update_membership( room_id, user_id, RoomMemberEventContent::new(MembershipState::Leave), @@ -1605,16 +1662,14 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, None, None, true, - )?; - return Ok(()); - }, - Some(e) => e, + ) + .await?; + + return Ok(()); }; - let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get()).map_err(|e| { - error!("Invalid room member event in database: {}", e); - Error::bad_database("Invalid member event in database.") - })?; + let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get()) + .map_err(|e| err!(Database(error!("Invalid room member event 
in database: {e}"))))?; event.membership = MembershipState::Leave; event.reason = reason; @@ -1647,15 +1702,17 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room let invite_state = services .rooms .state_cache - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest(ErrorKind::BadState, "User is not invited."))?; + .invite_state(user_id, room_id) + .await + .map_err(|_| err!(Request(BadState("User is not invited."))))?; let mut servers: HashSet = services .rooms .state_cache .servers_invite_via(room_id) - .filter_map(Result::ok) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; servers.extend( invite_state @@ -1760,7 +1817,8 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room event_id, pdu: services .sending - .convert_to_outgoing_federation_event(leave_event.clone()), + .convert_to_outgoing_federation_event(leave_event.clone()) + .await, }, ) .await?; diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 51aee8c1..bab5fa54 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,7 +1,8 @@ use std::collections::{BTreeMap, HashSet}; use axum::extract::State; -use conduit::PduCount; +use conduit::{err, utils::ReadyExt, Err, PduCount}; +use futures::{FutureExt, StreamExt}; use ruma::{ api::client::{ error::ErrorKind, @@ -9,13 +10,14 @@ use ruma::{ message::{get_message_events, send_message_event}, }, events::{MessageLikeEventType, StateEventType}, - RoomId, UserId, + UserId, }; use serde_json::{from_str, Value}; +use service::rooms::timeline::PdusIterItem; use crate::{ service::{pdu::PduBuilder, Services}, - utils, Error, PduEvent, Result, Ruma, + utils, Error, Result, Ruma, }; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` @@ -30,79 +32,78 @@ use crate::{ pub(crate) async fn send_message_event_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is 
authenticated"); + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + let appservice_info = body.appservice_info.as_ref(); // Forbid m.room.encrypted if encryption is disabled if MessageLikeEventType::RoomEncrypted == body.event_type && !services.globals.allow_encryption() { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Encryption has been disabled")); + return Err!(Request(Forbidden("Encryption has been disabled"))); } - if body.event_type == MessageLikeEventType::CallInvite && services.rooms.directory.is_public_room(&body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Room call invites are not allowed in public rooms", - )); + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + if body.event_type == MessageLikeEventType::CallInvite + && services.rooms.directory.is_public_room(&body.room_id).await + { + return Err!(Request(Forbidden("Room call invites are not allowed in public rooms"))); } // Check if this is a new transaction id - if let Some(response) = services + if let Ok(response) = services .transaction_ids - .existing_txnid(sender_user, sender_device, &body.txn_id)? + .existing_txnid(sender_user, sender_device, &body.txn_id) + .await { // The client might have sent a txnid of the /sendToDevice endpoint // This txnid has no response associated with it if response.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to use txn id already used for an incompatible endpoint.", - )); + return Err!(Request(InvalidParam( + "Tried to use txn id already used for an incompatible endpoint." + ))); } - let event_id = utils::string_from_bytes(&response) - .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; return Ok(send_message_event::v3::Response { - event_id, + event_id: utils::string_from_bytes(&response) + .map(TryInto::try_into) + .map_err(|e| err!(Database("Invalid event_id in txnid data: {e:?}")))??, }); } let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); + let content = from_str(body.body.body.json().get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; + let event_id = services .rooms .timeline .build_and_append_pdu( PduBuilder { event_type: body.event_type.to_string().into(), - content: from_str(body.body.body.json().get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + content, unsigned: Some(unsigned), state_key: None, redacts: None, - timestamp: if body.appservice_info.is_some() { - body.timestamp - } else { - None - }, + timestamp: appservice_info.and(body.timestamp), }, sender_user, &body.room_id, &state_lock, ) - .await?; + .await + .map(|event_id| (*event_id).to_owned())?; services .transaction_ids - .add_txnid(sender_user, sender_device, &body.txn_id, event_id.as_bytes())?; + .add_txnid(sender_user, sender_device, &body.txn_id, event_id.as_bytes()); drop(state_lock); - Ok(send_message_event::v3::Response::new((*event_id).to_owned())) + Ok(send_message_event::v3::Response { + event_id, + }) } /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` @@ -117,8 +118,12 @@ pub(crate) async fn get_message_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, + let room_id = &body.room_id; + let filter = &body.filter; + + let limit = usize::try_from(body.limit).unwrap_or(10).min(100); + let from = match body.from.as_ref() { + Some(from) => 
PduCount::try_from_string(from)?, None => match body.dir { ruma::api::Direction::Forward => PduCount::min(), ruma::api::Direction::Backward => PduCount::max(), @@ -133,30 +138,25 @@ pub(crate) async fn get_message_events_route( services .rooms .lazy_loading - .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from) - .await?; - - let limit = usize::try_from(body.limit).unwrap_or(10).min(100); - - let next_token; + .lazy_load_confirm_delivery(sender_user, sender_device, room_id, from); let mut resp = get_message_events::v3::Response::new(); - let mut lazy_loaded = HashSet::new(); - + let next_token; match body.dir { ruma::api::Direction::Forward => { - let events_after: Vec<_> = services + let events_after: Vec = services .rooms .timeline - .pdus_after(sender_user, &body.room_id, from)? - .filter_map(Result::ok) // Filter out buggy events - .filter(|(_, pdu)| { contains_url_filter(pdu, &body.filter) && visibility_filter(&services, pdu, sender_user, &body.room_id) - - }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .pdus_after(sender_user, room_id, from) + .await? + .ready_filter_map(|item| contains_url_filter(item, filter)) + .filter_map(|item| visibility_filter(&services, item, sender_user)) + .ready_take_while(|(count, _)| Some(*count) != to) // Stop at `to` .take(limit) - .collect(); + .collect() + .boxed() + .await; for (_, event) in &events_after { /* TODO: Remove the not "element_hacks" check when these are resolved: @@ -164,16 +164,18 @@ pub(crate) async fn get_message_events_route( * https://github.com/vector-im/element-web/issues/21034 */ if !cfg!(feature = "element_hacks") - && !services.rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - &body.room_id, - &event.sender, - )? 
{ + && !services + .rooms + .lazy_loading + .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) + .await + { lazy_loaded.insert(event.sender.clone()); } - lazy_loaded.insert(event.sender.clone()); + if cfg!(features = "element_hacks") { + lazy_loaded.insert(event.sender.clone()); + } } next_token = events_after.last().map(|(count, _)| count).copied(); @@ -191,17 +193,22 @@ pub(crate) async fn get_message_events_route( services .rooms .timeline - .backfill_if_required(&body.room_id, from) + .backfill_if_required(room_id, from) + .boxed() .await?; - let events_before: Vec<_> = services + + let events_before: Vec = services .rooms .timeline - .pdus_until(sender_user, &body.room_id, from)? - .filter_map(Result::ok) // Filter out buggy events - .filter(|(_, pdu)| {contains_url_filter(pdu, &body.filter) && visibility_filter(&services, pdu, sender_user, &body.room_id)}) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .pdus_until(sender_user, room_id, from) + .await? + .ready_filter_map(|item| contains_url_filter(item, filter)) + .filter_map(|item| visibility_filter(&services, item, sender_user)) + .ready_take_while(|(count, _)| Some(*count) != to) // Stop at `to` .take(limit) - .collect(); + .collect() + .boxed() + .await; for (_, event) in &events_before { /* TODO: Remove the not "element_hacks" check when these are resolved: @@ -209,16 +216,18 @@ pub(crate) async fn get_message_events_route( * https://github.com/vector-im/element-web/issues/21034 */ if !cfg!(feature = "element_hacks") - && !services.rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - &body.room_id, - &event.sender, - )? 
{ + && !services + .rooms + .lazy_loading + .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) + .await + { lazy_loaded.insert(event.sender.clone()); } - lazy_loaded.insert(event.sender.clone()); + if cfg!(features = "element_hacks") { + lazy_loaded.insert(event.sender.clone()); + } } next_token = events_before.last().map(|(count, _)| count).copied(); @@ -236,11 +245,11 @@ pub(crate) async fn get_message_events_route( resp.state = Vec::new(); for ll_id in &lazy_loaded { - if let Some(member_event) = - services - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? + if let Ok(member_event) = services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, ll_id.as_str()) + .await { resp.state.push(member_event.to_state_event()); } @@ -249,34 +258,43 @@ pub(crate) async fn get_message_events_route( // remove the feature check when we are sure clients like element can handle it if !cfg!(feature = "element_hacks") { if let Some(next_token) = next_token { - services - .rooms - .lazy_loading - .lazy_load_mark_sent(sender_user, sender_device, &body.room_id, lazy_loaded, next_token) - .await; + services.rooms.lazy_loading.lazy_load_mark_sent( + sender_user, + sender_device, + room_id, + lazy_loaded, + next_token, + ); } } Ok(resp) } -fn visibility_filter(services: &Services, pdu: &PduEvent, user_id: &UserId, room_id: &RoomId) -> bool { +async fn visibility_filter(services: &Services, item: PdusIterItem, user_id: &UserId) -> Option { + let (_, pdu) = &item; + services .rooms .state_accessor - .user_can_see_event(user_id, room_id, &pdu.event_id) - .unwrap_or(false) + .user_can_see_event(user_id, &pdu.room_id, &pdu.event_id) + .await + .then_some(item) } -fn contains_url_filter(pdu: &PduEvent, filter: &RoomEventFilter) -> bool { +fn contains_url_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { + let (_, pdu) = &item; + if filter.url_filter.is_none() { - 
return true; + return Some(item); } let content: Value = from_str(pdu.content.get()).unwrap(); - match filter.url_filter { + let res = match filter.url_filter { Some(UrlFilter::EventsWithoutUrl) => !content["url"].is_string(), Some(UrlFilter::EventsWithUrl) => content["url"].is_string(), None => true, - } + }; + + res.then_some(item) } diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index 8384d5ac..ba48808b 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -28,7 +28,8 @@ pub(crate) async fn set_presence_route( services .presence - .set_presence(sender_user, &body.presence, None, None, body.status_msg.clone())?; + .set_presence(sender_user, &body.presence, None, None, body.status_msg.clone()) + .await?; Ok(set_presence::v3::Response {}) } @@ -49,14 +50,15 @@ pub(crate) async fn get_presence_route( let mut presence_event = None; - for _room_id in services + let has_shared_rooms = services .rooms .user - .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? - { - if let Some(presence) = services.presence.get_presence(&body.user_id)? 
{ + .has_shared_rooms(sender_user, &body.user_id) + .await; + + if has_shared_rooms { + if let Ok(presence) = services.presence.get_presence(&body.user_id).await { presence_event = Some(presence); - break; } } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index bf47a3f8..495bc8ec 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -1,5 +1,10 @@ use axum::extract::State; -use conduit::{pdu::PduBuilder, warn, Err, Error, Result}; +use conduit::{ + pdu::PduBuilder, + utils::{stream::TryIgnore, IterStream}, + warn, Err, Error, Result, +}; +use futures::{StreamExt, TryStreamExt}; use ruma::{ api::{ client::{ @@ -35,16 +40,18 @@ pub(crate) async fn set_displayname_route( .rooms .state_cache .rooms_joined(&body.user_id) - .filter_map(Result::ok) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; - update_displayname(&services, &body.user_id, body.displayname.clone(), all_joined_rooms).await?; + update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms).await?; if services.globals.allow_local_presence() { // Presence update services .presence - .ping_presence(&body.user_id, &PresenceState::Online)?; + .ping_presence(&body.user_id, &PresenceState::Online) + .await?; } Ok(set_display_name::v3::Response {}) @@ -72,22 +79,19 @@ pub(crate) async fn get_displayname_route( ) .await { - if !services.users.exists(&body.user_id)? 
{ + if !services.users.exists(&body.user_id).await { services.users.create(&body.user_id, None)?; } services .users - .set_displayname(&body.user_id, response.displayname.clone()) - .await?; + .set_displayname(&body.user_id, response.displayname.clone()); services .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()) - .await?; + .set_avatar_url(&body.user_id, response.avatar_url.clone()); services .users - .set_blurhash(&body.user_id, response.blurhash.clone()) - .await?; + .set_blurhash(&body.user_id, response.blurhash.clone()); return Ok(get_display_name::v3::Response { displayname: response.displayname, @@ -95,14 +99,14 @@ pub(crate) async fn get_displayname_route( } } - if !services.users.exists(&body.user_id)? { + if !services.users.exists(&body.user_id).await { // Return 404 if this user doesn't exist and we couldn't fetch it over // federation return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); } Ok(get_display_name::v3::Response { - displayname: services.users.displayname(&body.user_id)?, + displayname: services.users.displayname(&body.user_id).await.ok(), }) } @@ -124,15 +128,16 @@ pub(crate) async fn set_avatar_url_route( .rooms .state_cache .rooms_joined(&body.user_id) - .filter_map(Result::ok) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; update_avatar_url( &services, &body.user_id, body.avatar_url.clone(), body.blurhash.clone(), - all_joined_rooms, + &all_joined_rooms, ) .await?; @@ -140,7 +145,9 @@ pub(crate) async fn set_avatar_url_route( // Presence update services .presence - .ping_presence(&body.user_id, &PresenceState::Online)?; + .ping_presence(&body.user_id, &PresenceState::Online) + .await + .ok(); } Ok(set_avatar_url::v3::Response {}) @@ -168,22 +175,21 @@ pub(crate) async fn get_avatar_url_route( ) .await { - if !services.users.exists(&body.user_id)? 
{ + if !services.users.exists(&body.user_id).await { services.users.create(&body.user_id, None)?; } services .users - .set_displayname(&body.user_id, response.displayname.clone()) - .await?; + .set_displayname(&body.user_id, response.displayname.clone()); + services .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()) - .await?; + .set_avatar_url(&body.user_id, response.avatar_url.clone()); + services .users - .set_blurhash(&body.user_id, response.blurhash.clone()) - .await?; + .set_blurhash(&body.user_id, response.blurhash.clone()); return Ok(get_avatar_url::v3::Response { avatar_url: response.avatar_url, @@ -192,15 +198,15 @@ pub(crate) async fn get_avatar_url_route( } } - if !services.users.exists(&body.user_id)? { + if !services.users.exists(&body.user_id).await { // Return 404 if this user doesn't exist and we couldn't fetch it over // federation return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); } Ok(get_avatar_url::v3::Response { - avatar_url: services.users.avatar_url(&body.user_id)?, - blurhash: services.users.blurhash(&body.user_id)?, + avatar_url: services.users.avatar_url(&body.user_id).await.ok(), + blurhash: services.users.blurhash(&body.user_id).await.ok(), }) } @@ -226,31 +232,30 @@ pub(crate) async fn get_profile_route( ) .await { - if !services.users.exists(&body.user_id)? 
{ + if !services.users.exists(&body.user_id).await { services.users.create(&body.user_id, None)?; } services .users - .set_displayname(&body.user_id, response.displayname.clone()) - .await?; + .set_displayname(&body.user_id, response.displayname.clone()); + services .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()) - .await?; + .set_avatar_url(&body.user_id, response.avatar_url.clone()); + services .users - .set_blurhash(&body.user_id, response.blurhash.clone()) - .await?; + .set_blurhash(&body.user_id, response.blurhash.clone()); + services .users - .set_timezone(&body.user_id, response.tz.clone()) - .await?; + .set_timezone(&body.user_id, response.tz.clone()); for (profile_key, profile_key_value) in &response.custom_profile_fields { services .users - .set_profile_key(&body.user_id, profile_key, Some(profile_key_value.clone()))?; + .set_profile_key(&body.user_id, profile_key, Some(profile_key_value.clone())); } return Ok(get_profile::v3::Response { @@ -263,104 +268,93 @@ pub(crate) async fn get_profile_route( } } - if !services.users.exists(&body.user_id)? 
{ + if !services.users.exists(&body.user_id).await { // Return 404 if this user doesn't exist and we couldn't fetch it over // federation return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); } Ok(get_profile::v3::Response { - avatar_url: services.users.avatar_url(&body.user_id)?, - blurhash: services.users.blurhash(&body.user_id)?, - displayname: services.users.displayname(&body.user_id)?, - tz: services.users.timezone(&body.user_id)?, + avatar_url: services.users.avatar_url(&body.user_id).await.ok(), + blurhash: services.users.blurhash(&body.user_id).await.ok(), + displayname: services.users.displayname(&body.user_id).await.ok(), + tz: services.users.timezone(&body.user_id).await.ok(), custom_profile_fields: services .users .all_profile_keys(&body.user_id) - .filter_map(Result::ok) - .collect(), + .collect() + .await, }) } pub async fn update_displayname( - services: &Services, user_id: &UserId, displayname: Option, all_joined_rooms: Vec, + services: &Services, user_id: &UserId, displayname: Option, all_joined_rooms: &[OwnedRoomId], ) -> Result<()> { - let current_display_name = services.users.displayname(user_id).unwrap_or_default(); + let current_display_name = services.users.displayname(user_id).await.ok(); if displayname == current_display_name { return Ok(()); } - services - .users - .set_displayname(user_id, displayname.clone()) - .await?; + services.users.set_displayname(user_id, displayname.clone()); // Send a new join membership event into all joined rooms - let all_joined_rooms: Vec<_> = all_joined_rooms - .iter() - .map(|room_id| { - Ok::<_, Error>(( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - displayname: displayname.clone(), - join_authorized_via_users_server: None, - ..serde_json::from_str( - services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? 
- .ok_or_else(|| { - Error::bad_database("Tried to send display name update for user not in the room.") - })? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }, - room_id, - )) - }) - .filter_map(Result::ok) - .collect(); + let mut joined_rooms = Vec::new(); + for room_id in all_joined_rooms { + let Ok(event) = services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) + .await + else { + continue; + }; - update_all_rooms(services, all_joined_rooms, user_id).await; + let pdu = PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + displayname: displayname.clone(), + join_authorized_via_users_server: None, + ..serde_json::from_str(event.content.get()).expect("Database contains invalid PDU.") + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + timestamp: None, + }; + + joined_rooms.push((pdu, room_id)); + } + + update_all_rooms(services, joined_rooms, user_id).await; Ok(()) } pub async fn update_avatar_url( services: &Services, user_id: &UserId, avatar_url: Option, blurhash: Option, - all_joined_rooms: Vec, + all_joined_rooms: &[OwnedRoomId], ) -> Result<()> { - let current_avatar_url = services.users.avatar_url(user_id).unwrap_or_default(); - let current_blurhash = services.users.blurhash(user_id).unwrap_or_default(); + let current_avatar_url = services.users.avatar_url(user_id).await.ok(); + let current_blurhash = services.users.blurhash(user_id).await.ok(); if current_avatar_url == avatar_url && current_blurhash == blurhash { return Ok(()); } - services - .users - .set_avatar_url(user_id, avatar_url.clone()) - .await?; - services - .users - .set_blurhash(user_id, blurhash.clone()) - 
.await?; + services.users.set_avatar_url(user_id, avatar_url.clone()); + + services.users.set_blurhash(user_id, blurhash.clone()); // Send a new join membership event into all joined rooms + let avatar_url = &avatar_url; + let blurhash = &blurhash; let all_joined_rooms: Vec<_> = all_joined_rooms .iter() - .map(|room_id| { - Ok::<_, Error>(( + .try_stream() + .and_then(|room_id: &OwnedRoomId| async move { + Ok(( PduBuilder { event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -371,8 +365,9 @@ pub async fn update_avatar_url( services .rooms .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or_else(|| { + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_err(|_| { Error::bad_database("Tried to send avatar URL update for user not in the room.") })? .content @@ -389,8 +384,9 @@ pub async fn update_avatar_url( room_id, )) }) - .filter_map(Result::ok) - .collect(); + .ignore_err() + .collect() + .await; update_all_rooms(services, all_joined_rooms, user_id).await; diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 8723e676..39095199 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -29,41 +29,37 @@ pub(crate) async fn get_pushrules_all_route( let global_ruleset: Ruleset; - let Ok(event) = - services - .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) - else { - // push rules event doesn't exist, create it and return default - return recreate_push_rules_and_return(&services, sender_user); - }; + let event = services + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await; - if let Some(event) = event { - let value = serde_json::from_str::(event.get()) - .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; - - let Some(content_value) = value.get("content") else { - // user 
somehow has a push rule event with no content key, recreate it and - // return server default silently - return recreate_push_rules_and_return(&services, sender_user); - }; - - if content_value.to_string().is_empty() { - // user somehow has a push rule event with empty content, recreate it and return - // server default silently - return recreate_push_rules_and_return(&services, sender_user); - } - - let account_data_content = serde_json::from_value::(content_value.clone().into()) - .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; - - global_ruleset = account_data_content.global; - } else { + let Ok(event) = event else { // user somehow has non-existent push rule event. recreate it and return server // default silently - return recreate_push_rules_and_return(&services, sender_user); + return recreate_push_rules_and_return(&services, sender_user).await; + }; + + let value = serde_json::from_str::(event.get()) + .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; + + let Some(content_value) = value.get("content") else { + // user somehow has a push rule event with no content key, recreate it and + // return server default silently + return recreate_push_rules_and_return(&services, sender_user).await; + }; + + if content_value.to_string().is_empty() { + // user somehow has a push rule event with empty content, recreate it and return + // server default silently + return recreate_push_rules_and_return(&services, sender_user).await; } + let account_data_content = serde_json::from_value::(content_value.clone().into()) + .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; + + global_ruleset = account_data_content.global; + Ok(get_pushrules_all::v3::Response { global: global_ruleset, }) @@ -79,8 +75,9 @@ pub(crate) async fn get_pushrule_route( let event = services .account_data - .get(None, sender_user, 
GlobalAccountDataEventType::PushRules.to_string().into())? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? @@ -118,8 +115,9 @@ pub(crate) async fn set_pushrule_route( let event = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; @@ -155,12 +153,15 @@ pub(crate) async fn set_pushrule_route( return Err(err); } - services.account_data.update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; + services + .account_data + .update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + ) + .await?; Ok(set_pushrule::v3::Response {}) } @@ -182,8 +183,9 @@ pub(crate) async fn get_pushrule_actions_route( let event = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? 
- .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))? @@ -217,8 +219,9 @@ pub(crate) async fn set_pushrule_actions_route( let event = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; @@ -232,12 +235,15 @@ pub(crate) async fn set_pushrule_actions_route( return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); } - services.account_data.update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; + services + .account_data + .update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + ) + .await?; Ok(set_pushrule_actions::v3::Response {}) } @@ -259,8 +265,9 @@ pub(crate) async fn get_pushrule_enabled_route( let event = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? 
- .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; let account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; @@ -293,8 +300,9 @@ pub(crate) async fn set_pushrule_enabled_route( let event = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; @@ -308,12 +316,15 @@ pub(crate) async fn set_pushrule_enabled_route( return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); } - services.account_data.update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; + services + .account_data + .update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + ) + .await?; Ok(set_pushrule_enabled::v3::Response {}) } @@ -335,8 +346,9 @@ pub(crate) async fn delete_pushrule_route( let event = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? 
- .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; let mut account_data = serde_json::from_str::(event.get()) .map_err(|_| Error::bad_database("Invalid account data event in db."))?; @@ -357,12 +369,15 @@ pub(crate) async fn delete_pushrule_route( return Err(err); } - services.account_data.update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; + services + .account_data + .update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + ) + .await?; Ok(delete_pushrule::v3::Response {}) } @@ -376,7 +391,7 @@ pub(crate) async fn get_pushers_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::v3::Response { - pushers: services.pusher.get_pushers(sender_user)?, + pushers: services.pusher.get_pushers(sender_user).await, }) } @@ -390,27 +405,30 @@ pub(crate) async fn set_pushers_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services.pusher.set_pusher(sender_user, &body.action)?; + services.pusher.set_pusher(sender_user, &body.action); Ok(set_pusher::v3::Response::default()) } /// user somehow has bad push rules, these must always exist per spec. 
/// so recreate it and return server default silently -fn recreate_push_rules_and_return( +async fn recreate_push_rules_and_return( services: &Services, sender_user: &ruma::UserId, ) -> Result { - services.account_data.update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { - global: Ruleset::server_default(sender_user), - }, - }) - .expect("to json always works"), - )?; + services + .account_data + .update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(PushRulesEvent { + content: PushRulesEventContent { + global: Ruleset::server_default(sender_user), + }, + }) + .expect("to json always works"), + ) + .await?; Ok(get_pushrules_all::v3::Response { global: Ruleset::server_default(sender_user), diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index f40f2493..f28b2aec 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -31,27 +31,32 @@ pub(crate) async fn set_read_marker_route( event_id: fully_read.clone(), }, }; - services.account_data.update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), - )?; + services + .account_data + .update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + ) + .await?; } if body.private_read_receipt.is_some() || body.read_receipt.is_some() { services .rooms .user - .reset_notification_counts(sender_user, &body.room_id)?; + .reset_notification_counts(sender_user, &body.room_id); } if let Some(event) = &body.private_read_receipt { let count = services .rooms .timeline - .get_pdu_count(event)? 
- .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?; + .get_pdu_count(event) + .await + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?; + let count = match count { PduCount::Backfilled(_) => { return Err(Error::BadRequest( @@ -64,7 +69,7 @@ pub(crate) async fn set_read_marker_route( services .rooms .read_receipt - .private_read_set(&body.room_id, sender_user, count)?; + .private_read_set(&body.room_id, sender_user, count); } if let Some(event) = &body.read_receipt { @@ -83,14 +88,18 @@ pub(crate) async fn set_read_marker_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(event.to_owned(), receipts); - services.rooms.read_receipt.readreceipt_update( - sender_user, - &body.room_id, - &ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )?; + services + .rooms + .read_receipt + .readreceipt_update( + sender_user, + &body.room_id, + &ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + ) + .await; } Ok(set_read_marker::v3::Response {}) @@ -111,7 +120,7 @@ pub(crate) async fn create_receipt_route( services .rooms .user - .reset_notification_counts(sender_user, &body.room_id)?; + .reset_notification_counts(sender_user, &body.room_id); } match body.receipt_type { @@ -121,12 +130,15 @@ pub(crate) async fn create_receipt_route( event_id: body.event_id.clone(), }, }; - services.account_data.update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), - )?; + services + .account_data + .update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + ) + .await?; }, 
create_receipt::v3::ReceiptType::Read => { let mut user_receipts = BTreeMap::new(); @@ -143,21 +155,27 @@ pub(crate) async fn create_receipt_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(body.event_id.clone(), receipts); - services.rooms.read_receipt.readreceipt_update( - sender_user, - &body.room_id, - &ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )?; + services + .rooms + .read_receipt + .readreceipt_update( + sender_user, + &body.room_id, + &ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + ) + .await; }, create_receipt::v3::ReceiptType::ReadPrivate => { let count = services .rooms .timeline - .get_pdu_count(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?; + .get_pdu_count(&body.event_id) + .await + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?; + let count = match count { PduCount::Backfilled(_) => { return Err(Error::BadRequest( @@ -170,7 +188,7 @@ pub(crate) async fn create_receipt_route( services .rooms .read_receipt - .private_read_set(&body.room_id, sender_user, count)?; + .private_read_set(&body.room_id, sender_user, count); }, _ => return Err(Error::bad_database("Unsupported receipt type")), } diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index ae645940..d4384730 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -9,20 +9,24 @@ use crate::{Result, Ruma}; pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); - let res = 
services.rooms.pdu_metadata.paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - &Some(body.event_type.clone()), - &Some(body.rel_type.clone()), - &body.from, - &body.to, - &body.limit, - body.recurse, - body.dir, - )?; + let res = services + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + body.event_type.clone().into(), + body.rel_type.clone().into(), + body.from.as_ref(), + body.to.as_ref(), + body.limit, + body.recurse, + body.dir, + ) + .await?; Ok(get_relating_events_with_rel_type_and_event_type::v1::Response { chunk: res.chunk, @@ -36,20 +40,24 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route( pub(crate) async fn get_relating_events_with_rel_type_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); - let res = services.rooms.pdu_metadata.paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - &None, - &Some(body.rel_type.clone()), - &body.from, - &body.to, - &body.limit, - body.recurse, - body.dir, - )?; + let res = services + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + None, + body.rel_type.clone().into(), + body.from.as_ref(), + body.to.as_ref(), + body.limit, + body.recurse, + body.dir, + ) + .await?; Ok(get_relating_events_with_rel_type::v1::Response { chunk: res.chunk, @@ -63,18 +71,22 @@ pub(crate) async fn get_relating_events_with_rel_type_route( pub(crate) async fn get_relating_events_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); - services.rooms.pdu_metadata.paginate_relations_with_filter( - sender_user, - 
&body.room_id, - &body.event_id, - &None, - &None, - &body.from, - &body.to, - &body.limit, - body.recurse, - body.dir, - ) + services + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + None, + None, + body.from.as_ref(), + body.to.as_ref(), + body.limit, + body.recurse, + body.dir, + ) + .await } diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 588bd368..a40c35a2 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -1,6 +1,7 @@ use std::time::Duration; use axum::extract::State; +use conduit::{utils::ReadyExt, Err}; use rand::Rng; use ruma::{ api::client::{error::ErrorKind, room::report_content}, @@ -34,11 +35,8 @@ pub(crate) async fn report_event_route( delay_response().await; // check if we know about the reported event ID or if it's invalid - let Some(pdu) = services.rooms.timeline.get_pdu(&body.event_id)? else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Event ID is not known to us or Event ID is invalid", - )); + let Ok(pdu) = services.rooms.timeline.get_pdu(&body.event_id).await else { + return Err!(Request(NotFound("Event ID is not known to us or Event ID is invalid"))); }; is_report_valid( @@ -49,7 +47,8 @@ pub(crate) async fn report_event_route( &body.reason, body.score, &pdu, - )?; + ) + .await?; // send admin room message that we received the report with an @room ping for // urgency @@ -81,7 +80,8 @@ pub(crate) async fn report_event_route( HtmlEscape(body.reason.as_deref().unwrap_or("")) ), )) - .await; + .await + .ok(); Ok(report_content::v3::Response {}) } @@ -92,7 +92,7 @@ pub(crate) async fn report_event_route( /// check if score is in valid range /// check if report reasoning is less than or equal to 750 characters /// check if reporting user is in the reporting room -fn is_report_valid( +async fn is_report_valid( services: &Services, event_id: &EventId, room_id: &RoomId, sender_user: &UserId, reason: &Option, score: Option, pdu: 
&std::sync::Arc, ) -> Result<()> { @@ -123,8 +123,8 @@ fn is_report_valid( .rooms .state_cache .room_members(room_id) - .filter_map(Result::ok) - .any(|user_id| user_id == *sender_user) + .ready_any(|user_id| user_id == sender_user) + .await { return Err(Error::BadRequest( ErrorKind::NotFound, diff --git a/src/api/client/room.rs b/src/api/client/room.rs index 0112e76d..1edf85d8 100644 --- a/src/api/client/room.rs +++ b/src/api/client/room.rs @@ -2,6 +2,7 @@ use std::{cmp::max, collections::BTreeMap}; use axum::extract::State; use conduit::{debug_info, debug_warn, err, Err}; +use futures::{FutureExt, StreamExt}; use ruma::{ api::client::{ error::ErrorKind, @@ -74,7 +75,7 @@ pub(crate) async fn create_room_route( if !services.globals.allow_room_creation() && body.appservice_info.is_none() - && !services.users.is_admin(sender_user)? + && !services.users.is_admin(sender_user).await { return Err(Error::BadRequest(ErrorKind::forbidden(), "Room creation has been disabled.")); } @@ -86,7 +87,7 @@ pub(crate) async fn create_room_route( }; // check if room ID doesn't already exist instead of erroring on auth check - if services.rooms.short.get_shortroomid(&room_id)?.is_some() { + if services.rooms.short.get_shortroomid(&room_id).await.is_ok() { return Err(Error::BadRequest( ErrorKind::RoomInUse, "Room with that custom room ID already exists", @@ -95,7 +96,7 @@ pub(crate) async fn create_room_route( if body.visibility == room::Visibility::Public && services.globals.config.lockdown_public_room_directory - && !services.users.is_admin(sender_user)? 
+ && !services.users.is_admin(sender_user).await && body.appservice_info.is_none() { info!( @@ -118,7 +119,11 @@ pub(crate) async fn create_room_route( return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed"))); } - let _short_id = services.rooms.short.get_or_create_shortroomid(&room_id)?; + let _short_id = services + .rooms + .short + .get_or_create_shortroomid(&room_id) + .await; let state_lock = services.rooms.state.mutex.lock(&room_id).await; let alias: Option = if let Some(alias) = &body.room_alias_name { @@ -218,6 +223,7 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; // 2. Let the room creator join @@ -229,11 +235,11 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, - displayname: services.users.displayname(sender_user)?, - avatar_url: services.users.avatar_url(sender_user)?, + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), is_direct: Some(body.is_direct), third_party_invite: None, - blurhash: services.users.blurhash(sender_user)?, + blurhash: services.users.blurhash(sender_user).await.ok(), reason: None, join_authorized_via_users_server: None, }) @@ -247,6 +253,7 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; // 3. Power levels @@ -284,6 +291,7 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; // 4. 
Canonical room alias @@ -308,6 +316,7 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; } @@ -335,6 +344,7 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; // 5.2 History Visibility @@ -355,6 +365,7 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; // 5.3 Guest Access @@ -378,6 +389,7 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; // 6. Events listed in initial_state @@ -410,6 +422,7 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .boxed() .await?; } @@ -432,6 +445,7 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; } @@ -455,13 +469,17 @@ pub(crate) async fn create_room_route( &room_id, &state_lock, ) + .boxed() .await?; } // 8. Events implied by invite (and TODO: invite_3pid) drop(state_lock); for user_id in &body.invite { - if let Err(e) = invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct).await { + if let Err(e) = invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct) + .boxed() + .await + { warn!(%e, "Failed to send invite"); } } @@ -475,7 +493,7 @@ pub(crate) async fn create_room_route( } if body.visibility == room::Visibility::Public { - services.rooms.directory.set_public(&room_id)?; + services.rooms.directory.set_public(&room_id); if services.globals.config.admin_room_notices { services @@ -505,13 +523,15 @@ pub(crate) async fn get_room_event_route( let event = services .rooms .timeline - .get_pdu(&body.event_id)? - .ok_or_else(|| err!(Request(NotFound("Event {} not found.", &body.event_id))))?; + .get_pdu(&body.event_id) + .await + .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id))))?; if !services .rooms .state_accessor - .user_can_see_event(sender_user, &event.room_id, &body.event_id)? 
+ .user_can_see_event(sender_user, &event.room_id, &body.event_id) + .await { return Err(Error::BadRequest( ErrorKind::forbidden(), @@ -541,7 +561,8 @@ pub(crate) async fn get_room_aliases_route( if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id) + .await { return Err(Error::BadRequest( ErrorKind::forbidden(), @@ -554,8 +575,9 @@ pub(crate) async fn get_room_aliases_route( .rooms .alias .local_aliases_for_room(&body.room_id) - .filter_map(Result::ok) - .collect(), + .map(ToOwned::to_owned) + .collect() + .await, }) } @@ -591,7 +613,8 @@ pub(crate) async fn upgrade_room_route( let _short_id = services .rooms .short - .get_or_create_shortroomid(&replacement_room)?; + .get_or_create_shortroomid(&replacement_room) + .await; let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; @@ -629,12 +652,12 @@ pub(crate) async fn upgrade_room_route( services .rooms .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? - .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "") + .await + .map_err(|_| err!(Database("Found room without m.room.create event.")))? 
.content .get(), - ) - .map_err(|_| Error::bad_database("Invalid room event in database."))?; + )?; // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( @@ -714,11 +737,11 @@ pub(crate) async fn upgrade_room_route( event_type: TimelineEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, - displayname: services.users.displayname(sender_user)?, - avatar_url: services.users.avatar_url(sender_user)?, + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), is_direct: None, third_party_invite: None, - blurhash: services.users.blurhash(sender_user)?, + blurhash: services.users.blurhash(sender_user).await.ok(), reason: None, join_authorized_via_users_server: None, }) @@ -739,10 +762,11 @@ pub(crate) async fn upgrade_room_route( let event_content = match services .rooms .state_accessor - .room_state_get(&body.room_id, event_type, "")? + .room_state_get(&body.room_id, event_type, "") + .await { - Some(v) => v.content.clone(), - None => continue, // Skipping missing events. + Ok(v) => v.content.clone(), + Err(_) => continue, // Skipping missing events. 
}; services @@ -765,21 +789,23 @@ pub(crate) async fn upgrade_room_route( } // Moves any local aliases to the new room - for alias in services + let mut local_aliases = services .rooms .alias .local_aliases_for_room(&body.room_id) - .filter_map(Result::ok) - { + .boxed(); + + while let Some(alias) = local_aliases.next().await { services .rooms .alias - .remove_alias(&alias, sender_user) + .remove_alias(alias, sender_user) .await?; + services .rooms .alias - .set_alias(&alias, &replacement_room, sender_user)?; + .set_alias(alias, &replacement_room, sender_user)?; } // Get the old room power levels @@ -787,12 +813,12 @@ pub(crate) async fn upgrade_room_route( services .rooms .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? - .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? + .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "") + .await + .map_err(|_| err!(Database("Found room without m.room.create event.")))? .content .get(), - ) - .map_err(|_| Error::bad_database("Invalid room event in database."))?; + )?; // Setting events_default and invite to the greater of 50 and users_default + 1 let new_level = max( @@ -800,9 +826,7 @@ pub(crate) async fn upgrade_room_route( power_levels_event_content .users_default .checked_add(int!(1)) - .ok_or_else(|| { - Error::BadRequest(ErrorKind::BadJson, "users_default power levels event content is not valid") - })?, + .ok_or_else(|| err!(Request(BadJson("users_default power levels event content is not valid"))))?, ); power_levels_event_content.events_default = new_level; power_levels_event_content.invite = new_level; @@ -921,8 +945,9 @@ async fn room_alias_check( if services .rooms .alias - .resolve_local_alias(&full_room_alias)? 
- .is_some() + .resolve_local_alias(&full_room_alias) + .await + .is_ok() { return Err(Error::BadRequest(ErrorKind::RoomInUse, "Room alias already exists.")); } diff --git a/src/api/client/search.rs b/src/api/client/search.rs index b143bd2c..7a061d49 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -1,6 +1,12 @@ use std::collections::BTreeMap; use axum::extract::State; +use conduit::{ + debug, + utils::{IterStream, ReadyExt}, + Err, +}; +use futures::{FutureExt, StreamExt}; use ruma::{ api::client::{ error::ErrorKind, @@ -13,7 +19,6 @@ use ruma::{ serde::Raw, uint, OwnedRoomId, }; -use tracing::debug; use crate::{Error, Result, Ruma}; @@ -32,14 +37,17 @@ pub(crate) async fn search_events_route( let filter = &search_criteria.filter; let include_state = &search_criteria.include_state; - let room_ids = filter.rooms.clone().unwrap_or_else(|| { + let room_ids = if let Some(room_ids) = &filter.rooms { + room_ids.clone() + } else { services .rooms .state_cache .rooms_joined(sender_user) - .filter_map(Result::ok) + .map(ToOwned::to_owned) .collect() - }); + .await + }; // Use limit or else 10, with maximum 100 let limit: usize = filter @@ -53,18 +61,21 @@ pub(crate) async fn search_events_route( if include_state.is_some_and(|include_state| include_state) { for room_id in &room_ids { - if !services.rooms.state_cache.is_joined(sender_user, room_id)? { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); + if !services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + return Err!(Request(Forbidden("You don't have permission to view this room."))); } // check if sender_user can see state events if services .rooms .state_accessor - .user_can_see_state_events(sender_user, room_id)? 
+ .user_can_see_state_events(sender_user, room_id) + .await { let room_state = services .rooms @@ -87,10 +98,15 @@ pub(crate) async fn search_events_route( } } - let mut searches = Vec::new(); + let mut search_vecs = Vec::new(); for room_id in &room_ids { - if !services.rooms.state_cache.is_joined(sender_user, room_id)? { + if !services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { return Err(Error::BadRequest( ErrorKind::forbidden(), "You don't have permission to view this room.", @@ -100,12 +116,18 @@ pub(crate) async fn search_events_route( if let Some(search) = services .rooms .search - .search_pdus(room_id, &search_criteria.search_term)? + .search_pdus(room_id, &search_criteria.search_term) + .await { - searches.push(search.0.peekable()); + search_vecs.push(search.0); } } + let mut searches: Vec<_> = search_vecs + .iter() + .map(|vec| vec.iter().peekable()) + .collect(); + let skip: usize = match body.next_batch.as_ref().map(|s| s.parse()) { Some(Ok(s)) => s, Some(Err(_)) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid next_batch token.")), @@ -118,8 +140,8 @@ pub(crate) async fn search_events_route( for _ in 0..next_batch { if let Some(s) = searches .iter_mut() - .map(|s| (s.peek().cloned(), s)) - .max_by_key(|(peek, _)| peek.clone()) + .map(|s| (s.peek().copied(), s)) + .max_by_key(|(peek, _)| *peek) .and_then(|(_, i)| i.next()) { results.push(s); @@ -127,42 +149,38 @@ pub(crate) async fn search_events_route( } let results: Vec<_> = results - .iter() + .into_iter() .skip(skip) - .filter_map(|result| { + .stream() + .filter_map(|id| services.rooms.timeline.get_pdu_from_id(id).map(Result::ok)) + .ready_filter(|pdu| !pdu.is_redacted()) + .filter_map(|pdu| async move { services .rooms - .timeline - .get_pdu_from_id(result) - .ok()? 
- .filter(|pdu| { - !pdu.is_redacted() - && services - .rooms - .state_accessor - .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) - .unwrap_or(false) - }) - .map(|pdu| pdu.to_room_event()) + .state_accessor + .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) + .await + .then_some(pdu) }) - .map(|result| { - Ok::<_, Error>(SearchResult { - context: EventContextResult { - end: None, - events_after: Vec::new(), - events_before: Vec::new(), - profile_info: BTreeMap::new(), - start: None, - }, - rank: None, - result: Some(result), - }) - }) - .filter_map(Result::ok) .take(limit) - .collect(); + .map(|pdu| pdu.to_room_event()) + .map(|result| SearchResult { + context: EventContextResult { + end: None, + events_after: Vec::new(), + events_before: Vec::new(), + profile_info: BTreeMap::new(), + start: None, + }, + rank: None, + result: Some(result), + }) + .collect() + .boxed() + .await; let more_unloaded_results = searches.iter_mut().any(|s| s.peek().is_some()); + let next_batch = more_unloaded_results.then(|| next_batch.to_string()); Ok(search_events::v3::Response::new(ResultCategories { diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 4702b0ec..6347a2c9 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -1,5 +1,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; +use conduit::{debug, err, info, utils::ReadyExt, warn, Err}; +use futures::StreamExt; use ruma::{ api::client::{ error::ErrorKind, @@ -19,7 +21,6 @@ use ruma::{ UserId, }; use serde::Deserialize; -use tracing::{debug, info, warn}; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{utils, utils::hash, Error, Result, Ruma}; @@ -79,21 +80,22 @@ pub(crate) async fn login_route( UserId::parse(user) } else { warn!("Bad login type: {:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + return Err!(Request(Forbidden("Bad login type."))); } .map_err(|_| 
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; let hash = services .users - .password_hash(&user_id)? - .ok_or(Error::BadRequest(ErrorKind::forbidden(), "Wrong username or password."))?; + .password_hash(&user_id) + .await + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; if hash.is_empty() { - return Err(Error::BadRequest(ErrorKind::UserDeactivated, "The user has been deactivated")); + return Err!(Request(UserDeactivated("The user has been deactivated"))); } if hash::verify_password(password, &hash).is_err() { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Wrong username or password.")); + return Err!(Request(Forbidden("Wrong username or password."))); } user_id @@ -112,15 +114,12 @@ pub(crate) async fn login_route( let username = token.claims.sub.to_lowercase(); - UserId::parse_with_server_name(username, services.globals.server_name()).map_err(|e| { - warn!("Failed to parse username from user logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })? + UserId::parse_with_server_name(username, services.globals.server_name()) + .map_err(|e| err!(Request(InvalidUsername(debug_error!(?e, "Failed to parse login username")))))? } else { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Token login is not supported (server has no jwt decoding key).", - )); + return Err!(Request(Unknown( + "Token login is not supported (server has no jwt decoding key)." 
+ ))); } }, #[allow(deprecated)] @@ -169,23 +168,32 @@ pub(crate) async fn login_route( let token = utils::random_string(TOKEN_LENGTH); // Determine if device_id was provided and exists in the db for this user - let device_exists = body.device_id.as_ref().map_or(false, |device_id| { + let device_exists = if body.device_id.is_some() { services .users .all_device_ids(&user_id) - .any(|x| x.as_ref().map_or(false, |v| v == device_id)) - }); + .ready_any(|v| v == device_id) + .await + } else { + false + }; if device_exists { - services.users.set_token(&user_id, &device_id, &token)?; + services + .users + .set_token(&user_id, &device_id, &token) + .await?; } else { - services.users.create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - Some(client.to_string()), - )?; + services + .users + .create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + Some(client.to_string()), + ) + .await?; } // send client well-known if specified so the client knows to reconfigure itself @@ -228,10 +236,13 @@ pub(crate) async fn logout_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - services.users.remove_device(sender_user, sender_device)?; + services + .users + .remove_device(sender_user, sender_device) + .await; // send device list update for user after logout - services.users.mark_device_key_update(sender_user)?; + services.users.mark_device_key_update(sender_user).await; Ok(logout::v3::Response::new()) } @@ -256,12 +267,14 @@ pub(crate) async fn logout_all_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for device_id in services.users.all_device_ids(sender_user).flatten() { - services.users.remove_device(sender_user, &device_id)?; - } + services + .users + .all_device_ids(sender_user) + .for_each(|device_id| 
services.users.remove_device(sender_user, device_id)) + .await; // send device list update for user after logout - services.users.mark_device_key_update(sender_user)?; + services.users.mark_device_key_update(sender_user).await; Ok(logout_all::v3::Response::new()) } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index fd049663..f9a4a763 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use axum::extract::State; -use conduit::{debug_info, error, pdu::PduBuilder, Error, Result}; +use conduit::{err, error, pdu::PduBuilder, Err, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, @@ -84,12 +84,10 @@ pub(crate) async fn get_state_events_route( if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id) + .await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view the room state.", - )); + return Err!(Request(Forbidden("You don't have permission to view the room state."))); } Ok(get_state_events::v3::Response { @@ -120,22 +118,25 @@ pub(crate) async fn get_state_events_for_key_route( if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id) + .await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view the room state.", - )); + return Err!(Request(Forbidden("You don't have permission to view the room state."))); } let event = services .rooms .state_accessor - .room_state_get(&body.room_id, &body.event_type, &body.state_key)? 
- .ok_or_else(|| { - debug_info!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id); - Error::BadRequest(ErrorKind::NotFound, "State event not found.") + .room_state_get(&body.room_id, &body.event_type, &body.state_key) + .await + .map_err(|_| { + err!(Request(NotFound(error!( + room_id = ?body.room_id, + event_type = ?body.event_type, + "State event not found in room.", + )))) })?; + if body .format .as_ref() @@ -204,7 +205,7 @@ async fn send_state_event_for_key_helper( async fn allowed_to_send_state_event( services: &Services, room_id: &RoomId, event_type: &StateEventType, json: &Raw, -) -> Result<()> { +) -> Result { match event_type { // Forbid m.room.encryption if encryption is disabled StateEventType::RoomEncryption => { @@ -214,7 +215,7 @@ async fn allowed_to_send_state_event( }, // admin room is a sensitive room, it should not ever be made public StateEventType::RoomJoinRules => { - if let Some(admin_room_id) = services.admin.get_admin_room()? { + if let Ok(admin_room_id) = services.admin.get_admin_room().await { if admin_room_id == room_id { if let Ok(join_rule) = serde_json::from_str::(json.json().get()) { if join_rule.join_rule == JoinRule::Public { @@ -229,7 +230,7 @@ async fn allowed_to_send_state_event( }, // admin room is a sensitive room, it should not ever be made world readable StateEventType::RoomHistoryVisibility => { - if let Some(admin_room_id) = services.admin.get_admin_room()? { + if let Ok(admin_room_id) = services.admin.get_admin_room().await { if admin_room_id == room_id { if let Ok(visibility_content) = serde_json::from_str::(json.json().get()) @@ -254,23 +255,27 @@ async fn allowed_to_send_state_event( } for alias in aliases { - if !services.globals.server_is_ours(alias.server_name()) - || services - .rooms - .alias - .resolve_local_alias(&alias)? 
- .filter(|room| room == room_id) // Make sure it's the right room - .is_none() + if !services.globals.server_is_ours(alias.server_name()) { + return Err!(Request(Forbidden("canonical_alias must be for this server"))); + } + + if !services + .rooms + .alias + .resolve_local_alias(&alias) + .await + .is_ok_and(|room| room == room_id) + // Make sure it's the right room { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You are only allowed to send canonical_alias events when its aliases already exist", - )); + return Err!(Request(Forbidden( + "You are only allowed to send canonical_alias events when its aliases already exist" + ))); } } } }, _ => (), } + Ok(()) } diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index eb534205..53d4f3c3 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -6,10 +6,14 @@ use std::{ use axum::extract::State; use conduit::{ - debug, error, - utils::math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, - warn, Err, PduCount, + debug, err, error, is_equal_to, + utils::{ + math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, + IterStream, ReadyExt, + }, + warn, PduCount, }; +use futures::{pin_mut, StreamExt}; use ruma::{ api::client::{ error::ErrorKind, @@ -108,7 +112,8 @@ pub(crate) async fn sync_events_route( if services.globals.allow_local_presence() { services .presence - .ping_presence(&sender_user, &body.set_presence)?; + .ping_presence(&sender_user, &body.set_presence) + .await?; } // Setup watchers, so if there's no response, we can wait for them @@ -124,7 +129,8 @@ pub(crate) async fn sync_events_route( Some(Filter::FilterDefinition(filter)) => filter, Some(Filter::FilterId(filter_id)) => services .users - .get_filter(&sender_user, &filter_id)? 
+ .get_filter(&sender_user, &filter_id) + .await .unwrap_or_default(), }; @@ -157,7 +163,9 @@ pub(crate) async fn sync_events_route( services .users .keys_changed(sender_user.as_ref(), since, None) - .filter_map(Result::ok), + .map(ToOwned::to_owned) + .collect::>() + .await, ); if services.globals.allow_local_presence() { @@ -168,13 +176,14 @@ pub(crate) async fn sync_events_route( .rooms .state_cache .rooms_joined(&sender_user) - .collect::>(); + .map(ToOwned::to_owned) + .collect::>() + .await; // Coalesce database writes for the remainder of this scope. let _cork = services.db.cork_and_flush(); for room_id in all_joined_rooms { - let room_id = room_id?; if let Ok(joined_room) = load_joined_room( &services, &sender_user, @@ -203,12 +212,14 @@ pub(crate) async fn sync_events_route( .rooms .state_cache .rooms_left(&sender_user) - .collect(); + .collect() + .await; + for result in all_left_rooms { handle_left_room( &services, since, - &result?.0, + &result.0, &sender_user, &mut left_rooms, &next_batch_string, @@ -224,10 +235,10 @@ pub(crate) async fn sync_events_route( .rooms .state_cache .rooms_invited(&sender_user) - .collect(); - for result in all_invited_rooms { - let (room_id, invite_state_events) = result?; + .collect() + .await; + for (room_id, invite_state_events) in all_invited_rooms { // Get and drop the lock to wait for remaining operations to finish let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; drop(insert_lock); @@ -235,7 +246,9 @@ pub(crate) async fn sync_events_route( let invite_count = services .rooms .state_cache - .get_invite_count(&room_id, &sender_user)?; + .get_invite_count(&room_id, &sender_user) + .await + .ok(); // Invited before last sync if Some(since) >= invite_count { @@ -253,22 +266,8 @@ pub(crate) async fn sync_events_route( } for user_id in left_encrypted_users { - let dont_share_encrypted_room = services - .rooms - .user - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? 
- .filter_map(Result::ok) - .filter_map(|other_room_id| { - Some( - services - .rooms - .state_accessor - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") - .ok()? - .is_some(), - ) - }) - .all(|encrypted| !encrypted); + let dont_share_encrypted_room = !share_encrypted_room(&services, &sender_user, &user_id, None).await; + // If the user doesn't share an encrypted room with the target anymore, we need // to tell them if dont_share_encrypted_room { @@ -279,7 +278,8 @@ pub(crate) async fn sync_events_route( // Remove all to-device events the device received *last time* services .users - .remove_to_device_events(&sender_user, &sender_device, since)?; + .remove_to_device_events(&sender_user, &sender_device, since) + .await; let response = sync_events::v3::Response { next_batch: next_batch_string, @@ -298,7 +298,8 @@ pub(crate) async fn sync_events_route( account_data: GlobalAccountData { events: services .account_data - .changes_since(None, &sender_user, since)? + .changes_since(None, &sender_user, since) + .await? .into_iter() .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect(), @@ -309,11 +310,14 @@ pub(crate) async fn sync_events_route( }, device_one_time_keys_count: services .users - .count_one_time_keys(&sender_user, &sender_device)?, + .count_one_time_keys(&sender_user, &sender_device) + .await, to_device: ToDevice { events: services .users - .get_to_device_events(&sender_user, &sender_device)?, + .get_to_device_events(&sender_user, &sender_device) + .collect() + .await, }, // Fallback keys are not yet supported device_unused_fallback_key_types: None, @@ -351,14 +355,16 @@ async fn handle_left_room( let left_count = services .rooms .state_cache - .get_left_count(room_id, sender_user)?; + .get_left_count(room_id, sender_user) + .await + .ok(); // Left before last sync if Some(since) >= left_count { return Ok(()); } - if !services.rooms.metadata.exists(room_id)? 
{ + if !services.rooms.metadata.exists(room_id).await { // This is just a rejected invite, not a room we know // Insert a leave event anyways let event = PduEvent { @@ -408,27 +414,29 @@ async fn handle_left_room( let since_shortstatehash = services .rooms .user - .get_token_shortstatehash(room_id, since)?; + .get_token_shortstatehash(room_id, since) + .await; let since_state_ids = match since_shortstatehash { - Some(s) => services.rooms.state_accessor.state_full_ids(s).await?, - None => HashMap::new(), + Ok(s) => services.rooms.state_accessor.state_full_ids(s).await?, + Err(_) => HashMap::new(), }; - let Some(left_event_id) = - services - .rooms - .state_accessor - .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str())? + let Ok(left_event_id) = services + .rooms + .state_accessor + .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) + .await else { error!("Left room but no left state event"); return Ok(()); }; - let Some(left_shortstatehash) = services + let Ok(left_shortstatehash) = services .rooms .state_accessor - .pdu_shortstatehash(&left_event_id)? 
+ .pdu_shortstatehash(&left_event_id) + .await else { error!(event_id = %left_event_id, "Leave event has no state"); return Ok(()); @@ -443,14 +451,15 @@ async fn handle_left_room( let leave_shortstatekey = services .rooms .short - .get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str())?; + .get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str()) + .await; left_state_ids.insert(leave_shortstatekey, left_event_id); let mut i: u8 = 0; for (key, id) in left_state_ids { if full_state || since_state_ids.get(&key) != Some(&id) { - let (event_type, state_key) = services.rooms.short.get_statekey_from_short(key)?; + let (event_type, state_key) = services.rooms.short.get_statekey_from_short(key).await?; if !lazy_load_enabled || event_type != StateEventType::RoomMember @@ -458,7 +467,7 @@ async fn handle_left_room( // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 || (cfg!(feature = "element_hacks") && *sender_user == state_key) { - let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else { + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { error!("Pdu in state not found: {}", id); continue; }; @@ -495,19 +504,25 @@ async fn handle_left_room( async fn process_presence_updates( services: &Services, presence_updates: &mut HashMap, since: u64, syncing_user: &UserId, ) -> Result<()> { + let presence_since = services.presence.presence_since(since); + // Take presence updates - for (user_id, _, presence_bytes) in services.presence.presence_since(since) { + pin_mut!(presence_since); + while let Some((user_id, _, presence_bytes)) = presence_since.next().await { if !services .rooms .state_cache - .user_sees_user(syncing_user, &user_id)? 
+ .user_sees_user(syncing_user, &user_id) + .await { continue; } let presence_event = services .presence - .from_json_bytes_to_event(&presence_bytes, &user_id)?; + .from_json_bytes_to_event(&presence_bytes, &user_id) + .await?; + match presence_updates.entry(user_id) { Entry::Vacant(slot) => { slot.insert(presence_event); @@ -551,14 +566,14 @@ async fn load_joined_room( let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; drop(insert_lock); - let (timeline_pdus, limited) = load_timeline(services, sender_user, room_id, sincecount, 10)?; + let (timeline_pdus, limited) = load_timeline(services, sender_user, room_id, sincecount, 10).await?; let send_notification_counts = !timeline_pdus.is_empty() || services .rooms .user - .last_notification_read(sender_user, room_id)? - > since; + .last_notification_read(sender_user, room_id) + .await > since; let mut timeline_users = HashSet::new(); for (_, event) in &timeline_pdus { @@ -568,355 +583,384 @@ async fn load_joined_room( services .rooms .lazy_loading - .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount) - .await?; + .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount); // Database queries: - let Some(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id)? 
else { - return Err!(Database(error!("Room {room_id} has no state"))); - }; + let current_shortstatehash = services + .rooms + .state + .get_room_shortstatehash(room_id) + .await + .map_err(|_| err!(Database(error!("Room {room_id} has no state"))))?; let since_shortstatehash = services .rooms .user - .get_token_shortstatehash(room_id, since)?; + .get_token_shortstatehash(room_id, since) + .await + .ok(); - let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = - if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { - // No state changes - (Vec::new(), None, None, false, Vec::new()) - } else { - // Calculates joined_member_count, invited_member_count and heroes - let calculate_counts = || { - let joined_member_count = services - .rooms - .state_cache - .room_joined_count(room_id)? - .unwrap_or(0); - let invited_member_count = services - .rooms - .state_cache - .room_invited_count(room_id)? - .unwrap_or(0); + let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = if timeline_pdus + .is_empty() + && (since_shortstatehash.is_none() || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))) + { + // No state changes + (Vec::new(), None, None, false, Vec::new()) + } else { + // Calculates joined_member_count, invited_member_count and heroes + let calculate_counts = || async { + let joined_member_count = services + .rooms + .state_cache + .room_joined_count(room_id) + .await + .unwrap_or(0); - // Recalculate heroes (first 5 members) - let mut heroes: Vec = Vec::with_capacity(5); + let invited_member_count = services + .rooms + .state_cache + .room_invited_count(room_id) + .await + .unwrap_or(0); - if joined_member_count.saturating_add(invited_member_count) <= 5 { - // Go through all PDUs and for each member event, check if the user is still - // joined or invited until we have 5 or we reach the end + if 
joined_member_count.saturating_add(invited_member_count) > 5 { + return Ok::<_, Error>((Some(joined_member_count), Some(invited_member_count), Vec::new())); + } - for hero in services - .rooms - .timeline - .all_pdus(sender_user, room_id)? - .filter_map(Result::ok) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) - .map(|(_, pdu)| { - let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + // Go through all PDUs and for each member event, check if the user is still + // joined or invited until we have 5 or we reach the end - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::parse(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + // Recalculate heroes (first 5 members) + let heroes = services + .rooms + .timeline + .all_pdus(sender_user, room_id) + .await? + .ready_filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) + .filter_map(|(_, pdu)| async move { + let Ok(content) = serde_json::from_str::(pdu.content.get()) else { + return None; + }; - // The membership was and still is invite or join - if matches!(content.membership, MembershipState::Join | MembershipState::Invite) - && (services.rooms.state_cache.is_joined(&user_id, room_id)? - || services.rooms.state_cache.is_invited(&user_id, room_id)?) 
- { - Ok::<_, Error>(Some(user_id)) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - .filter_map(Result::ok) - // Filter for possible heroes - .flatten() + let Some(state_key) = &pdu.state_key else { + return None; + }; + + let Ok(user_id) = UserId::parse(state_key) else { + return None; + }; + + if user_id == sender_user { + return None; + } + + // The membership was and still is invite or join + if !matches!(content.membership, MembershipState::Join | MembershipState::Invite) { + return None; + } + + if !services + .rooms + .state_cache + .is_joined(&user_id, room_id) + .await && services + .rooms + .state_cache + .is_invited(&user_id, room_id) + .await { - if heroes.contains(&hero) || hero == sender_user { - continue; - } + return None; + } - heroes.push(hero); + Some(user_id) + }) + .collect::>() + .await; + + Ok::<_, Error>(( + Some(joined_member_count), + Some(invited_member_count), + heroes.into_iter().collect::>(), + )) + }; + + let since_sender_member: Option = if let Some(short) = since_shortstatehash { + services + .rooms + .state_accessor + .state_get(short, &StateEventType::RoomMember, sender_user.as_str()) + .await + .and_then(|pdu| serde_json::from_str(pdu.content.get()).map_err(Into::into)) + .ok() + } else { + None + }; + + let joined_since_last_sync = + since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + + if since_shortstatehash.is_none() || joined_since_last_sync { + // Probably since = 0, we will do an initial sync + + let (joined_member_count, invited_member_count, heroes) = calculate_counts().await?; + + let current_state_ids = services + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + let mut i: u8 = 0; + for (shortstatekey, id) in current_state_ids { + let (event_type, state_key) = services + .rooms + .short + .get_statekey_from_short(shortstatekey) + .await?; + + if event_type != 
StateEventType::RoomMember { + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); + continue; + }; + state_events.push(pdu); + + i = i.wrapping_add(1); + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } else if !lazy_load_enabled + || full_state + || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || (cfg!(feature = "element_hacks") && *sender_user == state_key) + { + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); + continue; + }; + + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(&state_key) { + lazy_loaded.insert(uid); + } + state_events.push(pdu); + + i = i.wrapping_add(1); + if i % 100 == 0 { + tokio::task::yield_now().await; } } + } - Ok::<_, Error>((Some(joined_member_count), Some(invited_member_count), heroes)) - }; + // Reset lazy loading because this is an initial sync + services + .rooms + .lazy_loading + .lazy_load_reset(sender_user, sender_device, room_id) + .await; - let since_sender_member: Option = since_shortstatehash - .and_then(|shortstatehash| { - services - .rooms - .state_accessor - .state_get(shortstatehash, &StateEventType::RoomMember, sender_user.as_str()) - .transpose() - }) - .transpose()? - .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); + // The state_events above should contain all timeline_users, let's mark them as + // lazy loaded. 
+ services.rooms.lazy_loading.lazy_load_mark_sent( + sender_user, + sender_device, + room_id, + lazy_loaded, + next_batchcount, + ); - let joined_since_last_sync = - since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + (heroes, joined_member_count, invited_member_count, true, state_events) + } else { + // Incremental /sync + let since_shortstatehash = since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); - if since_shortstatehash.is_none() || joined_since_last_sync { - // Probably since = 0, we will do an initial sync - - let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; + let mut delta_state_events = Vec::new(); + if since_shortstatehash != current_shortstatehash { let current_state_ids = services .rooms .state_accessor .state_full_ids(current_shortstatehash) .await?; - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - let mut i: u8 = 0; - for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services - .rooms - .short - .get_statekey_from_short(shortstatekey)?; - - if event_type != StateEventType::RoomMember { - let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else { - error!("Pdu in state not found: {}", id); - continue; - }; - state_events.push(pdu); - - i = i.wrapping_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } else if !lazy_load_enabled - || full_state - || timeline_users.contains(&state_key) - // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 - || (cfg!(feature = "element_hacks") && *sender_user == state_key) - { - let Some(pdu) = services.rooms.timeline.get_pdu(&id)? 
else { - error!("Pdu in state not found: {}", id); - continue; - }; - - // This check is in case a bad user ID made it into the database - if let Ok(uid) = UserId::parse(&state_key) { - lazy_loaded.insert(uid); - } - state_events.push(pdu); - - i = i.wrapping_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - } - - // Reset lazy loading because this is an initial sync - services - .rooms - .lazy_loading - .lazy_load_reset(sender_user, sender_device, room_id)?; - - // The state_events above should contain all timeline_users, let's mark them as - // lazy loaded. - services - .rooms - .lazy_loading - .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount) - .await; - - (heroes, joined_member_count, invited_member_count, true, state_events) - } else { - // Incremental /sync - let since_shortstatehash = since_shortstatehash.unwrap(); - - let mut delta_state_events = Vec::new(); - - if since_shortstatehash != current_shortstatehash { - let current_state_ids = services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - let since_state_ids = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, id) in current_state_ids { - if full_state || since_state_ids.get(&key) != Some(&id) { - let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else { - error!("Pdu in state not found: {}", id); - continue; - }; - - delta_state_events.push(pdu); - tokio::task::yield_now().await; - } - } - } - - let encrypted_room = services + let since_state_ids = services .rooms .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
- .is_some(); + .state_full_ids(since_shortstatehash) + .await?; - let since_encryption = services.rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; - - // Calculations: - let new_encrypted_room = encrypted_room && since_encryption.is_none(); - - let send_member_count = delta_state_events - .iter() - .any(|event| event.kind == TimelineEventType::RoomMember); - - if encrypted_room { - for state_event in &delta_state_events { - if state_event.kind != TimelineEventType::RoomMember { + for (key, id) in current_state_ids { + if full_state || since_state_ids.get(&key) != Some(&id) { + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); continue; - } + }; - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - } - - let new_membership = - serde_json::from_str::(state_event.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(services, sender_user, &user_id, room_id)? 
{ - device_list_updates.insert(user_id); - } - }, - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - }, - _ => {}, - } - } + delta_state_events.push(pdu); + tokio::task::yield_now().await; } } + } - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( - services - .rooms - .state_cache - .room_members(room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to the sender - sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target - // already - !share_encrypted_room(services, sender_user, user_id, room_id).unwrap_or(false) - }), - ); - } + let encrypted_room = services + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .await + .is_ok(); - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts()? 
- } else { - (None, None, Vec::new()) - }; + let since_encryption = services + .rooms + .state_accessor + .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") + .await; - let mut state_events = delta_state_events; - let mut lazy_loaded = HashSet::new(); + // Calculations: + let new_encrypted_room = encrypted_room && since_encryption.is_err(); - // Mark all member events we're returning as lazy-loaded - for pdu in &state_events { - if pdu.kind == TimelineEventType::RoomMember { - match UserId::parse( - pdu.state_key - .as_ref() - .expect("State event has state key") - .clone(), - ) { - Ok(state_key_userid) => { - lazy_loaded.insert(state_key_userid); - }, - Err(e) => error!("Invalid state key for member event: {}", e), - } - } - } + let send_member_count = delta_state_events + .iter() + .any(|event| event.kind == TimelineEventType::RoomMember); - // Fetch contextual member state events for events from the timeline, and - // mark them as lazy-loaded as well. - for (_, event) in &timeline_pdus { - if lazy_loaded.contains(&event.sender) { + if encrypted_room { + for state_event in &delta_state_events { + if state_event.kind != TimelineEventType::RoomMember { continue; } - if !services.rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - room_id, - &event.sender, - )? || lazy_load_send_redundant - { - if let Some(member_event) = services.rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomMember, - event.sender.as_str(), - )? { - lazy_loaded.insert(event.sender.clone()); - state_events.push(member_event); + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + if user_id == sender_user { + continue; + } + + let new_membership = serde_json::from_str::(state_event.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database."))? 
+ .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(services, sender_user, &user_id, Some(room_id)).await { + device_list_updates.insert(user_id); + } + }, + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + }, + _ => {}, } } } + } - services + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + services + .rooms + .state_cache + .room_members(room_id) + .ready_filter(|user_id| { + // Don't send key updates from the sender to the sender + sender_user != *user_id + }) + .filter_map(|user_id| async move { + // Only send keys if the sender doesn't share an encrypted room with the target + // already + (!share_encrypted_room(services, sender_user, user_id, Some(room_id)).await) + .then_some(user_id.to_owned()) + }) + .collect::>() + .await, + ); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + calculate_counts().await? + } else { + (None, None, Vec::new()) + }; + + let mut state_events = delta_state_events; + let mut lazy_loaded = HashSet::new(); + + // Mark all member events we're returning as lazy-loaded + for pdu in &state_events { + if pdu.kind == TimelineEventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + }, + Err(e) => error!("Invalid state key for member event: {}", e), + } + } + } + + // Fetch contextual member state events for events from the timeline, and + // mark them as lazy-loaded as well. 
+ for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + if !services .rooms .lazy_loading - .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount) - .await; - - ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) + .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) + .await || lazy_load_send_redundant + { + if let Ok(member_event) = services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, event.sender.as_str()) + .await + { + lazy_loaded.insert(event.sender.clone()); + state_events.push(member_event); + } + } } - }; + + services.rooms.lazy_loading.lazy_load_mark_sent( + sender_user, + sender_device, + room_id, + lazy_loaded, + next_batchcount, + ); + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) + } + }; // Look for device list updates in this room device_list_updates.extend( services .users .keys_changed(room_id.as_ref(), since, None) - .filter_map(Result::ok), + .map(ToOwned::to_owned) + .collect::>() + .await, ); let notification_count = if send_notification_counts { @@ -924,7 +968,8 @@ async fn load_joined_room( services .rooms .user - .notification_count(sender_user, room_id)? + .notification_count(sender_user, room_id) + .await .try_into() .expect("notification count can't go that high"), ) @@ -937,7 +982,8 @@ async fn load_joined_room( services .rooms .user - .highlight_count(sender_user, room_id)? + .highlight_count(sender_user, room_id) + .await .try_into() .expect("highlight count can't go that high"), ) @@ -966,9 +1012,9 @@ async fn load_joined_room( .rooms .read_receipt .readreceipts_since(room_id, since) - .filter_map(Result::ok) // Filter out buggy events .map(|(_, _, v)| v) - .collect(); + .collect() + .await; if services.rooms.typing.last_typing_update(room_id).await? 
> since { edus.push( @@ -985,13 +1031,15 @@ async fn load_joined_room( services .rooms .user - .associate_token_shortstatehash(room_id, next_batch, current_shortstatehash)?; + .associate_token_shortstatehash(room_id, next_batch, current_shortstatehash) + .await; Ok(JoinedRoom { account_data: RoomAccountData { events: services .account_data - .changes_since(Some(room_id), sender_user, since)? + .changes_since(Some(room_id), sender_user, since) + .await? .into_iter() .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect(), @@ -1023,41 +1071,37 @@ async fn load_joined_room( }) } -fn load_timeline( +async fn load_timeline( services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64, ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { let timeline_pdus; let limited = if services .rooms .timeline - .last_timeline_count(sender_user, room_id)? + .last_timeline_count(sender_user, room_id) + .await? > roomsincecount { let mut non_timeline_pdus = services .rooms .timeline - .pdus_until(sender_user, room_id, PduCount::max())? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .take_while(|(pducount, _)| pducount > &roomsincecount); + .pdus_until(sender_user, room_id, PduCount::max()) + .await? 
+ .ready_take_while(|(pducount, _)| pducount > &roomsincecount); // Take the last events for the timeline timeline_pdus = non_timeline_pdus .by_ref() .take(usize_from_u64_truncated(limit)) .collect::>() + .await .into_iter() .rev() .collect::>(); // They /sync response doesn't always return all messages, so we say the output // is limited unless there are events in non_timeline_pdus - non_timeline_pdus.next().is_some() + non_timeline_pdus.next().await.is_some() } else { timeline_pdus = Vec::new(); false @@ -1065,26 +1109,23 @@ fn load_timeline( Ok((timeline_pdus, limited)) } -fn share_encrypted_room( - services: &Services, sender_user: &UserId, user_id: &UserId, ignore_room: &RoomId, -) -> Result { - Ok(services +async fn share_encrypted_room( + services: &Services, sender_user: &UserId, user_id: &UserId, ignore_room: Option<&RoomId>, +) -> bool { + services .rooms .user - .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? - .filter_map(Result::ok) - .filter(|room_id| room_id != ignore_room) - .filter_map(|other_room_id| { - Some( - services - .rooms - .state_accessor - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") - .ok()? 
- .is_some(), - ) + .get_shared_rooms(sender_user, user_id) + .ready_filter(|&room_id| Some(room_id) != ignore_room) + .any(|other_room_id| async move { + services + .rooms + .state_accessor + .room_state_get(other_room_id, &StateEventType::RoomEncryption, "") + .await + .is_ok() }) - .any(|encrypted| encrypted)) + .await } /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` @@ -1114,7 +1155,7 @@ pub(crate) async fn sync_events_v4_route( if globalsince != 0 && !services - .users + .sync .remembered(sender_user.clone(), sender_device.clone(), conn_id.clone()) { debug!("Restarting sync stream because it was gone from the database"); @@ -1127,41 +1168,43 @@ pub(crate) async fn sync_events_v4_route( if globalsince == 0 { services - .users + .sync .forget_sync_request_connection(sender_user.clone(), sender_device.clone(), conn_id.clone()); } // Get sticky parameters from cache let known_rooms = services - .users + .sync .update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body); let all_joined_rooms = services .rooms .state_cache .rooms_joined(&sender_user) - .filter_map(Result::ok) - .collect::>(); + .map(ToOwned::to_owned) + .collect::>() + .await; let all_invited_rooms = services .rooms .state_cache .rooms_invited(&sender_user) - .filter_map(Result::ok) .map(|r| r.0) - .collect::>(); + .collect::>() + .await; let all_rooms = all_joined_rooms .iter() - .cloned() - .chain(all_invited_rooms.clone()) + .chain(all_invited_rooms.iter()) + .map(Clone::clone) .collect(); if body.extensions.to_device.enabled.unwrap_or(false) { services .users - .remove_to_device_events(&sender_user, &sender_device, globalsince)?; + .remove_to_device_events(&sender_user, &sender_device, globalsince) + .await; } let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in @@ -1179,7 +1222,8 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.account_data.enabled.unwrap_or(false) { account_data.global 
= services .account_data - .changes_since(None, &sender_user, globalsince)? + .changes_since(None, &sender_user, globalsince) + .await? .into_iter() .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect(); @@ -1190,7 +1234,8 @@ pub(crate) async fn sync_events_v4_route( room.clone(), services .account_data - .changes_since(Some(&room), &sender_user, globalsince)? + .changes_since(Some(&room), &sender_user, globalsince) + .await? .into_iter() .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect(), @@ -1205,40 +1250,42 @@ pub(crate) async fn sync_events_v4_route( services .users .keys_changed(sender_user.as_ref(), globalsince, None) - .filter_map(Result::ok), + .map(ToOwned::to_owned) + .collect::>() + .await, ); for room_id in &all_joined_rooms { - let Some(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id)? else { - error!("Room {} has no state", room_id); + let Ok(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id).await else { + error!("Room {room_id} has no state"); continue; }; let since_shortstatehash = services .rooms .user - .get_token_shortstatehash(room_id, globalsince)?; + .get_token_shortstatehash(room_id, globalsince) + .await + .ok(); - let since_sender_member: Option = since_shortstatehash - .and_then(|shortstatehash| { - services - .rooms - .state_accessor - .state_get(shortstatehash, &StateEventType::RoomMember, sender_user.as_str()) - .transpose() - }) - .transpose()? 
- .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); + let since_sender_member: Option = if let Some(short) = since_shortstatehash { + services + .rooms + .state_accessor + .state_get(short, &StateEventType::RoomMember, sender_user.as_str()) + .await + .and_then(|pdu| serde_json::from_str(pdu.content.get()).map_err(Into::into)) + .ok() + } else { + None + }; let encrypted_room = services .rooms .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? - .is_some(); + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .await + .is_ok(); if let Some(since_shortstatehash) = since_shortstatehash { // Skip if there are only timeline changes @@ -1246,22 +1293,24 @@ pub(crate) async fn sync_events_v4_route( continue; } - let since_encryption = services.rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; + let since_encryption = services + .rooms + .state_accessor + .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") + .await; let joined_since_last_sync = since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); - let new_encrypted_room = encrypted_room && since_encryption.is_none(); + let new_encrypted_room = encrypted_room && since_encryption.is_err(); + if encrypted_room { let current_state_ids = services .rooms .state_accessor .state_full_ids(current_shortstatehash) .await?; + let since_state_ids = services .rooms .state_accessor @@ -1270,8 +1319,8 @@ pub(crate) async fn sync_events_v4_route( for (key, id) in current_state_ids { if since_state_ids.get(&key) != Some(&id) { - let Some(pdu) = services.rooms.timeline.get_pdu(&id)? 
else { - error!("Pdu in state not found: {}", id); + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); continue; }; if pdu.kind == TimelineEventType::RoomMember { @@ -1291,7 +1340,9 @@ pub(crate) async fn sync_events_v4_route( match new_membership { MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(&services, &sender_user, &user_id, room_id)? { + if !share_encrypted_room(&services, &sender_user, &user_id, Some(room_id)) + .await + { device_list_changes.insert(user_id); } }, @@ -1306,22 +1357,25 @@ pub(crate) async fn sync_events_v4_route( } } if joined_since_last_sync || new_encrypted_room { + let sender_user = &sender_user; // If the user is in a new encrypted room, give them all joined users device_list_changes.extend( services .rooms .state_cache .room_members(room_id) - .flatten() - .filter(|user_id| { + .ready_filter(|user_id| { // Don't send key updates from the sender to the sender - &sender_user != user_id + sender_user != user_id }) - .filter(|user_id| { + .filter_map(|user_id| async move { // Only send keys if the sender doesn't share an encrypted room with the target // already - !share_encrypted_room(&services, &sender_user, user_id, room_id).unwrap_or(false) - }), + (!share_encrypted_room(&services, sender_user, user_id, Some(room_id)).await) + .then_some(user_id.to_owned()) + }) + .collect::>() + .await, ); } } @@ -1331,26 +1385,15 @@ pub(crate) async fn sync_events_v4_route( services .users .keys_changed(room_id.as_ref(), globalsince, None) - .filter_map(Result::ok), + .map(ToOwned::to_owned) + .collect::>() + .await, ); } + for user_id in left_encrypted_users { - let dont_share_encrypted_room = services - .rooms - .user - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? 
- .filter_map(Result::ok) - .filter_map(|other_room_id| { - Some( - services - .rooms - .state_accessor - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") - .ok()? - .is_some(), - ) - }) - .all(|encrypted| !encrypted); + let dont_share_encrypted_room = !share_encrypted_room(&services, &sender_user, &user_id, None).await; + // If the user doesn't share an encrypted room with the target anymore, we need // to tell them if dont_share_encrypted_room { @@ -1362,7 +1405,7 @@ pub(crate) async fn sync_events_v4_route( let mut lists = BTreeMap::new(); let mut todo_rooms = BTreeMap::new(); // and required state - for (list_id, list) in body.lists { + for (list_id, list) in &body.lists { let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { Some(true) => &all_invited_rooms, Some(false) => &all_joined_rooms, @@ -1371,23 +1414,23 @@ pub(crate) async fn sync_events_v4_route( let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { Some(filter) if filter.is_empty() => active_rooms.clone(), - Some(value) => filter_rooms(active_rooms, State(services), &value, true), + Some(value) => filter_rooms(active_rooms, State(services), &value, true).await, None => active_rooms.clone(), }; let active_rooms = match list.filters.clone().map(|f| f.room_types) { Some(filter) if filter.is_empty() => active_rooms.clone(), - Some(value) => filter_rooms(&active_rooms, State(services), &value, false), + Some(value) => filter_rooms(&active_rooms, State(services), &value, false).await, None => active_rooms, }; let mut new_known_rooms = BTreeSet::new(); + let ranges = list.ranges.clone(); lists.insert( list_id.clone(), sync_events::v4::SyncList { - ops: list - .ranges + ops: ranges .into_iter() .map(|mut r| { r.0 = r.0.clamp( @@ -1396,29 +1439,34 @@ pub(crate) async fn sync_events_v4_route( ); r.1 = r.1.clamp(r.0, UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX)); + let room_ids = if !active_rooms.is_empty() { 
active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec() } else { Vec::new() }; + new_known_rooms.extend(room_ids.iter().cloned()); for room_id in &room_ids { let todo_room = todo_rooms .entry(room_id.clone()) .or_insert((BTreeSet::new(), 0, u64::MAX)); + let limit = list .room_details .timeline_limit .map_or(10, u64::from) .min(100); + todo_room .0 .extend(list.room_details.required_state.iter().cloned()); + todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date todo_room.2 = todo_room.2.min( known_rooms - .get(&list_id) + .get(list_id.as_str()) .and_then(|k| k.get(room_id)) .copied() .unwrap_or(0), @@ -1438,11 +1486,11 @@ pub(crate) async fn sync_events_v4_route( ); if let Some(conn_id) = &body.conn_id { - services.users.update_sync_known_rooms( + services.sync.update_sync_known_rooms( sender_user.clone(), sender_device.clone(), conn_id.clone(), - list_id, + list_id.clone(), new_known_rooms, globalsince, ); @@ -1451,7 +1499,7 @@ pub(crate) async fn sync_events_v4_route( let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id)? 
{ + if !services.rooms.metadata.exists(room_id).await { continue; } let todo_room = todo_rooms @@ -1477,7 +1525,7 @@ pub(crate) async fn sync_events_v4_route( } if let Some(conn_id) = &body.conn_id { - services.users.update_sync_known_rooms( + services.sync.update_sync_known_rooms( sender_user.clone(), sender_device.clone(), conn_id.clone(), @@ -1488,7 +1536,7 @@ pub(crate) async fn sync_events_v4_route( } if let Some(conn_id) = &body.conn_id { - services.users.update_sync_subscriptions( + services.sync.update_sync_subscriptions( sender_user.clone(), sender_device.clone(), conn_id.clone(), @@ -1509,12 +1557,13 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_cache .invite_state(&sender_user, room_id) - .unwrap_or(None); + .await + .ok(); (timeline_pdus, limited) = (Vec::new(), true); } else { (timeline_pdus, limited) = - match load_timeline(&services, &sender_user, room_id, roomsincecount, *timeline_limit) { + match load_timeline(&services, &sender_user, room_id, roomsincecount, *timeline_limit).await { Ok(value) => value, Err(err) => { warn!("Encountered missing timeline in {}, error {}", room_id, err); @@ -1527,17 +1576,20 @@ pub(crate) async fn sync_events_v4_route( room_id.clone(), services .account_data - .changes_since(Some(room_id), &sender_user, *roomsince)? + .changes_since(Some(room_id), &sender_user, *roomsince) + .await? 
.into_iter() .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect(), ); - let room_receipts = services + let vector: Vec<_> = services .rooms .read_receipt - .readreceipts_since(room_id, *roomsince); - let vector: Vec<_> = room_receipts.into_iter().collect(); + .readreceipts_since(room_id, *roomsince) + .collect() + .await; + let receipt_size = vector.len(); receipts .rooms @@ -1584,41 +1636,42 @@ pub(crate) async fn sync_events_v4_route( let required_state = required_state_request .iter() - .map(|state| { + .stream() + .filter_map(|state| async move { services .rooms .state_accessor .room_state_get(room_id, &state.0, &state.1) + .await + .map(|s| s.to_sync_state_event()) + .ok() }) - .filter_map(Result::ok) - .flatten() - .map(|state| state.to_sync_state_event()) - .collect(); + .collect() + .await; // Heroes let heroes = services .rooms .state_cache .room_members(room_id) - .filter_map(Result::ok) - .filter(|member| member != &sender_user) - .map(|member| { - Ok::<_, Error>( - services - .rooms - .state_accessor - .get_member(room_id, &member)? - .map(|memberevent| SlidingSyncRoomHero { - user_id: member, - name: memberevent.displayname, - avatar: memberevent.avatar_url, - }), - ) + .ready_filter(|member| member != &sender_user) + .filter_map(|member| async move { + services + .rooms + .state_accessor + .get_member(room_id, member) + .await + .map(|memberevent| SlidingSyncRoomHero { + user_id: member.to_owned(), + name: memberevent.displayname, + avatar: memberevent.avatar_url, + }) + .ok() }) - .filter_map(Result::ok) - .flatten() .take(5) - .collect::>(); + .collect::>() + .await; + let name = match heroes.len().cmp(&(1_usize)) { Ordering::Greater => { let firsts = heroes[1..] 
@@ -1626,10 +1679,12 @@ pub(crate) async fn sync_events_v4_route( .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) .collect::>() .join(", "); + let last = heroes[0] .name .clone() .unwrap_or_else(|| heroes[0].user_id.to_string()); + Some(format!("{firsts} and {last}")) }, Ordering::Equal => Some( @@ -1650,11 +1705,17 @@ pub(crate) async fn sync_events_v4_route( rooms.insert( room_id.clone(), sync_events::v4::SlidingSyncRoom { - name: services.rooms.state_accessor.get_name(room_id)?.or(name), + name: services + .rooms + .state_accessor + .get_name(room_id) + .await + .ok() + .or(name), avatar: if let Some(heroes_avatar) = heroes_avatar { ruma::JsOption::Some(heroes_avatar) } else { - match services.rooms.state_accessor.get_avatar(room_id)? { + match services.rooms.state_accessor.get_avatar(room_id).await { ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), ruma::JsOption::Null => ruma::JsOption::Null, ruma::JsOption::Undefined => ruma::JsOption::Undefined, @@ -1668,7 +1729,8 @@ pub(crate) async fn sync_events_v4_route( services .rooms .user - .highlight_count(&sender_user, room_id)? + .highlight_count(&sender_user, room_id) + .await .try_into() .expect("notification count can't go that high"), ), @@ -1676,7 +1738,8 @@ pub(crate) async fn sync_events_v4_route( services .rooms .user - .notification_count(&sender_user, room_id)? + .notification_count(&sender_user, room_id) + .await .try_into() .expect("notification count can't go that high"), ), @@ -1689,7 +1752,8 @@ pub(crate) async fn sync_events_v4_route( services .rooms .state_cache - .room_joined_count(room_id)? + .room_joined_count(room_id) + .await .unwrap_or(0) .try_into() .unwrap_or_else(|_| uint!(0)), @@ -1698,7 +1762,8 @@ pub(crate) async fn sync_events_v4_route( services .rooms .state_cache - .room_invited_count(room_id)? 
+ .room_invited_count(room_id) + .await .unwrap_or(0) .try_into() .unwrap_or_else(|_| uint!(0)), @@ -1732,7 +1797,9 @@ pub(crate) async fn sync_events_v4_route( Some(sync_events::v4::ToDevice { events: services .users - .get_to_device_events(&sender_user, &sender_device)?, + .get_to_device_events(&sender_user, &sender_device) + .collect() + .await, next_batch: next_batch.to_string(), }) } else { @@ -1745,7 +1812,8 @@ pub(crate) async fn sync_events_v4_route( }, device_one_time_keys_count: services .users - .count_one_time_keys(&sender_user, &sender_device)?, + .count_one_time_keys(&sender_user, &sender_device) + .await, // Fallback keys are not yet supported device_unused_fallback_key_types: None, }, @@ -1759,25 +1827,26 @@ pub(crate) async fn sync_events_v4_route( }) } -fn filter_rooms( +async fn filter_rooms( rooms: &[OwnedRoomId], State(services): State, filter: &[RoomTypeFilter], negate: bool, ) -> Vec { - return rooms + rooms .iter() - .filter(|r| match services.rooms.state_accessor.get_room_type(r) { - Err(e) => { - warn!("Requested room type for {}, but could not retrieve with error {}", r, e); - false - }, - Ok(result) => { - let result = RoomTypeFilter::from(result); - if negate { - !filter.contains(&result) - } else { - filter.is_empty() || filter.contains(&result) - } - }, + .stream() + .filter_map(|r| async move { + match services.rooms.state_accessor.get_room_type(r).await { + Err(_) => false, + Ok(result) => { + let result = RoomTypeFilter::from(Some(result)); + if negate { + !filter.contains(&result) + } else { + filter.is_empty() || filter.contains(&result) + } + }, + } + .then_some(r.to_owned()) }) - .cloned() - .collect(); + .collect() + .await } diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index 301568e5..bcd0f817 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -23,10 +23,11 @@ pub(crate) async fn update_tag_route( let event = services .account_data - .get(Some(&body.room_id), sender_user, 
RoomAccountDataEventType::Tag)?; + .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag) + .await; let mut tags_event = event.map_or_else( - || { + |_| { Ok(TagEvent { content: TagEventContent { tags: BTreeMap::new(), @@ -41,12 +42,15 @@ pub(crate) async fn update_tag_route( .tags .insert(body.tag.clone().into(), body.tag_info.clone()); - services.account_data.update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), - )?; + services + .account_data + .update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + &serde_json::to_value(tags_event).expect("to json value always works"), + ) + .await?; Ok(create_tag::v3::Response {}) } @@ -63,10 +67,11 @@ pub(crate) async fn delete_tag_route( let event = services .account_data - .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag)?; + .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag) + .await; let mut tags_event = event.map_or_else( - || { + |_| { Ok(TagEvent { content: TagEventContent { tags: BTreeMap::new(), @@ -78,12 +83,15 @@ pub(crate) async fn delete_tag_route( tags_event.content.tags.remove(&body.tag.clone().into()); - services.account_data.update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), - )?; + services + .account_data + .update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + &serde_json::to_value(tags_event).expect("to json value always works"), + ) + .await?; Ok(delete_tag::v3::Response {}) } @@ -100,10 +108,11 @@ pub(crate) async fn get_tags_route( let event = services .account_data - .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag)?; + .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag) + .await; let tags_event = event.map_or_else( - || { + |_| { Ok(TagEvent { content: 
TagEventContent { tags: BTreeMap::new(), diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 8100f0e6..50f6cdfb 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,4 +1,6 @@ use axum::extract::State; +use conduit::PduEvent; +use futures::StreamExt; use ruma::{ api::client::{error::ErrorKind, threads::get_threads}, uint, @@ -27,20 +29,23 @@ pub(crate) async fn get_threads_route( u64::MAX }; - let threads = services + let room_id = &body.room_id; + let threads: Vec<(u64, PduEvent)> = services .rooms .threads - .threads_until(sender_user, &body.room_id, from, &body.include)? + .threads_until(sender_user, &body.room_id, from, &body.include) + .await? .take(limit) - .filter_map(Result::ok) - .filter(|(_, pdu)| { + .filter_map(|(count, pdu)| async move { services .rooms .state_accessor - .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) - .unwrap_or(false) + .user_can_see_event(sender_user, room_id, &pdu.event_id) + .await + .then_some((count, pdu)) }) - .collect::>(); + .collect() + .await; let next_batch = threads.last().map(|(count, _)| count.to_string()); diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 1f557ad7..2b37a9ec 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduit::{Error, Result}; +use futures::StreamExt; use ruma::{ api::{ client::{error::ErrorKind, to_device::send_event_to_device}, @@ -24,8 +25,9 @@ pub(crate) async fn send_event_to_device_route( // Check if this is a new transaction id if services .transaction_ids - .existing_txnid(sender_user, sender_device, &body.txn_id)? 
- .is_some() + .existing_txnid(sender_user, sender_device, &body.txn_id) + .await + .is_ok() { return Ok(send_event_to_device::v3::Response {}); } @@ -53,31 +55,35 @@ pub(crate) async fn send_event_to_device_route( continue; } + let event_type = &body.event_type.to_string(); + + let event = event + .deserialize_as() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?; + match target_device_id_maybe { DeviceIdOrAllDevices::DeviceId(target_device_id) => { - services.users.add_to_device_event( - sender_user, - target_user_id, - target_device_id, - &body.event_type.to_string(), - event - .deserialize_as() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?, - )?; + services + .users + .add_to_device_event(sender_user, target_user_id, target_device_id, event_type, event) + .await; }, DeviceIdOrAllDevices::AllDevices => { - for target_device_id in services.users.all_device_ids(target_user_id) { - services.users.add_to_device_event( - sender_user, - target_user_id, - &target_device_id?, - &body.event_type.to_string(), - event - .deserialize_as() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?, - )?; - } + let (event_type, event) = (&event_type, &event); + services + .users + .all_device_ids(target_user_id) + .for_each(|target_device_id| { + services.users.add_to_device_event( + sender_user, + target_user_id, + target_device_id, + event_type, + event.clone(), + ) + }) + .await; }, } } @@ -86,7 +92,7 @@ pub(crate) async fn send_event_to_device_route( // Save transaction id with empty data services .transaction_ids - .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; + .add_txnid(sender_user, sender_device, &body.txn_id, &[]); Ok(send_event_to_device::v3::Response {}) } diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index a06648e0..932d221e 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -16,7 +16,8 @@ pub(crate) async fn 
create_typing_event_route( if !services .rooms .state_cache - .is_joined(sender_user, &body.room_id)? + .is_joined(sender_user, &body.room_id) + .await { return Err(Error::BadRequest(ErrorKind::forbidden(), "You are not in this room.")); } diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index ab4703fd..dc570295 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -2,7 +2,8 @@ use std::collections::BTreeMap; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{warn, Err}; +use conduit::Err; +use futures::StreamExt; use ruma::{ api::{ client::{ @@ -45,7 +46,7 @@ pub(crate) async fn get_mutual_rooms_route( )); } - if !services.users.exists(&body.user_id)? { + if !services.users.exists(&body.user_id).await { return Ok(mutual_rooms::unstable::Response { joined: vec![], next_batch_token: None, @@ -55,9 +56,10 @@ pub(crate) async fn get_mutual_rooms_route( let mutual_rooms: Vec = services .rooms .user - .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? - .filter_map(Result::ok) - .collect(); + .get_shared_rooms(sender_user, &body.user_id) + .map(ToOwned::to_owned) + .collect() + .await; Ok(mutual_rooms::unstable::Response { joined: mutual_rooms, @@ -99,7 +101,7 @@ pub(crate) async fn get_room_summary( let room_id = services.rooms.alias.resolve(&body.room_id_or_alias).await?; - if !services.rooms.metadata.exists(&room_id)? { + if !services.rooms.metadata.exists(&room_id).await { return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); } @@ -108,7 +110,7 @@ pub(crate) async fn get_room_summary( .rooms .state_accessor .is_world_readable(&room_id) - .unwrap_or(false) + .await { return Err(Error::BadRequest( ErrorKind::forbidden(), @@ -122,50 +124,58 @@ pub(crate) async fn get_room_summary( .rooms .state_accessor .get_canonical_alias(&room_id) - .unwrap_or(None), + .await + .ok(), avatar_url: services .rooms .state_accessor - .get_avatar(&room_id)? 
+ .get_avatar(&room_id) + .await .into_option() .unwrap_or_default() .url, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id)?, - name: services - .rooms - .state_accessor - .get_name(&room_id) - .unwrap_or(None), + guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, + name: services.rooms.state_accessor.get_name(&room_id).await.ok(), num_joined_members: services .rooms .state_cache .room_joined_count(&room_id) - .unwrap_or_default() - .unwrap_or_else(|| { - warn!("Room {room_id} has no member count"); - 0 - }) - .try_into() - .expect("user count should not be that big"), + .await + .unwrap_or(0) + .try_into()?, topic: services .rooms .state_accessor .get_room_topic(&room_id) - .unwrap_or(None), + .await + .ok(), world_readable: services .rooms .state_accessor .is_world_readable(&room_id) - .unwrap_or(false), - join_rule: services.rooms.state_accessor.get_join_rule(&room_id)?.0, - room_type: services.rooms.state_accessor.get_room_type(&room_id)?, - room_version: Some(services.rooms.state.get_room_version(&room_id)?), + .await, + join_rule: services + .rooms + .state_accessor + .get_join_rule(&room_id) + .await + .unwrap_or_default() + .0, + room_type: services + .rooms + .state_accessor + .get_room_type(&room_id) + .await + .ok(), + room_version: services.rooms.state.get_room_version(&room_id).await.ok(), membership: if let Some(sender_user) = sender_user { services .rooms .state_accessor - .get_member(&room_id, sender_user)? 
- .map_or_else(|| Some(MembershipState::Leave), |content| Some(content.membership)) + .get_member(&room_id, sender_user) + .await + .map_or_else(|_| MembershipState::Leave, |content| content.membership) + .into() } else { None }, @@ -173,7 +183,8 @@ pub(crate) async fn get_room_summary( .rooms .state_accessor .get_room_encryption(&room_id) - .unwrap_or_else(|_e| None), + .await + .ok(), }) } @@ -191,13 +202,14 @@ pub(crate) async fn delete_timezone_key_route( return Err!(Request(Forbidden("You cannot update the profile of another user"))); } - services.users.set_timezone(&body.user_id, None).await?; + services.users.set_timezone(&body.user_id, None); if services.globals.allow_local_presence() { // Presence update services .presence - .ping_presence(&body.user_id, &PresenceState::Online)?; + .ping_presence(&body.user_id, &PresenceState::Online) + .await?; } Ok(delete_timezone_key::unstable::Response {}) @@ -217,16 +229,14 @@ pub(crate) async fn set_timezone_key_route( return Err!(Request(Forbidden("You cannot update the profile of another user"))); } - services - .users - .set_timezone(&body.user_id, body.tz.clone()) - .await?; + services.users.set_timezone(&body.user_id, body.tz.clone()); if services.globals.allow_local_presence() { // Presence update services .presence - .ping_presence(&body.user_id, &PresenceState::Online)?; + .ping_presence(&body.user_id, &PresenceState::Online) + .await?; } Ok(set_timezone_key::unstable::Response {}) @@ -280,10 +290,11 @@ pub(crate) async fn set_profile_key_route( .rooms .state_cache .rooms_joined(&body.user_id) - .filter_map(Result::ok) - .collect(); + .map(Into::into) + .collect() + .await; - update_displayname(&services, &body.user_id, Some(profile_key_value.to_string()), all_joined_rooms).await?; + update_displayname(&services, &body.user_id, Some(profile_key_value.to_string()), &all_joined_rooms).await?; } else if body.key == "avatar_url" { let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string()); @@ -291,21 
+302,23 @@ pub(crate) async fn set_profile_key_route( .rooms .state_cache .rooms_joined(&body.user_id) - .filter_map(Result::ok) - .collect(); + .map(Into::into) + .collect() + .await; - update_avatar_url(&services, &body.user_id, Some(mxc), None, all_joined_rooms).await?; + update_avatar_url(&services, &body.user_id, Some(mxc), None, &all_joined_rooms).await?; } else { services .users - .set_profile_key(&body.user_id, &body.key, Some(profile_key_value.clone()))?; + .set_profile_key(&body.user_id, &body.key, Some(profile_key_value.clone())); } if services.globals.allow_local_presence() { // Presence update services .presence - .ping_presence(&body.user_id, &PresenceState::Online)?; + .ping_presence(&body.user_id, &PresenceState::Online) + .await?; } Ok(set_profile_key::unstable::Response {}) @@ -335,30 +348,33 @@ pub(crate) async fn delete_profile_key_route( .rooms .state_cache .rooms_joined(&body.user_id) - .filter_map(Result::ok) - .collect(); + .map(Into::into) + .collect() + .await; - update_displayname(&services, &body.user_id, None, all_joined_rooms).await?; + update_displayname(&services, &body.user_id, None, &all_joined_rooms).await?; } else if body.key == "avatar_url" { let all_joined_rooms: Vec = services .rooms .state_cache .rooms_joined(&body.user_id) - .filter_map(Result::ok) - .collect(); + .map(Into::into) + .collect() + .await; - update_avatar_url(&services, &body.user_id, None, None, all_joined_rooms).await?; + update_avatar_url(&services, &body.user_id, None, None, &all_joined_rooms).await?; } else { services .users - .set_profile_key(&body.user_id, &body.key, None)?; + .set_profile_key(&body.user_id, &body.key, None); } if services.globals.allow_local_presence() { // Presence update services .presence - .ping_presence(&body.user_id, &PresenceState::Online)?; + .ping_presence(&body.user_id, &PresenceState::Online) + .await?; } Ok(delete_profile_key::unstable::Response {}) @@ -386,26 +402,25 @@ pub(crate) async fn get_timezone_key_route( ) .await { 
- if !services.users.exists(&body.user_id)? { + if !services.users.exists(&body.user_id).await { services.users.create(&body.user_id, None)?; } services .users - .set_displayname(&body.user_id, response.displayname.clone()) - .await?; + .set_displayname(&body.user_id, response.displayname.clone()); + services .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()) - .await?; + .set_avatar_url(&body.user_id, response.avatar_url.clone()); + services .users - .set_blurhash(&body.user_id, response.blurhash.clone()) - .await?; + .set_blurhash(&body.user_id, response.blurhash.clone()); + services .users - .set_timezone(&body.user_id, response.tz.clone()) - .await?; + .set_timezone(&body.user_id, response.tz.clone()); return Ok(get_timezone_key::unstable::Response { tz: response.tz, @@ -413,14 +428,14 @@ pub(crate) async fn get_timezone_key_route( } } - if !services.users.exists(&body.user_id)? { + if !services.users.exists(&body.user_id).await { // Return 404 if this user doesn't exist and we couldn't fetch it over // federation return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); } Ok(get_timezone_key::unstable::Response { - tz: services.users.timezone(&body.user_id)?, + tz: services.users.timezone(&body.user_id).await.ok(), }) } @@ -448,32 +463,31 @@ pub(crate) async fn get_profile_key_route( ) .await { - if !services.users.exists(&body.user_id)? 
{ + if !services.users.exists(&body.user_id).await { services.users.create(&body.user_id, None)?; } services .users - .set_displayname(&body.user_id, response.displayname.clone()) - .await?; + .set_displayname(&body.user_id, response.displayname.clone()); + services .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()) - .await?; + .set_avatar_url(&body.user_id, response.avatar_url.clone()); + services .users - .set_blurhash(&body.user_id, response.blurhash.clone()) - .await?; + .set_blurhash(&body.user_id, response.blurhash.clone()); + services .users - .set_timezone(&body.user_id, response.tz.clone()) - .await?; + .set_timezone(&body.user_id, response.tz.clone()); if let Some(value) = response.custom_profile_fields.get(&body.key) { profile_key_value.insert(body.key.clone(), value.clone()); services .users - .set_profile_key(&body.user_id, &body.key, Some(value.clone()))?; + .set_profile_key(&body.user_id, &body.key, Some(value.clone())); } else { return Err!(Request(NotFound("The requested profile key does not exist."))); } @@ -484,13 +498,13 @@ pub(crate) async fn get_profile_key_route( } } - if !services.users.exists(&body.user_id)? { + if !services.users.exists(&body.user_id).await { // Return 404 if this user doesn't exist and we couldn't fetch it over // federation - return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); + return Err!(Request(NotFound("Profile was not found."))); } - if let Some(value) = services.users.profile_key(&body.user_id, &body.key)? 
{ + if let Ok(value) = services.users.profile_key(&body.user_id, &body.key).await { profile_key_value.insert(body.key.clone(), value); } else { return Err!(Request(NotFound("The requested profile key does not exist."))); diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index d714fda5..d5bb14e5 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use axum::{extract::State, response::IntoResponse, Json}; +use futures::StreamExt; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, @@ -173,7 +174,7 @@ pub(crate) async fn conduwuit_server_version() -> Result { /// homeserver. Endpoint is disabled if federation is disabled for privacy. This /// only includes active users (not deactivated, no guests, etc) pub(crate) async fn conduwuit_local_user_count(State(services): State) -> Result { - let user_count = services.users.list_local_users()?.len(); + let user_count = services.users.list_local_users().count().await; Ok(Json(serde_json::json!({ "count": user_count diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 87d4062c..8ea7f1b8 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,4 +1,5 @@ use axum::extract::State; +use futures::{pin_mut, StreamExt}; use ruma::{ api::client::user_directory::search_users, events::{ @@ -21,14 +22,12 @@ pub(crate) async fn search_users_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let limit = usize::try_from(body.limit).unwrap_or(10); // default limit is 10 - let mut users = services.users.iter().filter_map(|user_id| { + let users = services.users.stream().filter_map(|user_id| async { // Filter out buggy users (they should not exist, but you never know...) 
- let user_id = user_id.ok()?; - let user = search_users::v3::User { - user_id: user_id.clone(), - display_name: services.users.displayname(&user_id).ok()?, - avatar_url: services.users.avatar_url(&user_id).ok()?, + user_id: user_id.to_owned(), + display_name: services.users.displayname(user_id).await.ok(), + avatar_url: services.users.avatar_url(user_id).await.ok(), }; let user_id_matches = user @@ -56,20 +55,19 @@ pub(crate) async fn search_users_route( let user_is_in_public_rooms = services .rooms .state_cache - .rooms_joined(&user_id) - .filter_map(Result::ok) - .any(|room| { + .rooms_joined(&user.user_id) + .any(|room| async move { services .rooms .state_accessor - .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .room_state_get(room, &StateEventType::RoomJoinRules, "") + .await .map_or(false, |event| { - event.map_or(false, |event| { - serde_json::from_str(event.content.get()) - .map_or(false, |r: RoomJoinRulesEventContent| r.join_rule == JoinRule::Public) - }) + serde_json::from_str(event.content.get()) + .map_or(false, |r: RoomJoinRulesEventContent| r.join_rule == JoinRule::Public) }) - }); + }) + .await; if user_is_in_public_rooms { user_visible = true; @@ -77,25 +75,22 @@ pub(crate) async fn search_users_route( let user_is_in_shared_rooms = services .rooms .user - .get_shared_rooms(vec![sender_user.clone(), user_id]) - .ok()? 
- .next() - .is_some(); + .has_shared_rooms(sender_user, &user.user_id) + .await; if user_is_in_shared_rooms { user_visible = true; } } - if !user_visible { - return None; - } - - Some(user) + user_visible.then_some(user) }); - let results = users.by_ref().take(limit).collect(); - let limited = users.next().is_some(); + pin_mut!(users); + + let limited = users.by_ref().next().await.is_some(); + + let results = users.take(limit).collect().await; Ok(search_users::v3::Response { results, diff --git a/src/api/router.rs b/src/api/router.rs index 4264e01d..c4275f05 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -22,101 +22,101 @@ use crate::{client, server}; pub fn build(router: Router, server: &Server) -> Router { let config = &server.config; let mut router = router - .ruma_route(client::get_timezone_key_route) - .ruma_route(client::get_profile_key_route) - .ruma_route(client::set_profile_key_route) - .ruma_route(client::delete_profile_key_route) - .ruma_route(client::set_timezone_key_route) - .ruma_route(client::delete_timezone_key_route) - .ruma_route(client::appservice_ping) - .ruma_route(client::get_supported_versions_route) - .ruma_route(client::get_register_available_route) - .ruma_route(client::register_route) - .ruma_route(client::get_login_types_route) - .ruma_route(client::login_route) - .ruma_route(client::whoami_route) - .ruma_route(client::logout_route) - .ruma_route(client::logout_all_route) - .ruma_route(client::change_password_route) - .ruma_route(client::deactivate_route) - .ruma_route(client::third_party_route) - .ruma_route(client::request_3pid_management_token_via_email_route) - .ruma_route(client::request_3pid_management_token_via_msisdn_route) - .ruma_route(client::check_registration_token_validity) - .ruma_route(client::get_capabilities_route) - .ruma_route(client::get_pushrules_all_route) - .ruma_route(client::set_pushrule_route) - .ruma_route(client::get_pushrule_route) - .ruma_route(client::set_pushrule_enabled_route) - 
.ruma_route(client::get_pushrule_enabled_route) - .ruma_route(client::get_pushrule_actions_route) - .ruma_route(client::set_pushrule_actions_route) - .ruma_route(client::delete_pushrule_route) - .ruma_route(client::get_room_event_route) - .ruma_route(client::get_room_aliases_route) - .ruma_route(client::get_filter_route) - .ruma_route(client::create_filter_route) - .ruma_route(client::create_openid_token_route) - .ruma_route(client::set_global_account_data_route) - .ruma_route(client::set_room_account_data_route) - .ruma_route(client::get_global_account_data_route) - .ruma_route(client::get_room_account_data_route) - .ruma_route(client::set_displayname_route) - .ruma_route(client::get_displayname_route) - .ruma_route(client::set_avatar_url_route) - .ruma_route(client::get_avatar_url_route) - .ruma_route(client::get_profile_route) - .ruma_route(client::set_presence_route) - .ruma_route(client::get_presence_route) - .ruma_route(client::upload_keys_route) - .ruma_route(client::get_keys_route) - .ruma_route(client::claim_keys_route) - .ruma_route(client::create_backup_version_route) - .ruma_route(client::update_backup_version_route) - .ruma_route(client::delete_backup_version_route) - .ruma_route(client::get_latest_backup_info_route) - .ruma_route(client::get_backup_info_route) - .ruma_route(client::add_backup_keys_route) - .ruma_route(client::add_backup_keys_for_room_route) - .ruma_route(client::add_backup_keys_for_session_route) - .ruma_route(client::delete_backup_keys_for_room_route) - .ruma_route(client::delete_backup_keys_for_session_route) - .ruma_route(client::delete_backup_keys_route) - .ruma_route(client::get_backup_keys_for_room_route) - .ruma_route(client::get_backup_keys_for_session_route) - .ruma_route(client::get_backup_keys_route) - .ruma_route(client::set_read_marker_route) - .ruma_route(client::create_receipt_route) - .ruma_route(client::create_typing_event_route) - .ruma_route(client::create_room_route) - .ruma_route(client::redact_event_route) - 
.ruma_route(client::report_event_route) - .ruma_route(client::create_alias_route) - .ruma_route(client::delete_alias_route) - .ruma_route(client::get_alias_route) - .ruma_route(client::join_room_by_id_route) - .ruma_route(client::join_room_by_id_or_alias_route) - .ruma_route(client::joined_members_route) - .ruma_route(client::leave_room_route) - .ruma_route(client::forget_room_route) - .ruma_route(client::joined_rooms_route) - .ruma_route(client::kick_user_route) - .ruma_route(client::ban_user_route) - .ruma_route(client::unban_user_route) - .ruma_route(client::invite_user_route) - .ruma_route(client::set_room_visibility_route) - .ruma_route(client::get_room_visibility_route) - .ruma_route(client::get_public_rooms_route) - .ruma_route(client::get_public_rooms_filtered_route) - .ruma_route(client::search_users_route) - .ruma_route(client::get_member_events_route) - .ruma_route(client::get_protocols_route) + .ruma_route(&client::get_timezone_key_route) + .ruma_route(&client::get_profile_key_route) + .ruma_route(&client::set_profile_key_route) + .ruma_route(&client::delete_profile_key_route) + .ruma_route(&client::set_timezone_key_route) + .ruma_route(&client::delete_timezone_key_route) + .ruma_route(&client::appservice_ping) + .ruma_route(&client::get_supported_versions_route) + .ruma_route(&client::get_register_available_route) + .ruma_route(&client::register_route) + .ruma_route(&client::get_login_types_route) + .ruma_route(&client::login_route) + .ruma_route(&client::whoami_route) + .ruma_route(&client::logout_route) + .ruma_route(&client::logout_all_route) + .ruma_route(&client::change_password_route) + .ruma_route(&client::deactivate_route) + .ruma_route(&client::third_party_route) + .ruma_route(&client::request_3pid_management_token_via_email_route) + .ruma_route(&client::request_3pid_management_token_via_msisdn_route) + .ruma_route(&client::check_registration_token_validity) + .ruma_route(&client::get_capabilities_route) + 
.ruma_route(&client::get_pushrules_all_route) + .ruma_route(&client::set_pushrule_route) + .ruma_route(&client::get_pushrule_route) + .ruma_route(&client::set_pushrule_enabled_route) + .ruma_route(&client::get_pushrule_enabled_route) + .ruma_route(&client::get_pushrule_actions_route) + .ruma_route(&client::set_pushrule_actions_route) + .ruma_route(&client::delete_pushrule_route) + .ruma_route(&client::get_room_event_route) + .ruma_route(&client::get_room_aliases_route) + .ruma_route(&client::get_filter_route) + .ruma_route(&client::create_filter_route) + .ruma_route(&client::create_openid_token_route) + .ruma_route(&client::set_global_account_data_route) + .ruma_route(&client::set_room_account_data_route) + .ruma_route(&client::get_global_account_data_route) + .ruma_route(&client::get_room_account_data_route) + .ruma_route(&client::set_displayname_route) + .ruma_route(&client::get_displayname_route) + .ruma_route(&client::set_avatar_url_route) + .ruma_route(&client::get_avatar_url_route) + .ruma_route(&client::get_profile_route) + .ruma_route(&client::set_presence_route) + .ruma_route(&client::get_presence_route) + .ruma_route(&client::upload_keys_route) + .ruma_route(&client::get_keys_route) + .ruma_route(&client::claim_keys_route) + .ruma_route(&client::create_backup_version_route) + .ruma_route(&client::update_backup_version_route) + .ruma_route(&client::delete_backup_version_route) + .ruma_route(&client::get_latest_backup_info_route) + .ruma_route(&client::get_backup_info_route) + .ruma_route(&client::add_backup_keys_route) + .ruma_route(&client::add_backup_keys_for_room_route) + .ruma_route(&client::add_backup_keys_for_session_route) + .ruma_route(&client::delete_backup_keys_for_room_route) + .ruma_route(&client::delete_backup_keys_for_session_route) + .ruma_route(&client::delete_backup_keys_route) + .ruma_route(&client::get_backup_keys_for_room_route) + .ruma_route(&client::get_backup_keys_for_session_route) + .ruma_route(&client::get_backup_keys_route) + 
.ruma_route(&client::set_read_marker_route) + .ruma_route(&client::create_receipt_route) + .ruma_route(&client::create_typing_event_route) + .ruma_route(&client::create_room_route) + .ruma_route(&client::redact_event_route) + .ruma_route(&client::report_event_route) + .ruma_route(&client::create_alias_route) + .ruma_route(&client::delete_alias_route) + .ruma_route(&client::get_alias_route) + .ruma_route(&client::join_room_by_id_route) + .ruma_route(&client::join_room_by_id_or_alias_route) + .ruma_route(&client::joined_members_route) + .ruma_route(&client::leave_room_route) + .ruma_route(&client::forget_room_route) + .ruma_route(&client::joined_rooms_route) + .ruma_route(&client::kick_user_route) + .ruma_route(&client::ban_user_route) + .ruma_route(&client::unban_user_route) + .ruma_route(&client::invite_user_route) + .ruma_route(&client::set_room_visibility_route) + .ruma_route(&client::get_room_visibility_route) + .ruma_route(&client::get_public_rooms_route) + .ruma_route(&client::get_public_rooms_filtered_route) + .ruma_route(&client::search_users_route) + .ruma_route(&client::get_member_events_route) + .ruma_route(&client::get_protocols_route) .route("/_matrix/client/unstable/thirdparty/protocols", get(client::get_protocols_route_unstable)) - .ruma_route(client::send_message_event_route) - .ruma_route(client::send_state_event_for_key_route) - .ruma_route(client::get_state_events_route) - .ruma_route(client::get_state_events_for_key_route) + .ruma_route(&client::send_message_event_route) + .ruma_route(&client::send_state_event_for_key_route) + .ruma_route(&client::get_state_events_route) + .ruma_route(&client::get_state_events_for_key_route) // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes // share one Ruma request / response type pair with {get,send}_state_event_for_key_route .route( @@ -140,46 +140,46 @@ pub fn build(router: Router, server: &Server) -> Router { get(client::get_state_events_for_empty_key_route) 
.put(client::send_state_event_for_empty_key_route), ) - .ruma_route(client::sync_events_route) - .ruma_route(client::sync_events_v4_route) - .ruma_route(client::get_context_route) - .ruma_route(client::get_message_events_route) - .ruma_route(client::search_events_route) - .ruma_route(client::turn_server_route) - .ruma_route(client::send_event_to_device_route) - .ruma_route(client::create_content_route) - .ruma_route(client::get_content_thumbnail_route) - .ruma_route(client::get_content_route) - .ruma_route(client::get_content_as_filename_route) - .ruma_route(client::get_media_preview_route) - .ruma_route(client::get_media_config_route) - .ruma_route(client::get_devices_route) - .ruma_route(client::get_device_route) - .ruma_route(client::update_device_route) - .ruma_route(client::delete_device_route) - .ruma_route(client::delete_devices_route) - .ruma_route(client::get_tags_route) - .ruma_route(client::update_tag_route) - .ruma_route(client::delete_tag_route) - .ruma_route(client::upload_signing_keys_route) - .ruma_route(client::upload_signatures_route) - .ruma_route(client::get_key_changes_route) - .ruma_route(client::get_pushers_route) - .ruma_route(client::set_pushers_route) - .ruma_route(client::upgrade_room_route) - .ruma_route(client::get_threads_route) - .ruma_route(client::get_relating_events_with_rel_type_and_event_type_route) - .ruma_route(client::get_relating_events_with_rel_type_route) - .ruma_route(client::get_relating_events_route) - .ruma_route(client::get_hierarchy_route) - .ruma_route(client::get_mutual_rooms_route) - .ruma_route(client::get_room_summary) + .ruma_route(&client::sync_events_route) + .ruma_route(&client::sync_events_v4_route) + .ruma_route(&client::get_context_route) + .ruma_route(&client::get_message_events_route) + .ruma_route(&client::search_events_route) + .ruma_route(&client::turn_server_route) + .ruma_route(&client::send_event_to_device_route) + .ruma_route(&client::create_content_route) + 
.ruma_route(&client::get_content_thumbnail_route) + .ruma_route(&client::get_content_route) + .ruma_route(&client::get_content_as_filename_route) + .ruma_route(&client::get_media_preview_route) + .ruma_route(&client::get_media_config_route) + .ruma_route(&client::get_devices_route) + .ruma_route(&client::get_device_route) + .ruma_route(&client::update_device_route) + .ruma_route(&client::delete_device_route) + .ruma_route(&client::delete_devices_route) + .ruma_route(&client::get_tags_route) + .ruma_route(&client::update_tag_route) + .ruma_route(&client::delete_tag_route) + .ruma_route(&client::upload_signing_keys_route) + .ruma_route(&client::upload_signatures_route) + .ruma_route(&client::get_key_changes_route) + .ruma_route(&client::get_pushers_route) + .ruma_route(&client::set_pushers_route) + .ruma_route(&client::upgrade_room_route) + .ruma_route(&client::get_threads_route) + .ruma_route(&client::get_relating_events_with_rel_type_and_event_type_route) + .ruma_route(&client::get_relating_events_with_rel_type_route) + .ruma_route(&client::get_relating_events_route) + .ruma_route(&client::get_hierarchy_route) + .ruma_route(&client::get_mutual_rooms_route) + .ruma_route(&client::get_room_summary) .route( "/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary", get(client::get_room_summary_legacy) ) - .ruma_route(client::well_known_support) - .ruma_route(client::well_known_client) + .ruma_route(&client::well_known_support) + .ruma_route(&client::well_known_client) .route("/_conduwuit/server_version", get(client::conduwuit_server_version)) .route("/_matrix/client/r0/rooms/:room_id/initialSync", get(initial_sync)) .route("/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync)) @@ -187,35 +187,35 @@ pub fn build(router: Router, server: &Server) -> Router { if config.allow_federation { router = router - .ruma_route(server::get_server_version_route) + .ruma_route(&server::get_server_version_route) .route("/_matrix/key/v2/server", 
get(server::get_server_keys_route)) .route("/_matrix/key/v2/server/:key_id", get(server::get_server_keys_deprecated_route)) - .ruma_route(server::get_public_rooms_route) - .ruma_route(server::get_public_rooms_filtered_route) - .ruma_route(server::send_transaction_message_route) - .ruma_route(server::get_event_route) - .ruma_route(server::get_backfill_route) - .ruma_route(server::get_missing_events_route) - .ruma_route(server::get_event_authorization_route) - .ruma_route(server::get_room_state_route) - .ruma_route(server::get_room_state_ids_route) - .ruma_route(server::create_leave_event_template_route) - .ruma_route(server::create_leave_event_v1_route) - .ruma_route(server::create_leave_event_v2_route) - .ruma_route(server::create_join_event_template_route) - .ruma_route(server::create_join_event_v1_route) - .ruma_route(server::create_join_event_v2_route) - .ruma_route(server::create_invite_route) - .ruma_route(server::get_devices_route) - .ruma_route(server::get_room_information_route) - .ruma_route(server::get_profile_information_route) - .ruma_route(server::get_keys_route) - .ruma_route(server::claim_keys_route) - .ruma_route(server::get_openid_userinfo_route) - .ruma_route(server::get_hierarchy_route) - .ruma_route(server::well_known_server) - .ruma_route(server::get_content_route) - .ruma_route(server::get_content_thumbnail_route) + .ruma_route(&server::get_public_rooms_route) + .ruma_route(&server::get_public_rooms_filtered_route) + .ruma_route(&server::send_transaction_message_route) + .ruma_route(&server::get_event_route) + .ruma_route(&server::get_backfill_route) + .ruma_route(&server::get_missing_events_route) + .ruma_route(&server::get_event_authorization_route) + .ruma_route(&server::get_room_state_route) + .ruma_route(&server::get_room_state_ids_route) + .ruma_route(&server::create_leave_event_template_route) + .ruma_route(&server::create_leave_event_v1_route) + .ruma_route(&server::create_leave_event_v2_route) + 
.ruma_route(&server::create_join_event_template_route) + .ruma_route(&server::create_join_event_v1_route) + .ruma_route(&server::create_join_event_v2_route) + .ruma_route(&server::create_invite_route) + .ruma_route(&server::get_devices_route) + .ruma_route(&server::get_room_information_route) + .ruma_route(&server::get_profile_information_route) + .ruma_route(&server::get_keys_route) + .ruma_route(&server::claim_keys_route) + .ruma_route(&server::get_openid_userinfo_route) + .ruma_route(&server::get_hierarchy_route) + .ruma_route(&server::well_known_server) + .ruma_route(&server::get_content_route) + .ruma_route(&server::get_content_thumbnail_route) .route("/_conduwuit/local_user_count", get(client::conduwuit_local_user_count)); } else { router = router @@ -227,11 +227,11 @@ pub fn build(router: Router, server: &Server) -> Router { if config.allow_legacy_media { router = router - .ruma_route(client::get_media_config_legacy_route) - .ruma_route(client::get_media_preview_legacy_route) - .ruma_route(client::get_content_legacy_route) - .ruma_route(client::get_content_as_filename_legacy_route) - .ruma_route(client::get_content_thumbnail_legacy_route) + .ruma_route(&client::get_media_config_legacy_route) + .ruma_route(&client::get_media_preview_legacy_route) + .ruma_route(&client::get_content_legacy_route) + .ruma_route(&client::get_content_as_filename_legacy_route) + .ruma_route(&client::get_content_thumbnail_legacy_route) .route("/_matrix/media/v1/config", get(client::get_media_config_legacy_legacy_route)) .route("/_matrix/media/v1/upload", post(client::create_content_legacy_route)) .route( diff --git a/src/api/router/args.rs b/src/api/router/args.rs index a3d09dff..7381a55f 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -10,7 +10,10 @@ use super::{auth, auth::Auth, request, request::Request}; use crate::{service::appservice::RegistrationInfo, State}; /// Extractor for Ruma request structs -pub(crate) struct Args { +pub(crate) struct Args +where + 
T: IncomingRequest + Send + Sync + 'static, +{ /// Request struct body pub(crate) body: T, @@ -38,7 +41,7 @@ pub(crate) struct Args { #[async_trait] impl FromRequest for Args where - T: IncomingRequest, + T: IncomingRequest + Send + Sync + 'static, { type Rejection = Error; @@ -57,7 +60,10 @@ where } } -impl Deref for Args { +impl Deref for Args +where + T: IncomingRequest + Send + Sync + 'static, +{ type Target = T; fn deref(&self) -> &Self::Target { &self.body } @@ -67,7 +73,7 @@ fn make_body( services: &Services, request: &mut Request, json_body: &mut Option, auth: &Auth, ) -> Result where - T: IncomingRequest, + T: IncomingRequest + Send + Sync + 'static, { let body = if let Some(CanonicalJsonValue::Object(json_body)) = json_body { let user_id = auth.sender_user.clone().unwrap_or_else(|| { @@ -77,15 +83,13 @@ where let uiaa_request = json_body .get("auth") - .and_then(|auth| auth.as_object()) + .and_then(CanonicalJsonValue::as_object) .and_then(|auth| auth.get("session")) - .and_then(|session| session.as_str()) + .and_then(CanonicalJsonValue::as_str) .and_then(|session| { - services.uiaa.get_uiaa_request( - &user_id, - &auth.sender_device.clone().unwrap_or_else(|| EMPTY.into()), - session, - ) + services + .uiaa + .get_uiaa_request(&user_id, auth.sender_device.as_deref(), session) }); if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 670f72ba..8d76b4be 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -44,8 +44,8 @@ pub(super) async fn auth( let token = if let Some(token) = token { if let Some(reg_info) = services.appservice.find_from_token(token).await { Token::Appservice(Box::new(reg_info)) - } else if let Some((user_id, device_id)) = services.users.find_from_token(token)? 
{ - Token::User((user_id, OwnedDeviceId::from(device_id))) + } else if let Ok((user_id, device_id)) = services.users.find_from_token(token).await { + Token::User((user_id, device_id)) } else { Token::Invalid } @@ -98,7 +98,7 @@ pub(super) async fn auth( )) } }, - (AuthScheme::AccessToken, Token::Appservice(info)) => Ok(auth_appservice(services, request, info)?), + (AuthScheme::AccessToken, Token::Appservice(info)) => Ok(auth_appservice(services, request, info).await?), (AuthScheme::None | AuthScheme::AccessTokenOptional | AuthScheme::AppserviceToken, Token::Appservice(info)) => { Ok(Auth { origin: None, @@ -150,7 +150,7 @@ pub(super) async fn auth( } } -fn auth_appservice(services: &Services, request: &Request, info: Box) -> Result { +async fn auth_appservice(services: &Services, request: &Request, info: Box) -> Result { let user_id = request .query .user_id @@ -170,7 +170,7 @@ fn auth_appservice(services: &Services, request: &Request, info: Box { + fn add_route(&'static self, router: Router, path: &str) -> Router; + fn add_routes(&'static self, router: Router) -> Router; +} + pub(in super::super) trait RouterExt { - fn ruma_route(self, handler: H) -> Self + fn ruma_route(self, handler: &'static H) -> Self where H: RumaHandler; } impl RouterExt for Router { - fn ruma_route(self, handler: H) -> Self + fn ruma_route(self, handler: &'static H) -> Self where H: RumaHandler, { @@ -27,34 +31,28 @@ impl RouterExt for Router { } } -pub(in super::super) trait RumaHandler { - fn add_routes(&self, router: Router) -> Router; - - fn add_route(&self, router: Router, path: &str) -> Router; -} - macro_rules! ruma_handler { ( $($tx:ident),* $(,)? 
) => { #[allow(non_snake_case)] - impl RumaHandler<($($tx,)* Ruma,)> for Fun + impl RumaHandler<($($tx,)* Ruma,)> for Fun where - Req: IncomingRequest + Send + 'static, - Ret: IntoResponse, - Fut: Future> + Send, - Fun: FnOnce($($tx,)* Ruma,) -> Fut + Clone + Send + Sync + 'static, - $( $tx: FromRequestParts + Send + 'static, )* + Fun: Fn($($tx,)* Ruma,) -> Fut + Send + Sync + 'static, + Fut: Future> + Send, + Req: IncomingRequest + Send + Sync, + Err: IntoResponse + Send, + ::OutgoingResponse: Send, + $( $tx: FromRequestParts + Send + Sync + 'static, )* { - fn add_routes(&self, router: Router) -> Router { + fn add_routes(&'static self, router: Router) -> Router { Req::METADATA .history .all_paths() .fold(router, |router, path| self.add_route(router, path)) } - fn add_route(&self, router: Router, path: &str) -> Router { - let handle = self.clone(); + fn add_route(&'static self, router: Router, path: &str) -> Router { + let action = |$($tx,)* req| self($($tx,)* req).map_ok(RumaResponse); let method = method_to_filter(&Req::METADATA.method); - let action = |$($tx,)* req| async { handle($($tx,)* req).await.map(RumaResponse) }; router.route(path, on(method, action)) } } diff --git a/src/api/router/response.rs b/src/api/router/response.rs index 2aaa79fa..70bbb936 100644 --- a/src/api/router/response.rs +++ b/src/api/router/response.rs @@ -5,13 +5,18 @@ use http::StatusCode; use http_body_util::Full; use ruma::api::{client::uiaa::UiaaResponse, OutgoingResponse}; -pub(crate) struct RumaResponse(pub(crate) T); +pub(crate) struct RumaResponse(pub(crate) T) +where + T: OutgoingResponse; impl From for RumaResponse { fn from(t: Error) -> Self { Self(t.into()) } } -impl IntoResponse for RumaResponse { +impl IntoResponse for RumaResponse +where + T: OutgoingResponse, +{ fn into_response(self) -> Response { self.0 .try_into_http_response::() diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 1b665c19..2bbc95ca 100644 --- a/src/api/server/backfill.rs +++ 
b/src/api/server/backfill.rs @@ -1,9 +1,13 @@ +use std::cmp; + use axum::extract::State; -use conduit::{Error, Result}; -use ruma::{ - api::{client::error::ErrorKind, federation::backfill::get_backfill}, - uint, user_id, MilliSecondsSinceUnixEpoch, +use conduit::{ + is_equal_to, + utils::{IterStream, ReadyExt}, + Err, PduCount, Result, }; +use futures::{FutureExt, StreamExt}; +use ruma::{api::federation::backfill::get_backfill, uint, user_id, MilliSecondsSinceUnixEpoch}; use crate::Ruma; @@ -19,27 +23,35 @@ pub(crate) async fn get_backfill_route( services .rooms .event_handler - .acl_check(origin, &body.room_id)?; + .acl_check(origin, &body.room_id) + .await?; if !services .rooms .state_accessor - .is_world_readable(&body.room_id)? - && !services - .rooms - .state_cache - .server_in_room(origin, &body.room_id)? + .is_world_readable(&body.room_id) + .await && !services + .rooms + .state_cache + .server_in_room(origin, &body.room_id) + .await { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + return Err!(Request(Forbidden("Server is not in room."))); } let until = body .v .iter() - .map(|event_id| services.rooms.timeline.get_pdu_count(event_id)) - .filter_map(|r| r.ok().flatten()) - .max() - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event not found."))?; + .stream() + .filter_map(|event_id| { + services + .rooms + .timeline + .get_pdu_count(event_id) + .map(Result::ok) + }) + .ready_fold(PduCount::Backfilled(0), cmp::max) + .await; let limit = body .limit @@ -47,31 +59,37 @@ pub(crate) async fn get_backfill_route( .try_into() .expect("UInt could not be converted to usize"); - let all_events = services + let pdus = services .rooms .timeline - .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? - .take(limit); + .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until) + .await? 
+ .take(limit) + .filter_map(|(_, pdu)| async move { + if !services + .rooms + .state_accessor + .server_can_see_event(origin, &pdu.room_id, &pdu.event_id) + .await + .is_ok_and(is_equal_to!(true)) + { + return None; + } - let events = all_events - .filter_map(Result::ok) - .filter(|(_, e)| { - matches!( - services - .rooms - .state_accessor - .server_can_see_event(origin, &e.room_id, &e.event_id,), - Ok(true), - ) + services + .rooms + .timeline + .get_pdu_json(&pdu.event_id) + .await + .ok() }) - .map(|(_, pdu)| services.rooms.timeline.get_pdu_json(&pdu.event_id)) - .filter_map(|r| r.ok().flatten()) - .map(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) - .collect(); + .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) + .collect() + .await; Ok(get_backfill::v1::Response { origin: services.globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdus: events, + pdus, }) } diff --git a/src/api/server/event.rs b/src/api/server/event.rs index e11a01a2..e4eac794 100644 --- a/src/api/server/event.rs +++ b/src/api/server/event.rs @@ -1,9 +1,6 @@ use axum::extract::State; -use conduit::{Error, Result}; -use ruma::{ - api::{client::error::ErrorKind, federation::event::get_event}, - MilliSecondsSinceUnixEpoch, RoomId, -}; +use conduit::{err, Err, Result}; +use ruma::{api::federation::event::get_event, MilliSecondsSinceUnixEpoch, RoomId}; use crate::Ruma; @@ -21,34 +18,46 @@ pub(crate) async fn get_event_route( let event = services .rooms .timeline - .get_pdu_json(&body.event_id)? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + .get_pdu_json(&body.event_id) + .await + .map_err(|_| err!(Request(NotFound("Event not found."))))?; let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database."))?; + .ok_or_else(|| err!(Database("Invalid event in database.")))?; let room_id = - <&RoomId>::try_from(room_id_str).map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; + <&RoomId>::try_from(room_id_str).map_err(|_| err!(Database("Invalid room_id in event in database.")))?; - if !services.rooms.state_accessor.is_world_readable(room_id)? - && !services.rooms.state_cache.server_in_room(origin, room_id)? + if !services + .rooms + .state_accessor + .is_world_readable(room_id) + .await && !services + .rooms + .state_cache + .server_in_room(origin, room_id) + .await { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + return Err!(Request(Forbidden("Server is not in room."))); } if !services .rooms .state_accessor - .server_can_see_event(origin, room_id, &body.event_id)? + .server_can_see_event(origin, room_id, &body.event_id) + .await? 
{ - return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not allowed to see event.")); + return Err!(Request(Forbidden("Server is not allowed to see event."))); } Ok(get_event::v1::Response { origin: services.globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdu: services.sending.convert_to_outgoing_federation_event(event), + pdu: services + .sending + .convert_to_outgoing_federation_event(event) + .await, }) } diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 4b0f6bc0..6ec00b50 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use axum::extract::State; use conduit::{Error, Result}; +use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, RoomId, @@ -22,16 +23,18 @@ pub(crate) async fn get_event_authorization_route( services .rooms .event_handler - .acl_check(origin, &body.room_id)?; + .acl_check(origin, &body.room_id) + .await?; if !services .rooms .state_accessor - .is_world_readable(&body.room_id)? - && !services - .rooms - .state_cache - .server_in_room(origin, &body.room_id)? + .is_world_readable(&body.room_id) + .await && !services + .rooms + .state_cache + .server_in_room(origin, &body.room_id) + .await { return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); } @@ -39,8 +42,9 @@ pub(crate) async fn get_event_authorization_route( let event = services .rooms .timeline - .get_pdu_json(&body.event_id)? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + .get_pdu_json(&body.event_id) + .await + .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; let room_id_str = event .get("room_id") @@ -50,16 +54,17 @@ pub(crate) async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; - let auth_chain_ids = services + let auth_chain = services .rooms .auth_chain .event_ids_iter(room_id, vec![Arc::from(&*body.event_id)]) - .await?; + .await? + .filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() }) + .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) + .collect() + .await; Ok(get_event_authorization::v1::Response { - auth_chain: auth_chain_ids - .filter_map(|id| services.rooms.timeline.get_pdu_json(&id).ok()?) - .map(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) - .collect(), + auth_chain, }) } diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index e2c3c93c..7ae0ff60 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -18,16 +18,18 @@ pub(crate) async fn get_missing_events_route( services .rooms .event_handler - .acl_check(origin, &body.room_id)?; + .acl_check(origin, &body.room_id) + .await?; if !services .rooms .state_accessor - .is_world_readable(&body.room_id)? - && !services - .rooms - .state_cache - .server_in_room(origin, &body.room_id)? 
+ .is_world_readable(&body.room_id) + .await && !services + .rooms + .state_cache + .server_in_room(origin, &body.room_id) + .await { return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room")); } @@ -43,7 +45,12 @@ pub(crate) async fn get_missing_events_route( let mut i: usize = 0; while i < queued_events.len() && events.len() < limit { - if let Some(pdu) = services.rooms.timeline.get_pdu_json(&queued_events[i])? { + if let Ok(pdu) = services + .rooms + .timeline + .get_pdu_json(&queued_events[i]) + .await + { let room_id_str = pdu .get("room_id") .and_then(|val| val.as_str()) @@ -64,7 +71,8 @@ pub(crate) async fn get_missing_events_route( if !services .rooms .state_accessor - .server_can_see_event(origin, &body.room_id, &queued_events[i])? + .server_can_see_event(origin, &body.room_id, &queued_events[i]) + .await? { i = i.saturating_add(1); continue; @@ -81,7 +89,12 @@ pub(crate) async fn get_missing_events_route( ) .map_err(|_| Error::bad_database("Invalid prev_events in event in database."))?, ); - events.push(services.sending.convert_to_outgoing_federation_event(pdu)); + events.push( + services + .sending + .convert_to_outgoing_federation_event(pdu) + .await, + ); } i = i.saturating_add(1); } diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index 530ed145..002bd763 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -12,7 +12,7 @@ pub(crate) async fn get_hierarchy_route( ) -> Result { let origin = body.origin.as_ref().expect("server is authenticated"); - if services.rooms.metadata.exists(&body.room_id)? 
{ + if services.rooms.metadata.exists(&body.room_id).await { services .rooms .spaces diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 688e026c..9968bdf7 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -24,7 +24,8 @@ pub(crate) async fn create_invite_route( services .rooms .event_handler - .acl_check(origin, &body.room_id)?; + .acl_check(origin, &body.room_id) + .await?; if !services .globals @@ -98,7 +99,8 @@ pub(crate) async fn create_invite_route( services .rooms .event_handler - .acl_check(invited_user.server_name(), &body.room_id)?; + .acl_check(invited_user.server_name(), &body.room_id) + .await?; ruma::signatures::hash_and_sign_event( services.globals.server_name().as_str(), @@ -128,14 +130,14 @@ pub(crate) async fn create_invite_route( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user ID."))?; - if services.rooms.metadata.is_banned(&body.room_id)? && !services.users.is_admin(&invited_user)? { + if services.rooms.metadata.is_banned(&body.room_id).await && !services.users.is_admin(&invited_user).await { return Err(Error::BadRequest( ErrorKind::forbidden(), "This room is banned on this homeserver.", )); } - if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user)? { + if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await { return Err(Error::BadRequest( ErrorKind::forbidden(), "This server does not allow room invites.", @@ -159,22 +161,28 @@ pub(crate) async fn create_invite_route( if !services .rooms .state_cache - .server_in_room(services.globals.server_name(), &body.room_id)? 
+ .server_in_room(services.globals.server_name(), &body.room_id) + .await { - services.rooms.state_cache.update_membership( - &body.room_id, - &invited_user, - RoomMemberEventContent::new(MembershipState::Invite), - &sender, - Some(invite_state), - body.via.clone(), - true, - )?; + services + .rooms + .state_cache + .update_membership( + &body.room_id, + &invited_user, + RoomMemberEventContent::new(MembershipState::Invite), + &sender, + Some(invite_state), + body.via.clone(), + true, + ) + .await?; } Ok(create_invite::v2::Response { event: services .sending - .convert_to_outgoing_federation_event(signed_event), + .convert_to_outgoing_federation_event(signed_event) + .await, }) } diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index 021016be..ba081aad 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,4 +1,6 @@ use axum::extract::State; +use conduit::utils::{IterStream, ReadyExt}; +use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::membership::prepare_join_event}, events::{ @@ -24,7 +26,7 @@ use crate::{ pub(crate) async fn create_join_event_template_route( State(services): State, body: Ruma, ) -> Result { - if !services.rooms.metadata.exists(&body.room_id)? 
{ + if !services.rooms.metadata.exists(&body.room_id).await { return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); } @@ -40,7 +42,8 @@ pub(crate) async fn create_join_event_template_route( services .rooms .event_handler - .acl_check(origin, &body.room_id)?; + .acl_check(origin, &body.room_id) + .await?; if services .globals @@ -73,7 +76,7 @@ pub(crate) async fn create_join_event_template_route( } } - let room_version_id = services.rooms.state.get_room_version(&body.room_id)?; + let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; @@ -81,22 +84,24 @@ pub(crate) async fn create_join_event_template_route( .rooms .state_cache .is_left(&body.user_id, &body.room_id) - .unwrap_or(true)) - && user_can_perform_restricted_join(&services, &body.user_id, &body.room_id, &room_version_id)? + .await) + && user_can_perform_restricted_join(&services, &body.user_id, &body.room_id, &room_version_id).await? 
{ let auth_user = services .rooms .state_cache .room_members(&body.room_id) - .filter_map(Result::ok) - .filter(|user| user.server_name() == services.globals.server_name()) - .find(|user| { + .ready_filter(|user| user.server_name() == services.globals.server_name()) + .filter(|user| { services .rooms .state_accessor .user_can_invite(&body.room_id, user, &body.user_id, &state_lock) - .unwrap_or(false) - }); + }) + .boxed() + .next() + .await + .map(ToOwned::to_owned); if auth_user.is_some() { auth_user @@ -110,7 +115,7 @@ pub(crate) async fn create_join_event_template_route( None }; - let room_version_id = services.rooms.state.get_room_version(&body.room_id)?; + let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { @@ -132,19 +137,23 @@ pub(crate) async fn create_join_event_template_route( }) .expect("member event is valid value"); - let (_pdu, mut pdu_json) = services.rooms.timeline.create_hash_and_sign_event( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - timestamp: None, - }, - &body.user_id, - &body.room_id, - &state_lock, - )?; + let (_pdu, mut pdu_json) = services + .rooms + .timeline + .create_hash_and_sign_event( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + timestamp: None, + }, + &body.user_id, + &body.room_id, + &state_lock, + ) + .await?; drop(state_lock); @@ -161,7 +170,7 @@ pub(crate) async fn create_join_event_template_route( /// This doesn't check the current user's membership. This should be done /// externally, either by using the state cache or attempting to authorize the /// event. 
-pub(crate) fn user_can_perform_restricted_join( +pub(crate) async fn user_can_perform_restricted_join( services: &Services, user_id: &UserId, room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result { use RoomVersionId::*; @@ -169,18 +178,15 @@ pub(crate) fn user_can_perform_restricted_join( let join_rules_event = services .rooms .state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; + .room_state_get(room_id, &StateEventType::RoomJoinRules, "") + .await; - let Some(join_rules_event_content) = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str::(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event in database: {e}"); - Error::bad_database("Invalid join rules event in database") - }) + let Ok(Ok(join_rules_event_content)) = join_rules_event.as_ref().map(|join_rules_event| { + serde_json::from_str::(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event in database: {e}"); + Error::bad_database("Invalid join rules event in database") }) - .transpose()? - else { + }) else { return Ok(false); }; @@ -201,13 +207,10 @@ pub(crate) fn user_can_perform_restricted_join( None } }) - .any(|m| { - services - .rooms - .state_cache - .is_joined(user_id, &m.room_id) - .unwrap_or(false) - }) { + .stream() + .any(|m| services.rooms.state_cache.is_joined(user_id, &m.room_id)) + .await + { Ok(true) } else { Err(Error::BadRequest( diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 3eb0d77a..41ea1c80 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -18,7 +18,7 @@ use crate::{service::pdu::PduBuilder, Ruma}; pub(crate) async fn create_leave_event_template_route( State(services): State, body: Ruma, ) -> Result { - if !services.rooms.metadata.exists(&body.room_id)? 
{ + if !services.rooms.metadata.exists(&body.room_id).await { return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); } @@ -34,9 +34,10 @@ pub(crate) async fn create_leave_event_template_route( services .rooms .event_handler - .acl_check(origin, &body.room_id)?; + .acl_check(origin, &body.room_id) + .await?; - let room_version_id = services.rooms.state.get_room_version(&body.room_id)?; + let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; let content = to_raw_value(&RoomMemberEventContent { avatar_url: None, @@ -50,19 +51,23 @@ pub(crate) async fn create_leave_event_template_route( }) .expect("member event is valid value"); - let (_pdu, mut pdu_json) = services.rooms.timeline.create_hash_and_sign_event( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - timestamp: None, - }, - &body.user_id, - &body.room_id, - &state_lock, - )?; + let (_pdu, mut pdu_json) = services + .rooms + .timeline + .create_hash_and_sign_event( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + timestamp: None, + }, + &body.user_id, + &body.room_id, + &state_lock, + ) + .await?; drop(state_lock); diff --git a/src/api/server/openid.rs b/src/api/server/openid.rs index 6a1b99b7..9b54807a 100644 --- a/src/api/server/openid.rs +++ b/src/api/server/openid.rs @@ -10,6 +10,9 @@ pub(crate) async fn get_openid_userinfo_route( State(services): State, body: Ruma, ) -> Result { Ok(get_openid_userinfo::v1::Response::new( - services.users.find_from_openid_token(&body.access_token)?, + services + .users + .find_from_openid_token(&body.access_token) + .await?, )) } diff --git a/src/api/server/query.rs b/src/api/server/query.rs index c2b78bde..348b8c6e 100644 --- 
a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -1,7 +1,8 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::{Error, Result}; +use conduit::{err, Error, Result}; +use futures::StreamExt; use get_profile_information::v1::ProfileField; use rand::seq::SliceRandom; use ruma::{ @@ -23,15 +24,17 @@ pub(crate) async fn get_room_information_route( let room_id = services .rooms .alias - .resolve_local_alias(&body.room_alias)? - .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Room alias not found."))?; + .resolve_local_alias(&body.room_alias) + .await + .map_err(|_| err!(Request(NotFound("Room alias not found."))))?; let mut servers: Vec = services .rooms .state_cache .room_servers(&room_id) - .filter_map(Result::ok) - .collect(); + .map(ToOwned::to_owned) + .collect() + .await; servers.sort_unstable(); servers.dedup(); @@ -82,30 +85,31 @@ pub(crate) async fn get_profile_information_route( match &body.field { Some(ProfileField::DisplayName) => { - displayname = services.users.displayname(&body.user_id)?; + displayname = services.users.displayname(&body.user_id).await.ok(); }, Some(ProfileField::AvatarUrl) => { - avatar_url = services.users.avatar_url(&body.user_id)?; - blurhash = services.users.blurhash(&body.user_id)?; + avatar_url = services.users.avatar_url(&body.user_id).await.ok(); + blurhash = services.users.blurhash(&body.user_id).await.ok(); }, Some(custom_field) => { - if let Some(value) = services + if let Ok(value) = services .users - .profile_key(&body.user_id, custom_field.as_str())? 
+ .profile_key(&body.user_id, custom_field.as_str()) + .await { custom_profile_fields.insert(custom_field.to_string(), value); } }, None => { - displayname = services.users.displayname(&body.user_id)?; - avatar_url = services.users.avatar_url(&body.user_id)?; - blurhash = services.users.blurhash(&body.user_id)?; - tz = services.users.timezone(&body.user_id)?; + displayname = services.users.displayname(&body.user_id).await.ok(); + avatar_url = services.users.avatar_url(&body.user_id).await.ok(); + blurhash = services.users.blurhash(&body.user_id).await.ok(); + tz = services.users.timezone(&body.user_id).await.ok(); custom_profile_fields = services .users .all_profile_keys(&body.user_id) - .filter_map(Result::ok) - .collect(); + .collect() + .await; }, } diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 15f82faa..bb424988 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -2,7 +2,8 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{debug, debug_warn, err, trace, warn, Err}; +use conduit::{debug, debug_warn, err, result::LogErr, trace, utils::ReadyExt, warn, Err, Error, Result}; +use futures::StreamExt; use ruma::{ api::{ client::error::ErrorKind, @@ -23,10 +24,13 @@ use tokio::sync::RwLock; use crate::{ services::Services, utils::{self}, - Error, Result, Ruma, + Ruma, }; -type ResolvedMap = BTreeMap>; +const PDU_LIMIT: usize = 50; +const EDU_LIMIT: usize = 100; + +type ResolvedMap = BTreeMap>; /// # `PUT /_matrix/federation/v1/send/{txnId}` /// @@ -44,12 +48,16 @@ pub(crate) async fn send_transaction_message_route( ))); } - if body.pdus.len() > 50_usize { - return Err!(Request(Forbidden("Not allowed to send more than 50 PDUs in one transaction"))); + if body.pdus.len() > PDU_LIMIT { + return Err!(Request(Forbidden( + "Not allowed to send more than {PDU_LIMIT} PDUs in one transaction" + ))); } - if body.edus.len() > 100_usize { - return 
Err!(Request(Forbidden("Not allowed to send more than 100 EDUs in one transaction"))); + if body.edus.len() > EDU_LIMIT { + return Err!(Request(Forbidden( + "Not allowed to send more than {EDU_LIMIT} EDUs in one transaction" + ))); } let txn_start_time = Instant::now(); @@ -62,8 +70,8 @@ pub(crate) async fn send_transaction_message_route( "Starting txn", ); - let resolved_map = handle_pdus(&services, &client, &body, origin, &txn_start_time).await?; - handle_edus(&services, &client, &body, origin).await?; + let resolved_map = handle_pdus(&services, &client, &body, origin, &txn_start_time).await; + handle_edus(&services, &client, &body, origin).await; debug!( pdus = ?body.pdus.len(), @@ -85,10 +93,10 @@ pub(crate) async fn send_transaction_message_route( async fn handle_pdus( services: &Services, _client: &IpAddr, body: &Ruma, origin: &ServerName, txn_start_time: &Instant, -) -> Result { +) -> ResolvedMap { let mut parsed_pdus = Vec::with_capacity(body.pdus.len()); for pdu in &body.pdus { - parsed_pdus.push(match services.rooms.event_handler.parse_incoming_pdu(pdu) { + parsed_pdus.push(match services.rooms.event_handler.parse_incoming_pdu(pdu).await { Ok(t) => t, Err(e) => { debug_warn!("Could not parse PDU: {e}"); @@ -151,38 +159,34 @@ async fn handle_pdus( } } - Ok(resolved_map) + resolved_map } async fn handle_edus( services: &Services, client: &IpAddr, body: &Ruma, origin: &ServerName, -) -> Result<()> { +) { for edu in body .edus .iter() .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) { match edu { - Edu::Presence(presence) => handle_edu_presence(services, client, origin, presence).await?, - Edu::Receipt(receipt) => handle_edu_receipt(services, client, origin, receipt).await?, - Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await?, - Edu::DeviceListUpdate(content) => handle_edu_device_list_update(services, client, origin, content).await?, - Edu::DirectToDevice(content) => handle_edu_direct_to_device(services, client, 
origin, content).await?, - Edu::SigningKeyUpdate(content) => handle_edu_signing_key_update(services, client, origin, content).await?, + Edu::Presence(presence) => handle_edu_presence(services, client, origin, presence).await, + Edu::Receipt(receipt) => handle_edu_receipt(services, client, origin, receipt).await, + Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, + Edu::DeviceListUpdate(content) => handle_edu_device_list_update(services, client, origin, content).await, + Edu::DirectToDevice(content) => handle_edu_direct_to_device(services, client, origin, content).await, + Edu::SigningKeyUpdate(content) => handle_edu_signing_key_update(services, client, origin, content).await, Edu::_Custom(ref _custom) => { debug_warn!(?body.edus, "received custom/unknown EDU"); }, } } - - Ok(()) } -async fn handle_edu_presence( - services: &Services, _client: &IpAddr, origin: &ServerName, presence: PresenceContent, -) -> Result<()> { +async fn handle_edu_presence(services: &Services, _client: &IpAddr, origin: &ServerName, presence: PresenceContent) { if !services.globals.allow_incoming_presence() { - return Ok(()); + return; } for update in presence.push { @@ -194,23 +198,24 @@ async fn handle_edu_presence( continue; } - services.presence.set_presence( - &update.user_id, - &update.presence, - Some(update.currently_active), - Some(update.last_active_ago), - update.status_msg.clone(), - )?; + services + .presence + .set_presence( + &update.user_id, + &update.presence, + Some(update.currently_active), + Some(update.last_active_ago), + update.status_msg.clone(), + ) + .await + .log_err() + .ok(); } - - Ok(()) } -async fn handle_edu_receipt( - services: &Services, _client: &IpAddr, origin: &ServerName, receipt: ReceiptContent, -) -> Result<()> { +async fn handle_edu_receipt(services: &Services, _client: &IpAddr, origin: &ServerName, receipt: ReceiptContent) { if !services.globals.allow_incoming_read_receipts() { - return Ok(()); + return; } for (room_id, 
room_updates) in receipt.receipts { @@ -218,6 +223,7 @@ async fn handle_edu_receipt( .rooms .event_handler .acl_check(origin, &room_id) + .await .is_err() { debug_warn!( @@ -240,8 +246,8 @@ async fn handle_edu_receipt( .rooms .state_cache .room_members(&room_id) - .filter_map(Result::ok) - .any(|member| member.server_name() == user_id.server_name()) + .ready_any(|member| member.server_name() == user_id.server_name()) + .await { for event_id in &user_updates.event_ids { let user_receipts = BTreeMap::from([(user_id.clone(), user_updates.data.clone())]); @@ -255,7 +261,8 @@ async fn handle_edu_receipt( services .rooms .read_receipt - .readreceipt_update(&user_id, &room_id, &event)?; + .readreceipt_update(&user_id, &room_id, &event) + .await; } } else { debug_warn!( @@ -266,15 +273,11 @@ async fn handle_edu_receipt( } } } - - Ok(()) } -async fn handle_edu_typing( - services: &Services, _client: &IpAddr, origin: &ServerName, typing: TypingContent, -) -> Result<()> { +async fn handle_edu_typing(services: &Services, _client: &IpAddr, origin: &ServerName, typing: TypingContent) { if !services.globals.config.allow_incoming_typing { - return Ok(()); + return; } if typing.user_id.server_name() != origin { @@ -282,26 +285,28 @@ async fn handle_edu_typing( %typing.user_id, %origin, "received typing EDU for user not belonging to origin" ); - return Ok(()); + return; } if services .rooms .event_handler .acl_check(typing.user_id.server_name(), &typing.room_id) + .await .is_err() { debug_warn!( %typing.user_id, %typing.room_id, %origin, "received typing EDU for ACL'd user's server" ); - return Ok(()); + return; } if services .rooms .state_cache - .is_joined(&typing.user_id, &typing.room_id)? 
+ .is_joined(&typing.user_id, &typing.room_id) + .await { if typing.typing { let timeout = utils::millis_since_unix_epoch().saturating_add( @@ -315,28 +320,29 @@ async fn handle_edu_typing( .rooms .typing .typing_add(&typing.user_id, &typing.room_id, timeout) - .await?; + .await + .log_err() + .ok(); } else { services .rooms .typing .typing_remove(&typing.user_id, &typing.room_id) - .await?; + .await + .log_err() + .ok(); } } else { debug_warn!( %typing.user_id, %typing.room_id, %origin, "received typing EDU for user not in room" ); - return Ok(()); } - - Ok(()) } async fn handle_edu_device_list_update( services: &Services, _client: &IpAddr, origin: &ServerName, content: DeviceListUpdateContent, -) -> Result<()> { +) { let DeviceListUpdateContent { user_id, .. @@ -347,17 +353,15 @@ async fn handle_edu_device_list_update( %user_id, %origin, "received device list update EDU for user not belonging to origin" ); - return Ok(()); + return; } - services.users.mark_device_key_update(&user_id)?; - - Ok(()) + services.users.mark_device_key_update(&user_id).await; } async fn handle_edu_direct_to_device( services: &Services, _client: &IpAddr, origin: &ServerName, content: DirectDeviceContent, -) -> Result<()> { +) { let DirectDeviceContent { sender, ev_type, @@ -370,45 +374,52 @@ async fn handle_edu_direct_to_device( %sender, %origin, "received direct to device EDU for user not belonging to origin" ); - return Ok(()); + return; } // Check if this is a new transaction id if services .transaction_ids - .existing_txnid(&sender, None, &message_id)? 
- .is_some() + .existing_txnid(&sender, None, &message_id) + .await + .is_ok() { - return Ok(()); + return; } for (target_user_id, map) in &messages { for (target_device_id_maybe, event) in map { + let Ok(event) = event + .deserialize_as() + .map_err(|e| err!(Request(InvalidParam(error!("To-Device event is invalid: {e}"))))) + else { + continue; + }; + + let ev_type = ev_type.to_string(); match target_device_id_maybe { DeviceIdOrAllDevices::DeviceId(target_device_id) => { - services.users.add_to_device_event( - &sender, - target_user_id, - target_device_id, - &ev_type.to_string(), - event - .deserialize_as() - .map_err(|e| err!(Request(InvalidParam(error!("To-Device event is invalid: {e}")))))?, - )?; + services + .users + .add_to_device_event(&sender, target_user_id, target_device_id, &ev_type, event) + .await; }, DeviceIdOrAllDevices::AllDevices => { - for target_device_id in services.users.all_device_ids(target_user_id) { - services.users.add_to_device_event( - &sender, - target_user_id, - &target_device_id?, - &ev_type.to_string(), - event - .deserialize_as() - .map_err(|e| err!(Request(InvalidParam("Event is invalid: {e}"))))?, - )?; - } + let (sender, ev_type, event) = (&sender, &ev_type, &event); + services + .users + .all_device_ids(target_user_id) + .for_each(|target_device_id| { + services.users.add_to_device_event( + sender, + target_user_id, + target_device_id, + ev_type, + event.clone(), + ) + }) + .await; }, } } @@ -417,14 +428,12 @@ async fn handle_edu_direct_to_device( // Save transaction id with empty data services .transaction_ids - .add_txnid(&sender, None, &message_id, &[])?; - - Ok(()) + .add_txnid(&sender, None, &message_id, &[]); } async fn handle_edu_signing_key_update( services: &Services, _client: &IpAddr, origin: &ServerName, content: SigningKeyUpdateContent, -) -> Result<()> { +) { let SigningKeyUpdateContent { user_id, master_key, @@ -436,14 +445,15 @@ async fn handle_edu_signing_key_update( %user_id, %origin, "received signing key 
update EDU from server that does not belong to user's server" ); - return Ok(()); + return; } if let Some(master_key) = master_key { services .users - .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true)?; + .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) + .await + .log_err() + .ok(); } - - Ok(()) } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index c4d016f6..639fcafd 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -3,7 +3,8 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::{pdu::gen_event_id_canonical_json, warn, Error, Result}; +use conduit::{err, pdu::gen_event_id_canonical_json, utils::IterStream, warn, Error, Result}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ api::{client::error::ErrorKind, federation::membership::create_join_event}, events::{ @@ -22,27 +23,32 @@ use crate::Ruma; async fn create_join_event( services: &Services, origin: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, ) -> Result { - if !services.rooms.metadata.exists(room_id)? { + if !services.rooms.metadata.exists(room_id).await { return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); } // ACL check origin server - services.rooms.event_handler.acl_check(origin, room_id)?; + services + .rooms + .event_handler + .acl_check(origin, room_id) + .await?; // We need to return the state prior to joining, let's keep a reference to that // here let shortstatehash = services .rooms .state - .get_room_shortstatehash(room_id)? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Event state not found."))?; + .get_room_shortstatehash(room_id) + .await + .map_err(|_| err!(Request(NotFound("Event state not found."))))?; let pub_key_map = RwLock::new(BTreeMap::new()); // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and // hashes checks - let room_version_id = services.rooms.state.get_room_version(room_id)?; + let room_version_id = services.rooms.state.get_room_version(room_id).await?; let Ok((event_id, mut value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { // Event could not be converted to canonical json @@ -97,7 +103,8 @@ async fn create_join_event( services .rooms .event_handler - .acl_check(sender.server_name(), room_id)?; + .acl_check(sender.server_name(), room_id) + .await?; // check if origin server is trying to send for another server if sender.server_name() != origin { @@ -126,7 +133,9 @@ async fn create_join_event( if content .join_authorized_via_users_server .is_some_and(|user| services.globals.user_is_local(&user)) - && super::user_can_perform_restricted_join(services, &sender, room_id, &room_version_id).unwrap_or_default() + && super::user_can_perform_restricted_join(services, &sender, room_id, &room_version_id) + .await + .unwrap_or_default() { ruma::signatures::hash_and_sign_event( services.globals.server_name().as_str(), @@ -158,12 +167,14 @@ async fn create_join_event( .mutex_federation .lock(room_id) .await; + let pdu_id: Vec = services .rooms .event_handler .handle_incoming_pdu(&origin, room_id, &event_id, value.clone(), true, &pub_key_map) .await? 
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Could not accept as timeline event."))?; + drop(mutex_lock); let state_ids = services @@ -171,29 +182,43 @@ async fn create_join_event( .state_accessor .state_full_ids(shortstatehash) .await?; - let auth_chain_ids = services + + let state = state_ids + .iter() + .try_stream() + .and_then(|(_, event_id)| services.rooms.timeline.get_pdu_json(event_id)) + .and_then(|pdu| { + services + .sending + .convert_to_outgoing_federation_event(pdu) + .map(Ok) + }) + .try_collect() + .await?; + + let auth_chain = services .rooms .auth_chain .event_ids_iter(room_id, state_ids.values().cloned().collect()) + .await? + .map(Ok) + .and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) + .and_then(|pdu| { + services + .sending + .convert_to_outgoing_federation_event(pdu) + .map(Ok) + }) + .try_collect() .await?; - services.sending.send_pdu_room(room_id, &pdu_id)?; + services.sending.send_pdu_room(room_id, &pdu_id).await?; Ok(create_join_event::v1::RoomState { - auth_chain: auth_chain_ids - .filter_map(|id| services.rooms.timeline.get_pdu_json(&id).ok().flatten()) - .map(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) - .collect(), - state: state_ids - .iter() - .filter_map(|(_, id)| services.rooms.timeline.get_pdu_json(id).ok().flatten()) - .map(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) - .collect(), + auth_chain, + state, // Event field is required if the room version supports restricted join rules. 
- event: Some( - to_raw_value(&CanonicalJsonValue::Object(value)) - .expect("To raw json should not fail since only change was adding signature"), - ), + event: to_raw_value(&CanonicalJsonValue::Object(value)).ok(), }) } diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index e77c5d78..81f41af0 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::{Error, Result}; +use conduit::{utils::ReadyExt, Error, Result}; use ruma::{ api::{client::error::ErrorKind, federation::membership::create_leave_event}, events::{ @@ -49,18 +49,22 @@ pub(crate) async fn create_leave_event_v2_route( async fn create_leave_event( services: &Services, origin: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, ) -> Result<()> { - if !services.rooms.metadata.exists(room_id)? { + if !services.rooms.metadata.exists(room_id).await { return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); } // ACL check origin - services.rooms.event_handler.acl_check(origin, room_id)?; + services + .rooms + .event_handler + .acl_check(origin, room_id) + .await?; let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and // hashes checks - let room_version_id = services.rooms.state.get_room_version(room_id)?; + let room_version_id = services.rooms.state.get_room_version(room_id).await?; let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { // Event could not be converted to canonical json return Err(Error::BadRequest( @@ -114,7 +118,8 @@ async fn create_leave_event( services .rooms .event_handler - .acl_check(sender.server_name(), room_id)?; + .acl_check(sender.server_name(), room_id) + .await?; if sender.server_name() != origin { return Err(Error::BadRequest( @@ -173,10 +178,9 @@ async fn create_leave_event( .rooms .state_cache .room_servers(room_id) - 
.filter_map(Result::ok) - .filter(|server| !services.globals.server_is_ours(server)); + .ready_filter(|server| !services.globals.server_is_ours(server)); - services.sending.send_pdu_servers(servers, &pdu_id)?; + services.sending.send_pdu_servers(servers, &pdu_id).await?; Ok(()) } diff --git a/src/api/server/state.rs b/src/api/server/state.rs index d215236a..37a14a3f 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use axum::extract::State; -use conduit::{Error, Result}; -use ruma::api::{client::error::ErrorKind, federation::event::get_room_state}; +use conduit::{err, result::LogErr, utils::IterStream, Err, Result}; +use futures::{FutureExt, StreamExt, TryStreamExt}; +use ruma::api::federation::event::get_room_state; use crate::Ruma; @@ -17,56 +18,66 @@ pub(crate) async fn get_room_state_route( services .rooms .event_handler - .acl_check(origin, &body.room_id)?; + .acl_check(origin, &body.room_id) + .await?; if !services .rooms .state_accessor - .is_world_readable(&body.room_id)? - && !services - .rooms - .state_cache - .server_in_room(origin, &body.room_id)? + .is_world_readable(&body.room_id) + .await && !services + .rooms + .state_cache + .server_in_room(origin, &body.room_id) + .await { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + return Err!(Request(Forbidden("Server is not in room."))); } let shortstatehash = services .rooms .state_accessor - .pdu_shortstatehash(&body.event_id)? - .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Pdu state not found."))?; + .pdu_shortstatehash(&body.event_id) + .await + .map_err(|_| err!(Request(NotFound("PDU state not found."))))?; let pdus = services .rooms .state_accessor .state_full_ids(shortstatehash) - .await? - .into_values() - .map(|id| { + .await + .log_err() + .map_err(|_| err!(Request(NotFound("PDU state IDs not found."))))? 
+ .values() + .try_stream() + .and_then(|id| services.rooms.timeline.get_pdu_json(id)) + .and_then(|pdu| { services .sending - .convert_to_outgoing_federation_event(services.rooms.timeline.get_pdu_json(&id).unwrap().unwrap()) + .convert_to_outgoing_federation_event(pdu) + .map(Ok) }) - .collect(); + .try_collect() + .await?; - let auth_chain_ids = services + let auth_chain = services .rooms .auth_chain .event_ids_iter(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await? + .map(Ok) + .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await }) + .and_then(|pdu| { + services + .sending + .convert_to_outgoing_federation_event(pdu) + .map(Ok) + }) + .try_collect() .await?; Ok(get_room_state::v1::Response { - auth_chain: auth_chain_ids - .filter_map(|id| { - services - .rooms - .timeline - .get_pdu_json(&id) - .ok()? - .map(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) - }) - .collect(), + auth_chain, pdus, }) } diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index d22f2df4..95ca65aa 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,9 +1,11 @@ use std::sync::Arc; use axum::extract::State; -use ruma::api::{client::error::ErrorKind, federation::event::get_room_state_ids}; +use conduit::{err, Err}; +use futures::StreamExt; +use ruma::api::federation::event::get_room_state_ids; -use crate::{Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `GET /_matrix/federation/v1/state_ids/{roomId}` /// @@ -17,31 +19,35 @@ pub(crate) async fn get_room_state_ids_route( services .rooms .event_handler - .acl_check(origin, &body.room_id)?; + .acl_check(origin, &body.room_id) + .await?; if !services .rooms .state_accessor - .is_world_readable(&body.room_id)? - && !services - .rooms - .state_cache - .server_in_room(origin, &body.room_id)? 
+ .is_world_readable(&body.room_id) + .await && !services + .rooms + .state_cache + .server_in_room(origin, &body.room_id) + .await { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + return Err!(Request(Forbidden("Server is not in room."))); } let shortstatehash = services .rooms .state_accessor - .pdu_shortstatehash(&body.event_id)? - .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Pdu state not found."))?; + .pdu_shortstatehash(&body.event_id) + .await + .map_err(|_| err!(Request(NotFound("Pdu state not found."))))?; let pdu_ids = services .rooms .state_accessor .state_full_ids(shortstatehash) - .await? + .await + .map_err(|_| err!(Request(NotFound("State ids not found"))))? .into_values() .map(|id| (*id).to_owned()) .collect(); @@ -50,10 +56,13 @@ pub(crate) async fn get_room_state_ids_route( .rooms .auth_chain .event_ids_iter(&body.room_id, vec![Arc::from(&*body.event_id)]) - .await?; + .await? + .map(|id| (*id).to_owned()) + .collect() + .await; Ok(get_room_state_ids::v1::Response { - auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), + auth_chain_ids, pdu_ids, }) } diff --git a/src/api/server/user.rs b/src/api/server/user.rs index e9a400a7..0718da58 100644 --- a/src/api/server/user.rs +++ b/src/api/server/user.rs @@ -1,5 +1,6 @@ use axum::extract::State; use conduit::{Error, Result}; +use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::api::{ client::error::ErrorKind, federation::{ @@ -28,41 +29,51 @@ pub(crate) async fn get_devices_route( let origin = body.origin.as_ref().expect("server is authenticated"); + let user_id = &body.user_id; Ok(get_devices::v1::Response { - user_id: body.user_id.clone(), + user_id: user_id.clone(), stream_id: services .users - .get_devicelist_version(&body.user_id)? 
+ .get_devicelist_version(user_id) + .await .unwrap_or(0) - .try_into() - .expect("version will not grow that large"), + .try_into()?, devices: services .users - .all_devices_metadata(&body.user_id) - .filter_map(Result::ok) - .filter_map(|metadata| { - let device_id_string = metadata.device_id.as_str().to_owned(); + .all_devices_metadata(user_id) + .filter_map(|metadata| async move { + let device_id = metadata.device_id.clone(); + let device_id_clone = device_id.clone(); + let device_id_string = device_id.as_str().to_owned(); let device_display_name = if services.globals.allow_device_name_federation() { - metadata.display_name + metadata.display_name.clone() } else { Some(device_id_string) }; - Some(UserDevice { - keys: services - .users - .get_device_keys(&body.user_id, &metadata.device_id) - .ok()??, - device_id: metadata.device_id, - device_display_name, - }) + + services + .users + .get_device_keys(user_id, &device_id_clone) + .map_ok(|keys| UserDevice { + device_id, + keys, + device_display_name, + }) + .map(Result::ok) + .await }) - .collect(), + .collect() + .await, master_key: services .users - .get_master_key(None, &body.user_id, &|u| u.server_name() == origin)?, + .get_master_key(None, &body.user_id, &|u| u.server_name() == origin) + .await + .ok(), self_signing_key: services .users - .get_self_signing_key(None, &body.user_id, &|u| u.server_name() == origin)?, + .get_self_signing_key(None, &body.user_id, &|u| u.server_name() == origin) + .await + .ok(), }) } diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 71364734..cb957bc9 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -67,6 +67,7 @@ ctor.workspace = true cyborgtime.workspace = true either.workspace = true figment.workspace = true +futures.workspace = true http-body-util.workspace = true http.workspace = true image.workspace = true diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 48b9b58f..79e3d5b4 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs 
@@ -86,7 +86,7 @@ pub enum Error { #[error("There was a problem with the '{0}' directive in your configuration: {1}")] Config(&'static str, Cow<'static, str>), #[error("{0}")] - Conflict(&'static str), // This is only needed for when a room alias already exists + Conflict(Cow<'static, str>), // This is only needed for when a room alias already exists #[error(transparent)] ContentDisposition(#[from] ruma::http_headers::ContentDispositionParseError), #[error("{0}")] @@ -107,6 +107,8 @@ pub enum Error { Request(ruma::api::client::error::ErrorKind, Cow<'static, str>, http::StatusCode), #[error(transparent)] Ruma(#[from] ruma::api::client::error::Error), + #[error(transparent)] + StateRes(#[from] ruma::state_res::Error), #[error("uiaa")] Uiaa(ruma::api::client::uiaa::UiaaInfo), diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 439c831a..cf9ffe64 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -3,8 +3,6 @@ mod count; use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; -pub use builder::PduBuilder; -pub use count::PduCount; use ruma::{ canonical_json::redact_content_in_place, events::{ @@ -23,7 +21,8 @@ use serde_json::{ value::{to_raw_value, RawValue as RawJsonValue}, }; -use crate::{err, warn, Error}; +pub use self::{builder::PduBuilder, count::PduCount}; +use crate::{err, warn, Error, Result}; #[derive(Deserialize)] struct ExtractRedactedBecause { @@ -65,11 +64,12 @@ pub struct PduEvent { impl PduEvent { #[tracing::instrument(skip(self), level = "debug")] - pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> crate::Result<()> { + pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> Result<()> { self.unsigned = None; let mut content = serde_json::from_str(self.content.get()) .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; + redact_content_in_place(&mut content, &room_version_id, self.kind.to_string()) .map_err(|e| Error::Redaction(self.sender.server_name().to_owned(), 
e))?; @@ -98,31 +98,38 @@ impl PduEvent { unsigned.redacted_because.is_some() } - pub fn remove_transaction_id(&mut self) -> crate::Result<()> { - if let Some(unsigned) = &self.unsigned { - let mut unsigned: BTreeMap> = serde_json::from_str(unsigned.get()) - .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; - unsigned.remove("transaction_id"); - self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); - } + pub fn remove_transaction_id(&mut self) -> Result<()> { + let Some(unsigned) = &self.unsigned else { + return Ok(()); + }; + + let mut unsigned: BTreeMap> = + serde_json::from_str(unsigned.get()).map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; + + unsigned.remove("transaction_id"); + self.unsigned = to_raw_value(&unsigned) + .map(Some) + .expect("unsigned is valid"); Ok(()) } - pub fn add_age(&mut self) -> crate::Result<()> { + pub fn add_age(&mut self) -> Result<()> { let mut unsigned: BTreeMap> = self .unsigned .as_ref() .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) - .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; + .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; // deliberately allowing for the possibility of negative age let now: i128 = MilliSecondsSinceUnixEpoch::now().get().into(); let then: i128 = self.origin_server_ts.into(); let this_age = now.saturating_sub(then); - unsigned.insert("age".to_owned(), to_raw_value(&this_age).unwrap()); - self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); + unsigned.insert("age".to_owned(), to_raw_value(&this_age).expect("age is valid")); + self.unsigned = to_raw_value(&unsigned) + .map(Some) + .expect("unsigned is valid"); Ok(()) } @@ -369,9 +376,9 @@ impl state_res::Event for PduEvent { fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - fn prev_events(&self) -> Box + '_> { Box::new(self.prev_events.iter()) } + fn prev_events(&self) -> impl 
DoubleEndedIterator + Send + '_ { self.prev_events.iter() } - fn auth_events(&self) -> Box + '_> { Box::new(self.auth_events.iter()) } + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { self.auth_events.iter() } fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } } @@ -395,7 +402,7 @@ impl Ord for PduEvent { /// CanonicalJsonValue>`. pub fn gen_event_id_canonical_json( pdu: &RawJsonValue, room_version_id: &RoomVersionId, -) -> crate::Result<(OwnedEventId, CanonicalJsonObject)> { +) -> Result<(OwnedEventId, CanonicalJsonObject)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) .map_err(|e| err!(BadServerResponse(warn!("Error parsing incoming event: {e:?}"))))?; diff --git a/src/core/result/log_debug_err.rs b/src/core/result/log_debug_err.rs index be2000ae..8835afd1 100644 --- a/src/core/result/log_debug_err.rs +++ b/src/core/result/log_debug_err.rs @@ -1,18 +1,14 @@ -use std::fmt; +use std::fmt::Debug; use tracing::Level; use super::{DebugInspect, Result}; use crate::error; -pub trait LogDebugErr -where - E: fmt::Debug, -{ +pub trait LogDebugErr { #[must_use] fn err_debug_log(self, level: Level) -> Self; - #[inline] #[must_use] fn log_debug_err(self) -> Self where @@ -22,15 +18,9 @@ where } } -impl LogDebugErr for Result -where - E: fmt::Debug, -{ +impl LogDebugErr for Result { #[inline] - fn err_debug_log(self, level: Level) -> Self - where - Self: Sized, - { + fn err_debug_log(self, level: Level) -> Self { self.debug_inspect_err(|error| error::inspect_debug_log_level(&error, level)) } } diff --git a/src/core/result/log_err.rs b/src/core/result/log_err.rs index 079571f5..374a5e59 100644 --- a/src/core/result/log_err.rs +++ b/src/core/result/log_err.rs @@ -1,18 +1,14 @@ -use std::fmt; +use std::fmt::Display; use tracing::Level; use super::Result; use crate::error; -pub trait LogErr -where - E: fmt::Display, -{ +pub trait LogErr { #[must_use] fn err_log(self, level: Level) -> Self; - #[inline] #[must_use] fn log_err(self) 
-> Self where @@ -22,15 +18,7 @@ where } } -impl LogErr for Result -where - E: fmt::Display, -{ +impl LogErr for Result { #[inline] - fn err_log(self, level: Level) -> Self - where - Self: Sized, - { - self.inspect_err(|error| error::inspect_log_level(&error, level)) - } + fn err_log(self, level: Level) -> Self { self.inspect_err(|error| error::inspect_log_level(&error, level)) } } diff --git a/src/core/utils/algorithm.rs b/src/core/utils/algorithm.rs deleted file mode 100644 index 9bc1bc8a..00000000 --- a/src/core/utils/algorithm.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::cmp::Ordering; - -#[allow(clippy::impl_trait_in_params)] -pub fn common_elements( - mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, -) -> Option>> { - let first_iterator = iterators.next()?; - let mut other_iterators = iterators.map(Iterator::peekable).collect::>(); - - Some(first_iterator.filter(move |target| { - other_iterators.iter_mut().all(|it| { - while let Some(element) = it.peek() { - match check_order(element, target) { - Ordering::Greater => return false, // We went too far - Ordering::Equal => return true, // Element is in both iters - Ordering::Less => { - // Keep searching - it.next(); - }, - } - } - false - }) - })) -} diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 03b755e9..b1ea3709 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -1,4 +1,3 @@ -pub mod algorithm; pub mod bytes; pub mod content_disposition; pub mod debug; @@ -9,25 +8,30 @@ pub mod json; pub mod math; pub mod mutex_map; pub mod rand; +pub mod set; +pub mod stream; pub mod string; pub mod sys; mod tests; pub mod time; +pub use ::conduit_macros::implement; pub use ::ctor::{ctor, dtor}; -pub use algorithm::common_elements; -pub use bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}; -pub use conduit_macros::implement; -pub use debug::slice_truncated as debug_slice_truncated; -pub use hash::calculate_hash; -pub use html::Escape as HtmlEscape; 
-pub use json::{deserialize_from_str, to_canonical_object}; -pub use math::clamp; -pub use mutex_map::{Guard as MutexMapGuard, MutexMap}; -pub use rand::string as random_string; -pub use string::{str_from_bytes, string_from_bytes}; -pub use sys::available_parallelism; -pub use time::now_millis as millis_since_unix_epoch; + +pub use self::{ + bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, + debug::slice_truncated as debug_slice_truncated, + hash::calculate_hash, + html::Escape as HtmlEscape, + json::{deserialize_from_str, to_canonical_object}, + math::clamp, + mutex_map::{Guard as MutexMapGuard, MutexMap}, + rand::string as random_string, + stream::{IterStream, ReadyExt, TryReadyExt}, + string::{str_from_bytes, string_from_bytes}, + sys::available_parallelism, + time::now_millis as millis_since_unix_epoch, +}; #[inline] pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, source) } diff --git a/src/core/utils/set.rs b/src/core/utils/set.rs new file mode 100644 index 00000000..563f9df5 --- /dev/null +++ b/src/core/utils/set.rs @@ -0,0 +1,47 @@ +use std::cmp::{Eq, Ord}; + +use crate::{is_equal_to, is_less_than}; + +/// Intersection of sets +/// +/// Outputs the set of elements common to all input sets. Inputs do not have to +/// be sorted. If inputs are sorted a more optimized function is available in +/// this suite and should be used. +pub fn intersection(mut input: Iters) -> impl Iterator + Send +where + Iters: Iterator + Clone + Send, + Iter: Iterator + Send, + Item: Eq + Send, +{ + input.next().into_iter().flat_map(move |first| { + let input = input.clone(); + first.filter(move |targ| { + input + .clone() + .all(|mut other| other.any(is_equal_to!(*targ))) + }) + }) +} + +/// Intersection of sets +/// +/// Outputs the set of elements common to all input sets. Inputs must be sorted. 
+pub fn intersection_sorted(mut input: Iters) -> impl Iterator + Send +where + Iters: Iterator + Clone + Send, + Iter: Iterator + Send, + Item: Eq + Ord + Send, +{ + input.next().into_iter().flat_map(move |first| { + let mut input = input.clone().collect::>(); + first.filter(move |targ| { + input.iter_mut().all(|it| { + it.by_ref() + .skip_while(is_less_than!(targ)) + .peekable() + .peek() + .is_some_and(is_equal_to!(targ)) + }) + }) + }) +} diff --git a/src/core/utils/stream/cloned.rs b/src/core/utils/stream/cloned.rs new file mode 100644 index 00000000..d6a0e647 --- /dev/null +++ b/src/core/utils/stream/cloned.rs @@ -0,0 +1,20 @@ +use std::clone::Clone; + +use futures::{stream::Map, Stream, StreamExt}; + +pub trait Cloned<'a, T, S> +where + S: Stream, + T: Clone + 'a, +{ + fn cloned(self) -> Map T>; +} + +impl<'a, T, S> Cloned<'a, T, S> for S +where + S: Stream, + T: Clone + 'a, +{ + #[inline] + fn cloned(self) -> Map T> { self.map(Clone::clone) } +} diff --git a/src/core/utils/stream/expect.rs b/src/core/utils/stream/expect.rs new file mode 100644 index 00000000..3ab7181a --- /dev/null +++ b/src/core/utils/stream/expect.rs @@ -0,0 +1,17 @@ +use futures::{Stream, StreamExt, TryStream}; + +use crate::Result; + +pub trait TryExpect<'a, Item> { + fn expect_ok(self) -> impl Stream + Send + 'a; +} + +impl<'a, T, Item> TryExpect<'a, Item> for T +where + T: Stream> + TryStream + Send + 'a, +{ + #[inline] + fn expect_ok(self: T) -> impl Stream + Send + 'a { + self.map(|res| res.expect("stream expectation failure")) + } +} diff --git a/src/core/utils/stream/ignore.rs b/src/core/utils/stream/ignore.rs new file mode 100644 index 00000000..997aa4ba --- /dev/null +++ b/src/core/utils/stream/ignore.rs @@ -0,0 +1,21 @@ +use futures::{future::ready, Stream, StreamExt, TryStream}; + +use crate::{Error, Result}; + +pub trait TryIgnore<'a, Item> { + fn ignore_err(self) -> impl Stream + Send + 'a; + + fn ignore_ok(self) -> impl Stream + Send + 'a; +} + +impl<'a, T, Item> 
TryIgnore<'a, Item> for T +where + T: Stream> + TryStream + Send + 'a, + Item: Send + 'a, +{ + #[inline] + fn ignore_err(self: T) -> impl Stream + Send + 'a { self.filter_map(|res| ready(res.ok())) } + + #[inline] + fn ignore_ok(self: T) -> impl Stream + Send + 'a { self.filter_map(|res| ready(res.err())) } +} diff --git a/src/core/utils/stream/iter_stream.rs b/src/core/utils/stream/iter_stream.rs new file mode 100644 index 00000000..69edf64f --- /dev/null +++ b/src/core/utils/stream/iter_stream.rs @@ -0,0 +1,27 @@ +use futures::{ + stream, + stream::{Stream, TryStream}, + StreamExt, +}; + +pub trait IterStream { + /// Convert an Iterator into a Stream + fn stream(self) -> impl Stream::Item> + Send; + + /// Convert an Iterator into a TryStream + fn try_stream(self) -> impl TryStream::Item, Error = crate::Error> + Send; +} + +impl IterStream for I +where + I: IntoIterator + Send, + ::IntoIter: Send, +{ + #[inline] + fn stream(self) -> impl Stream::Item> + Send { stream::iter(self) } + + #[inline] + fn try_stream(self) -> impl TryStream::Item, Error = crate::Error> + Send { + self.stream().map(Ok) + } +} diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs new file mode 100644 index 00000000..781bd522 --- /dev/null +++ b/src/core/utils/stream/mod.rs @@ -0,0 +1,13 @@ +mod cloned; +mod expect; +mod ignore; +mod iter_stream; +mod ready; +mod try_ready; + +pub use cloned::Cloned; +pub use expect::TryExpect; +pub use ignore::TryIgnore; +pub use iter_stream::IterStream; +pub use ready::ReadyExt; +pub use try_ready::TryReadyExt; diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs new file mode 100644 index 00000000..13f730a7 --- /dev/null +++ b/src/core/utils/stream/ready.rs @@ -0,0 +1,109 @@ +//! 
Synchronous combinator extensions to futures::Stream + +use futures::{ + future::{ready, Ready}, + stream::{Any, Filter, FilterMap, Fold, ForEach, SkipWhile, Stream, StreamExt, TakeWhile}, +}; + +/// Synchronous combinators to augment futures::StreamExt. Most Stream +/// combinators take asynchronous arguments, but often only simple predicates +/// are required to steer a Stream like an Iterator. This suite provides a +/// convenience to reduce boilerplate by de-cluttering non-async predicates. +/// +/// This interface is not necessarily complete; feel free to add as-needed. +pub trait ReadyExt +where + S: Stream + Send + ?Sized, + Self: Stream + Send + Sized, +{ + fn ready_any(self, f: F) -> Any, impl FnMut(S::Item) -> Ready> + where + F: Fn(S::Item) -> bool; + + fn ready_filter<'a, F>(self, f: F) -> Filter, impl FnMut(&S::Item) -> Ready + 'a> + where + F: Fn(&S::Item) -> bool + 'a; + + fn ready_filter_map(self, f: F) -> FilterMap>, impl FnMut(S::Item) -> Ready>> + where + F: Fn(S::Item) -> Option; + + fn ready_fold(self, init: T, f: F) -> Fold, T, impl FnMut(T, S::Item) -> Ready> + where + F: Fn(T, S::Item) -> T; + + fn ready_for_each(self, f: F) -> ForEach, impl FnMut(S::Item) -> Ready<()>> + where + F: FnMut(S::Item); + + fn ready_take_while<'a, F>(self, f: F) -> TakeWhile, impl FnMut(&S::Item) -> Ready + 'a> + where + F: Fn(&S::Item) -> bool + 'a; + + fn ready_skip_while<'a, F>(self, f: F) -> SkipWhile, impl FnMut(&S::Item) -> Ready + 'a> + where + F: Fn(&S::Item) -> bool + 'a; +} + +impl ReadyExt for S +where + S: Stream + Send + ?Sized, + Self: Stream + Send + Sized, +{ + #[inline] + fn ready_any(self, f: F) -> Any, impl FnMut(S::Item) -> Ready> + where + F: Fn(S::Item) -> bool, + { + self.any(move |t| ready(f(t))) + } + + #[inline] + fn ready_filter<'a, F>(self, f: F) -> Filter, impl FnMut(&S::Item) -> Ready + 'a> + where + F: Fn(&S::Item) -> bool + 'a, + { + self.filter(move |t| ready(f(t))) + } + + #[inline] + fn ready_filter_map(self, f: F) -> 
FilterMap>, impl FnMut(S::Item) -> Ready>> + where + F: Fn(S::Item) -> Option, + { + self.filter_map(move |t| ready(f(t))) + } + + #[inline] + fn ready_fold(self, init: T, f: F) -> Fold, T, impl FnMut(T, S::Item) -> Ready> + where + F: Fn(T, S::Item) -> T, + { + self.fold(init, move |a, t| ready(f(a, t))) + } + + #[inline] + #[allow(clippy::unit_arg)] + fn ready_for_each(self, mut f: F) -> ForEach, impl FnMut(S::Item) -> Ready<()>> + where + F: FnMut(S::Item), + { + self.for_each(move |t| ready(f(t))) + } + + #[inline] + fn ready_take_while<'a, F>(self, f: F) -> TakeWhile, impl FnMut(&S::Item) -> Ready + 'a> + where + F: Fn(&S::Item) -> bool + 'a, + { + self.take_while(move |t| ready(f(t))) + } + + #[inline] + fn ready_skip_while<'a, F>(self, f: F) -> SkipWhile, impl FnMut(&S::Item) -> Ready + 'a> + where + F: Fn(&S::Item) -> bool + 'a, + { + self.skip_while(move |t| ready(f(t))) + } +} diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs new file mode 100644 index 00000000..ab37d9b3 --- /dev/null +++ b/src/core/utils/stream/try_ready.rs @@ -0,0 +1,35 @@ +//! Synchronous combinator extensions to futures::TryStream + +use futures::{ + future::{ready, Ready}, + stream::{AndThen, TryStream, TryStreamExt}, +}; + +use crate::Result; + +/// Synchronous combinators to augment futures::TryStreamExt. +/// +/// This interface is not necessarily complete; feel free to add as-needed. 
+pub trait TryReadyExt +where + S: TryStream> + Send + ?Sized, + Self: TryStream + Send + Sized, +{ + fn ready_and_then(self, f: F) -> AndThen>, impl FnMut(S::Ok) -> Ready>> + where + F: Fn(S::Ok) -> Result; +} + +impl TryReadyExt for S +where + S: TryStream> + Send + ?Sized, + Self: TryStream + Send + Sized, +{ + #[inline] + fn ready_and_then(self, f: F) -> AndThen>, impl FnMut(S::Ok) -> Ready>> + where + F: Fn(S::Ok) -> Result, + { + self.and_then(move |t| ready(f(t))) + } +} diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index 5880470a..84d35936 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -107,3 +107,133 @@ async fn mutex_map_contend() { tokio::try_join!(join_b, join_a).expect("joined"); assert!(map.is_empty(), "Must be empty"); } + +#[test] +#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] +fn set_intersection_none() { + use utils::set::intersection; + + let a: [&str; 0] = []; + let b: [&str; 0] = []; + let i = [a.iter(), b.iter()]; + let r = intersection(i.into_iter()); + assert_eq!(r.count(), 0); + + let a: [&str; 0] = []; + let b = ["abc", "def"]; + let i = [a.iter(), b.iter()]; + let r = intersection(i.into_iter()); + assert_eq!(r.count(), 0); + let i = [b.iter(), a.iter()]; + let r = intersection(i.into_iter()); + assert_eq!(r.count(), 0); + let i = [a.iter()]; + let r = intersection(i.into_iter()); + assert_eq!(r.count(), 0); + + let a = ["foo", "bar", "baz"]; + let b = ["def", "hij", "klm", "nop"]; + let i = [a.iter(), b.iter()]; + let r = intersection(i.into_iter()); + assert_eq!(r.count(), 0); +} + +#[test] +#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] +fn set_intersection_all() { + use utils::set::intersection; + + let a = ["foo"]; + let b = ["foo"]; + let i = [a.iter(), b.iter()]; + let r = intersection(i.into_iter()); + assert!(r.eq(["foo"].iter())); + + let a = ["foo", "bar"]; + let b = ["bar", "foo"]; + let i = [a.iter(), b.iter()]; + let r = 
intersection(i.into_iter()); + assert!(r.eq(["foo", "bar"].iter())); + let i = [b.iter()]; + let r = intersection(i.into_iter()); + assert!(r.eq(["bar", "foo"].iter())); + + let a = ["foo", "bar", "baz"]; + let b = ["baz", "foo", "bar"]; + let c = ["bar", "baz", "foo"]; + let i = [a.iter(), b.iter(), c.iter()]; + let r = intersection(i.into_iter()); + assert!(r.eq(["foo", "bar", "baz"].iter())); +} + +#[test] +#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] +fn set_intersection_some() { + use utils::set::intersection; + + let a = ["foo"]; + let b = ["bar", "foo"]; + let i = [a.iter(), b.iter()]; + let r = intersection(i.into_iter()); + assert!(r.eq(["foo"].iter())); + let i = [b.iter(), a.iter()]; + let r = intersection(i.into_iter()); + assert!(r.eq(["foo"].iter())); + + let a = ["abcdef", "foo", "hijkl", "abc"]; + let b = ["hij", "bar", "baz", "abc", "foo"]; + let c = ["abc", "xyz", "foo", "ghi"]; + let i = [a.iter(), b.iter(), c.iter()]; + let r = intersection(i.into_iter()); + assert!(r.eq(["foo", "abc"].iter())); +} + +#[test] +#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] +fn set_intersection_sorted_some() { + use utils::set::intersection_sorted; + + let a = ["bar"]; + let b = ["bar", "foo"]; + let i = [a.iter(), b.iter()]; + let r = intersection_sorted(i.into_iter()); + assert!(r.eq(["bar"].iter())); + let i = [b.iter(), a.iter()]; + let r = intersection_sorted(i.into_iter()); + assert!(r.eq(["bar"].iter())); + + let a = ["aaa", "ccc", "eee", "ggg"]; + let b = ["aaa", "bbb", "ccc", "ddd", "eee"]; + let c = ["bbb", "ccc", "eee", "fff"]; + let i = [a.iter(), b.iter(), c.iter()]; + let r = intersection_sorted(i.into_iter()); + assert!(r.eq(["ccc", "eee"].iter())); +} + +#[test] +#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] +fn set_intersection_sorted_all() { + use utils::set::intersection_sorted; + + let a = ["foo"]; + let b = ["foo"]; + let i = [a.iter(), b.iter()]; + let r = 
intersection_sorted(i.into_iter()); + assert!(r.eq(["foo"].iter())); + + let a = ["bar", "foo"]; + let b = ["bar", "foo"]; + let i = [a.iter(), b.iter()]; + let r = intersection_sorted(i.into_iter()); + assert!(r.eq(["bar", "foo"].iter())); + let i = [b.iter()]; + let r = intersection_sorted(i.into_iter()); + assert!(r.eq(["bar", "foo"].iter())); + + let a = ["bar", "baz", "foo"]; + let b = ["bar", "baz", "foo"]; + let c = ["bar", "baz", "foo"]; + let i = [a.iter(), b.iter(), c.iter()]; + let r = intersection_sorted(i.into_iter()); + assert!(r.eq(["bar", "baz", "foo"].iter())); +} diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 34d98416..b5eb7612 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -37,8 +37,11 @@ zstd_compression = [ [dependencies] conduit-core.workspace = true const-str.workspace = true +futures.workspace = true log.workspace = true rust-rocksdb.workspace = true +serde.workspace = true +serde_json.workspace = true tokio.workspace = true tracing.workspace = true diff --git a/src/database/database.rs b/src/database/database.rs index c357d50f..ac6f62e9 100644 --- a/src/database/database.rs +++ b/src/database/database.rs @@ -37,7 +37,7 @@ impl Database { pub fn cork_and_sync(&self) -> Cork { Cork::new(&self.db, true, true) } #[inline] - pub fn iter_maps(&self) -> impl Iterator + '_ { self.map.iter() } + pub fn iter_maps(&self) -> impl Iterator + Send + '_ { self.map.iter() } } impl Index<&str> for Database { diff --git a/src/database/de.rs b/src/database/de.rs new file mode 100644 index 00000000..8ce25aa3 --- /dev/null +++ b/src/database/de.rs @@ -0,0 +1,261 @@ +use conduit::{checked, debug::DebugInspect, err, utils::string, Error, Result}; +use serde::{ + de, + de::{DeserializeSeed, Visitor}, + Deserialize, +}; + +pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result +where + T: Deserialize<'a>, +{ + let mut deserializer = Deserializer { + buf, + pos: 0, + }; + + T::deserialize(&mut 
deserializer).debug_inspect(|_| { + deserializer + .finished() + .expect("deserialization failed to consume trailing bytes"); + }) +} + +pub(crate) struct Deserializer<'de> { + buf: &'de [u8], + pos: usize, +} + +/// Directive to ignore a record. This type can be used to skip deserialization +/// until the next separator is found. +#[derive(Debug, Deserialize)] +pub struct Ignore; + +impl<'de> Deserializer<'de> { + const SEP: u8 = b'\xFF'; + + fn finished(&self) -> Result<()> { + let pos = self.pos; + let len = self.buf.len(); + let parsed = &self.buf[0..pos]; + let unparsed = &self.buf[pos..]; + let remain = checked!(len - pos)?; + let trailing_sep = remain == 1 && unparsed[0] == Self::SEP; + (remain == 0 || trailing_sep) + .then_some(()) + .ok_or(err!(SerdeDe( + "{remain} trailing of {len} bytes not deserialized.\n{parsed:?}\n{unparsed:?}", + ))) + } + + #[inline] + fn record_next(&mut self) -> &'de [u8] { + self.buf[self.pos..] + .split(|b| *b == Deserializer::SEP) + .inspect(|record| self.inc_pos(record.len())) + .next() + .expect("remainder of buf even if SEP was not found") + } + + #[inline] + fn record_trail(&mut self) -> &'de [u8] { + let record = &self.buf[self.pos..]; + self.inc_pos(record.len()); + record + } + + #[inline] + fn record_start(&mut self) { + let started = self.pos != 0; + debug_assert!( + !started || self.buf[self.pos] == Self::SEP, + "Missing expected record separator at current position" + ); + + self.inc_pos(started.into()); + } + + #[inline] + fn inc_pos(&mut self, n: usize) { + self.pos = self.pos.saturating_add(n); + debug_assert!(self.pos <= self.buf.len(), "pos out of range"); + } +} + +impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { + type Error = Error; + + fn deserialize_map(self, _visitor: V) -> Result + where + V: Visitor<'de>, + { + unimplemented!("deserialize Map not implemented") + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_seq(self) + } + + fn 
deserialize_tuple(self, _len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_seq(self) + } + + fn deserialize_tuple_struct(self, _name: &'static str, _len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_seq(self) + } + + fn deserialize_struct( + self, _name: &'static str, _fields: &'static [&'static str], _visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + unimplemented!("deserialize Struct not implemented") + } + + fn deserialize_unit_struct(self, name: &'static str, visitor: V) -> Result + where + V: Visitor<'de>, + { + match name { + "Ignore" => self.record_next(), + _ => unimplemented!("Unrecognized deserialization Directive {name:?}"), + }; + + visitor.visit_unit() + } + + fn deserialize_newtype_struct(self, _name: &'static str, _visitor: V) -> Result + where + V: Visitor<'de>, + { + unimplemented!("deserialize Newtype Struct not implemented") + } + + fn deserialize_enum( + self, _name: &'static str, _variants: &'static [&'static str], _visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + unimplemented!("deserialize Enum not implemented") + } + + fn deserialize_option>(self, _visitor: V) -> Result { + unimplemented!("deserialize Option not implemented") + } + + fn deserialize_bool>(self, _visitor: V) -> Result { + unimplemented!("deserialize bool not implemented") + } + + fn deserialize_i8>(self, _visitor: V) -> Result { + unimplemented!("deserialize i8 not implemented") + } + + fn deserialize_i16>(self, _visitor: V) -> Result { + unimplemented!("deserialize i16 not implemented") + } + + fn deserialize_i32>(self, _visitor: V) -> Result { + unimplemented!("deserialize i32 not implemented") + } + + fn deserialize_i64>(self, visitor: V) -> Result { + let bytes: [u8; size_of::()] = self.buf[self.pos..].try_into()?; + self.pos = self.pos.saturating_add(size_of::()); + visitor.visit_i64(i64::from_be_bytes(bytes)) + } + + fn deserialize_u8>(self, _visitor: V) -> Result { + 
unimplemented!("deserialize u8 not implemented") + } + + fn deserialize_u16>(self, _visitor: V) -> Result { + unimplemented!("deserialize u16 not implemented") + } + + fn deserialize_u32>(self, _visitor: V) -> Result { + unimplemented!("deserialize u32 not implemented") + } + + fn deserialize_u64>(self, visitor: V) -> Result { + let bytes: [u8; size_of::()] = self.buf[self.pos..].try_into()?; + self.pos = self.pos.saturating_add(size_of::()); + visitor.visit_u64(u64::from_be_bytes(bytes)) + } + + fn deserialize_f32>(self, _visitor: V) -> Result { + unimplemented!("deserialize f32 not implemented") + } + + fn deserialize_f64>(self, _visitor: V) -> Result { + unimplemented!("deserialize f64 not implemented") + } + + fn deserialize_char>(self, _visitor: V) -> Result { + unimplemented!("deserialize char not implemented") + } + + fn deserialize_str>(self, visitor: V) -> Result { + let input = self.record_next(); + let out = string::str_from_bytes(input)?; + visitor.visit_borrowed_str(out) + } + + fn deserialize_string>(self, visitor: V) -> Result { + let input = self.record_next(); + let out = string::string_from_bytes(input)?; + visitor.visit_string(out) + } + + fn deserialize_bytes>(self, visitor: V) -> Result { + let input = self.record_trail(); + visitor.visit_borrowed_bytes(input) + } + + fn deserialize_byte_buf>(self, _visitor: V) -> Result { + unimplemented!("deserialize Byte Buf not implemented") + } + + fn deserialize_unit>(self, _visitor: V) -> Result { + unimplemented!("deserialize Unit Struct not implemented") + } + + fn deserialize_identifier>(self, _visitor: V) -> Result { + unimplemented!("deserialize Identifier not implemented") + } + + fn deserialize_ignored_any>(self, _visitor: V) -> Result { + unimplemented!("deserialize Ignored Any not implemented") + } + + fn deserialize_any>(self, _visitor: V) -> Result { + unimplemented!("deserialize any not implemented") + } +} + +impl<'a, 'de: 'a> de::SeqAccess<'de> for &'a mut Deserializer<'de> { + type Error = 
Error; + + fn next_element_seed(&mut self, seed: T) -> Result> + where + T: DeserializeSeed<'de>, + { + if self.pos >= self.buf.len() { + return Ok(None); + } + + self.record_start(); + seed.deserialize(&mut **self).map(Some) + } +} diff --git a/src/database/deserialized.rs b/src/database/deserialized.rs new file mode 100644 index 00000000..7da112d5 --- /dev/null +++ b/src/database/deserialized.rs @@ -0,0 +1,34 @@ +use std::convert::identity; + +use conduit::Result; +use serde::Deserialize; + +pub trait Deserialized { + fn map_de(self, f: F) -> Result + where + F: FnOnce(T) -> U, + T: for<'de> Deserialize<'de>; + + fn map_json(self, f: F) -> Result + where + F: FnOnce(T) -> U, + T: for<'de> Deserialize<'de>; + + #[inline] + fn deserialized(self) -> Result + where + T: for<'de> Deserialize<'de>, + Self: Sized, + { + self.map_de(identity::) + } + + #[inline] + fn deserialized_json(self) -> Result + where + T: for<'de> Deserialize<'de>, + Self: Sized, + { + self.map_json(identity::) + } +} diff --git a/src/database/engine.rs b/src/database/engine.rs index 3850c1d3..067232e6 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -106,7 +106,7 @@ impl Engine { })) } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self), level = "trace")] pub(crate) fn open_cf(&self, name: &str) -> Result>> { let mut cfs = self.cfs.lock().expect("locked"); if !cfs.contains(name) { diff --git a/src/database/handle.rs b/src/database/handle.rs index 0b45a75f..89d87137 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -1,6 +1,10 @@ -use std::ops::Deref; +use std::{fmt, fmt::Debug, ops::Deref}; +use conduit::Result; use rocksdb::DBPinnableSlice; +use serde::{Deserialize, Serialize, Serializer}; + +use crate::{keyval::deserialize_val, Deserialized, Slice}; pub struct Handle<'a> { val: DBPinnableSlice<'a>, @@ -14,14 +18,91 @@ impl<'a> From> for Handle<'a> { } } +impl Debug for Handle<'_> { + fn fmt(&self, out: &mut fmt::Formatter<'_>) -> 
fmt::Result { + let val: &Slice = self; + let ptr = val.as_ptr(); + let len = val.len(); + write!(out, "Handle {{val: {{ptr: {ptr:?}, len: {len}}}}}") + } +} + +impl Serialize for Handle<'_> { + #[inline] + fn serialize(&self, serializer: S) -> Result { + let bytes: &Slice = self; + serializer.serialize_bytes(bytes) + } +} + impl Deref for Handle<'_> { - type Target = [u8]; + type Target = Slice; #[inline] fn deref(&self) -> &Self::Target { &self.val } } -impl AsRef<[u8]> for Handle<'_> { +impl AsRef for Handle<'_> { #[inline] - fn as_ref(&self) -> &[u8] { &self.val } + fn as_ref(&self) -> &Slice { &self.val } +} + +impl Deserialized for Result> { + #[inline] + fn map_json(self, f: F) -> Result + where + F: FnOnce(T) -> U, + T: for<'de> Deserialize<'de>, + { + self?.map_json(f) + } + + #[inline] + fn map_de(self, f: F) -> Result + where + F: FnOnce(T) -> U, + T: for<'de> Deserialize<'de>, + { + self?.map_de(f) + } +} + +impl<'a> Deserialized for Result<&'a Handle<'a>> { + #[inline] + fn map_json(self, f: F) -> Result + where + F: FnOnce(T) -> U, + T: for<'de> Deserialize<'de>, + { + self.and_then(|handle| handle.map_json(f)) + } + + #[inline] + fn map_de(self, f: F) -> Result + where + F: FnOnce(T) -> U, + T: for<'de> Deserialize<'de>, + { + self.and_then(|handle| handle.map_de(f)) + } +} + +impl<'a> Deserialized for &'a Handle<'a> { + fn map_json(self, f: F) -> Result + where + F: FnOnce(T) -> U, + T: for<'de> Deserialize<'de>, + { + serde_json::from_slice::(self.as_ref()) + .map_err(Into::into) + .map(f) + } + + fn map_de(self, f: F) -> Result + where + F: FnOnce(T) -> U, + T: for<'de> Deserialize<'de>, + { + deserialize_val(self.as_ref()).map(f) + } } diff --git a/src/database/iter.rs b/src/database/iter.rs deleted file mode 100644 index 4845e977..00000000 --- a/src/database/iter.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::{iter::FusedIterator, sync::Arc}; - -use conduit::Result; -use rocksdb::{ColumnFamily, DBRawIteratorWithThreadMode, Direction, IteratorMode, 
ReadOptions}; - -use crate::{ - engine::Db, - result, - slice::{OwnedKeyVal, OwnedKeyValPair}, - Engine, -}; - -type Cursor<'cursor> = DBRawIteratorWithThreadMode<'cursor, Db>; - -struct State<'cursor> { - cursor: Cursor<'cursor>, - direction: Direction, - valid: bool, - init: bool, -} - -impl<'cursor> State<'cursor> { - pub(crate) fn new( - db: &'cursor Arc, cf: &'cursor Arc, opts: ReadOptions, mode: &IteratorMode<'_>, - ) -> Self { - let mut cursor = db.db.raw_iterator_cf_opt(&**cf, opts); - let direction = into_direction(mode); - let valid = seek_init(&mut cursor, mode); - Self { - cursor, - direction, - valid, - init: true, - } - } -} - -pub struct Iter<'cursor> { - state: State<'cursor>, -} - -impl<'cursor> Iter<'cursor> { - pub(crate) fn new( - db: &'cursor Arc, cf: &'cursor Arc, opts: ReadOptions, mode: &IteratorMode<'_>, - ) -> Self { - Self { - state: State::new(db, cf, opts, mode), - } - } -} - -impl Iterator for Iter<'_> { - type Item = OwnedKeyValPair; - - fn next(&mut self) -> Option { - if !self.state.init && self.state.valid { - seek_next(&mut self.state.cursor, self.state.direction); - } else if self.state.init { - self.state.init = false; - } - - self.state - .cursor - .item() - .map(OwnedKeyVal::from) - .map(OwnedKeyVal::to_tuple) - .or_else(|| { - when_invalid(&mut self.state).expect("iterator invalidated due to error"); - None - }) - } -} - -impl FusedIterator for Iter<'_> {} - -fn when_invalid(state: &mut State<'_>) -> Result<()> { - state.valid = false; - result(state.cursor.status()) -} - -fn seek_next(cursor: &mut Cursor<'_>, direction: Direction) { - match direction { - Direction::Forward => cursor.next(), - Direction::Reverse => cursor.prev(), - } -} - -fn seek_init(cursor: &mut Cursor<'_>, mode: &IteratorMode<'_>) -> bool { - use Direction::{Forward, Reverse}; - use IteratorMode::{End, From, Start}; - - match mode { - Start => cursor.seek_to_first(), - End => cursor.seek_to_last(), - From(key, Forward) => cursor.seek(key), - From(key, 
Reverse) => cursor.seek_for_prev(key), - }; - - cursor.valid() -} - -fn into_direction(mode: &IteratorMode<'_>) -> Direction { - use Direction::{Forward, Reverse}; - use IteratorMode::{End, From, Start}; - - match mode { - Start | From(_, Forward) => Forward, - End | From(_, Reverse) => Reverse, - } -} diff --git a/src/database/keyval.rs b/src/database/keyval.rs new file mode 100644 index 00000000..c9d25977 --- /dev/null +++ b/src/database/keyval.rs @@ -0,0 +1,83 @@ +use conduit::Result; +use serde::Deserialize; + +use crate::de; + +pub(crate) type OwnedKeyVal = (Vec, Vec); +pub(crate) type OwnedKey = Vec; +pub(crate) type OwnedVal = Vec; + +pub type KeyVal<'a, K = &'a Slice, V = &'a Slice> = (Key<'a, K>, Val<'a, V>); +pub type Key<'a, T = &'a Slice> = T; +pub type Val<'a, T = &'a Slice> = T; + +pub type Slice = [u8]; + +#[inline] +pub(crate) fn _expect_deserialize<'a, K, V>(kv: Result>) -> KeyVal<'a, K, V> +where + K: Deserialize<'a>, + V: Deserialize<'a>, +{ + result_deserialize(kv).expect("failed to deserialize result key/val") +} + +#[inline] +pub(crate) fn _expect_deserialize_key<'a, K>(key: Result>) -> Key<'a, K> +where + K: Deserialize<'a>, +{ + result_deserialize_key(key).expect("failed to deserialize result key") +} + +#[inline] +pub(crate) fn result_deserialize<'a, K, V>(kv: Result>) -> Result> +where + K: Deserialize<'a>, + V: Deserialize<'a>, +{ + deserialize(kv?) +} + +#[inline] +pub(crate) fn result_deserialize_key<'a, K>(key: Result>) -> Result> +where + K: Deserialize<'a>, +{ + deserialize_key(key?) 
+} + +#[inline] +pub(crate) fn deserialize<'a, K, V>(kv: KeyVal<'a>) -> Result> +where + K: Deserialize<'a>, + V: Deserialize<'a>, +{ + Ok((deserialize_key::(kv.0)?, deserialize_val::(kv.1)?)) +} + +#[inline] +pub(crate) fn deserialize_key<'a, K>(key: Key<'a>) -> Result> +where + K: Deserialize<'a>, +{ + de::from_slice::(key) +} + +#[inline] +pub(crate) fn deserialize_val<'a, V>(val: Val<'a>) -> Result> +where + V: Deserialize<'a>, +{ + de::from_slice::(val) +} + +#[inline] +#[must_use] +pub fn to_owned(kv: KeyVal<'_>) -> OwnedKeyVal { (kv.0.to_owned(), kv.1.to_owned()) } + +#[inline] +pub fn key(kv: KeyVal<'_, K, V>) -> Key<'_, K> { kv.0 } + +#[inline] +pub fn val(kv: KeyVal<'_, K, V>) -> Val<'_, V> { kv.1 } diff --git a/src/database/map.rs b/src/database/map.rs index ddae8c81..a3cf32d4 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,15 +1,39 @@ -use std::{ffi::CStr, future::Future, mem::size_of, pin::Pin, sync::Arc}; +mod count; +mod keys; +mod keys_from; +mod keys_prefix; +mod rev_keys; +mod rev_keys_from; +mod rev_keys_prefix; +mod rev_stream; +mod rev_stream_from; +mod rev_stream_prefix; +mod stream; +mod stream_from; +mod stream_prefix; -use conduit::{utils, Result}; -use rocksdb::{ - AsColumnFamilyRef, ColumnFamily, Direction, IteratorMode, ReadOptions, WriteBatchWithTransaction, WriteOptions, +use std::{ + convert::AsRef, + ffi::CStr, + fmt, + fmt::{Debug, Display}, + future::Future, + io::Write, + pin::Pin, + sync::Arc, }; +use conduit::{err, Result}; +use futures::future; +use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteBatchWithTransaction, WriteOptions}; +use serde::Serialize; + use crate::{ - or_else, result, - slice::{Byte, Key, KeyVal, OwnedKey, OwnedKeyValPair, OwnedVal, Val}, + keyval::{OwnedKey, OwnedVal}, + ser, + util::{map_err, or_else}, watchers::Watchers, - Engine, Handle, Iter, + Engine, Handle, }; pub struct Map { @@ -21,8 +45,6 @@ pub struct Map { read_options: ReadOptions, } -type OwnedKeyValPairIter<'a> 
= Box + Send + 'a>; - impl Map { pub(crate) fn open(db: &Arc, name: &str) -> Result> { Ok(Arc::new(Self { @@ -35,14 +57,125 @@ impl Map { })) } - pub fn get(&self, key: &Key) -> Result>> { - let read_options = &self.read_options; - let res = self.db.db.get_pinned_cf_opt(&self.cf(), key, read_options); - - Ok(result(res)?.map(Handle::from)) + #[tracing::instrument(skip(self), fields(%self), level = "trace")] + pub fn del(&self, key: &K) + where + K: Serialize + ?Sized + Debug, + { + let mut buf = Vec::::with_capacity(64); + self.bdel(key, &mut buf); } - pub fn multi_get(&self, keys: &[&Key]) -> Result>> { + #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] + pub fn bdel(&self, key: &K, buf: &mut B) + where + K: Serialize + ?Sized + Debug, + B: Write + AsRef<[u8]>, + { + let key = ser::serialize(buf, key).expect("failed to serialize deletion key"); + self.remove(&key); + } + + #[tracing::instrument(level = "trace")] + pub fn remove(&self, key: &K) + where + K: AsRef<[u8]> + ?Sized + Debug, + { + let write_options = &self.write_options; + self.db + .db + .delete_cf_opt(&self.cf(), key, write_options) + .or_else(or_else) + .expect("database remove error"); + + if !self.db.corked() { + self.db.flush().expect("database flush error"); + } + } + + #[tracing::instrument(skip(self, value), fields(%self), level = "trace")] + pub fn insert(&self, key: &K, value: &V) + where + K: AsRef<[u8]> + ?Sized + Debug, + V: AsRef<[u8]> + ?Sized, + { + let write_options = &self.write_options; + self.db + .db + .put_cf_opt(&self.cf(), key, value, write_options) + .or_else(or_else) + .expect("database insert error"); + + if !self.db.corked() { + self.db.flush().expect("database flush error"); + } + + self.watchers.wake(key.as_ref()); + } + + #[tracing::instrument(skip(self), fields(%self), level = "trace")] + pub fn insert_batch<'a, I, K, V>(&'a self, iter: I) + where + I: Iterator + Send + Debug, + K: AsRef<[u8]> + Sized + Debug + 'a, + V: AsRef<[u8]> + Sized + 'a, + 
{ + let mut batch = WriteBatchWithTransaction::::default(); + for (key, val) in iter { + batch.put_cf(&self.cf(), key.as_ref(), val.as_ref()); + } + + let write_options = &self.write_options; + self.db + .db + .write_opt(batch, write_options) + .or_else(or_else) + .expect("database insert batch error"); + + if !self.db.corked() { + self.db.flush().expect("database flush error"); + } + } + + #[tracing::instrument(skip(self), fields(%self), level = "trace")] + pub fn qry(&self, key: &K) -> impl Future>> + Send + where + K: Serialize + ?Sized + Debug, + { + let mut buf = Vec::::with_capacity(64); + self.bqry(key, &mut buf) + } + + #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] + pub fn bqry(&self, key: &K, buf: &mut B) -> impl Future>> + Send + where + K: Serialize + ?Sized + Debug, + B: Write + AsRef<[u8]>, + { + let key = ser::serialize(buf, key).expect("failed to serialize query key"); + let val = self.get(key); + future::ready(val) + } + + #[tracing::instrument(skip(self), fields(%self), level = "trace")] + pub fn get(&self, key: &K) -> Result> + where + K: AsRef<[u8]> + ?Sized + Debug, + { + self.db + .db + .get_pinned_cf_opt(&self.cf(), key, &self.read_options) + .map_err(map_err)? + .map(Handle::from) + .ok_or(err!(Request(NotFound("Not found in database")))) + } + + #[tracing::instrument(skip(self), fields(%self), level = "trace")] + pub fn multi_get<'a, I, K>(&self, keys: I) -> Vec> + where + I: Iterator + ExactSizeIterator + Send + Debug, + K: AsRef<[u8]> + Sized + Debug + 'a, + { // Optimization can be `true` if key vector is pre-sorted **by the column // comparator**. 
const SORTED: bool = false; @@ -57,140 +190,25 @@ impl Map { match res { Ok(Some(res)) => ret.push(Some((*res).to_vec())), Ok(None) => ret.push(None), - Err(e) => return or_else(e), + Err(e) => or_else(e).expect("database multiget error"), } } - Ok(ret) + ret } - pub fn insert(&self, key: &Key, value: &Val) -> Result<()> { - let write_options = &self.write_options; - self.db - .db - .put_cf_opt(&self.cf(), key, value, write_options) - .or_else(or_else)?; - - if !self.db.corked() { - self.db.flush()?; - } - - self.watchers.wake(key); - - Ok(()) - } - - pub fn insert_batch<'a, I>(&'a self, iter: I) -> Result<()> + #[inline] + pub fn watch_prefix<'a, K>(&'a self, prefix: &K) -> Pin + Send + 'a>> where - I: Iterator>, + K: AsRef<[u8]> + ?Sized + Debug, { - let mut batch = WriteBatchWithTransaction::::default(); - for KeyVal(key, value) in iter { - batch.put_cf(&self.cf(), key, value); - } - - let write_options = &self.write_options; - let res = self.db.db.write_opt(batch, write_options); - - if !self.db.corked() { - self.db.flush()?; - } - - result(res) - } - - pub fn remove(&self, key: &Key) -> Result<()> { - let write_options = &self.write_options; - let res = self.db.db.delete_cf_opt(&self.cf(), key, write_options); - - if !self.db.corked() { - self.db.flush()?; - } - - result(res) - } - - pub fn remove_batch<'a, I>(&'a self, iter: I) -> Result<()> - where - I: Iterator, - { - let mut batch = WriteBatchWithTransaction::::default(); - for key in iter { - batch.delete_cf(&self.cf(), key); - } - - let write_options = &self.write_options; - let res = self.db.db.write_opt(batch, write_options); - - if !self.db.corked() { - self.db.flush()?; - } - - result(res) - } - - pub fn iter(&self) -> OwnedKeyValPairIter<'_> { - let mode = IteratorMode::Start; - let read_options = read_options_default(); - Box::new(Iter::new(&self.db, &self.cf, read_options, &mode)) - } - - pub fn iter_from(&self, from: &Key, reverse: bool) -> OwnedKeyValPairIter<'_> { - let direction = if reverse { 
- Direction::Reverse - } else { - Direction::Forward - }; - let mode = IteratorMode::From(from, direction); - let read_options = read_options_default(); - Box::new(Iter::new(&self.db, &self.cf, read_options, &mode)) - } - - pub fn scan_prefix(&self, prefix: OwnedKey) -> OwnedKeyValPairIter<'_> { - let mode = IteratorMode::From(&prefix, Direction::Forward); - let read_options = read_options_default(); - Box::new(Iter::new(&self.db, &self.cf, read_options, &mode).take_while(move |(k, _)| k.starts_with(&prefix))) - } - - pub fn increment(&self, key: &Key) -> Result<[Byte; size_of::()]> { - let old = self.get(key)?; - let new = utils::increment(old.as_deref()); - self.insert(key, &new)?; - - if !self.db.corked() { - self.db.flush()?; - } - - Ok(new) - } - - pub fn increment_batch<'a, I>(&'a self, iter: I) -> Result<()> - where - I: Iterator, - { - let mut batch = WriteBatchWithTransaction::::default(); - for key in iter { - let old = self.get(key)?; - let new = utils::increment(old.as_deref()); - batch.put_cf(&self.cf(), key, new); - } - - let write_options = &self.write_options; - let res = self.db.db.write_opt(batch, write_options); - - if !self.db.corked() { - self.db.flush()?; - } - - result(res) - } - - pub fn watch_prefix<'a>(&'a self, prefix: &Key) -> Pin + Send + 'a>> { - self.watchers.watch(prefix) + self.watchers.watch(prefix.as_ref()) } + #[inline] pub fn property_integer(&self, name: &CStr) -> Result { self.db.property_integer(&self.cf(), name) } + #[inline] pub fn property(&self, name: &str) -> Result { self.db.property(&self.cf(), name) } #[inline] @@ -199,12 +217,12 @@ impl Map { fn cf(&self) -> impl AsColumnFamilyRef + '_ { &*self.cf } } -impl<'a> IntoIterator for &'a Map { - type IntoIter = Box + Send + 'a>; - type Item = OwnedKeyValPair; +impl Debug for Map { + fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result { write!(out, "Map {{name: {0}}}", self.name) } +} - #[inline] - fn into_iter(self) -> Self::IntoIter { self.iter() } +impl Display 
for Map { + fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result { write!(out, "{0}", self.name) } } fn open(db: &Arc, name: &str) -> Result> { diff --git a/src/database/map/count.rs b/src/database/map/count.rs new file mode 100644 index 00000000..4356b71f --- /dev/null +++ b/src/database/map/count.rs @@ -0,0 +1,36 @@ +use std::{fmt::Debug, future::Future}; + +use conduit::implement; +use futures::stream::StreamExt; +use serde::Serialize; + +use crate::de::Ignore; + +/// Count the total number of entries in the map. +#[implement(super::Map)] +#[inline] +pub fn count(&self) -> impl Future + Send + '_ { self.keys::().count() } + +/// Count the number of entries in the map starting from a lower-bound. +/// +/// - From is a structured key +#[implement(super::Map)] +#[inline] +pub fn count_from<'a, P>(&'a self, from: &P) -> impl Future + Send + 'a +where + P: Serialize + ?Sized + Debug + 'a, +{ + self.keys_from::(from).count() +} + +/// Count the number of entries in the map matching a prefix. 
+/// +/// - Prefix is structured key +#[implement(super::Map)] +#[inline] +pub fn count_prefix<'a, P>(&'a self, prefix: &P) -> impl Future + Send + 'a +where + P: Serialize + ?Sized + Debug + 'a, +{ + self.keys_prefix::(prefix).count() +} diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs new file mode 100644 index 00000000..2396494c --- /dev/null +++ b/src/database/map/keys.rs @@ -0,0 +1,21 @@ +use conduit::{implement, Result}; +use futures::{Stream, StreamExt}; +use serde::Deserialize; + +use crate::{keyval, keyval::Key, stream}; + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn keys<'a, K>(&'a self) -> impl Stream>> + Send +where + K: Deserialize<'a> + Send, +{ + self.raw_keys().map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn raw_keys(&self) -> impl Stream>> + Send { + let opts = super::read_options_default(); + stream::Keys::new(&self.db, &self.cf, opts, None) +} diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs new file mode 100644 index 00000000..1993750a --- /dev/null +++ b/src/database/map/keys_from.rs @@ -0,0 +1,49 @@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::{implement, Result}; +use futures::{Stream, StreamExt}; +use serde::{Deserialize, Serialize}; + +use crate::{keyval, keyval::Key, ser, stream}; + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn keys_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, + K: Deserialize<'a> + Send, +{ + self.keys_raw_from(from) + .map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn keys_raw_from

    (&self, from: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, +{ + let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); + self.raw_keys_from(&key) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn keys_from_raw<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +where + P: AsRef<[u8]> + ?Sized + Debug + Sync, + K: Deserialize<'a> + Send, +{ + self.raw_keys_from(from) + .map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn raw_keys_from

    (&self, from: &P) -> impl Stream>> + Send +where + P: AsRef<[u8]> + ?Sized + Debug, +{ + let opts = super::read_options_default(); + stream::Keys::new(&self.db, &self.cf, opts, Some(from.as_ref())) +} diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs new file mode 100644 index 00000000..d6c0927b --- /dev/null +++ b/src/database/map/keys_prefix.rs @@ -0,0 +1,54 @@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::{implement, Result}; +use futures::{ + future, + stream::{Stream, StreamExt}, + TryStreamExt, +}; +use serde::{Deserialize, Serialize}; + +use crate::{keyval, keyval::Key, ser}; + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn keys_prefix<'a, K, P>(&'a self, prefix: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, + K: Deserialize<'a> + Send, +{ + self.keys_raw_prefix(prefix) + .map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn keys_raw_prefix

    (&self, prefix: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, +{ + let key = ser::serialize_to_vec(prefix).expect("failed to serialize query key"); + self.raw_keys_from(&key) + .try_take_while(move |k: &Key<'_>| future::ok(k.starts_with(&key))) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn keys_prefix_raw<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, + K: Deserialize<'a> + Send + 'a, +{ + self.raw_keys_prefix(prefix) + .map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn raw_keys_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, +{ + self.raw_keys_from(prefix) + .try_take_while(|k: &Key<'_>| future::ok(k.starts_with(prefix.as_ref()))) +} diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs new file mode 100644 index 00000000..449ccfff --- /dev/null +++ b/src/database/map/rev_keys.rs @@ -0,0 +1,21 @@ +use conduit::{implement, Result}; +use futures::{Stream, StreamExt}; +use serde::Deserialize; + +use crate::{keyval, keyval::Key, stream}; + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_keys<'a, K>(&'a self) -> impl Stream>> + Send +where + K: Deserialize<'a> + Send, +{ + self.rev_raw_keys().map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_raw_keys(&self) -> impl Stream>> + Send { + let opts = super::read_options_default(); + stream::KeysRev::new(&self.db, &self.cf, opts, None) +} diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs new file mode 100644 index 00000000..e012e60a --- /dev/null +++ b/src/database/map/rev_keys_from.rs @@ -0,0 +1,49 
@@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::{implement, Result}; +use futures::{Stream, StreamExt}; +use serde::{Deserialize, Serialize}; + +use crate::{keyval, keyval::Key, ser, stream}; + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_keys_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, + K: Deserialize<'a> + Send, +{ + self.rev_keys_raw_from(from) + .map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_keys_raw_from

    (&self, from: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, +{ + let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); + self.rev_raw_keys_from(&key) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_keys_from_raw<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +where + P: AsRef<[u8]> + ?Sized + Debug + Sync, + K: Deserialize<'a> + Send, +{ + self.rev_raw_keys_from(from) + .map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_raw_keys_from

    (&self, from: &P) -> impl Stream>> + Send +where + P: AsRef<[u8]> + ?Sized + Debug, +{ + let opts = super::read_options_default(); + stream::KeysRev::new(&self.db, &self.cf, opts, Some(from.as_ref())) +} diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs new file mode 100644 index 00000000..162c4f9b --- /dev/null +++ b/src/database/map/rev_keys_prefix.rs @@ -0,0 +1,54 @@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::{implement, Result}; +use futures::{ + future, + stream::{Stream, StreamExt}, + TryStreamExt, +}; +use serde::{Deserialize, Serialize}; + +use crate::{keyval, keyval::Key, ser}; + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_keys_prefix<'a, K, P>(&'a self, prefix: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, + K: Deserialize<'a> + Send, +{ + self.rev_keys_raw_prefix(prefix) + .map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_keys_raw_prefix

    (&self, prefix: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, +{ + let key = ser::serialize_to_vec(prefix).expect("failed to serialize query key"); + self.rev_raw_keys_from(&key) + .try_take_while(move |k: &Key<'_>| future::ok(k.starts_with(&key))) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_keys_prefix_raw<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, + K: Deserialize<'a> + Send + 'a, +{ + self.rev_raw_keys_prefix(prefix) + .map(keyval::result_deserialize_key::) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_raw_keys_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, +{ + self.rev_raw_keys_from(prefix) + .try_take_while(|k: &Key<'_>| future::ok(k.starts_with(prefix.as_ref()))) +} diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs new file mode 100644 index 00000000..de22fd5c --- /dev/null +++ b/src/database/map/rev_stream.rs @@ -0,0 +1,29 @@ +use conduit::{implement, Result}; +use futures::stream::{Stream, StreamExt}; +use serde::Deserialize; + +use crate::{keyval, keyval::KeyVal, stream}; + +/// Iterate key-value entries in the map from the end. +/// +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_stream<'a, K, V>(&'a self) -> impl Stream>> + Send +where + K: Deserialize<'a> + Send, + V: Deserialize<'a> + Send, +{ + self.rev_raw_stream() + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map from the end. 
+/// +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_raw_stream(&self) -> impl Stream>> + Send { + let opts = super::read_options_default(); + stream::ItemsRev::new(&self.db, &self.cf, opts, None) +} diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs new file mode 100644 index 00000000..650cf038 --- /dev/null +++ b/src/database/map/rev_stream_from.rs @@ -0,0 +1,68 @@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::{implement, Result}; +use futures::stream::{Stream, StreamExt}; +use serde::{Deserialize, Serialize}; + +use crate::{keyval, keyval::KeyVal, ser, stream}; + +/// Iterate key-value entries in the map starting from upper-bound. +/// +/// - Query is serialized +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_stream_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, + K: Deserialize<'a> + Send, + V: Deserialize<'a> + Send, +{ + let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); + self.rev_stream_raw_from(&key) + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map starting from upper-bound. +/// +/// - Query is serialized +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_stream_raw_from

    (&self, from: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, +{ + let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); + self.rev_raw_stream_from(&key) +} + +/// Iterate key-value entries in the map starting from upper-bound. +/// +/// - Query is raw +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_stream_from_raw<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +where + P: AsRef<[u8]> + ?Sized + Debug + Sync, + K: Deserialize<'a> + Send, + V: Deserialize<'a> + Send, +{ + self.rev_raw_stream_from(from) + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map starting from upper-bound. +/// +/// - Query is raw +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_raw_stream_from

    (&self, from: &P) -> impl Stream>> + Send +where + P: AsRef<[u8]> + ?Sized + Debug, +{ + let opts = super::read_options_default(); + stream::ItemsRev::new(&self.db, &self.cf, opts, Some(from.as_ref())) +} diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs new file mode 100644 index 00000000..9ef89e9c --- /dev/null +++ b/src/database/map/rev_stream_prefix.rs @@ -0,0 +1,74 @@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::{implement, Result}; +use futures::{ + future, + stream::{Stream, StreamExt}, + TryStreamExt, +}; +use serde::{Deserialize, Serialize}; + +use crate::{keyval, keyval::KeyVal, ser}; + +/// Iterate key-value entries in the map where the key matches a prefix. +/// +/// - Query is serialized +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_stream_prefix<'a, K, V, P>(&'a self, prefix: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, + K: Deserialize<'a> + Send, + V: Deserialize<'a> + Send, +{ + self.rev_stream_raw_prefix(prefix) + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map where the key matches a prefix. +/// +/// - Query is serialized +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_stream_raw_prefix

    (&self, prefix: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, +{ + let key = ser::serialize_to_vec(prefix).expect("failed to serialize query key"); + self.rev_raw_stream_from(&key) + .try_take_while(move |(k, _): &KeyVal<'_>| future::ok(k.starts_with(&key))) +} + +/// Iterate key-value entries in the map where the key matches a prefix. +/// +/// - Query is raw +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_stream_prefix_raw<'a, K, V, P>( + &'a self, prefix: &'a P, +) -> impl Stream>> + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, + K: Deserialize<'a> + Send + 'a, + V: Deserialize<'a> + Send + 'a, +{ + self.rev_raw_stream_prefix(prefix) + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map where the key matches a prefix. +/// +/// - Query is raw +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn rev_raw_stream_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, +{ + self.rev_raw_stream_from(prefix) + .try_take_while(|(k, _): &KeyVal<'_>| future::ok(k.starts_with(prefix.as_ref()))) +} diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs new file mode 100644 index 00000000..dfbea072 --- /dev/null +++ b/src/database/map/stream.rs @@ -0,0 +1,28 @@ +use conduit::{implement, Result}; +use futures::stream::{Stream, StreamExt}; +use serde::Deserialize; + +use crate::{keyval, keyval::KeyVal, stream}; + +/// Iterate key-value entries in the map from the beginning. 
+/// +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn stream<'a, K, V>(&'a self) -> impl Stream>> + Send +where + K: Deserialize<'a> + Send, + V: Deserialize<'a> + Send, +{ + self.raw_stream().map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map from the beginning. +/// +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn raw_stream(&self) -> impl Stream>> + Send { + let opts = super::read_options_default(); + stream::Items::new(&self.db, &self.cf, opts, None) +} diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs new file mode 100644 index 00000000..153d5bb6 --- /dev/null +++ b/src/database/map/stream_from.rs @@ -0,0 +1,68 @@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::{implement, Result}; +use futures::stream::{Stream, StreamExt}; +use serde::{Deserialize, Serialize}; + +use crate::{keyval, keyval::KeyVal, ser, stream}; + +/// Iterate key-value entries in the map starting from lower-bound. +/// +/// - Query is serialized +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn stream_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, + K: Deserialize<'a> + Send, + V: Deserialize<'a> + Send, +{ + let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); + self.stream_raw_from(&key) + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map starting from lower-bound. +/// +/// - Query is serialized +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn stream_raw_from

    (&self, from: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, +{ + let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); + self.raw_stream_from(&key) +} + +/// Iterate key-value entries in the map starting from lower-bound. +/// +/// - Query is raw +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn stream_from_raw<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +where + P: AsRef<[u8]> + ?Sized + Debug + Sync, + K: Deserialize<'a> + Send, + V: Deserialize<'a> + Send, +{ + self.raw_stream_from(from) + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map starting from lower-bound. +/// +/// - Query is raw +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn raw_stream_from

    (&self, from: &P) -> impl Stream>> + Send +where + P: AsRef<[u8]> + ?Sized + Debug, +{ + let opts = super::read_options_default(); + stream::Items::new(&self.db, &self.cf, opts, Some(from.as_ref())) +} diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs new file mode 100644 index 00000000..56154a8b --- /dev/null +++ b/src/database/map/stream_prefix.rs @@ -0,0 +1,74 @@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::{implement, Result}; +use futures::{ + future, + stream::{Stream, StreamExt}, + TryStreamExt, +}; +use serde::{Deserialize, Serialize}; + +use crate::{keyval, keyval::KeyVal, ser}; + +/// Iterate key-value entries in the map where the key matches a prefix. +/// +/// - Query is serialized +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn stream_prefix<'a, K, V, P>(&'a self, prefix: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, + K: Deserialize<'a> + Send, + V: Deserialize<'a> + Send, +{ + self.stream_raw_prefix(prefix) + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map where the key matches a prefix. +/// +/// - Query is serialized +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn stream_raw_prefix

    (&self, prefix: &P) -> impl Stream>> + Send +where + P: Serialize + ?Sized + Debug, +{ + let key = ser::serialize_to_vec(prefix).expect("failed to serialize query key"); + self.raw_stream_from(&key) + .try_take_while(move |(k, _): &KeyVal<'_>| future::ok(k.starts_with(&key))) +} + +/// Iterate key-value entries in the map where the key matches a prefix. +/// +/// - Query is raw +/// - Result is deserialized +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn stream_prefix_raw<'a, K, V, P>( + &'a self, prefix: &'a P, +) -> impl Stream>> + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, + K: Deserialize<'a> + Send + 'a, + V: Deserialize<'a> + Send + 'a, +{ + self.raw_stream_prefix(prefix) + .map(keyval::result_deserialize::) +} + +/// Iterate key-value entries in the map where the key matches a prefix. +/// +/// - Query is raw +/// - Result is raw +#[implement(super::Map)] +#[tracing::instrument(skip(self), fields(%self), level = "trace")] +pub fn raw_stream_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, +{ + self.raw_stream_from(prefix) + .try_take_while(|(k, _): &KeyVal<'_>| future::ok(k.starts_with(prefix.as_ref()))) +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 6446624c..e66abf68 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,25 +1,35 @@ mod cork; mod database; +mod de; +mod deserialized; mod engine; mod handle; -mod iter; +pub mod keyval; mod map; pub mod maps; mod opts; -mod slice; +mod ser; +mod stream; mod util; mod watchers; +pub(crate) use self::{ + engine::Engine, + util::{or_else, result}, +}; + extern crate conduit_core as conduit; extern crate rust_rocksdb as rocksdb; -pub use database::Database; -pub(crate) use engine::Engine; -pub use handle::Handle; -pub use iter::Iter; -pub use map::Map; -pub use slice::{Key, KeyVal, OwnedKey, OwnedKeyVal, OwnedVal, Val}; -pub(crate) use 
util::{or_else, result}; +pub use self::{ + database::Database, + de::Ignore, + deserialized::Deserialized, + handle::Handle, + keyval::{KeyVal, Slice}, + map::Map, + ser::{Interfix, Separator}, +}; conduit::mod_ctor! {} conduit::mod_dtor! {} diff --git a/src/database/ser.rs b/src/database/ser.rs new file mode 100644 index 00000000..bd4bbd9a --- /dev/null +++ b/src/database/ser.rs @@ -0,0 +1,315 @@ +use std::io::Write; + +use conduit::{err, result::DebugInspect, utils::exchange, Error, Result}; +use serde::{ser, Serialize}; + +#[inline] +pub(crate) fn serialize_to_vec(val: &T) -> Result> +where + T: Serialize + ?Sized, +{ + let mut buf = Vec::with_capacity(64); + serialize(&mut buf, val)?; + + Ok(buf) +} + +#[inline] +pub(crate) fn serialize<'a, W, T>(out: &'a mut W, val: &'a T) -> Result<&'a [u8]> +where + W: Write + AsRef<[u8]>, + T: Serialize + ?Sized, +{ + let mut serializer = Serializer { + out, + depth: 0, + sep: false, + fin: false, + }; + + val.serialize(&mut serializer) + .map_err(|error| err!(SerdeSer("{error}"))) + .debug_inspect(|()| { + debug_assert_eq!(serializer.depth, 0, "Serialization completed at non-zero recursion level"); + })?; + + Ok((*out).as_ref()) +} + +pub(crate) struct Serializer<'a, W: Write> { + out: &'a mut W, + depth: u32, + sep: bool, + fin: bool, +} + +/// Directive to force separator serialization specifically for prefix keying +/// use. This is a quirk of the database schema and prefix iterations. +#[derive(Debug, Serialize)] +pub struct Interfix; + +/// Directive to force separator serialization. Separators are usually +/// serialized automatically. 
+#[derive(Debug, Serialize)] +pub struct Separator; + +impl Serializer<'_, W> { + const SEP: &'static [u8] = b"\xFF"; + + fn sequence_start(&mut self) { + debug_assert!(!self.is_finalized(), "Sequence start with finalization set"); + debug_assert!(!self.sep, "Sequence start with separator set"); + if cfg!(debug_assertions) { + self.depth = self.depth.saturating_add(1); + } + } + + fn sequence_end(&mut self) { + self.sep = false; + if cfg!(debug_assertions) { + self.depth = self.depth.saturating_sub(1); + } + } + + fn record_start(&mut self) -> Result<()> { + debug_assert!(!self.is_finalized(), "Starting a record after serialization finalized"); + exchange(&mut self.sep, true) + .then(|| self.separator()) + .unwrap_or(Ok(())) + } + + fn separator(&mut self) -> Result<()> { + debug_assert!(!self.is_finalized(), "Writing a separator after serialization finalized"); + self.out.write_all(Self::SEP).map_err(Into::into) + } + + fn set_finalized(&mut self) { + debug_assert!(!self.is_finalized(), "Finalization already set"); + if cfg!(debug_assertions) { + self.fin = true; + } + } + + fn is_finalized(&self) -> bool { self.fin } +} + +impl ser::Serializer for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + type SerializeMap = Self; + type SerializeSeq = Self; + type SerializeStruct = Self; + type SerializeStructVariant = Self; + type SerializeTuple = Self; + type SerializeTupleStruct = Self; + type SerializeTupleVariant = Self; + + fn serialize_map(self, _len: Option) -> Result { + unimplemented!("serialize Map not implemented") + } + + fn serialize_seq(self, _len: Option) -> Result { + self.sequence_start(); + self.record_start()?; + Ok(self) + } + + fn serialize_tuple(self, _len: usize) -> Result { + self.sequence_start(); + Ok(self) + } + + fn serialize_tuple_struct(self, _name: &'static str, _len: usize) -> Result { + self.sequence_start(); + Ok(self) + } + + fn serialize_tuple_variant( + self, _name: &'static str, _idx: u32, _var: &'static str, _len: 
usize, + ) -> Result { + self.sequence_start(); + Ok(self) + } + + fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { + self.sequence_start(); + Ok(self) + } + + fn serialize_struct_variant( + self, _name: &'static str, _idx: u32, _var: &'static str, _len: usize, + ) -> Result { + self.sequence_start(); + Ok(self) + } + + fn serialize_newtype_struct(self, _name: &'static str, _value: &T) -> Result { + unimplemented!("serialize New Type Struct not implemented") + } + + fn serialize_newtype_variant( + self, _name: &'static str, _idx: u32, _var: &'static str, _value: &T, + ) -> Result { + unimplemented!("serialize New Type Variant not implemented") + } + + fn serialize_unit_struct(self, name: &'static str) -> Result { + match name { + "Interfix" => { + self.set_finalized(); + }, + "Separator" => { + self.separator()?; + }, + _ => unimplemented!("Unrecognized serialization directive: {name:?}"), + }; + + Ok(()) + } + + fn serialize_unit_variant(self, _name: &'static str, _idx: u32, _var: &'static str) -> Result { + unimplemented!("serialize Unit Variant not implemented") + } + + fn serialize_some(self, val: &T) -> Result { val.serialize(self) } + + fn serialize_none(self) -> Result { Ok(()) } + + fn serialize_char(self, v: char) -> Result { + let mut buf: [u8; 4] = [0; 4]; + self.serialize_str(v.encode_utf8(&mut buf)) + } + + fn serialize_str(self, v: &str) -> Result { self.serialize_bytes(v.as_bytes()) } + + fn serialize_bytes(self, v: &[u8]) -> Result { self.out.write_all(v).map_err(Error::Io) } + + fn serialize_f64(self, _v: f64) -> Result { unimplemented!("serialize f64 not implemented") } + + fn serialize_f32(self, _v: f32) -> Result { unimplemented!("serialize f32 not implemented") } + + fn serialize_i64(self, v: i64) -> Result { self.out.write_all(&v.to_be_bytes()).map_err(Error::Io) } + + fn serialize_i32(self, _v: i32) -> Result { unimplemented!("serialize i32 not implemented") } + + fn serialize_i16(self, _v: i16) -> Result { 
unimplemented!("serialize i16 not implemented") } + + fn serialize_i8(self, _v: i8) -> Result { unimplemented!("serialize i8 not implemented") } + + fn serialize_u64(self, v: u64) -> Result { self.out.write_all(&v.to_be_bytes()).map_err(Error::Io) } + + fn serialize_u32(self, _v: u32) -> Result { unimplemented!("serialize u32 not implemented") } + + fn serialize_u16(self, _v: u16) -> Result { unimplemented!("serialize u16 not implemented") } + + fn serialize_u8(self, v: u8) -> Result { self.out.write_all(&[v]).map_err(Error::Io) } + + fn serialize_bool(self, _v: bool) -> Result { unimplemented!("serialize bool not implemented") } + + fn serialize_unit(self) -> Result { unimplemented!("serialize unit not implemented") } +} + +impl ser::SerializeMap for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_key(&mut self, _key: &T) -> Result { + unimplemented!("serialize Map Key not implemented") + } + + fn serialize_value(&mut self, _val: &T) -> Result { + unimplemented!("serialize Map Val not implemented") + } + + fn end(self) -> Result { + self.sequence_end(); + Ok(()) + } +} + +impl ser::SerializeSeq for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_element(&mut self, val: &T) -> Result { val.serialize(&mut **self) } + + fn end(self) -> Result { + self.sequence_end(); + Ok(()) + } +} + +impl ser::SerializeStruct for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_field(&mut self, _key: &'static str, val: &T) -> Result { + self.record_start()?; + val.serialize(&mut **self) + } + + fn end(self) -> Result { + self.sequence_end(); + Ok(()) + } +} + +impl ser::SerializeStructVariant for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_field(&mut self, _key: &'static str, val: &T) -> Result { + self.record_start()?; + val.serialize(&mut **self) + } + + fn end(self) -> Result { + self.sequence_end(); + Ok(()) + } +} + +impl ser::SerializeTuple for 
&mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_element(&mut self, val: &T) -> Result { + self.record_start()?; + val.serialize(&mut **self) + } + + fn end(self) -> Result { + self.sequence_end(); + Ok(()) + } +} + +impl ser::SerializeTupleStruct for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_field(&mut self, val: &T) -> Result { + self.record_start()?; + val.serialize(&mut **self) + } + + fn end(self) -> Result { + self.sequence_end(); + Ok(()) + } +} + +impl ser::SerializeTupleVariant for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_field(&mut self, val: &T) -> Result { + self.record_start()?; + val.serialize(&mut **self) + } + + fn end(self) -> Result { + self.sequence_end(); + Ok(()) + } +} diff --git a/src/database/slice.rs b/src/database/slice.rs deleted file mode 100644 index 448d969d..00000000 --- a/src/database/slice.rs +++ /dev/null @@ -1,57 +0,0 @@ -pub struct OwnedKeyVal(pub OwnedKey, pub OwnedVal); -pub(crate) type OwnedKeyValPair = (OwnedKey, OwnedVal); -pub type OwnedVal = Vec; -pub type OwnedKey = Vec; - -pub struct KeyVal<'item>(pub &'item Key, pub &'item Val); -pub(crate) type KeyValPair<'item> = (&'item Key, &'item Val); -pub type Val = [Byte]; -pub type Key = [Byte]; - -pub(crate) type Byte = u8; - -impl OwnedKeyVal { - #[must_use] - pub fn as_slice(&self) -> KeyVal<'_> { KeyVal(&self.0, &self.1) } - - #[must_use] - pub fn to_tuple(self) -> OwnedKeyValPair { (self.0, self.1) } -} - -impl From for OwnedKeyVal { - fn from((key, val): OwnedKeyValPair) -> Self { Self(key, val) } -} - -impl From<&KeyVal<'_>> for OwnedKeyVal { - #[inline] - fn from(slice: &KeyVal<'_>) -> Self { slice.to_owned() } -} - -impl From> for OwnedKeyVal { - fn from((key, val): KeyValPair<'_>) -> Self { Self(Vec::from(key), Vec::from(val)) } -} - -impl From for OwnedKeyValPair { - fn from(val: OwnedKeyVal) -> Self { val.to_tuple() } -} - -impl KeyVal<'_> { - #[inline] - 
#[must_use] - pub fn to_owned(&self) -> OwnedKeyVal { OwnedKeyVal::from(self) } - - #[must_use] - pub fn as_tuple(&self) -> KeyValPair<'_> { (self.0, self.1) } -} - -impl<'a> From<&'a OwnedKeyVal> for KeyVal<'a> { - fn from(owned: &'a OwnedKeyVal) -> Self { owned.as_slice() } -} - -impl<'a> From<&'a OwnedKeyValPair> for KeyVal<'a> { - fn from((key, val): &'a OwnedKeyValPair) -> Self { KeyVal(key.as_slice(), val.as_slice()) } -} - -impl<'a> From> for KeyVal<'a> { - fn from((key, val): KeyValPair<'a>) -> Self { KeyVal(key, val) } -} diff --git a/src/database/stream.rs b/src/database/stream.rs new file mode 100644 index 00000000..d9b74215 --- /dev/null +++ b/src/database/stream.rs @@ -0,0 +1,122 @@ +mod items; +mod items_rev; +mod keys; +mod keys_rev; + +use std::sync::Arc; + +use conduit::{utils::exchange, Error, Result}; +use rocksdb::{ColumnFamily, DBRawIteratorWithThreadMode, ReadOptions}; + +pub(crate) use self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; +use crate::{ + engine::Db, + keyval::{Key, KeyVal, Val}, + util::map_err, + Engine, Slice, +}; + +struct State<'a> { + inner: Inner<'a>, + seek: bool, + init: bool, +} + +trait Cursor<'a, T> { + fn state(&self) -> &State<'a>; + + fn fetch(&self) -> Option; + + fn seek(&mut self); + + fn get(&self) -> Option> { + self.fetch() + .map(Ok) + .or_else(|| self.state().status().map(Err)) + } + + fn seek_and_get(&mut self) -> Option> { + self.seek(); + self.get() + } +} + +type Inner<'a> = DBRawIteratorWithThreadMode<'a, Db>; +type From<'a> = Option>; + +impl<'a> State<'a> { + fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { + Self { + inner: db.db.raw_iterator_cf_opt(&**cf, opts), + init: true, + seek: false, + } + } + + fn init_fwd(mut self, from: From<'_>) -> Self { + if let Some(key) = from { + self.inner.seek(key); + self.seek = true; + } + + self + } + + fn init_rev(mut self, from: From<'_>) -> Self { + if let Some(key) = from { + self.inner.seek_for_prev(key); + self.seek = 
true; + } + + self + } + + fn seek_fwd(&mut self) { + if !exchange(&mut self.init, false) { + self.inner.next(); + } else if !self.seek { + self.inner.seek_to_first(); + } + } + + fn seek_rev(&mut self) { + if !exchange(&mut self.init, false) { + self.inner.prev(); + } else if !self.seek { + self.inner.seek_to_last(); + } + } + + fn fetch_key(&self) -> Option> { self.inner.key().map(Key::from) } + + fn _fetch_val(&self) -> Option> { self.inner.value().map(Val::from) } + + fn fetch(&self) -> Option> { self.inner.item().map(KeyVal::from) } + + fn status(&self) -> Option { self.inner.status().map_err(map_err).err() } + + fn valid(&self) -> bool { self.inner.valid() } +} + +fn keyval_longevity<'a, 'b: 'a>(item: KeyVal<'a>) -> KeyVal<'b> { + (slice_longevity::<'a, 'b>(item.0), slice_longevity::<'a, 'b>(item.1)) +} + +fn slice_longevity<'a, 'b: 'a>(item: &'a Slice) -> &'b Slice { + // SAFETY: The lifetime of the data returned by the rocksdb cursor is only valid + // between each movement of the cursor. It is hereby unsafely extended to match + // the lifetime of the cursor itself. This is due to the limitation of the + // Stream trait where the Item is incapable of conveying a lifetime; this is due + // to GAT's being unstable during its development. This unsafety can be removed + // as soon as this limitation is addressed by an upcoming version. + // + // We have done our best to mitigate the implications of this in conjunction + // with the deserialization API such that borrows being held across movements of + // the cursor do not happen accidentally. The compiler will still error when + // values herein produced try to leave a closure passed to a StreamExt API. But + // escapes can happen if you explicitly and intentionally attempt it, and there + // will be no compiler error or warning. This is primarily the case with + // calling collect() without a preceding map(ToOwned::to_owned). 
A collection + // of references here is illegal, but this will not be enforced by the compiler. + unsafe { std::mem::transmute(item) } +} diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs new file mode 100644 index 00000000..31d5e9e8 --- /dev/null +++ b/src/database/stream/items.rs @@ -0,0 +1,44 @@ +use std::{pin::Pin, sync::Arc}; + +use conduit::Result; +use futures::{ + stream::FusedStream, + task::{Context, Poll}, + Stream, +}; +use rocksdb::{ColumnFamily, ReadOptions}; + +use super::{keyval_longevity, Cursor, From, State}; +use crate::{keyval::KeyVal, Engine}; + +pub(crate) struct Items<'a> { + state: State<'a>, +} + +impl<'a> Items<'a> { + pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions, from: From<'_>) -> Self { + Self { + state: State::new(db, cf, opts).init_fwd(from), + } + } +} + +impl<'a> Cursor<'a, KeyVal<'a>> for Items<'a> { + fn state(&self) -> &State<'a> { &self.state } + + fn fetch(&self) -> Option> { self.state.fetch().map(keyval_longevity) } + + fn seek(&mut self) { self.state.seek_fwd(); } +} + +impl<'a> Stream for Items<'a> { + type Item = Result>; + + fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll> { + Poll::Ready(self.seek_and_get()) + } +} + +impl FusedStream for Items<'_> { + fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } +} diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs new file mode 100644 index 00000000..ab57a250 --- /dev/null +++ b/src/database/stream/items_rev.rs @@ -0,0 +1,44 @@ +use std::{pin::Pin, sync::Arc}; + +use conduit::Result; +use futures::{ + stream::FusedStream, + task::{Context, Poll}, + Stream, +}; +use rocksdb::{ColumnFamily, ReadOptions}; + +use super::{keyval_longevity, Cursor, From, State}; +use crate::{keyval::KeyVal, Engine}; + +pub(crate) struct ItemsRev<'a> { + state: State<'a>, +} + +impl<'a> ItemsRev<'a> { + pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions, from: From<'_>) -> 
Self { + Self { + state: State::new(db, cf, opts).init_rev(from), + } + } +} + +impl<'a> Cursor<'a, KeyVal<'a>> for ItemsRev<'a> { + fn state(&self) -> &State<'a> { &self.state } + + fn fetch(&self) -> Option> { self.state.fetch().map(keyval_longevity) } + + fn seek(&mut self) { self.state.seek_rev(); } +} + +impl<'a> Stream for ItemsRev<'a> { + type Item = Result>; + + fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll> { + Poll::Ready(self.seek_and_get()) + } +} + +impl FusedStream for ItemsRev<'_> { + fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } +} diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs new file mode 100644 index 00000000..1c5d12e3 --- /dev/null +++ b/src/database/stream/keys.rs @@ -0,0 +1,44 @@ +use std::{pin::Pin, sync::Arc}; + +use conduit::Result; +use futures::{ + stream::FusedStream, + task::{Context, Poll}, + Stream, +}; +use rocksdb::{ColumnFamily, ReadOptions}; + +use super::{slice_longevity, Cursor, From, State}; +use crate::{keyval::Key, Engine}; + +pub(crate) struct Keys<'a> { + state: State<'a>, +} + +impl<'a> Keys<'a> { + pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions, from: From<'_>) -> Self { + Self { + state: State::new(db, cf, opts).init_fwd(from), + } + } +} + +impl<'a> Cursor<'a, Key<'a>> for Keys<'a> { + fn state(&self) -> &State<'a> { &self.state } + + fn fetch(&self) -> Option> { self.state.fetch_key().map(slice_longevity) } + + fn seek(&mut self) { self.state.seek_fwd(); } +} + +impl<'a> Stream for Keys<'a> { + type Item = Result>; + + fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll> { + Poll::Ready(self.seek_and_get()) + } +} + +impl FusedStream for Keys<'_> { + fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } +} diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs new file mode 100644 index 00000000..26707483 --- /dev/null +++ b/src/database/stream/keys_rev.rs @@ 
-0,0 +1,44 @@ +use std::{pin::Pin, sync::Arc}; + +use conduit::Result; +use futures::{ + stream::FusedStream, + task::{Context, Poll}, + Stream, +}; +use rocksdb::{ColumnFamily, ReadOptions}; + +use super::{slice_longevity, Cursor, From, State}; +use crate::{keyval::Key, Engine}; + +pub(crate) struct KeysRev<'a> { + state: State<'a>, +} + +impl<'a> KeysRev<'a> { + pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions, from: From<'_>) -> Self { + Self { + state: State::new(db, cf, opts).init_rev(from), + } + } +} + +impl<'a> Cursor<'a, Key<'a>> for KeysRev<'a> { + fn state(&self) -> &State<'a> { &self.state } + + fn fetch(&self) -> Option> { self.state.fetch_key().map(slice_longevity) } + + fn seek(&mut self) { self.state.seek_rev(); } +} + +impl<'a> Stream for KeysRev<'a> { + type Item = Result>; + + fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll> { + Poll::Ready(self.seek_and_get()) + } +} + +impl FusedStream for KeysRev<'_> { + fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } +} diff --git a/src/database/util.rs b/src/database/util.rs index f0ccbcbe..d36e183f 100644 --- a/src/database/util.rs +++ b/src/database/util.rs @@ -1,4 +1,16 @@ use conduit::{err, Result}; +use rocksdb::{Direction, IteratorMode}; + +#[inline] +pub(crate) fn _into_direction(mode: &IteratorMode<'_>) -> Direction { + use Direction::{Forward, Reverse}; + use IteratorMode::{End, From, Start}; + + match mode { + Start | From(_, Forward) => Forward, + End | From(_, Reverse) => Reverse, + } +} #[inline] pub(crate) fn result(r: std::result::Result) -> Result { diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index cfed5a0e..737a7039 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -46,7 +46,7 @@ bytes.workspace = true conduit-core.workspace = true conduit-database.workspace = true const-str.workspace = true -futures-util.workspace = true +futures.workspace = true hickory-resolver.workspace = true 
http.workspace = true image.workspace = true diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs deleted file mode 100644 index 53a0e953..00000000 --- a/src/service/account_data/data.rs +++ /dev/null @@ -1,152 +0,0 @@ -use std::{collections::HashMap, sync::Arc}; - -use conduit::{Error, Result}; -use database::Map; -use ruma::{ - api::client::error::ErrorKind, - events::{AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, RoomAccountDataEventType}, - serde::Raw, - RoomId, UserId, -}; - -use crate::{globals, Dep}; - -pub(super) struct Data { - roomuserdataid_accountdata: Arc, - roomusertype_roomuserdataid: Arc, - services: Services, -} - -struct Services { - globals: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - roomuserdataid_accountdata: db["roomuserdataid_accountdata"].clone(), - roomusertype_roomuserdataid: db["roomusertype_roomuserdataid"].clone(), - services: Services { - globals: args.depend::("globals"), - }, - } - } - - /// Places one event in the account data of the user and removes the - /// previous entry. 
- pub(super) fn update( - &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: &RoomAccountDataEventType, - data: &serde_json::Value, - ) -> Result<()> { - let mut prefix = room_id - .map(ToString::to_string) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xFF); - - let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes()); - roomuserdataid.push(0xFF); - roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); - - let mut key = prefix; - key.extend_from_slice(event_type.to_string().as_bytes()); - - if data.get("type").is_none() || data.get("content").is_none() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Account data doesn't have all required fields.", - )); - } - - self.roomuserdataid_accountdata.insert( - &roomuserdataid, - &serde_json::to_vec(&data).expect("to_vec always works on json values"), - )?; - - let prev = self.roomusertype_roomuserdataid.get(&key)?; - - self.roomusertype_roomuserdataid - .insert(&key, &roomuserdataid)?; - - // Remove old entry - if let Some(prev) = prev { - self.roomuserdataid_accountdata.remove(&prev)?; - } - - Ok(()) - } - - /// Searches the account data for a specific kind. - pub(super) fn get( - &self, room_id: Option<&RoomId>, user_id: &UserId, kind: &RoomAccountDataEventType, - ) -> Result>> { - let mut key = room_id - .map(ToString::to_string) - .unwrap_or_default() - .as_bytes() - .to_vec(); - key.push(0xFF); - key.extend_from_slice(user_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(kind.to_string().as_bytes()); - - self.roomusertype_roomuserdataid - .get(&key)? - .and_then(|roomuserdataid| { - self.roomuserdataid_accountdata - .get(&roomuserdataid) - .transpose() - }) - .transpose()? 
- .map(|data| serde_json::from_slice(&data).map_err(|_| Error::bad_database("could not deserialize"))) - .transpose() - } - - /// Returns all changes to the account data that happened after `since`. - pub(super) fn changes_since( - &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result> { - let mut userdata = HashMap::new(); - - let mut prefix = room_id - .map(ToString::to_string) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xFF); - - // Skip the data that's exactly at since, because we sent that last time - let mut first_possible = prefix.clone(); - first_possible.extend_from_slice(&(since.saturating_add(1)).to_be_bytes()); - - for r in self - .roomuserdataid_accountdata - .iter_from(&first_possible, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(k, v)| { - Ok::<_, Error>(( - k, - match room_id { - None => serde_json::from_slice::>(&v) - .map(AnyRawAccountDataEvent::Global) - .map_err(|_| Error::bad_database("Database contains invalid account data."))?, - Some(_) => serde_json::from_slice::>(&v) - .map(AnyRawAccountDataEvent::Room) - .map_err(|_| Error::bad_database("Database contains invalid account data."))?, - }, - )) - }) { - let (kind, data) = r?; - userdata.insert(kind, data); - } - - Ok(userdata.into_values().collect()) - } -} diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index eaa53641..b4eb143d 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,52 +1,158 @@ -mod data; +use std::{collections::HashMap, sync::Arc}; -use std::sync::Arc; - -use conduit::Result; -use data::Data; +use conduit::{ + implement, + utils::{stream::TryIgnore, ReadyExt}, + Err, Error, Result, +}; +use database::{Deserialized, Map}; +use futures::{StreamExt, TryFutureExt}; use ruma::{ - events::{AnyRawAccountDataEvent, RoomAccountDataEventType}, + events::{AnyGlobalAccountDataEvent, 
AnyRawAccountDataEvent, AnyRoomAccountDataEvent, RoomAccountDataEventType}, + serde::Raw, RoomId, UserId, }; +use serde_json::value::RawValue; + +use crate::{globals, Dep}; pub struct Service { + services: Services, db: Data, } +struct Data { + roomuserdataid_accountdata: Arc, + roomusertype_roomuserdataid: Arc, +} + +struct Services { + globals: Dep, +} + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(&args), + services: Services { + globals: args.depend::("globals"), + }, + db: Data { + roomuserdataid_accountdata: args.db["roomuserdataid_accountdata"].clone(), + roomusertype_roomuserdataid: args.db["roomusertype_roomuserdataid"].clone(), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Places one event in the account data of the user and removes the - /// previous entry. - #[allow(clippy::needless_pass_by_value)] - pub fn update( - &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - data: &serde_json::Value, - ) -> Result<()> { - self.db.update(room_id, user_id, &event_type, data) +/// Places one event in the account data of the user and removes the +/// previous entry. 
+#[allow(clippy::needless_pass_by_value)] +#[implement(Service)] +pub async fn update( + &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, data: &serde_json::Value, +) -> Result<()> { + let event_type = event_type.to_string(); + let count = self.services.globals.next_count()?; + + let mut prefix = room_id + .map(ToString::to_string) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(user_id.as_bytes()); + prefix.push(0xFF); + + let mut roomuserdataid = prefix.clone(); + roomuserdataid.extend_from_slice(&count.to_be_bytes()); + roomuserdataid.push(0xFF); + roomuserdataid.extend_from_slice(event_type.as_bytes()); + + let mut key = prefix; + key.extend_from_slice(event_type.as_bytes()); + + if data.get("type").is_none() || data.get("content").is_none() { + return Err!(Request(InvalidParam("Account data doesn't have all required fields."))); } - /// Searches the account data for a specific kind. - #[allow(clippy::needless_pass_by_value)] - pub fn get( - &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - ) -> Result>> { - self.db.get(room_id, user_id, &event_type) + self.db.roomuserdataid_accountdata.insert( + &roomuserdataid, + &serde_json::to_vec(&data).expect("to_vec always works on json values"), + ); + + let prev_key = (room_id, user_id, &event_type); + let prev = self.db.roomusertype_roomuserdataid.qry(&prev_key).await; + + self.db + .roomusertype_roomuserdataid + .insert(&key, &roomuserdataid); + + // Remove old entry + if let Ok(prev) = prev { + self.db.roomuserdataid_accountdata.remove(&prev); } - /// Returns all changes to the account data that happened after `since`. 
- #[tracing::instrument(skip_all, name = "since", level = "debug")] - pub fn changes_since( - &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result> { - self.db.changes_since(room_id, user_id, since) - } + Ok(()) +} + +/// Searches the account data for a specific kind. +#[implement(Service)] +pub async fn get( + &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, +) -> Result> { + let key = (room_id, user_id, kind.to_string()); + self.db + .roomusertype_roomuserdataid + .qry(&key) + .and_then(|roomuserdataid| self.db.roomuserdataid_accountdata.qry(&roomuserdataid)) + .await + .deserialized_json() +} + +/// Returns all changes to the account data that happened after `since`. +#[implement(Service)] +pub async fn changes_since( + &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, +) -> Result> { + let mut userdata = HashMap::new(); + + let mut prefix = room_id + .map(ToString::to_string) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(user_id.as_bytes()); + prefix.push(0xFF); + + // Skip the data that's exactly at since, because we sent that last time + let mut first_possible = prefix.clone(); + first_possible.extend_from_slice(&(since.saturating_add(1)).to_be_bytes()); + + self.db + .roomuserdataid_accountdata + .raw_stream_from(&first_possible) + .ignore_err() + .ready_take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(k, v)| { + let v = match room_id { + None => serde_json::from_slice::>(v) + .map(AnyRawAccountDataEvent::Global) + .map_err(|_| Error::bad_database("Database contains invalid account data."))?, + Some(_) => serde_json::from_slice::>(v) + .map(AnyRawAccountDataEvent::Room) + .map_err(|_| Error::bad_database("Database contains invalid account data."))?, + }; + + Ok((k.to_owned(), v)) + }) + .ignore_err() + .ready_for_each(|(kind, data)| { + userdata.insert(kind, data); + }) + .await; + + Ok(userdata.into_values().collect()) } diff 
--git a/src/service/admin/console.rs b/src/service/admin/console.rs index 55bae365..0f5016e1 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -5,7 +5,7 @@ use std::{ }; use conduit::{debug, defer, error, log, Server}; -use futures_util::future::{AbortHandle, Abortable}; +use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; use termimad::MadSkin; diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 4e2b831c..7b090aa0 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -30,7 +30,7 @@ use crate::Services; pub async fn create_admin_room(services: &Services) -> Result<()> { let room_id = RoomId::new(services.globals.server_name()); - let _short_id = services.rooms.short.get_or_create_shortroomid(&room_id)?; + let _short_id = services.rooms.short.get_or_create_shortroomid(&room_id); let state_lock = services.rooms.state.mutex.lock(&room_id).await; diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index b4589ebc..4b3ebb88 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -17,108 +17,108 @@ use serde_json::value::to_raw_value; use crate::pdu::PduBuilder; -impl super::Service { - /// Invite the user to the conduit admin room. - /// - /// In conduit, this is equivalent to granting admin privileges. - pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { - let Some(room_id) = self.get_admin_room()? else { - return Ok(()); - }; +/// Invite the user to the conduit admin room. +/// +/// In conduit, this is equivalent to granting admin privileges. 
+#[implement(super::Service)] +pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { + let Ok(room_id) = self.get_admin_room().await else { + return Ok(()); + }; - let state_lock = self.services.state.mutex.lock(&room_id).await; + let state_lock = self.services.state.mutex.lock(&room_id).await; - // Use the server user to grant the new admin's power level - let server_user = &self.services.globals.server_user; + // Use the server user to grant the new admin's power level + let server_user = &self.services.globals.server_user; - // Invite and join the real user - self.services - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }, - server_user, - &room_id, - &state_lock, - ) - .await?; - self.services - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }, - user_id, - &room_id, - &state_lock, - ) - .await?; + // Invite and join the real user + self.services + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + 
avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + timestamp: None, + }, + server_user, + &room_id, + &state_lock, + ) + .await?; + self.services + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + timestamp: None, + }, + user_id, + &room_id, + &state_lock, + ) + .await?; - // Set power level - let users = BTreeMap::from_iter([(server_user.clone(), 100.into()), (user_id.to_owned(), 100.into())]); + // Set power level + let users = BTreeMap::from_iter([(server_user.clone(), 100.into()), (user_id.to_owned(), 100.into())]); - self.services - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, - server_user, - &room_id, - &state_lock, - ) - .await?; + self.services + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + timestamp: None, + }, + server_user, + &room_id, + &state_lock, + ) + .await?; - // 
Set room tag - let room_tag = &self.services.server.config.admin_room_tag; - if !room_tag.is_empty() { - if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag) { - error!(?room_id, ?user_id, ?room_tag, ?e, "Failed to set tag for admin grant"); - } + // Set room tag + let room_tag = &self.services.server.config.admin_room_tag; + if !room_tag.is_empty() { + if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag).await { + error!(?room_id, ?user_id, ?room_tag, ?e, "Failed to set tag for admin grant"); } + } - // Send welcome message - self.services.timeline.build_and_append_pdu( + // Send welcome message + self.services.timeline.build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_markdown( @@ -135,19 +135,18 @@ impl super::Service { &state_lock, ).await?; - Ok(()) - } + Ok(()) } #[implement(super::Service)] -fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result<()> { +async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result<()> { let mut event = self .services .account_data - .get(Some(room_id), user_id, RoomAccountDataEventType::Tag)? 
- .map(|event| serde_json::from_str(event.get())) - .and_then(Result::ok) - .unwrap_or_else(|| TagEvent { + .get(Some(room_id), user_id, RoomAccountDataEventType::Tag) + .await + .and_then(|event| serde_json::from_str(event.get()).map_err(Into::into)) + .unwrap_or_else(|_| TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, @@ -158,12 +157,15 @@ fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result< .tags .insert(tag.to_owned().into(), TagInfo::new()); - self.services.account_data.update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &serde_json::to_value(event)?, - )?; + self.services + .account_data + .update( + Some(room_id), + user_id, + RoomAccountDataEventType::Tag, + &serde_json::to_value(event)?, + ) + .await?; Ok(()) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 3274249e..12eacc8f 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -12,6 +12,7 @@ use std::{ use async_trait::async_trait; use conduit::{debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server}; pub use create::create_admin_room; +use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ events::{ @@ -142,17 +143,18 @@ impl Service { /// admin room as the admin user. pub async fn send_text(&self, body: &str) { self.send_message(RoomMessageEventContent::text_markdown(body)) - .await; + .await + .ok(); } /// Sends a message to the admin room as the admin user (see send_text() for /// convenience). 
- pub async fn send_message(&self, message_content: RoomMessageEventContent) { - if let Ok(Some(room_id)) = self.get_admin_room() { - let user_id = &self.services.globals.server_user; - self.respond_to_room(message_content, &room_id, user_id) - .await; - } + pub async fn send_message(&self, message_content: RoomMessageEventContent) -> Result<()> { + let user_id = &self.services.globals.server_user; + let room_id = self.get_admin_room().await?; + self.respond_to_room(message_content, &room_id, user_id) + .boxed() + .await } /// Posts a command to the command processor queue and returns. Processing @@ -193,8 +195,12 @@ impl Service { async fn handle_command(&self, command: CommandInput) { match self.process_command(command).await { - Ok(Some(output)) | Err(output) => self.handle_response(output).await, Ok(None) => debug!("Command successful with no response"), + Ok(Some(output)) | Err(output) => self + .handle_response(output) + .boxed() + .await + .unwrap_or_else(default_log), } } @@ -218,71 +224,67 @@ impl Service { } /// Checks whether a given user is an admin of this server - pub async fn user_is_admin(&self, user_id: &UserId) -> Result { - if let Ok(Some(admin_room)) = self.get_admin_room() { - self.services.state_cache.is_joined(user_id, &admin_room) - } else { - Ok(false) - } + pub async fn user_is_admin(&self, user_id: &UserId) -> bool { + let Ok(admin_room) = self.get_admin_room().await else { + return false; + }; + + self.services + .state_cache + .is_joined(user_id, &admin_room) + .await } /// Gets the room ID of the admin room /// /// Errors are propagated from the database, and will have None if there is /// no admin room - pub fn get_admin_room(&self) -> Result> { - if let Some(room_id) = self + pub async fn get_admin_room(&self) -> Result { + let room_id = self .services .alias - .resolve_local_alias(&self.services.globals.admin_alias)? - { - if self - .services - .state_cache - .is_joined(&self.services.globals.server_user, &room_id)? 
- { - return Ok(Some(room_id)); - } - } + .resolve_local_alias(&self.services.globals.admin_alias) + .await?; - Ok(None) + self.services + .state_cache + .is_joined(&self.services.globals.server_user, &room_id) + .await + .then_some(room_id) + .ok_or_else(|| err!(Request(NotFound("Admin user not joined to admin room")))) } - async fn handle_response(&self, content: RoomMessageEventContent) { + async fn handle_response(&self, content: RoomMessageEventContent) -> Result<()> { let Some(Relation::Reply { in_reply_to, }) = content.relates_to.as_ref() else { - return; + return Ok(()); }; - let Ok(Some(pdu)) = self.services.timeline.get_pdu(&in_reply_to.event_id) else { + let Ok(pdu) = self.services.timeline.get_pdu(&in_reply_to.event_id).await else { error!( event_id = ?in_reply_to.event_id, "Missing admin command in_reply_to event" ); - return; + return Ok(()); }; - let response_sender = if self.is_admin_room(&pdu.room_id) { + let response_sender = if self.is_admin_room(&pdu.room_id).await { &self.services.globals.server_user } else { &pdu.sender }; self.respond_to_room(content, &pdu.room_id, response_sender) - .await; + .await } - async fn respond_to_room(&self, content: RoomMessageEventContent, room_id: &RoomId, user_id: &UserId) { - assert!( - self.user_is_admin(user_id) - .await - .expect("checked user is admin"), - "sender is not admin" - ); + async fn respond_to_room( + &self, content: RoomMessageEventContent, room_id: &RoomId, user_id: &UserId, + ) -> Result<()> { + assert!(self.user_is_admin(user_id).await, "sender is not admin"); - let state_lock = self.services.state.mutex.lock(room_id).await; let response_pdu = PduBuilder { event_type: TimelineEventType::RoomMessage, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -292,6 +294,7 @@ impl Service { timestamp: None, }; + let state_lock = self.services.state.mutex.lock(room_id).await; if let Err(e) = self .services .timeline @@ -302,6 +305,8 @@ impl Service { .await 
.unwrap_or_else(default_log); } + + Ok(()) } async fn handle_response_error( @@ -355,12 +360,12 @@ impl Service { } // Prevent unescaped !admin from being used outside of the admin room - if is_public_prefix && !self.is_admin_room(&pdu.room_id) { + if is_public_prefix && !self.is_admin_room(&pdu.room_id).await { return false; } // Only senders who are admin can proceed - if !self.user_is_admin(&pdu.sender).await.unwrap_or(false) { + if !self.user_is_admin(&pdu.sender).await { return false; } @@ -368,7 +373,7 @@ impl Service { // the administrator can execute commands as conduit let emergency_password_set = self.services.globals.emergency_password().is_some(); let from_server = pdu.sender == *server_user && !emergency_password_set; - if from_server && self.is_admin_room(&pdu.room_id) { + if from_server && self.is_admin_room(&pdu.room_id).await { return false; } @@ -377,12 +382,11 @@ impl Service { } #[must_use] - pub fn is_admin_room(&self, room_id: &RoomId) -> bool { - if let Ok(Some(admin_room_id)) = self.get_admin_room() { - admin_room_id == room_id - } else { - false - } + pub async fn is_admin_room(&self, room_id_: &RoomId) -> bool { + self.get_admin_room() + .map_ok(|room_id| room_id == room_id_) + .await + .unwrap_or(false) } /// Sets the self-reference to crate::Services which will provide context to diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index 40e641a1..d5fa5476 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,7 +1,8 @@ use std::sync::Arc; -use conduit::{utils, Error, Result}; -use database::{Database, Map}; +use conduit::{err, utils::stream::TryIgnore, Result}; +use database::{Database, Deserialized, Map}; +use futures::Stream; use ruma::api::appservice::Registration; pub struct Data { @@ -19,7 +20,7 @@ impl Data { pub(super) fn register_appservice(&self, yaml: &Registration) -> Result { let id = yaml.id.as_str(); self.id_appserviceregistrations - .insert(id.as_bytes(), 
serde_yaml::to_string(&yaml).unwrap().as_bytes())?; + .insert(id.as_bytes(), serde_yaml::to_string(&yaml).unwrap().as_bytes()); Ok(id.to_owned()) } @@ -31,24 +32,19 @@ impl Data { /// * `service_name` - the name you send to register the service previously pub(super) fn unregister_appservice(&self, service_name: &str) -> Result<()> { self.id_appserviceregistrations - .remove(service_name.as_bytes())?; + .remove(service_name.as_bytes()); Ok(()) } - pub fn get_registration(&self, id: &str) -> Result> { + pub async fn get_registration(&self, id: &str) -> Result { self.id_appserviceregistrations - .get(id.as_bytes())? - .map(|bytes| { - serde_yaml::from_slice(&bytes) - .map_err(|_| Error::bad_database("Invalid registration bytes in id_appserviceregistrations.")) - }) - .transpose() + .qry(id) + .await + .deserialized_json() + .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) } - pub(super) fn iter_ids<'a>(&'a self) -> Result> + 'a>> { - Ok(Box::new(self.id_appserviceregistrations.iter().map(|(id, _)| { - utils::string_from_bytes(&id) - .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) - }))) + pub(super) fn iter_ids(&self) -> impl Stream + Send + '_ { + self.id_appserviceregistrations.keys().ignore_err() } } diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index c0752d56..7e2dc738 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -2,9 +2,10 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; +use async_trait::async_trait; use conduit::{err, Result}; use data::Data; -use futures_util::Future; +use futures::{Future, StreamExt, TryStreamExt}; use regex::RegexSet; use ruma::{ api::appservice::{Namespace, Registration}, @@ -126,13 +127,22 @@ struct Services { sending: Dep, } +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { - let mut registration_info = BTreeMap::new(); - let db = Data::new(args.db); + 
Ok(Arc::new(Self { + db: Data::new(args.db), + services: Services { + sending: args.depend::("sending"), + }, + registration_info: RwLock::new(BTreeMap::new()), + })) + } + + async fn worker(self: Arc) -> Result<()> { // Inserting registrations into cache - for appservice in iter_ids(&db)? { - registration_info.insert( + for appservice in iter_ids(&self.db).await? { + self.registration_info.write().await.insert( appservice.0, appservice .1 @@ -141,13 +151,7 @@ impl crate::Service for Service { ); } - Ok(Arc::new(Self { - db, - services: Services { - sending: args.depend::("sending"), - }, - registration_info: RwLock::new(registration_info), - })) + Ok(()) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } @@ -155,7 +159,7 @@ impl crate::Service for Service { impl Service { #[inline] - pub fn all(&self) -> Result> { iter_ids(&self.db) } + pub async fn all(&self) -> Result> { iter_ids(&self.db).await } /// Registers an appservice and returns the ID to the caller pub async fn register_appservice(&self, yaml: Registration) -> Result { @@ -188,7 +192,8 @@ impl Service { // sending to the URL self.services .sending - .cleanup_events(service_name.to_owned())?; + .cleanup_events(service_name.to_owned()) + .await; Ok(()) } @@ -251,15 +256,9 @@ impl Service { } } -fn iter_ids(db: &Data) -> Result> { - db.iter_ids()? - .filter_map(Result::ok) - .map(move |id| { - Ok(( - id.clone(), - db.get_registration(&id)? 
- .expect("iter_ids only returns appservices that exist"), - )) - }) - .collect() +async fn iter_ids(db: &Data) -> Result> { + db.iter_ids() + .then(|id| async move { Ok((id.clone(), db.get_registration(&id).await?)) }) + .try_collect() + .await } diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 1bb0843d..98020bc2 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -33,6 +33,7 @@ impl crate::Service for Service { async fn worker(self: Arc) -> Result<()> { self.set_emergency_access() + .await .inspect_err(|e| error!("Could not set the configured emergency password for the conduit user: {e}"))?; Ok(()) @@ -44,7 +45,7 @@ impl crate::Service for Service { impl Service { /// Sets the emergency password and push rules for the @conduit account in /// case emergency password is set - fn set_emergency_access(&self) -> Result { + async fn set_emergency_access(&self) -> Result { let conduit_user = &self.services.globals.server_user; self.services @@ -56,17 +57,20 @@ impl Service { None => (Ruleset::new(), false), }; - self.services.account_data.update( - None, - conduit_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(&GlobalAccountDataEvent { - content: PushRulesEventContent { - global: ruleset, - }, - }) - .expect("to json value always works"), - )?; + self.services + .account_data + .update( + None, + conduit_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(&GlobalAccountDataEvent { + content: PushRulesEventContent { + global: ruleset, + }, + }) + .expect("to json value always works"), + ) + .await?; if pwd_set { warn!( @@ -75,7 +79,7 @@ impl Service { ); } else { // logs out any users still in the server service account and removes sessions - self.services.users.deactivate_account(conduit_user)?; + self.services.users.deactivate_account(conduit_user).await?; } Ok(pwd_set) diff --git a/src/service/globals/data.rs 
b/src/service/globals/data.rs index 5b5d9f09..3286e40c 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -4,8 +4,8 @@ use std::{ }; use conduit::{trace, utils, Error, Result, Server}; -use database::{Database, Map}; -use futures_util::{stream::FuturesUnordered, StreamExt}; +use database::{Database, Deserialized, Map}; +use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, @@ -83,7 +83,7 @@ impl Data { .checked_add(1) .expect("counter must not overflow u64"); - self.global.insert(COUNTER, &counter.to_be_bytes())?; + self.global.insert(COUNTER, &counter.to_be_bytes()); Ok(*counter) } @@ -102,7 +102,7 @@ impl Data { fn stored_count(global: &Arc) -> Result { global - .get(COUNTER)? + .get(COUNTER) .as_deref() .map_or(Ok(0_u64), utils::u64_from_bytes) } @@ -133,36 +133,18 @@ impl Data { futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix)); // Events for rooms we are in - for room_id in self - .services - .state_cache - .rooms_joined(user_id) - .filter_map(Result::ok) - { - let short_roomid = self - .services - .short - .get_shortroomid(&room_id) - .ok() - .flatten() - .expect("room exists") - .to_be_bytes() - .to_vec(); + let rooms_joined = self.services.state_cache.rooms_joined(user_id); + + pin_mut!(rooms_joined); + while let Some(room_id) = rooms_joined.next().await { + let Ok(short_roomid) = self.services.short.get_shortroomid(room_id).await else { + continue; + }; let roomid_bytes = room_id.as_bytes().to_vec(); let mut roomid_prefix = roomid_bytes.clone(); roomid_prefix.push(0xFF); - // PDUs - futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); - - // EDUs - futures.push(Box::pin(async move { - let _result = self.services.typing.wait_for_update(&room_id).await; - })); - - futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); - // Key changes 
futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); @@ -174,6 +156,19 @@ impl Data { self.roomusertype_roomuserdataid .watch_prefix(&roomuser_prefix), ); + + // PDUs + let short_roomid = short_roomid.to_be_bytes().to_vec(); + futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); + + // EDUs + let typing_room_id = room_id.to_owned(); + let typing_wait_for_update = async move { + self.services.typing.wait_for_update(&typing_room_id).await; + }; + + futures.push(typing_wait_for_update.boxed()); + futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); } let mut globaluserdata_prefix = vec![0xFF]; @@ -190,12 +185,14 @@ impl Data { // One time keys futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes)); - futures.push(Box::pin(async move { + // Server shutdown + let server_shutdown = async move { while self.services.server.running() { - let _result = self.services.server.signal.subscribe().recv().await; + self.services.server.signal.subscribe().recv().await.ok(); } - })); + }; + futures.push(server_shutdown.boxed()); if !self.services.server.running() { return Ok(()); } @@ -209,10 +206,10 @@ impl Data { } pub fn load_keypair(&self) -> Result { - let keypair_bytes = self.global.get(b"keypair")?.map_or_else( - || { + let keypair_bytes = self.global.get(b"keypair").map_or_else( + |_| { let keypair = utils::generate_keypair(); - self.global.insert(b"keypair", &keypair)?; + self.global.insert(b"keypair", &keypair); Ok::<_, Error>(keypair) }, |val| Ok(val.to_vec()), @@ -241,7 +238,10 @@ impl Data { } #[inline] - pub fn remove_keypair(&self) -> Result<()> { self.global.remove(b"keypair") } + pub fn remove_keypair(&self) -> Result<()> { + self.global.remove(b"keypair"); + Ok(()) + } /// TODO: the key valid until timestamp (`valid_until_ts`) is only honored /// in room version > 4 @@ -250,15 +250,15 @@ impl Data { /// /// This doesn't actually check that the keys provided are newer than the /// old set. 
- pub fn add_signing_key( + pub async fn add_signing_key( &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result> { + ) -> BTreeMap { // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; + let signingkeys = self.server_signingkeys.qry(origin).await; let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).ok()) - .unwrap_or_else(|| { + .and_then(|keys| serde_json::from_slice(&keys).map_err(Into::into)) + .unwrap_or_else(|_| { // Just insert "now", it doesn't matter ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) }); @@ -275,7 +275,7 @@ impl Data { self.server_signingkeys.insert( origin.as_bytes(), &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - )?; + ); let mut tree = keys.verify_keys; tree.extend( @@ -284,45 +284,38 @@ impl Data { .map(|old| (old.0, VerifyKey::new(old.1.key))), ); - Ok(tree) + tree } /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. - pub fn verify_keys_for(&self, origin: &ServerName) -> Result> { - let signingkeys = self - .signing_keys_for(origin)? - .map_or_else(BTreeMap::new, |keys: ServerSigningKeys| { + pub async fn verify_keys_for(&self, origin: &ServerName) -> Result> { + self.signing_keys_for(origin).await.map_or_else( + |_| Ok(BTreeMap::new()), + |keys: ServerSigningKeys| { let mut tree = keys.verify_keys; tree.extend( keys.old_verify_keys .into_iter() .map(|old| (old.0, VerifyKey::new(old.1.key))), ); - tree - }); - - Ok(signingkeys) + Ok(tree) + }, + ) } - pub fn signing_keys_for(&self, origin: &ServerName) -> Result> { - let signingkeys = self - .server_signingkeys - .get(origin.as_bytes())? 
- .and_then(|bytes| serde_json::from_slice(&bytes).ok()); - - Ok(signingkeys) + pub async fn signing_keys_for(&self, origin: &ServerName) -> Result { + self.server_signingkeys + .qry(origin) + .await + .deserialized_json() } - pub fn database_version(&self) -> Result { - self.global.get(b"version")?.map_or(Ok(0), |version| { - utils::u64_from_bytes(&version).map_err(|_| Error::bad_database("Database version id is invalid.")) - }) - } + pub async fn database_version(&self) -> u64 { self.global.qry("version").await.deserialized().unwrap_or(0) } #[inline] pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.global.insert(b"version", &new_version.to_be_bytes())?; + self.global.insert(b"version", &new_version.to_be_bytes()); Ok(()) } diff --git a/src/service/globals/migrations.rs b/src/service/globals/migrations.rs index 66917520..c7a73230 100644 --- a/src/service/globals/migrations.rs +++ b/src/service/globals/migrations.rs @@ -1,17 +1,15 @@ -use std::{ - collections::{HashMap, HashSet}, - fs::{self}, - io::Write, - mem::size_of, - sync::Arc, +use conduit::{ + debug_info, debug_warn, error, info, + result::NotFound, + utils::{stream::TryIgnore, IterStream, ReadyExt}, + warn, Err, Error, Result, }; - -use conduit::{debug, debug_info, debug_warn, error, info, utils, warn, Error, Result}; +use futures::{FutureExt, StreamExt}; use itertools::Itertools; use ruma::{ events::{push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType}, push::Ruleset, - EventId, OwnedRoomId, RoomId, UserId, + UserId, }; use crate::{media, Services}; @@ -33,12 +31,14 @@ pub(crate) const DATABASE_VERSION: u64 = 13; pub(crate) const CONDUIT_DATABASE_VERSION: u64 = 16; pub(crate) async fn migrations(services: &Services) -> Result<()> { + let users_count = services.users.count().await; + // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. - if services.users.count()? 
> 0 { + if users_count > 0 { let conduit_user = &services.globals.server_user; - if !services.users.exists(conduit_user)? { + if !services.users.exists(conduit_user).await { error!("The {} server user does not exist, and the database is not new.", conduit_user); return Err(Error::bad_database( "Cannot reuse an existing database after changing the server name, please delete the old one first.", @@ -46,7 +46,7 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { } } - if services.users.count()? > 0 { + if users_count > 0 { migrate(services).await } else { fresh(services).await @@ -62,9 +62,9 @@ async fn fresh(services: &Services) -> Result<()> { .db .bump_database_version(DATABASE_VERSION)?; - db["global"].insert(b"feat_sha256_media", &[])?; - db["global"].insert(b"fix_bad_double_separator_in_state_cache", &[])?; - db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[])?; + db["global"].insert(b"feat_sha256_media", &[]); + db["global"].insert(b"fix_bad_double_separator_in_state_cache", &[]); + db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[]); // Create the admin room and server user on first run crate::admin::create_admin_room(services).await?; @@ -82,136 +82,109 @@ async fn migrate(services: &Services) -> Result<()> { let db = &services.db; let config = &services.server.config; - if services.globals.db.database_version()? < 1 { - db_lt_1(services).await?; + if services.globals.db.database_version().await < 11 { + return Err!(Database( + "Database schema version {} is no longer supported", + services.globals.db.database_version().await + )); } - if services.globals.db.database_version()? < 2 { - db_lt_2(services).await?; - } - - if services.globals.db.database_version()? < 3 { - db_lt_3(services).await?; - } - - if services.globals.db.database_version()? < 4 { - db_lt_4(services).await?; - } - - if services.globals.db.database_version()? 
< 5 { - db_lt_5(services).await?; - } - - if services.globals.db.database_version()? < 6 { - db_lt_6(services).await?; - } - - if services.globals.db.database_version()? < 7 { - db_lt_7(services).await?; - } - - if services.globals.db.database_version()? < 8 { - db_lt_8(services).await?; - } - - if services.globals.db.database_version()? < 9 { - db_lt_9(services).await?; - } - - if services.globals.db.database_version()? < 10 { - db_lt_10(services).await?; - } - - if services.globals.db.database_version()? < 11 { - db_lt_11(services).await?; - } - - if services.globals.db.database_version()? < 12 { + if services.globals.db.database_version().await < 12 { db_lt_12(services).await?; } // This migration can be reused as-is anytime the server-default rules are // updated. - if services.globals.db.database_version()? < 13 { + if services.globals.db.database_version().await < 13 { db_lt_13(services).await?; } - if db["global"].get(b"feat_sha256_media")?.is_none() { + if db["global"].qry("feat_sha256_media").await.is_not_found() { media::migrations::migrate_sha256_media(services).await?; } else if config.media_startup_check { media::migrations::checkup_sha256_media(services).await?; } if db["global"] - .get(b"fix_bad_double_separator_in_state_cache")? - .is_none() + .qry("fix_bad_double_separator_in_state_cache") + .await + .is_not_found() { fix_bad_double_separator_in_state_cache(services).await?; } if db["global"] - .get(b"retroactively_fix_bad_data_from_roomuserid_joined")? 
- .is_none() + .qry("retroactively_fix_bad_data_from_roomuserid_joined") + .await + .is_not_found() { retroactively_fix_bad_data_from_roomuserid_joined(services).await?; } - let version_match = services.globals.db.database_version().unwrap() == DATABASE_VERSION - || services.globals.db.database_version().unwrap() == CONDUIT_DATABASE_VERSION; + let version_match = services.globals.db.database_version().await == DATABASE_VERSION + || services.globals.db.database_version().await == CONDUIT_DATABASE_VERSION; assert!( version_match, "Failed asserting local database version {} is equal to known latest conduwuit database version {}", - services.globals.db.database_version().unwrap(), + services.globals.db.database_version().await, DATABASE_VERSION, ); { let patterns = services.globals.forbidden_usernames(); if !patterns.is_empty() { - for user_id in services + services .users - .iter() - .filter_map(Result::ok) - .filter(|user| !services.users.is_deactivated(user).unwrap_or(true)) - .filter(|user| user.server_name() == config.server_name) - { - let matches = patterns.matches(user_id.localpart()); - if matches.matched_any() { - warn!( - "User {} matches the following forbidden username patterns: {}", - user_id.to_string(), - matches - .into_iter() - .map(|x| &patterns.patterns()[x]) - .join(", ") - ); - } - } - } - } - - { - let patterns = services.globals.forbidden_alias_names(); - if !patterns.is_empty() { - for address in services.rooms.metadata.iter_ids() { - let room_id = address?; - let room_aliases = services.rooms.alias.local_aliases_for_room(&room_id); - for room_alias_result in room_aliases { - let room_alias = room_alias_result?; - let matches = patterns.matches(room_alias.alias()); + .stream() + .filter(|user_id| services.users.is_active_local(user_id)) + .ready_for_each(|user_id| { + let matches = patterns.matches(user_id.localpart()); if matches.matched_any() { warn!( - "Room with alias {} ({}) matches the following forbidden room name patterns: {}", - 
room_alias, - &room_id, + "User {} matches the following forbidden username patterns: {}", + user_id.to_string(), matches .into_iter() .map(|x| &patterns.patterns()[x]) .join(", ") ); } - } + }) + .await; + } + } + + { + let patterns = services.globals.forbidden_alias_names(); + if !patterns.is_empty() { + for room_id in services + .rooms + .metadata + .iter_ids() + .map(ToOwned::to_owned) + .collect::>() + .await + { + services + .rooms + .alias + .local_aliases_for_room(&room_id) + .ready_for_each(|room_alias| { + let matches = patterns.matches(room_alias.alias()); + if matches.matched_any() { + warn!( + "Room with alias {} ({}) matches the following forbidden room name patterns: {}", + room_alias, + &room_id, + matches + .into_iter() + .map(|x| &patterns.patterns()[x]) + .join(", ") + ); + } + }) + .await; } } } @@ -224,424 +197,17 @@ async fn migrate(services: &Services) -> Result<()> { Ok(()) } -async fn db_lt_1(services: &Services) -> Result<()> { - let db = &services.db; - - let roomserverids = &db["roomserverids"]; - let serverroomids = &db["serverroomids"]; - for (roomserverid, _) in roomserverids.iter() { - let mut parts = roomserverid.split(|&b| b == 0xFF); - let room_id = parts.next().expect("split always returns one element"); - let Some(servername) = parts.next() else { - error!("Migration: Invalid roomserverid in db."); - continue; - }; - let mut serverroomid = servername.to_vec(); - serverroomid.push(0xFF); - serverroomid.extend_from_slice(room_id); - - serverroomids.insert(&serverroomid, &[])?; - } - - services.globals.db.bump_database_version(1)?; - info!("Migration: 0 -> 1 finished"); - Ok(()) -} - -async fn db_lt_2(services: &Services) -> Result<()> { - let db = &services.db; - - // We accidentally inserted hashed versions of "" into the db instead of just "" - let userid_password = &db["roomserverids"]; - for (userid, password) in userid_password.iter() { - let empty_pass = utils::hash::password("").expect("our own password to be properly 
hashed"); - let password = std::str::from_utf8(&password).expect("password is valid utf-8"); - let empty_hashed_password = utils::hash::verify_password(password, &empty_pass).is_ok(); - if empty_hashed_password { - userid_password.insert(&userid, b"")?; - } - } - - services.globals.db.bump_database_version(2)?; - info!("Migration: 1 -> 2 finished"); - Ok(()) -} - -async fn db_lt_3(services: &Services) -> Result<()> { - let db = &services.db; - - // Move media to filesystem - let mediaid_file = &db["mediaid_file"]; - for (key, content) in mediaid_file.iter() { - if content.is_empty() { - continue; - } - - #[allow(deprecated)] - let path = services.media.get_media_file(&key); - let mut file = fs::File::create(path)?; - file.write_all(&content)?; - mediaid_file.insert(&key, &[])?; - } - - services.globals.db.bump_database_version(3)?; - info!("Migration: 2 -> 3 finished"); - Ok(()) -} - -async fn db_lt_4(services: &Services) -> Result<()> { - let config = &services.server.config; - - // Add federated users to services as deactivated - for our_user in services.users.iter() { - let our_user = our_user?; - if services.users.is_deactivated(&our_user)? { - continue; - } - for room in services.rooms.state_cache.rooms_joined(&our_user) { - for user in services.rooms.state_cache.room_members(&room?) 
{ - let user = user?; - if user.server_name() != config.server_name { - info!(?user, "Migration: creating user"); - services.users.create(&user, None)?; - } - } - } - } - - services.globals.db.bump_database_version(4)?; - info!("Migration: 3 -> 4 finished"); - Ok(()) -} - -async fn db_lt_5(services: &Services) -> Result<()> { - let db = &services.db; - - // Upgrade user data store - let roomuserdataid_accountdata = &db["roomuserdataid_accountdata"]; - let roomusertype_roomuserdataid = &db["roomusertype_roomuserdataid"]; - for (roomuserdataid, _) in roomuserdataid_accountdata.iter() { - let mut parts = roomuserdataid.split(|&b| b == 0xFF); - let room_id = parts.next().unwrap(); - let user_id = parts.next().unwrap(); - let event_type = roomuserdataid.rsplit(|&b| b == 0xFF).next().unwrap(); - - let mut key = room_id.to_vec(); - key.push(0xFF); - key.extend_from_slice(user_id); - key.push(0xFF); - key.extend_from_slice(event_type); - - roomusertype_roomuserdataid.insert(&key, &roomuserdataid)?; - } - - services.globals.db.bump_database_version(5)?; - info!("Migration: 4 -> 5 finished"); - Ok(()) -} - -async fn db_lt_6(services: &Services) -> Result<()> { - let db = &services.db; - - // Set room member count - let roomid_shortstatehash = &db["roomid_shortstatehash"]; - for (roomid, _) in roomid_shortstatehash.iter() { - let string = utils::string_from_bytes(&roomid).unwrap(); - let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); - services.rooms.state_cache.update_joined_count(room_id)?; - } - - services.globals.db.bump_database_version(6)?; - info!("Migration: 5 -> 6 finished"); - Ok(()) -} - -async fn db_lt_7(services: &Services) -> Result<()> { - let db = &services.db; - - // Upgrade state store - let mut last_roomstates: HashMap = HashMap::new(); - let mut current_sstatehash: Option = None; - let mut current_room = None; - let mut current_state = HashSet::new(); - - let handle_state = |current_sstatehash: u64, - current_room: &RoomId, - current_state: 
HashSet<_>, - last_roomstates: &mut HashMap<_, _>| { - let last_roomsstatehash = last_roomstates.get(current_room); - - let states_parents = last_roomsstatehash.map_or_else( - || Ok(Vec::new()), - |&last_roomsstatehash| { - services - .rooms - .state_compressor - .load_shortstatehash_info(last_roomsstatehash) - }, - )?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew = current_state - .difference(&parent_stateinfo.1) - .copied() - .collect::>(); - - let statediffremoved = parent_stateinfo - .1 - .difference(¤t_state) - .copied() - .collect::>(); - - (statediffnew, statediffremoved) - } else { - (current_state, HashSet::new()) - }; - - services.rooms.state_compressor.save_state_from_diff( - current_sstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), - 2, // every state change is 2 event changes on average - states_parents, - )?; - - /* - let mut tmp = services.rooms.load_shortstatehash_info(¤t_sstatehash)?; - let state = tmp.pop().unwrap(); - println!( - "{}\t{}{:?}: {:?} + {:?} - {:?}", - current_room, - " ".repeat(tmp.len()), - utils::u64_from_bytes(¤t_sstatehash).unwrap(), - tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()), - state - .2 - .iter() - .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) - .collect::>(), - state - .3 - .iter() - .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) - .collect::>() - ); - */ - - Ok::<_, Error>(()) - }; - - let stateid_shorteventid = &db["stateid_shorteventid"]; - let shorteventid_eventid = &db["shorteventid_eventid"]; - for (k, seventid) in stateid_shorteventid.iter() { - let sstatehash = utils::u64_from_bytes(&k[0..size_of::()]).expect("number of bytes is correct"); - let sstatekey = k[size_of::()..].to_vec(); - if Some(sstatehash) != current_sstatehash { - if let Some(current_sstatehash) = current_sstatehash { - handle_state( - current_sstatehash, - current_room.as_deref().unwrap(), - current_state, - &mut 
last_roomstates, - )?; - last_roomstates.insert(current_room.clone().unwrap(), current_sstatehash); - } - current_state = HashSet::new(); - current_sstatehash = Some(sstatehash); - - let event_id = shorteventid_eventid.get(&seventid).unwrap().unwrap(); - let string = utils::string_from_bytes(&event_id).unwrap(); - let event_id = <&EventId>::try_from(string.as_str()).unwrap(); - let pdu = services.rooms.timeline.get_pdu(event_id).unwrap().unwrap(); - - if Some(&pdu.room_id) != current_room.as_ref() { - current_room = Some(pdu.room_id.clone()); - } - } - - let mut val = sstatekey; - val.extend_from_slice(&seventid); - current_state.insert(val.try_into().expect("size is correct")); - } - - if let Some(current_sstatehash) = current_sstatehash { - handle_state( - current_sstatehash, - current_room.as_deref().unwrap(), - current_state, - &mut last_roomstates, - )?; - } - - services.globals.db.bump_database_version(7)?; - info!("Migration: 6 -> 7 finished"); - Ok(()) -} - -async fn db_lt_8(services: &Services) -> Result<()> { - let db = &services.db; - - let roomid_shortstatehash = &db["roomid_shortstatehash"]; - let roomid_shortroomid = &db["roomid_shortroomid"]; - let pduid_pdu = &db["pduid_pdu"]; - let eventid_pduid = &db["eventid_pduid"]; - - // Generate short room ids for all rooms - for (room_id, _) in roomid_shortstatehash.iter() { - let shortroomid = services.globals.next_count()?.to_be_bytes(); - roomid_shortroomid.insert(&room_id, &shortroomid)?; - info!("Migration: 8"); - } - // Update pduids db layout - let batch = pduid_pdu - .iter() - .filter_map(|(key, v)| { - if !key.starts_with(b"!") { - return None; - } - let mut parts = key.splitn(2, |&b| b == 0xFF); - let room_id = parts.next().unwrap(); - let count = parts.next().unwrap(); - - let short_room_id = roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - - let mut new_key = short_room_id.to_vec(); - new_key.extend_from_slice(count); - - Some(database::OwnedKeyVal(new_key, 
v)) - }) - .collect::>(); - - pduid_pdu.insert_batch(batch.iter().map(database::KeyVal::from))?; - - let batch2 = eventid_pduid - .iter() - .filter_map(|(k, value)| { - if !value.starts_with(b"!") { - return None; - } - let mut parts = value.splitn(2, |&b| b == 0xFF); - let room_id = parts.next().unwrap(); - let count = parts.next().unwrap(); - - let short_room_id = roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - - let mut new_value = short_room_id.to_vec(); - new_value.extend_from_slice(count); - - Some(database::OwnedKeyVal(k, new_value)) - }) - .collect::>(); - - eventid_pduid.insert_batch(batch2.iter().map(database::KeyVal::from))?; - - services.globals.db.bump_database_version(8)?; - info!("Migration: 7 -> 8 finished"); - Ok(()) -} - -async fn db_lt_9(services: &Services) -> Result<()> { - let db = &services.db; - - let tokenids = &db["tokenids"]; - let roomid_shortroomid = &db["roomid_shortroomid"]; - - // Update tokenids db layout - let mut iter = tokenids - .iter() - .filter_map(|(key, _)| { - if !key.starts_with(b"!") { - return None; - } - let mut parts = key.splitn(4, |&b| b == 0xFF); - let room_id = parts.next().unwrap(); - let word = parts.next().unwrap(); - let _pdu_id_room = parts.next().unwrap(); - let pdu_id_count = parts.next().unwrap(); - - let short_room_id = roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - let mut new_key = short_room_id.to_vec(); - new_key.extend_from_slice(word); - new_key.push(0xFF); - new_key.extend_from_slice(pdu_id_count); - Some(database::OwnedKeyVal(new_key, Vec::::new())) - }) - .peekable(); - - while iter.peek().is_some() { - let batch = iter.by_ref().take(1000).collect::>(); - tokenids.insert_batch(batch.iter().map(database::KeyVal::from))?; - debug!("Inserted smaller batch"); - } - - info!("Deleting starts"); - - let batch2: Vec<_> = tokenids - .iter() - .filter_map(|(key, _)| { - if key.starts_with(b"!") { - Some(key) - } else { - None - 
} - }) - .collect(); - - for key in batch2 { - tokenids.remove(&key)?; - } - - services.globals.db.bump_database_version(9)?; - info!("Migration: 8 -> 9 finished"); - Ok(()) -} - -async fn db_lt_10(services: &Services) -> Result<()> { - let db = &services.db; - - let statekey_shortstatekey = &db["statekey_shortstatekey"]; - let shortstatekey_statekey = &db["shortstatekey_statekey"]; - - // Add other direction for shortstatekeys - for (statekey, shortstatekey) in statekey_shortstatekey.iter() { - shortstatekey_statekey.insert(&shortstatekey, &statekey)?; - } - - // Force E2EE device list updates so we can send them over federation - for user_id in services.users.iter().filter_map(Result::ok) { - services.users.mark_device_key_update(&user_id)?; - } - - services.globals.db.bump_database_version(10)?; - info!("Migration: 9 -> 10 finished"); - Ok(()) -} - -#[allow(unreachable_code)] -async fn db_lt_11(services: &Services) -> Result<()> { - error!("Dropping a column to clear data is not implemented yet."); - //let userdevicesessionid_uiaarequest = &db["userdevicesessionid_uiaarequest"]; - //userdevicesessionid_uiaarequest.clear()?; - - services.globals.db.bump_database_version(11)?; - info!("Migration: 10 -> 11 finished"); - Ok(()) -} - async fn db_lt_12(services: &Services) -> Result<()> { let config = &services.server.config; - for username in services.users.list_local_users()? 
{ - let user = match UserId::parse_with_server_name(username.clone(), &config.server_name) { + for username in &services + .users + .list_local_users() + .map(UserId::to_owned) + .collect::>() + .await + { + let user = match UserId::parse_with_server_name(username.as_str(), &config.server_name) { Ok(u) => u, Err(e) => { warn!("Invalid username {username}: {e}"); @@ -652,7 +218,7 @@ async fn db_lt_12(services: &Services) -> Result<()> { let raw_rules_list = services .account_data .get(None, &user, GlobalAccountDataEventType::PushRules.to_string().into()) - .unwrap() + .await .expect("Username is invalid"); let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); @@ -694,12 +260,15 @@ async fn db_lt_12(services: &Services) -> Result<()> { } } - services.account_data.update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; + services + .account_data + .update( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + ) + .await?; } services.globals.db.bump_database_version(12)?; @@ -710,8 +279,14 @@ async fn db_lt_12(services: &Services) -> Result<()> { async fn db_lt_13(services: &Services) -> Result<()> { let config = &services.server.config; - for username in services.users.list_local_users()? 
{ - let user = match UserId::parse_with_server_name(username.clone(), &config.server_name) { + for username in &services + .users + .list_local_users() + .map(UserId::to_owned) + .collect::>() + .await + { + let user = match UserId::parse_with_server_name(username.as_str(), &config.server_name) { Ok(u) => u, Err(e) => { warn!("Invalid username {username}: {e}"); @@ -722,7 +297,7 @@ async fn db_lt_13(services: &Services) -> Result<()> { let raw_rules_list = services .account_data .get(None, &user, GlobalAccountDataEventType::PushRules.to_string().into()) - .unwrap() + .await .expect("Username is invalid"); let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); @@ -733,12 +308,15 @@ async fn db_lt_13(services: &Services) -> Result<()> { .global .update_with_server_default(user_default_rules); - services.account_data.update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; + services + .account_data + .update( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + ) + .await?; } services.globals.db.bump_database_version(13)?; @@ -754,32 +332,37 @@ async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result< let _cork = db.cork_and_sync(); let mut iter_count: usize = 0; - for (mut key, value) in roomuserid_joined.iter() { - iter_count = iter_count.saturating_add(1); - debug_info!(%iter_count); - let first_sep_index = key - .iter() - .position(|&i| i == 0xFF) - .expect("found 0xFF delim"); + roomuserid_joined + .raw_stream() + .ignore_err() + .ready_for_each(|(key, value)| { + let mut key = key.to_vec(); + iter_count = iter_count.saturating_add(1); + debug_info!(%iter_count); + let first_sep_index = key + .iter() + .position(|&i| i == 0xFF) + .expect("found 0xFF delim"); - if key - .iter() - 
.get(first_sep_index..=first_sep_index.saturating_add(1)) - .copied() - .collect_vec() - == vec![0xFF, 0xFF] - { - debug_warn!("Found bad key: {key:?}"); - roomuserid_joined.remove(&key)?; + if key + .iter() + .get(first_sep_index..=first_sep_index.saturating_add(1)) + .copied() + .collect_vec() + == vec![0xFF, 0xFF] + { + debug_warn!("Found bad key: {key:?}"); + roomuserid_joined.remove(&key); - key.remove(first_sep_index); - debug_warn!("Fixed key: {key:?}"); - roomuserid_joined.insert(&key, &value)?; - } - } + key.remove(first_sep_index); + debug_warn!("Fixed key: {key:?}"); + roomuserid_joined.insert(&key, value); + } + }) + .await; db.db.cleanup()?; - db["global"].insert(b"fix_bad_double_separator_in_state_cache", &[])?; + db["global"].insert(b"fix_bad_double_separator_in_state_cache", &[]); info!("Finished fixing"); Ok(()) @@ -795,69 +378,71 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) .rooms .metadata .iter_ids() - .filter_map(Result::ok) - .collect_vec(); + .map(ToOwned::to_owned) + .collect::>() + .await; - for room_id in room_ids.clone() { + for room_id in &room_ids { debug_info!("Fixing room {room_id}"); let users_in_room = services .rooms .state_cache - .room_members(&room_id) - .filter_map(Result::ok) - .collect_vec(); + .room_members(room_id) + .collect::>() + .await; let joined_members = users_in_room .iter() + .stream() .filter(|user_id| { services .rooms .state_accessor - .get_member(&room_id, user_id) - .unwrap_or(None) - .map_or(false, |membership| membership.membership == MembershipState::Join) + .get_member(room_id, user_id) + .map(|member| member.map_or(false, |member| member.membership == MembershipState::Join)) }) - .collect_vec(); + .collect::>() + .await; let non_joined_members = users_in_room .iter() + .stream() .filter(|user_id| { services .rooms .state_accessor - .get_member(&room_id, user_id) - .unwrap_or(None) - .map_or(false, |membership| { - membership.membership == MembershipState::Leave || 
membership.membership == MembershipState::Ban - }) + .get_member(room_id, user_id) + .map(|member| member.map_or(false, |member| member.membership == MembershipState::Join)) }) - .collect_vec(); + .collect::>() + .await; for user_id in joined_members { debug_info!("User is joined, marking as joined"); - services - .rooms - .state_cache - .mark_as_joined(user_id, &room_id)?; + services.rooms.state_cache.mark_as_joined(user_id, room_id); } for user_id in non_joined_members { debug_info!("User is left or banned, marking as left"); - services.rooms.state_cache.mark_as_left(user_id, &room_id)?; + services.rooms.state_cache.mark_as_left(user_id, room_id); } } - for room_id in room_ids { + for room_id in &room_ids { debug_info!( "Updating joined count for room {room_id} to fix servers in room after correcting membership states" ); - services.rooms.state_cache.update_joined_count(&room_id)?; + services + .rooms + .state_cache + .update_joined_count(room_id) + .await; } db.db.cleanup()?; - db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[])?; + db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[]); info!("Finished fixing"); Ok(()) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 87f8f492..f777901f 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -288,8 +288,8 @@ impl Service { /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. 
- pub fn verify_keys_for(&self, origin: &ServerName) -> Result> { - let mut keys = self.db.verify_keys_for(origin)?; + pub async fn verify_keys_for(&self, origin: &ServerName) -> Result> { + let mut keys = self.db.verify_keys_for(origin).await?; if origin == self.server_name() { keys.insert( format!("ed25519:{}", self.keypair().version()) @@ -304,8 +304,8 @@ impl Service { Ok(keys) } - pub fn signing_keys_for(&self, origin: &ServerName) -> Result> { - self.db.signing_keys_for(origin) + pub async fn signing_keys_for(&self, origin: &ServerName) -> Result { + self.db.signing_keys_for(origin).await } pub fn well_known_client(&self) -> &Option { &self.config.well_known.client } diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs deleted file mode 100644 index 30ac593b..00000000 --- a/src/service/key_backups/data.rs +++ /dev/null @@ -1,346 +0,0 @@ -use std::{collections::BTreeMap, sync::Arc}; - -use conduit::{utils, Error, Result}; -use database::Map; -use ruma::{ - api::client::{ - backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - error::ErrorKind, - }, - serde::Raw, - OwnedRoomId, RoomId, UserId, -}; - -use crate::{globals, Dep}; - -pub(super) struct Data { - backupid_algorithm: Arc, - backupid_etag: Arc, - backupkeyid_backup: Arc, - services: Services, -} - -struct Services { - globals: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - backupid_algorithm: db["backupid_algorithm"].clone(), - backupid_etag: db["backupid_etag"].clone(), - backupkeyid_backup: db["backupkeyid_backup"].clone(), - services: Services { - globals: args.depend::("globals"), - }, - } - } - - pub(super) fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw) -> Result { - let version = self.services.globals.next_count()?.to_string(); - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.insert( - &key, - 
&serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), - )?; - self.backupid_etag - .insert(&key, &self.services.globals.next_count()?.to_be_bytes())?; - Ok(version) - } - - pub(super) fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.remove(&key)?; - self.backupid_etag.remove(&key)?; - - key.push(0xFF); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } - - pub(super) fn update_backup( - &self, user_id: &UserId, version: &str, backup_metadata: &Raw, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest(ErrorKind::NotFound, "Tried to update nonexistent backup.")); - } - - self.backupid_algorithm - .insert(&key, backup_metadata.json().get().as_bytes())?; - self.backupid_etag - .insert(&key, &self.services.globals.next_count()?.to_be_bytes())?; - Ok(version.to_owned()) - } - - pub(super) fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, _)| { - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) - }) - .transpose() - } - - pub(super) fn get_latest_backup(&self, user_id: &UserId) -> Result)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - let mut 
last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, value)| { - let version = utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; - - Ok(( - version, - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid."))?, - )) - }) - .transpose() - } - - pub(super) fn get_backup(&self, user_id: &UserId, version: &str) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm - .get(&key)? - .map_or(Ok(None), |bytes| { - serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid.")) - }) - } - - pub(super) fn add_key( - &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest(ErrorKind::NotFound, "Tried to update nonexistent backup.")); - } - - self.backupid_etag - .insert(&key, &self.services.globals.next_count()?.to_be_bytes())?; - - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .insert(&key, key_data.json().get().as_bytes())?; - - Ok(()) - } - - pub(super) fn count_keys(&self, user_id: &UserId, version: &str) -> Result { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(version.as_bytes()); - - Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) - } - 
- pub(super) fn get_etag(&self, user_id: &UserId, version: &str) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - - Ok(utils::u64_from_bytes( - &self - .backupid_etag - .get(&key)? - .ok_or_else(|| Error::bad_database("Backup has no etag."))?, - ) - .map_err(|_| Error::bad_database("etag in backupid_etag invalid."))? - .to_string()) - } - - pub(super) fn get_all(&self, user_id: &UserId, version: &str) -> Result> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xFF); - - let mut rooms = BTreeMap::::new(); - - for result in self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xFF); - - let session_id = utils::string_from_bytes( - parts - .next() - .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, - ) - .map_err(|_| Error::bad_database("backupkeyid_backup session_id is invalid."))?; - - let room_id = RoomId::parse( - utils::string_from_bytes( - parts - .next() - .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, - ) - .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, - ) - .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid room id."))?; - - let key_data = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyBackupData in backupkeyid_backup is invalid."))?; - - Ok::<_, Error>((room_id, session_id, key_data)) - }) { - let (room_id, session_id, key_data) = result?; - rooms - .entry(room_id) - .or_insert_with(|| RoomKeyBackup { - sessions: BTreeMap::new(), - }) - .sessions - .insert(session_id, key_data); - } - - Ok(rooms) - } - - pub(super) fn get_room( - &self, user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - 
prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xFF); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xFF); - - Ok(self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xFF); - - let session_id = utils::string_from_bytes( - parts - .next() - .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, - ) - .map_err(|_| Error::bad_database("backupkeyid_backup session_id is invalid."))?; - - let key_data = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyBackupData in backupkeyid_backup is invalid."))?; - - Ok::<_, Error>((session_id, key_data)) - }) - .filter_map(Result::ok) - .collect()) - } - - pub(super) fn get_session( - &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .get(&key)? 
- .map(|value| { - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.")) - }) - .transpose() - } - - pub(super) fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - key.push(0xFF); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } - - pub(super) fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xFF); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } - - pub(super) fn delete_room_key( - &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(session_id.as_bytes()); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } -} diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 65d3c065..12712e79 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,93 +1,319 @@ -mod data; - use std::{collections::BTreeMap, sync::Arc}; -use conduit::Result; -use data::Data; +use conduit::{ + err, implement, utils, + utils::stream::{ReadyExt, TryIgnore}, + Err, Error, Result, +}; +use database::{Deserialized, Ignore, Interfix, Map}; +use futures::StreamExt; use ruma::{ 
api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, OwnedRoomId, RoomId, UserId, }; +use crate::{globals, Dep}; + pub struct Service { db: Data, + services: Services, +} + +struct Data { + backupid_algorithm: Arc, + backupid_etag: Arc, + backupkeyid_backup: Arc, +} + +struct Services { + globals: Dep, } impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(&args), + db: Data { + backupid_algorithm: args.db["backupid_algorithm"].clone(), + backupid_etag: args.db["backupid_etag"].clone(), + backupkeyid_backup: args.db["backupkeyid_backup"].clone(), + }, + services: Services { + globals: args.depend::("globals"), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - pub fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw) -> Result { - self.db.create_backup(user_id, backup_metadata) - } +#[implement(Service)] +pub fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw) -> Result { + let version = self.services.globals.next_count()?.to_string(); - pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - self.db.delete_backup(user_id, version) - } + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); - pub fn update_backup( - &self, user_id: &UserId, version: &str, backup_metadata: &Raw, - ) -> Result { - self.db.update_backup(user_id, version, backup_metadata) - } + self.db.backupid_algorithm.insert( + &key, + &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), + ); - pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { - self.db.get_latest_backup_version(user_id) - } + self.db + .backupid_etag + .insert(&key, &self.services.globals.next_count()?.to_be_bytes()); - pub fn get_latest_backup(&self, user_id: &UserId) -> Result)>> { - self.db.get_latest_backup(user_id) - } - - pub fn 
get_backup(&self, user_id: &UserId, version: &str) -> Result>> { - self.db.get_backup(user_id, version) - } - - pub fn add_key( - &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw, - ) -> Result<()> { - self.db - .add_key(user_id, version, room_id, session_id, key_data) - } - - pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { self.db.count_keys(user_id, version) } - - pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { self.db.get_etag(user_id, version) } - - pub fn get_all(&self, user_id: &UserId, version: &str) -> Result> { - self.db.get_all(user_id, version) - } - - pub fn get_room( - &self, user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result>> { - self.db.get_room(user_id, version, room_id) - } - - pub fn get_session( - &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - ) -> Result>> { - self.db.get_session(user_id, version, room_id, session_id) - } - - pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - self.db.delete_all_keys(user_id, version) - } - - pub fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> { - self.db.delete_room_keys(user_id, version, room_id) - } - - pub fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str) -> Result<()> { - self.db - .delete_room_key(user_id, version, room_id, session_id) - } + Ok(version) +} + +#[implement(Service)] +pub async fn delete_backup(&self, user_id: &UserId, version: &str) { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + self.db.backupid_algorithm.remove(&key); + self.db.backupid_etag.remove(&key); + + let key = (user_id, version, Interfix); + self.db + .backupkeyid_backup + .keys_raw_prefix(&key) + .ignore_err() + .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) + .await; +} + 
+#[implement(Service)] +pub async fn update_backup( + &self, user_id: &UserId, version: &str, backup_metadata: &Raw, +) -> Result { + let key = (user_id, version); + if self.db.backupid_algorithm.qry(&key).await.is_err() { + return Err!(Request(NotFound("Tried to update nonexistent backup."))); + } + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + self.db + .backupid_algorithm + .insert(&key, backup_metadata.json().get().as_bytes()); + self.db + .backupid_etag + .insert(&key, &self.services.globals.next_count()?.to_be_bytes()); + + Ok(version.to_owned()) +} + +#[implement(Service)] +pub async fn get_latest_backup_version(&self, user_id: &UserId) -> Result { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.db + .backupid_algorithm + .rev_raw_keys_from(&last_possible_key) + .ignore_err() + .ready_take_while(move |key| key.starts_with(&prefix)) + .next() + .await + .ok_or_else(|| err!(Request(NotFound("No backup versions found")))) + .and_then(|key| { + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) + }) +} + +#[implement(Service)] +pub async fn get_latest_backup(&self, user_id: &UserId) -> Result<(String, Raw)> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.db + .backupid_algorithm + .rev_raw_stream_from(&last_possible_key) + .ignore_err() + .ready_take_while(move |(key, _)| key.starts_with(&prefix)) + .next() + .await + .ok_or_else(|| err!(Request(NotFound("No backup found")))) + .and_then(|(key, val)| { + let version = utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + 
.expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; + + let algorithm = serde_json::from_slice(val) + .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid."))?; + + Ok((version, algorithm)) + }) +} + +#[implement(Service)] +pub async fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { + let key = (user_id, version); + self.db + .backupid_algorithm + .qry(&key) + .await + .deserialized_json() +} + +#[implement(Service)] +pub async fn add_key( + &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw, +) -> Result<()> { + let key = (user_id, version); + if self.db.backupid_algorithm.qry(&key).await.is_err() { + return Err!(Request(NotFound("Tried to update nonexistent backup."))); + } + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + self.db + .backupid_etag + .insert(&key, &self.services.globals.next_count()?.to_be_bytes()); + + key.push(0xFF); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(session_id.as_bytes()); + + self.db + .backupkeyid_backup + .insert(&key, key_data.json().get().as_bytes()); + + Ok(()) +} + +#[implement(Service)] +pub async fn count_keys(&self, user_id: &UserId, version: &str) -> usize { + let prefix = (user_id, version); + self.db + .backupkeyid_backup + .keys_raw_prefix(&prefix) + .count() + .await +} + +#[implement(Service)] +pub async fn get_etag(&self, user_id: &UserId, version: &str) -> String { + let key = (user_id, version); + self.db + .backupid_etag + .qry(&key) + .await + .deserialized::() + .as_ref() + .map(ToString::to_string) + .expect("Backup has no etag.") +} + +#[implement(Service)] +pub async fn get_all(&self, user_id: &UserId, version: &str) -> BTreeMap { + type KeyVal<'a> = ((Ignore, Ignore, &'a RoomId, &'a str), &'a [u8]); + + let mut rooms = BTreeMap::::new(); + let default = 
|| RoomKeyBackup { + sessions: BTreeMap::new(), + }; + + let prefix = (user_id, version, Interfix); + self.db + .backupkeyid_backup + .stream_prefix(&prefix) + .ignore_err() + .ready_for_each(|((_, _, room_id, session_id), value): KeyVal<'_>| { + let key_data = serde_json::from_slice(value).expect("Invalid KeyBackupData JSON"); + rooms + .entry(room_id.into()) + .or_insert_with(default) + .sessions + .insert(session_id.into(), key_data); + }) + .await; + + rooms +} + +#[implement(Service)] +pub async fn get_room( + &self, user_id: &UserId, version: &str, room_id: &RoomId, +) -> BTreeMap> { + type KeyVal<'a> = ((Ignore, Ignore, Ignore, &'a str), &'a [u8]); + + let prefix = (user_id, version, room_id, Interfix); + self.db + .backupkeyid_backup + .stream_prefix(&prefix) + .ignore_err() + .map(|((.., session_id), value): KeyVal<'_>| { + let session_id = session_id.to_owned(); + let key_backup_data = serde_json::from_slice(value).expect("Invalid KeyBackupData JSON"); + (session_id, key_backup_data) + }) + .collect() + .await +} + +#[implement(Service)] +pub async fn get_session( + &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, +) -> Result> { + let key = (user_id, version, room_id, session_id); + + self.db + .backupkeyid_backup + .qry(&key) + .await + .deserialized_json() +} + +#[implement(Service)] +pub async fn delete_all_keys(&self, user_id: &UserId, version: &str) { + let key = (user_id, version, Interfix); + self.db + .backupkeyid_backup + .keys_raw_prefix(&key) + .ignore_err() + .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) + .await; +} + +#[implement(Service)] +pub async fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) { + let key = (user_id, version, room_id, Interfix); + self.db + .backupkeyid_backup + .keys_raw_prefix(&key) + .ignore_err() + .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) + .await; +} + +#[implement(Service)] +pub async 
fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str) { + let key = (user_id, version, room_id, session_id); + self.db + .backupkeyid_backup + .keys_raw_prefix(&key) + .ignore_err() + .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) + .await; } diff --git a/src/service/manager.rs b/src/service/manager.rs index 42260bb3..21e0ed7c 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,7 +1,7 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; use conduit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; -use futures_util::FutureExt; +use futures::FutureExt; use tokio::{ sync::{Mutex, MutexGuard}, task::{JoinHandle, JoinSet}, diff --git a/src/service/media/data.rs b/src/service/media/data.rs index e5d6d20b..29d562cc 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -2,10 +2,11 @@ use std::sync::Arc; use conduit::{ debug, debug_info, trace, - utils::{str_from_bytes, string_from_bytes}, + utils::{str_from_bytes, stream::TryIgnore, string_from_bytes, ReadyExt}, Err, Error, Result, }; use database::{Database, Map}; +use futures::StreamExt; use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; use super::{preview::UrlPreviewData, thumbnail::Dim}; @@ -59,7 +60,7 @@ impl Data { .unwrap_or_default(), ); - self.mediaid_file.insert(&key, &[])?; + self.mediaid_file.insert(&key, &[]); if let Some(user) = user { let mut key: Vec = Vec::new(); @@ -68,13 +69,13 @@ impl Data { key.extend_from_slice(b"/"); key.extend_from_slice(mxc.media_id.as_bytes()); let user = user.as_bytes().to_vec(); - self.mediaid_user.insert(&key, &user)?; + self.mediaid_user.insert(&key, &user); } Ok(key) } - pub(super) fn delete_file_mxc(&self, mxc: &Mxc<'_>) -> Result<()> { + pub(super) async fn delete_file_mxc(&self, mxc: &Mxc<'_>) { debug!("MXC URI: {mxc}"); let mut prefix: Vec = Vec::new(); @@ -85,25 
+86,31 @@ impl Data { prefix.push(0xFF); trace!("MXC db prefix: {prefix:?}"); - for (key, _) in self.mediaid_file.scan_prefix(prefix.clone()) { - debug!("Deleting key: {:?}", key); - self.mediaid_file.remove(&key)?; - } + self.mediaid_file + .raw_keys_prefix(&prefix) + .ignore_err() + .ready_for_each(|key| { + debug!("Deleting key: {:?}", key); + self.mediaid_file.remove(key); + }) + .await; - for (key, value) in self.mediaid_user.scan_prefix(prefix.clone()) { - if key.starts_with(&prefix) { - let user = str_from_bytes(&value).unwrap_or_default(); + self.mediaid_user + .raw_stream_prefix(&prefix) + .ignore_err() + .ready_for_each(|(key, val)| { + if key.starts_with(&prefix) { + let user = str_from_bytes(val).unwrap_or_default(); + debug_info!("Deleting key {key:?} which was uploaded by user {user}"); - debug_info!("Deleting key \"{key:?}\" which was uploaded by user {user}"); - self.mediaid_user.remove(&key)?; - } - } - - Ok(()) + self.mediaid_user.remove(key); + } + }) + .await; } /// Searches for all files with the given MXC - pub(super) fn search_mxc_metadata_prefix(&self, mxc: &Mxc<'_>) -> Result>> { + pub(super) async fn search_mxc_metadata_prefix(&self, mxc: &Mxc<'_>) -> Result>> { debug!("MXC URI: {mxc}"); let mut prefix: Vec = Vec::new(); @@ -115,9 +122,10 @@ impl Data { let keys: Vec> = self .mediaid_file - .scan_prefix(prefix) - .map(|(key, _)| key) - .collect(); + .keys_prefix_raw(&prefix) + .ignore_err() + .collect() + .await; if keys.is_empty() { return Err!(Database("Failed to find any keys in database for `{mxc}`",)); @@ -128,7 +136,7 @@ impl Data { Ok(keys) } - pub(super) fn search_file_metadata(&self, mxc: &Mxc<'_>, dim: &Dim) -> Result { + pub(super) async fn search_file_metadata(&self, mxc: &Mxc<'_>, dim: &Dim) -> Result { let mut prefix: Vec = Vec::new(); prefix.extend_from_slice(b"mxc://"); prefix.extend_from_slice(mxc.server_name.as_bytes()); @@ -139,10 +147,13 @@ impl Data { prefix.extend_from_slice(&dim.height.to_be_bytes()); 
prefix.push(0xFF); - let (key, _) = self + let key = self .mediaid_file - .scan_prefix(prefix) + .raw_keys_prefix(&prefix) + .ignore_err() + .map(ToOwned::to_owned) .next() + .await .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; let mut parts = key.rsplit(|&b| b == 0xFF); @@ -177,28 +188,31 @@ impl Data { } /// Gets all the MXCs associated with a user - pub(super) fn get_all_user_mxcs(&self, user_id: &UserId) -> Vec { - let user_id = user_id.as_bytes().to_vec(); - + pub(super) async fn get_all_user_mxcs(&self, user_id: &UserId) -> Vec { self.mediaid_user - .iter() - .filter_map(|(key, user)| { - if *user == user_id { - let mxc_s = string_from_bytes(&key).ok()?; - Some(OwnedMxcUri::from(mxc_s)) - } else { - None - } - }) + .stream() + .ignore_err() + .ready_filter_map(|(key, user): (&str, &UserId)| (user == user_id).then(|| key.into())) .collect() + .await } /// Gets all the media keys in our database (this includes all the metadata /// associated with it such as width, height, content-type, etc) - pub(crate) fn get_all_media_keys(&self) -> Vec> { self.mediaid_file.iter().map(|(key, _)| key).collect() } + pub(crate) async fn get_all_media_keys(&self) -> Vec> { + self.mediaid_file + .raw_keys() + .ignore_err() + .map(<[u8]>::to_vec) + .collect() + .await + } #[inline] - pub(super) fn remove_url_preview(&self, url: &str) -> Result<()> { self.url_previews.remove(url.as_bytes()) } + pub(super) fn remove_url_preview(&self, url: &str) -> Result<()> { + self.url_previews.remove(url.as_bytes()); + Ok(()) + } pub(super) fn set_url_preview( &self, url: &str, data: &UrlPreviewData, timestamp: std::time::Duration, @@ -233,11 +247,13 @@ impl Data { value.push(0xFF); value.extend_from_slice(&data.image_height.unwrap_or(0).to_be_bytes()); - self.url_previews.insert(url.as_bytes(), &value) + self.url_previews.insert(url.as_bytes(), &value); + + Ok(()) } - pub(super) fn get_url_preview(&self, url: &str) -> Option { - let values = 
self.url_previews.get(url.as_bytes()).ok()??; + pub(super) async fn get_url_preview(&self, url: &str) -> Result { + let values = self.url_previews.qry(url).await?; let mut values = values.split(|&b| b == 0xFF); @@ -291,7 +307,7 @@ impl Data { x => x, }; - Some(UrlPreviewData { + Ok(UrlPreviewData { title, description, image, diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 9968d25b..2d1b39f9 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -7,7 +7,11 @@ use std::{ time::Instant, }; -use conduit::{debug, debug_info, debug_warn, error, info, warn, Config, Result}; +use conduit::{ + debug, debug_info, debug_warn, error, info, + utils::{stream::TryIgnore, ReadyExt}, + warn, Config, Result, +}; use crate::{globals, Services}; @@ -23,12 +27,17 @@ pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { // Move old media files to new names let mut changes = Vec::<(PathBuf, PathBuf)>::new(); - for (key, _) in mediaid_file.iter() { - let old = services.media.get_media_file_b64(&key); - let new = services.media.get_media_file_sha256(&key); - debug!(?key, ?old, ?new, num = changes.len(), "change"); - changes.push((old, new)); - } + mediaid_file + .raw_keys() + .ignore_err() + .ready_for_each(|key| { + let old = services.media.get_media_file_b64(key); + let new = services.media.get_media_file_sha256(key); + debug!(?key, ?old, ?new, num = changes.len(), "change"); + changes.push((old, new)); + }) + .await; + // move the file to the new location for (old_path, path) in changes { if old_path.exists() { @@ -41,11 +50,11 @@ pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { // Apply fix from when sha256_media was backward-incompat and bumped the schema // version from 13 to 14. For users satisfying these conditions we can go back. - if services.globals.db.database_version()? 
== 14 && globals::migrations::DATABASE_VERSION == 13 { + if services.globals.db.database_version().await == 14 && globals::migrations::DATABASE_VERSION == 13 { services.globals.db.bump_database_version(13)?; } - db["global"].insert(b"feat_sha256_media", &[])?; + db["global"].insert(b"feat_sha256_media", &[]); info!("Finished applying sha256_media"); Ok(()) } @@ -71,7 +80,7 @@ pub(crate) async fn checkup_sha256_media(services: &Services) -> Result<()> { .filter_map(|ent| ent.map_or(None, |ent| Some(ent.path().into_os_string()))) .collect(); - for key in media.db.get_all_media_keys() { + for key in media.db.get_all_media_keys().await { let new_path = media.get_media_file_sha256(&key).into_os_string(); let old_path = media.get_media_file_b64(&key).into_os_string(); if let Err(e) = handle_media_check(&dbs, config, &files, &key, &new_path, &old_path).await { @@ -112,8 +121,8 @@ async fn handle_media_check( "Media is missing at all paths. Removing from database..." ); - mediaid_file.remove(key)?; - mediaid_user.remove(key)?; + mediaid_file.remove(key); + mediaid_user.remove(key); } if config.media_compat_file_link && !old_exists && new_exists { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index d3765a17..c0b15726 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -97,7 +97,7 @@ impl Service { /// Deletes a file in the database and from the media directory via an MXC pub async fn delete(&self, mxc: &Mxc<'_>) -> Result<()> { - if let Ok(keys) = self.db.search_mxc_metadata_prefix(mxc) { + if let Ok(keys) = self.db.search_mxc_metadata_prefix(mxc).await { for key in keys { trace!(?mxc, "MXC Key: {key:?}"); debug_info!(?mxc, "Deleting from filesystem"); @@ -107,7 +107,7 @@ impl Service { } debug_info!(?mxc, "Deleting from database"); - _ = self.db.delete_file_mxc(mxc); + self.db.delete_file_mxc(mxc).await; } Ok(()) @@ -120,7 +120,7 @@ impl Service { /// /// currently, this is only practical for local users pub async fn 
delete_from_user(&self, user: &UserId) -> Result { - let mxcs = self.db.get_all_user_mxcs(user); + let mxcs = self.db.get_all_user_mxcs(user).await; let mut deletion_count: usize = 0; for mxc in mxcs { @@ -150,7 +150,7 @@ impl Service { content_disposition, content_type, key, - }) = self.db.search_file_metadata(mxc, &Dim::default()) + }) = self.db.search_file_metadata(mxc, &Dim::default()).await { let mut content = Vec::new(); let path = self.get_media_file(&key); @@ -170,7 +170,7 @@ impl Service { /// Gets all the MXC URIs in our media database pub async fn get_all_mxcs(&self) -> Result> { - let all_keys = self.db.get_all_media_keys(); + let all_keys = self.db.get_all_media_keys().await; let mut mxcs = Vec::with_capacity(all_keys.len()); @@ -209,7 +209,7 @@ impl Service { pub async fn delete_all_remote_media_at_after_time( &self, time: SystemTime, before: bool, after: bool, yes_i_want_to_delete_local_media: bool, ) -> Result { - let all_keys = self.db.get_all_media_keys(); + let all_keys = self.db.get_all_media_keys().await; let mut remote_mxcs = Vec::with_capacity(all_keys.len()); for key in all_keys { @@ -343,9 +343,10 @@ impl Service { } #[inline] - pub fn get_metadata(&self, mxc: &Mxc<'_>) -> Option { + pub async fn get_metadata(&self, mxc: &Mxc<'_>) -> Option { self.db .search_file_metadata(mxc, &Dim::default()) + .await .map(|metadata| FileMeta { content_disposition: metadata.content_disposition, content_type: metadata.content_type, diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index 5704075e..6b147383 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -71,16 +71,16 @@ pub async fn download_image(&self, url: &str) -> Result { #[implement(Service)] pub async fn get_url_preview(&self, url: &str) -> Result { - if let Some(preview) = self.db.get_url_preview(url) { + if let Ok(preview) = self.db.get_url_preview(url).await { return Ok(preview); } // ensure that only one request is made per URL let 
_request_lock = self.url_preview_mutex.lock(url).await; - match self.db.get_url_preview(url) { - Some(preview) => Ok(preview), - None => self.request_url_preview(url).await, + match self.db.get_url_preview(url).await { + Ok(preview) => Ok(preview), + Err(_) => self.request_url_preview(url).await, } } diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 630f7b3b..04ec0303 100644 --- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -54,9 +54,9 @@ impl super::Service { // 0, 0 because that's the original file let dim = dim.normalized(); - if let Ok(metadata) = self.db.search_file_metadata(mxc, &dim) { + if let Ok(metadata) = self.db.search_file_metadata(mxc, &dim).await { self.get_thumbnail_saved(metadata).await - } else if let Ok(metadata) = self.db.search_file_metadata(mxc, &Dim::default()) { + } else if let Ok(metadata) = self.db.search_file_metadata(mxc, &Dim::default()).await { self.get_thumbnail_generate(mxc, &dim, metadata).await } else { Ok(None) diff --git a/src/service/mod.rs b/src/service/mod.rs index f588a542..cb8bfcd9 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -19,6 +19,7 @@ pub mod resolver; pub mod rooms; pub mod sending; pub mod server_keys; +pub mod sync; pub mod transaction_ids; pub mod uiaa; pub mod updates; diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index ec036b3d..0c3f3d31 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -1,7 +1,12 @@ use std::sync::Arc; -use conduit::{debug_warn, utils, Error, Result}; -use database::Map; +use conduit::{ + debug_warn, utils, + utils::{stream::TryIgnore, ReadyExt}, + Result, +}; +use database::{Deserialized, Map}; +use futures::Stream; use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; use super::Presence; @@ -31,39 +36,35 @@ impl Data { } } - pub fn get_presence(&self, user_id: &UserId) -> Result> { - if let Some(count_bytes) = 
self.userid_presenceid.get(user_id.as_bytes())? { - let count = utils::u64_from_bytes(&count_bytes) - .map_err(|_e| Error::bad_database("No 'count' bytes in presence key"))?; + pub async fn get_presence(&self, user_id: &UserId) -> Result<(u64, PresenceEvent)> { + let count = self + .userid_presenceid + .qry(user_id) + .await + .deserialized::()?; - let key = presenceid_key(count, user_id); - self.presenceid_presence - .get(&key)? - .map(|presence_bytes| -> Result<(u64, PresenceEvent)> { - Ok(( - count, - Presence::from_json_bytes(&presence_bytes)?.to_presence_event(user_id, &self.services.users)?, - )) - }) - .transpose() - } else { - Ok(None) - } + let key = presenceid_key(count, user_id); + let bytes = self.presenceid_presence.qry(&key).await?; + let event = Presence::from_json_bytes(&bytes)? + .to_presence_event(user_id, &self.services.users) + .await; + + Ok((count, event)) } - pub(super) fn set_presence( + pub(super) async fn set_presence( &self, user_id: &UserId, presence_state: &PresenceState, currently_active: Option, last_active_ago: Option, status_msg: Option, ) -> Result<()> { - let last_presence = self.get_presence(user_id)?; + let last_presence = self.get_presence(user_id).await; let state_changed = match last_presence { - None => true, - Some(ref presence) => presence.1.content.presence != *presence_state, + Err(_) => true, + Ok(ref presence) => presence.1.content.presence != *presence_state, }; let status_msg_changed = match last_presence { - None => true, - Some(ref last_presence) => { + Err(_) => true, + Ok(ref last_presence) => { let old_msg = last_presence .1 .content @@ -79,8 +80,8 @@ impl Data { let now = utils::millis_since_unix_epoch(); let last_last_active_ts = match last_presence { - None => 0, - Some((_, ref presence)) => now.saturating_sub(presence.content.last_active_ago.unwrap_or_default().into()), + Err(_) => 0, + Ok((_, ref presence)) => now.saturating_sub(presence.content.last_active_ago.unwrap_or_default().into()), }; let 
last_active_ts = match last_active_ago { @@ -90,12 +91,7 @@ impl Data { // TODO: tighten for state flicker? if !status_msg_changed && !state_changed && last_active_ts < last_last_active_ts { - debug_warn!( - "presence spam {:?} last_active_ts:{:?} < {:?}", - user_id, - last_active_ts, - last_last_active_ts - ); + debug_warn!("presence spam {user_id:?} last_active_ts:{last_active_ts:?} < {last_last_active_ts:?}",); return Ok(()); } @@ -115,41 +111,42 @@ impl Data { let key = presenceid_key(count, user_id); self.presenceid_presence - .insert(&key, &presence.to_json_bytes()?)?; + .insert(&key, &presence.to_json_bytes()?); self.userid_presenceid - .insert(user_id.as_bytes(), &count.to_be_bytes())?; + .insert(user_id.as_bytes(), &count.to_be_bytes()); - if let Some((last_count, _)) = last_presence { + if let Ok((last_count, _)) = last_presence { let key = presenceid_key(last_count, user_id); - self.presenceid_presence.remove(&key)?; + self.presenceid_presence.remove(&key); } Ok(()) } - pub(super) fn remove_presence(&self, user_id: &UserId) -> Result<()> { - if let Some(count_bytes) = self.userid_presenceid.get(user_id.as_bytes())? 
{ - let count = utils::u64_from_bytes(&count_bytes) - .map_err(|_e| Error::bad_database("No 'count' bytes in presence key"))?; - let key = presenceid_key(count, user_id); - self.presenceid_presence.remove(&key)?; - self.userid_presenceid.remove(user_id.as_bytes())?; - } + pub(super) async fn remove_presence(&self, user_id: &UserId) { + let Ok(count) = self + .userid_presenceid + .qry(user_id) + .await + .deserialized::() + else { + return; + }; - Ok(()) + let key = presenceid_key(count, user_id); + self.presenceid_presence.remove(&key); + self.userid_presenceid.remove(user_id.as_bytes()); } - pub fn presence_since<'a>(&'a self, since: u64) -> Box)> + 'a> { - Box::new( - self.presenceid_presence - .iter() - .flat_map(|(key, presence_bytes)| -> Result<(OwnedUserId, u64, Vec)> { - let (count, user_id) = presenceid_parse(&key)?; - Ok((user_id.to_owned(), count, presence_bytes)) - }) - .filter(move |(_, count, _)| *count > since), - ) + pub fn presence_since(&self, since: u64) -> impl Stream)> + Send + '_ { + self.presenceid_presence + .raw_stream() + .ignore_err() + .ready_filter_map(move |(key, presence_bytes)| { + let (count, user_id) = presenceid_parse(key).expect("invalid presenceid_parse"); + (count > since).then(|| (user_id.to_owned(), count, presence_bytes.to_vec())) + }) } } @@ -162,7 +159,7 @@ fn presenceid_key(count: u64, user_id: &UserId) -> Vec { fn presenceid_parse(key: &[u8]) -> Result<(u64, &UserId)> { let (count, user_id) = key.split_at(8); let user_id = user_id_from_bytes(user_id)?; - let count = utils::u64_from_bytes(count).unwrap(); + let count = utils::u64_from_u8(count); Ok((count, user_id)) } diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index a54a6d7c..3b5c4caf 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -4,8 +4,8 @@ mod presence; use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduit::{checked, debug, error, Error, Result, Server}; -use 
futures_util::{stream::FuturesUnordered, StreamExt}; +use conduit::{checked, debug, error, result::LogErr, Error, Result, Server}; +use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; use tokio::{sync::Mutex, time::sleep}; @@ -58,7 +58,9 @@ impl crate::Service for Service { loop { debug_assert!(!receiver.is_closed(), "channel error"); tokio::select! { - Some(user_id) = presence_timers.next() => self.process_presence_timer(&user_id)?, + Some(user_id) = presence_timers.next() => { + self.process_presence_timer(&user_id).await.log_err().ok(); + }, event = receiver.recv_async() => match event { Err(_e) => return Ok(()), Ok((user_id, timeout)) => { @@ -82,28 +84,27 @@ impl crate::Service for Service { impl Service { /// Returns the latest presence event for the given user. #[inline] - pub fn get_presence(&self, user_id: &UserId) -> Result> { - if let Some((_, presence)) = self.db.get_presence(user_id)? { - Ok(Some(presence)) - } else { - Ok(None) - } + pub async fn get_presence(&self, user_id: &UserId) -> Result { + self.db + .get_presence(user_id) + .map_ok(|(_, presence)| presence) + .await } /// Pings the presence of the given user in the given room, setting the /// specified state. 
- pub fn ping_presence(&self, user_id: &UserId, new_state: &PresenceState) -> Result<()> { + pub async fn ping_presence(&self, user_id: &UserId, new_state: &PresenceState) -> Result<()> { const REFRESH_TIMEOUT: u64 = 60 * 25 * 1000; - let last_presence = self.db.get_presence(user_id)?; + let last_presence = self.db.get_presence(user_id).await; let state_changed = match last_presence { - None => true, - Some((_, ref presence)) => presence.content.presence != *new_state, + Err(_) => true, + Ok((_, ref presence)) => presence.content.presence != *new_state, }; let last_last_active_ago = match last_presence { - None => 0_u64, - Some((_, ref presence)) => presence.content.last_active_ago.unwrap_or_default().into(), + Err(_) => 0_u64, + Ok((_, ref presence)) => presence.content.last_active_ago.unwrap_or_default().into(), }; if !state_changed && last_last_active_ago < REFRESH_TIMEOUT { @@ -111,17 +112,18 @@ impl Service { } let status_msg = match last_presence { - Some((_, ref presence)) => presence.content.status_msg.clone(), - None => Some(String::new()), + Ok((_, ref presence)) => presence.content.status_msg.clone(), + Err(_) => Some(String::new()), }; let last_active_ago = UInt::new(0); let currently_active = *new_state == PresenceState::Online; self.set_presence(user_id, new_state, Some(currently_active), last_active_ago, status_msg) + .await } /// Adds a presence event which will be saved until a new event replaces it. 
- pub fn set_presence( + pub async fn set_presence( &self, user_id: &UserId, state: &PresenceState, currently_active: Option, last_active_ago: Option, status_msg: Option, ) -> Result<()> { @@ -131,7 +133,8 @@ impl Service { }; self.db - .set_presence(user_id, presence_state, currently_active, last_active_ago, status_msg)?; + .set_presence(user_id, presence_state, currently_active, last_active_ago, status_msg) + .await?; if self.timeout_remote_users || self.services.globals.user_is_local(user_id) { let timeout = match presence_state { @@ -154,28 +157,33 @@ impl Service { /// /// TODO: Why is this not used? #[allow(dead_code)] - pub fn remove_presence(&self, user_id: &UserId) -> Result<()> { self.db.remove_presence(user_id) } + pub async fn remove_presence(&self, user_id: &UserId) { self.db.remove_presence(user_id).await } /// Returns the most recent presence updates that happened after the event /// with id `since`. #[inline] - pub fn presence_since(&self, since: u64) -> Box)> + '_> { + pub fn presence_since(&self, since: u64) -> impl Stream)> + Send + '_ { self.db.presence_since(since) } - pub fn from_json_bytes_to_event(&self, bytes: &[u8], user_id: &UserId) -> Result { + #[inline] + pub async fn from_json_bytes_to_event(&self, bytes: &[u8], user_id: &UserId) -> Result { let presence = Presence::from_json_bytes(bytes)?; - presence.to_presence_event(user_id, &self.services.users) + let event = presence + .to_presence_event(user_id, &self.services.users) + .await; + + Ok(event) } - fn process_presence_timer(&self, user_id: &OwnedUserId) -> Result<()> { + async fn process_presence_timer(&self, user_id: &OwnedUserId) -> Result<()> { let mut presence_state = PresenceState::Offline; let mut last_active_ago = None; let mut status_msg = None; - let presence_event = self.get_presence(user_id)?; + let presence_event = self.get_presence(user_id).await; - if let Some(presence_event) = presence_event { + if let Ok(presence_event) = presence_event { presence_state = 
presence_event.content.presence; last_active_ago = presence_event.content.last_active_ago; status_msg = presence_event.content.status_msg; @@ -192,7 +200,8 @@ impl Service { ); if let Some(new_state) = new_state { - self.set_presence(user_id, &new_state, Some(false), last_active_ago, status_msg)?; + self.set_presence(user_id, &new_state, Some(false), last_active_ago, status_msg) + .await?; } Ok(()) diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index 570008f2..0d5c226b 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use conduit::{utils, Error, Result}; use ruma::{ events::presence::{PresenceEvent, PresenceEventContent}, @@ -42,7 +40,7 @@ impl Presence { } /// Creates a PresenceEvent from available data. - pub(super) fn to_presence_event(&self, user_id: &UserId, users: &Arc) -> Result { + pub(super) async fn to_presence_event(&self, user_id: &UserId, users: &users::Service) -> PresenceEvent { let now = utils::millis_since_unix_epoch(); let last_active_ago = if self.currently_active { None @@ -50,16 +48,16 @@ impl Presence { Some(UInt::new_saturating(now.saturating_sub(self.last_active_ts))) }; - Ok(PresenceEvent { + PresenceEvent { sender: user_id.to_owned(), content: PresenceEventContent { presence: self.state.clone(), status_msg: self.status_msg.clone(), currently_active: Some(self.currently_active), last_active_ago, - displayname: users.displayname(user_id)?, - avatar_url: users.avatar_url(user_id)?, + displayname: users.displayname(user_id).await.ok(), + avatar_url: users.avatar_url(user_id).await.ok(), }, - }) + } } } diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs deleted file mode 100644 index f9734334..00000000 --- a/src/service/pusher/data.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::sync::Arc; - -use conduit::{utils, Error, Result}; -use database::{Database, Map}; -use ruma::{ - api::client::push::{set_pusher, Pusher}, - UserId, 
-}; - -pub(super) struct Data { - senderkey_pusher: Arc, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - senderkey_pusher: db["senderkey_pusher"].clone(), - } - } - - pub(super) fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) -> Result<()> { - match pusher { - set_pusher::v3::PusherAction::Post(data) => { - let mut key = sender.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(data.pusher.ids.pushkey.as_bytes()); - self.senderkey_pusher - .insert(&key, &serde_json::to_vec(pusher).expect("Pusher is valid JSON value"))?; - Ok(()) - }, - set_pusher::v3::PusherAction::Delete(ids) => { - let mut key = sender.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(ids.pushkey.as_bytes()); - self.senderkey_pusher.remove(&key).map_err(Into::into) - }, - } - } - - pub(super) fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { - let mut senderkey = sender.as_bytes().to_vec(); - senderkey.push(0xFF); - senderkey.extend_from_slice(pushkey.as_bytes()); - - self.senderkey_pusher - .get(&senderkey)? 
- .map(|push| serde_json::from_slice(&push).map_err(|_| Error::bad_database("Invalid Pusher in db."))) - .transpose() - } - - pub(super) fn get_pushers(&self, sender: &UserId) -> Result> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xFF); - - self.senderkey_pusher - .scan_prefix(prefix) - .map(|(_, push)| serde_json::from_slice(&push).map_err(|_| Error::bad_database("Invalid Pusher in db."))) - .collect() - } - - pub(super) fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xFF); - - Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { - let mut parts = k.splitn(2, |&b| b == 0xFF); - let _senderkey = parts.next(); - let push_key = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; - let push_key_string = utils::string_from_bytes(push_key) - .map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; - - Ok(push_key_string) - })) - } -} diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index de87264c..44ff1945 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,9 +1,13 @@ -mod data; - use std::{fmt::Debug, mem, sync::Arc}; use bytes::BytesMut; -use conduit::{debug_error, err, trace, utils::string_from_bytes, warn, Err, PduEvent, Result}; +use conduit::{ + debug_error, err, trace, + utils::{stream::TryIgnore, string_from_bytes}, + Err, PduEvent, Result, +}; +use database::{Deserialized, Ignore, Interfix, Map}; +use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ api::{ @@ -22,12 +26,11 @@ use ruma::{ uint, RoomId, UInt, UserId, }; -use self::data::Data; use crate::{client, globals, rooms, users, Dep}; pub struct Service { - services: Services, db: Data, + services: Services, } struct Services { @@ -38,9 +41,16 @@ struct Services { users: Dep, } +struct Data { + senderkey_pusher: Arc, +} + impl crate::Service for Service { fn build(args: 
crate::Args<'_>) -> Result> { Ok(Arc::new(Self { + db: Data { + senderkey_pusher: args.db["senderkey_pusher"].clone(), + }, services: Services { globals: args.depend::("globals"), client: args.depend::("client"), @@ -48,7 +58,6 @@ impl crate::Service for Service { state_cache: args.depend::("rooms::state_cache"), users: args.depend::("users"), }, - db: Data::new(args.db), })) } @@ -56,19 +65,52 @@ impl crate::Service for Service { } impl Service { - pub fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) -> Result<()> { - self.db.set_pusher(sender, pusher) + pub fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) { + match pusher { + set_pusher::v3::PusherAction::Post(data) => { + let mut key = sender.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(data.pusher.ids.pushkey.as_bytes()); + self.db + .senderkey_pusher + .insert(&key, &serde_json::to_vec(pusher).expect("Pusher is valid JSON value")); + }, + set_pusher::v3::PusherAction::Delete(ids) => { + let mut key = sender.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(ids.pushkey.as_bytes()); + self.db.senderkey_pusher.remove(&key); + }, + } } - pub fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { - self.db.get_pusher(sender, pushkey) + pub async fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result { + let senderkey = (sender, pushkey); + self.db + .senderkey_pusher + .qry(&senderkey) + .await + .deserialized_json() } - pub fn get_pushers(&self, sender: &UserId) -> Result> { self.db.get_pushers(sender) } + pub async fn get_pushers(&self, sender: &UserId) -> Vec { + let prefix = (sender, Interfix); + self.db + .senderkey_pusher + .stream_prefix(&prefix) + .ignore_err() + .map(|(_, val): (Ignore, &[u8])| serde_json::from_slice(val).expect("Invalid Pusher in db.")) + .collect() + .await + } - #[must_use] - pub fn get_pushkeys(&self, sender: &UserId) -> Box> + '_> { - self.db.get_pushkeys(sender) + pub fn 
get_pushkeys<'a>(&'a self, sender: &'a UserId) -> impl Stream + Send + 'a { + let prefix = (sender, Interfix); + self.db + .senderkey_pusher + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, pushkey): (Ignore, &str)| pushkey) } #[tracing::instrument(skip(self, dest, request))] @@ -161,15 +203,18 @@ impl Service { let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "") + .await + .and_then(|ev| { serde_json::from_str(ev.content.get()) - .map_err(|e| err!(Database("invalid m.room.power_levels event: {e:?}"))) + .map_err(|e| err!(Database(error!("invalid m.room.power_levels event: {e:?}")))) }) - .transpose()? .unwrap_or_default(); - for action in self.get_actions(user, &ruleset, &power_levels, &pdu.to_sync_room_event(), &pdu.room_id)? { + for action in self + .get_actions(user, &ruleset, &power_levels, &pdu.to_sync_room_event(), &pdu.room_id) + .await? + { let n = match action { Action::Notify => true, Action::SetTweak(tweak) => { @@ -197,7 +242,7 @@ impl Service { } #[tracing::instrument(skip(self, user, ruleset, pdu), level = "debug")] - pub fn get_actions<'a>( + pub async fn get_actions<'a>( &self, user: &UserId, ruleset: &'a Ruleset, power_levels: &RoomPowerLevelsEventContent, pdu: &Raw, room_id: &RoomId, ) -> Result<&'a [Action]> { @@ -207,21 +252,27 @@ impl Service { notifications: power_levels.notifications.clone(), }; + let room_joined_count = self + .services + .state_cache + .room_joined_count(room_id) + .await + .unwrap_or(1) + .try_into() + .unwrap_or_else(|_| uint!(0)); + + let user_display_name = self + .services + .users + .displayname(user) + .await + .unwrap_or_else(|_| user.localpart().to_owned()); + let ctx = PushConditionRoomCtx { room_id: room_id.to_owned(), - member_count: UInt::try_from( - self.services - .state_cache - .room_joined_count(room_id)? 
- .unwrap_or(1), - ) - .unwrap_or_else(|_| uint!(0)), + member_count: room_joined_count, user_id: user.to_owned(), - user_display_name: self - .services - .users - .displayname(user)? - .unwrap_or_else(|| user.localpart().to_owned()), + user_display_name, power_levels: Some(power_levels), }; @@ -278,9 +329,14 @@ impl Service { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - notifi.sender_display_name = self.services.users.displayname(&event.sender)?; + notifi.sender_display_name = self.services.users.displayname(&event.sender).await.ok(); - notifi.room_name = self.services.state_accessor.get_name(&event.room_id)?; + notifi.room_name = self + .services + .state_accessor + .get_name(&event.room_id) + .await + .ok(); self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) .await?; diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 07d9a0fa..ea4b1100 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -193,7 +193,7 @@ impl super::Service { .send() .await; - trace!("response: {:?}", response); + trace!("response: {response:?}"); if let Err(e) = &response { debug!("error: {e:?}"); return Ok(None); @@ -206,7 +206,7 @@ impl super::Service { } let text = response.text().await?; - trace!("response text: {:?}", text); + trace!("response text: {text:?}"); if text.len() >= 12288 { debug_warn!("response contains junk"); return Ok(None); @@ -225,7 +225,7 @@ impl super::Service { return Ok(None); } - debug_info!("{:?} found at {:?}", dest, m_server); + debug_info!("{dest:?} found at {m_server:?}"); Ok(Some(m_server.to_owned())) } diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs deleted file mode 100644 index efd2b5b7..00000000 --- a/src/service/rooms/alias/data.rs +++ /dev/null @@ -1,125 +0,0 @@ -use std::sync::Arc; - -use conduit::{utils, Error, Result}; -use database::Map; -use ruma::{api::client::error::ErrorKind, 
OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId}; - -use crate::{globals, Dep}; - -pub(super) struct Data { - alias_userid: Arc, - alias_roomid: Arc, - aliasid_alias: Arc, - services: Services, -} - -struct Services { - globals: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - alias_userid: db["alias_userid"].clone(), - alias_roomid: db["alias_roomid"].clone(), - aliasid_alias: db["aliasid_alias"].clone(), - services: Services { - globals: args.depend::("globals"), - }, - } - } - - pub(super) fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { - // Comes first as we don't want a stuck alias - self.alias_userid - .insert(alias.alias().as_bytes(), user_id.as_bytes())?; - - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xFF); - aliasid.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, alias.as_bytes())?; - - Ok(()) - } - - pub(super) fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xFF); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - - self.alias_roomid.remove(alias.alias().as_bytes())?; - - self.alias_userid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest(ErrorKind::NotFound, "Alias does not exist or is invalid.")); - } - - Ok(()) - } - - pub(super) fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { - self.alias_roomid - .get(alias.alias().as_bytes())? 
- .map(|bytes| { - RoomId::parse( - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - pub(super) fn who_created_alias(&self, alias: &RoomAliasId) -> Result> { - self.alias_userid - .get(alias.alias().as_bytes())? - .map(|bytes| { - UserId::parse( - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("User ID in alias_userid is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("User ID in alias_roomid is invalid.")) - }) - .transpose() - } - - pub(super) fn local_aliases_for_room<'a>( - &'a self, room_id: &RoomId, - ) -> Box> + 'a + Send> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - - Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - })) - } - - pub(super) fn all_local_aliases<'a>(&'a self) -> Box> + 'a> { - Box::new( - self.alias_roomid - .iter() - .map(|(room_alias_bytes, room_id_bytes)| { - let room_alias_localpart = utils::string_from_bytes(&room_alias_bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))?; - - let room_id = utils::string_from_bytes(&room_id_bytes) - .map_err(|_| Error::bad_database("Invalid room_id bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid room_id in aliasid_alias."))?; - - Ok((room_id, room_alias_localpart)) - }), - ) - } -} diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index f2e01ab5..6b81a221 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,19 +1,23 @@ -mod data; mod remote; use std::sync::Arc; -use conduit::{err, Error, Result}; +use conduit::{ + err, + utils::{stream::TryIgnore, ReadyExt}, + Err, Error, Result, +}; +use database::{Deserialized, Ignore, Interfix, Map}; +use futures::{Stream, StreamExt}; use ruma::{ api::client::error::ErrorKind, events::{ room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, StateEventType, }, - OwnedRoomAliasId, OwnedRoomId, OwnedServerName, RoomAliasId, RoomId, RoomOrAliasId, UserId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, UserId, }; -use self::data::Data; use crate::{admin, appservice, appservice::RegistrationInfo, globals, rooms, sending, Dep}; pub struct Service { @@ -21,6 +25,12 @@ pub struct Service { services: Services, } +struct Data { + alias_userid: Arc, + alias_roomid: Arc, + aliasid_alias: Arc, +} + struct Services { admin: Dep, appservice: Dep, @@ -32,7 +42,11 @@ struct Services { impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(&args), + db: Data { + alias_userid: args.db["alias_userid"].clone(), + alias_roomid: args.db["alias_roomid"].clone(), + aliasid_alias: args.db["aliasid_alias"].clone(), + }, services: Services { admin: args.depend::("admin"), appservice: args.depend::("appservice"), @@ -50,25 +64,52 @@ impl Service { #[tracing::instrument(skip(self))] pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { if alias == self.services.globals.admin_alias && user_id != self.services.globals.server_user { - Err(Error::BadRequest( + return Err(Error::BadRequest( 
ErrorKind::forbidden(), "Only the server user can set this alias", - )) - } else { - self.db.set_alias(alias, room_id, user_id) + )); } + + // Comes first as we don't want a stuck alias + self.db + .alias_userid + .insert(alias.alias().as_bytes(), user_id.as_bytes()); + + self.db + .alias_roomid + .insert(alias.alias().as_bytes(), room_id.as_bytes()); + + let mut aliasid = room_id.as_bytes().to_vec(); + aliasid.push(0xFF); + aliasid.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes()); + self.db.aliasid_alias.insert(&aliasid, alias.as_bytes()); + + Ok(()) } #[tracing::instrument(skip(self))] pub async fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> { - if self.user_can_remove_alias(alias, user_id).await? { - self.db.remove_alias(alias) - } else { - Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not permitted to remove this alias.", - )) + if !self.user_can_remove_alias(alias, user_id).await? { + return Err!(Request(Forbidden("User is not permitted to remove this alias."))); } + + let alias = alias.alias(); + let Ok(room_id) = self.db.alias_roomid.qry(&alias).await else { + return Err!(Request(NotFound("Alias does not exist or is invalid."))); + }; + + let prefix = (&room_id, Interfix); + self.db + .aliasid_alias + .keys_prefix(&prefix) + .ignore_err() + .ready_for_each(|key: &[u8]| self.db.aliasid_alias.remove(&key)) + .await; + + self.db.alias_roomid.remove(alias.as_bytes()); + self.db.alias_userid.remove(alias.as_bytes()); + + Ok(()) } pub async fn resolve(&self, room: &RoomOrAliasId) -> Result { @@ -97,9 +138,9 @@ impl Service { return self.remote_resolve(room_alias, servers).await; } - let room_id: Option = match self.resolve_local_alias(room_alias)? 
{ - Some(r) => Some(r), - None => self.resolve_appservice_alias(room_alias).await?, + let room_id: Option = match self.resolve_local_alias(room_alias).await { + Ok(r) => Some(r), + Err(_) => self.resolve_appservice_alias(room_alias).await?, }; room_id.map_or_else( @@ -109,46 +150,54 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { - self.db.resolve_local_alias(alias) + pub async fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result { + self.db.alias_roomid.qry(alias.alias()).await.deserialized() } #[tracing::instrument(skip(self), level = "debug")] - pub fn local_aliases_for_room<'a>( - &'a self, room_id: &RoomId, - ) -> Box> + 'a + Send> { - self.db.local_aliases_for_room(room_id) + pub fn local_aliases_for_room<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .aliasid_alias + .stream_prefix(&prefix) + .ignore_err() + .map(|((Ignore, Ignore), alias): ((Ignore, Ignore), &RoomAliasId)| alias) } #[tracing::instrument(skip(self), level = "debug")] - pub fn all_local_aliases<'a>(&'a self) -> Box> + 'a> { - self.db.all_local_aliases() + pub fn all_local_aliases<'a>(&'a self) -> impl Stream + Send + 'a { + self.db + .alias_roomid + .stream() + .ignore_err() + .map(|(alias_localpart, room_id): (&str, &RoomId)| (room_id, alias_localpart)) } async fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result { - let Some(room_id) = self.resolve_local_alias(alias)? else { - return Err(Error::BadRequest(ErrorKind::NotFound, "Alias not found.")); - }; + let room_id = self + .resolve_local_alias(alias) + .await + .map_err(|_| err!(Request(NotFound("Alias not found."))))?; let server_user = &self.services.globals.server_user; // The creator of an alias can remove it if self - .db - .who_created_alias(alias)? 
- .is_some_and(|user| user == user_id) + .who_created_alias(alias).await + .is_ok_and(|user| user == user_id) // Server admins can remove any local alias - || self.services.admin.user_is_admin(user_id).await? + || self.services.admin.user_is_admin(user_id).await // Always allow the server service account to remove the alias, since there may not be an admin room || server_user == user_id { Ok(true) // Checking whether the user is able to change canonical aliases of the // room - } else if let Some(event) = - self.services - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")? + } else if let Ok(event) = self + .services + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "") + .await { serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) @@ -157,10 +206,11 @@ impl Service { }) // If there is no power levels event, only the room creator can change // canonical aliases - } else if let Some(event) = - self.services - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "")? + } else if let Ok(event) = self + .services + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCreate, "") + .await { Ok(event.sender == user_id) } else { @@ -168,6 +218,10 @@ impl Service { } } + async fn who_created_alias(&self, alias: &RoomAliasId) -> Result { + self.db.alias_userid.qry(alias.alias()).await.deserialized() + } + async fn resolve_appservice_alias(&self, room_alias: &RoomAliasId) -> Result> { use ruma::api::appservice::query::query_room_alias; @@ -185,10 +239,11 @@ impl Service { .await, Ok(Some(_opt_result)) ) { - return Ok(Some( - self.resolve_local_alias(room_alias)? 
- .ok_or_else(|| err!(Request(NotFound("Room does not exist."))))?, - )); + return self + .resolve_local_alias(room_alias) + .await + .map_err(|_| err!(Request(NotFound("Room does not exist.")))) + .map(Some); } } diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 6e7c7835..3d00374e 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -24,7 +24,7 @@ impl Data { } } - pub(super) fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>> { + pub(super) async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>> { // Check RAM cache if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { return Ok(Some(Arc::clone(result))); @@ -33,17 +33,14 @@ impl Data { // We only save auth chains for single events in the db if key.len() == 1 { // Check DB cache - let chain = self - .shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(utils::u64_from_u8) - .collect::>() - }); + let chain = self.shorteventid_authchain.qry(&key[0]).await.map(|chain| { + chain + .chunks_exact(size_of::()) + .map(utils::u64_from_u8) + .collect::>() + }); - if let Some(chain) = chain { + if let Ok(chain) = chain { // Cache in RAM self.auth_chain_cache .lock() @@ -66,7 +63,7 @@ impl Data { .iter() .flat_map(|s| s.to_be_bytes().to_vec()) .collect::>(), - )?; + ); } // Cache in RAM diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index d0bc425f..7bc239d7 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -5,7 +5,8 @@ use std::{ sync::Arc, }; -use conduit::{debug, error, trace, validated, warn, Err, Result}; +use conduit::{debug, debug_error, trace, utils::IterStream, validated, warn, Err, Result}; +use futures::{FutureExt, Stream, StreamExt}; use ruma::{EventId, RoomId}; use self::data::Data; @@ -38,7 +39,7 @@ impl crate::Service for Service { 
impl Service { pub async fn event_ids_iter<'a>( &'a self, room_id: &RoomId, starting_events_: Vec>, - ) -> Result> + 'a> { + ) -> Result> + Send + 'a> { let mut starting_events: Vec<&EventId> = Vec::with_capacity(starting_events_.len()); for starting_event in &starting_events_ { starting_events.push(starting_event); @@ -48,7 +49,13 @@ impl Service { .get_auth_chain(room_id, &starting_events) .await? .into_iter() - .filter_map(move |sid| self.services.short.get_eventid_from_short(sid).ok())) + .stream() + .filter_map(|sid| { + self.services + .short + .get_eventid_from_short(sid) + .map(Result::ok) + })) } #[tracing::instrument(skip_all, name = "auth_chain")] @@ -61,7 +68,8 @@ impl Service { for (i, &short) in self .services .short - .multi_get_or_create_shorteventid(starting_events)? + .multi_get_or_create_shorteventid(starting_events) + .await .iter() .enumerate() { @@ -85,7 +93,7 @@ impl Service { } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = self.get_cached_eventid_authchain(&chunk_key)? { + if let Some(cached) = self.get_cached_eventid_authchain(&chunk_key).await? { trace!("Found cache entry for whole chunk"); full_auth_chain.extend(cached.iter().copied()); hits = hits.saturating_add(1); @@ -96,12 +104,12 @@ impl Service { let mut misses2: usize = 0; let mut chunk_cache = Vec::with_capacity(chunk.len()); for (sevent_id, event_id) in chunk { - if let Some(cached) = self.get_cached_eventid_authchain(&[sevent_id])? { + if let Some(cached) = self.get_cached_eventid_authchain(&[sevent_id]).await? 
{ trace!(?event_id, "Found cache entry for event"); chunk_cache.extend(cached.iter().copied()); hits2 = hits2.saturating_add(1); } else { - let auth_chain = self.get_auth_chain_inner(room_id, event_id)?; + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; self.cache_auth_chain(vec![sevent_id], &auth_chain)?; chunk_cache.extend(auth_chain.iter()); misses2 = misses2.saturating_add(1); @@ -143,15 +151,16 @@ impl Service { } #[tracing::instrument(skip(self, room_id))] - fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + async fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { trace!(?event_id, "processing auth event"); - match self.services.timeline.get_pdu(&event_id) { - Ok(Some(pdu)) => { + match self.services.timeline.get_pdu(&event_id).await { + Err(e) => debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"), + Ok(pdu) => { if pdu.room_id != room_id { return Err!(Request(Forbidden( "auth event {event_id:?} for incorrect room {} which is not {}", @@ -160,7 +169,11 @@ impl Service { ))); } for auth_event in &pdu.auth_events { - let sauthevent = self.services.short.get_or_create_shorteventid(auth_event)?; + let sauthevent = self + .services + .short + .get_or_create_shorteventid(auth_event) + .await; if found.insert(sauthevent) { trace!(?event_id, ?auth_event, "adding auth event to processing queue"); @@ -168,20 +181,14 @@ impl Service { } } }, - Ok(None) => { - warn!(?event_id, "Could not find pdu mentioned in auth events"); - }, - Err(error) => { - error!(?event_id, ?error, "Could not load event in auth chain"); - }, } } Ok(found) } - pub fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>> { - self.db.get_cached_eventid_authchain(key) + pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>> { + 
self.db.get_cached_eventid_authchain(key).await } #[tracing::instrument(skip(self), level = "debug")] diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs deleted file mode 100644 index 713ee057..00000000 --- a/src/service/rooms/directory/data.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::sync::Arc; - -use conduit::{utils, Error, Result}; -use database::{Database, Map}; -use ruma::{OwnedRoomId, RoomId}; - -pub(super) struct Data { - publicroomids: Arc, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - publicroomids: db["publicroomids"].clone(), - } - } - - pub(super) fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[]) - } - - pub(super) fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes()) - } - - pub(super) fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - pub(super) fn public_rooms<'a>(&'a self) -> Box> + 'a> { - Box::new(self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - })) - } -} diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 706e6c2e..3585205d 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,36 +1,44 @@ -mod data; - use std::sync::Arc; -use conduit::Result; -use ruma::{OwnedRoomId, RoomId}; - -use self::data::Data; +use conduit::{implement, utils::stream::TryIgnore, Result}; +use database::{Ignore, Map}; +use futures::{Stream, StreamExt}; +use ruma::RoomId; pub struct Service { db: Data, } +struct Data { + publicroomids: Arc, +} + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: 
Data::new(args.db), + db: Data { + publicroomids: args.db["publicroomids"].clone(), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - #[tracing::instrument(skip(self), level = "debug")] - pub fn set_public(&self, room_id: &RoomId) -> Result<()> { self.db.set_public(room_id) } +#[implement(Service)] +pub fn set_public(&self, room_id: &RoomId) { self.db.publicroomids.insert(room_id.as_bytes(), &[]); } - #[tracing::instrument(skip(self), level = "debug")] - pub fn set_not_public(&self, room_id: &RoomId) -> Result<()> { self.db.set_not_public(room_id) } +#[implement(Service)] +pub fn set_not_public(&self, room_id: &RoomId) { self.db.publicroomids.remove(room_id.as_bytes()); } - #[tracing::instrument(skip(self), level = "debug")] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { self.db.is_public_room(room_id) } +#[implement(Service)] +pub async fn is_public_room(&self, room_id: &RoomId) -> bool { self.db.publicroomids.qry(room_id).await.is_ok() } - #[tracing::instrument(skip(self), level = "debug")] - pub fn public_rooms(&self) -> impl Iterator> + '_ { self.db.public_rooms() } +#[implement(Service)] +pub fn public_rooms(&self) -> impl Stream + Send { + self.db + .publicroomids + .keys() + .ignore_err() + .map(|(room_id, _): (&RoomId, Ignore)| room_id) } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index bee986de..07d6e4db 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -3,17 +3,18 @@ mod parse_incoming_pdu; use std::{ collections::{hash_map, BTreeMap, HashMap, HashSet}, fmt::Write, - pin::Pin, sync::{Arc, RwLock as StdRwLock}, time::Instant, }; use conduit::{ - debug, debug_error, debug_info, err, error, info, pdu, trace, - utils::{math::continue_exponential_backoff_secs, MutexMap}, - warn, Error, PduEvent, Result, + debug, debug_error, debug_info, debug_warn, err, info, pdu, + result::LogErr, + trace, + 
utils::{math::continue_exponential_backoff_secs, IterStream, MutexMap}, + warn, Err, Error, PduEvent, Result, }; -use futures_util::Future; +use futures::{future, future::ready, FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::{ client::error::ErrorKind, @@ -27,7 +28,7 @@ use ruma::{ }, int, serde::Base64, - state_res::{self, RoomVersion, StateMap}, + state_res::{self, EventTypeExt, RoomVersion, StateMap}, uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, ServerName, }; @@ -60,14 +61,6 @@ struct Services { type RoomMutexMap = MutexMap; type HandleTimeMap = HashMap; -// We use some AsyncRecursiveType hacks here so we can call async funtion -// recursively. -type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; -type AsyncRecursiveCanonicalJsonVec<'a> = - AsyncRecursiveType<'a, Vec<(Arc, Option>)>>; -type AsyncRecursiveCanonicalJsonResult<'a> = - AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>>; - impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -142,17 +135,17 @@ impl Service { pub_key_map: &'a RwLock>>, ) -> Result>> { // 1. Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = self.services.timeline.get_pdu_id(event_id)? { + if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await { return Ok(Some(pdu_id.to_vec())); } // 1.1 Check the server is in the room - if !self.services.metadata.exists(room_id)? { + if !self.services.metadata.exists(room_id).await { return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); } // 1.2 Check if the room is disabled - if self.services.metadata.is_disabled(room_id)? 
{ + if self.services.metadata.is_disabled(room_id).await { return Err(Error::BadRequest( ErrorKind::forbidden(), "Federation of this room is currently disabled on this server.", @@ -160,7 +153,7 @@ impl Service { } // 1.3.1 Check room ACL on origin field/server - self.acl_check(origin, room_id)?; + self.acl_check(origin, room_id).await?; // 1.3.2 Check room ACL on sender's server name let sender: OwnedUserId = serde_json::from_value( @@ -172,26 +165,23 @@ impl Service { ) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "User ID in sender is invalid"))?; - self.acl_check(sender.server_name(), room_id)?; + self.acl_check(sender.server_name(), room_id).await?; // Fetch create event let create_event = self .services .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await?; // Procure the room version let room_version_id = Self::get_room_version_id(&create_event)?; - let first_pdu_in_room = self - .services - .timeline - .first_pdu_in_room(room_id)? - .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; let (incoming_pdu, val) = self .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false, pub_key_map) + .boxed() .await?; Self::check_room_id(room_id, &incoming_pdu)?; @@ -235,7 +225,7 @@ impl Service { { Ok(()) => continue, Err(e) => { - warn!("Prev event {} failed: {}", prev_id, e); + warn!("Prev event {prev_id} failed: {e}"); match self .services .globals @@ -287,7 +277,7 @@ impl Service { create_event: &Arc, first_pdu_in_room: &Arc, prev_id: &EventId, ) -> Result<()> { // Check for disabled again because it might have changed - if self.services.metadata.is_disabled(room_id)? 
{ + if self.services.metadata.is_disabled(room_id).await { debug!( "Federaton of room {room_id} is currently disabled on this server. Request by origin {origin} and \ event ID {event_id}" @@ -349,149 +339,153 @@ impl Service { } #[allow(clippy::too_many_arguments)] - fn handle_outlier_pdu<'a>( - &'a self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, + async fn handle_outlier_pdu<'a>( + &self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, mut value: BTreeMap, auth_events_known: bool, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveCanonicalJsonResult<'a> { - Box::pin(async move { - // 1. Remove unsigned field - value.remove("unsigned"); + ) -> Result<(Arc, BTreeMap)> { + // 1. Remove unsigned field + value.remove("unsigned"); - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - // 2. Check signatures, otherwise drop - // 3. check content hash, redact if doesn't match - let room_version_id = Self::get_room_version_id(create_event)?; + // 2. Check signatures, otherwise drop + // 3. 
check content hash, redact if doesn't match + let room_version_id = Self::get_room_version_id(create_event)?; - let guard = pub_key_map.read().await; - let mut val = match ruma::signatures::verify_event(&guard, &value, &room_version_id) { - Err(e) => { - // Drop - warn!("Dropping bad event {}: {}", event_id, e,); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Signature verification failed")); - }, - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - debug_info!("Calculated hash does not match (redaction): {event_id}"); - let Ok(obj) = ruma::canonical_json::redact(value, &room_version_id, None) else { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Redaction failed")); - }; + let guard = pub_key_map.read().await; + let mut val = match ruma::signatures::verify_event(&guard, &value, &room_version_id) { + Err(e) => { + // Drop + warn!("Dropping bad event {event_id}: {e}"); + return Err!(Request(InvalidParam("Signature verification failed"))); + }, + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + debug_info!("Calculated hash does not match (redaction): {event_id}"); + let Ok(obj) = ruma::canonical_json::redact(value, &room_version_id, None) else { + return Err!(Request(InvalidParam("Redaction failed"))); + }; - // Skip the PDU if it is redacted and we already have it as an outlier event - if self.services.timeline.get_pdu_json(event_id)?.is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Event was redacted and we already knew about it", - )); - } + // Skip the PDU if it is redacted and we already have it as an outlier event + if self.services.timeline.get_pdu_json(event_id).await.is_ok() { + return Err!(Request(InvalidParam("Event was redacted and we already knew about it"))); + } - obj - }, - Ok(ruma::signatures::Verified::All) => value, - }; + obj + }, + Ok(ruma::signatures::Verified::All) => value, + }; - drop(guard); + drop(guard); - // Now that we have checked the signature and hashes we can add 
the eventID and - // convert to our PduEvent type - val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; + // Now that we have checked the signature and hashes we can add the eventID and + // convert to our PduEvent type + val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + let incoming_pdu = serde_json::from_value::( + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; - Self::check_room_id(room_id, &incoming_pdu)?; + Self::check_room_id(room_id, &incoming_pdu)?; - if !auth_events_known { - // 4. fetch any missing auth events doing all checks listed here starting at 1. - // These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of - // the auth events are also rejected "due to auth events" - // NOTE: Step 5 is not applied anymore because it failed too often - debug!("Fetching auth events"); + if !auth_events_known { + // 4. fetch any missing auth events doing all checks listed here starting at 1. + // These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of + // the auth events are also rejected "due to auth events" + // NOTE: Step 5 is not applied anymore because it failed too often + debug!("Fetching auth events"); + Box::pin( self.fetch_and_handle_outliers( origin, &incoming_pdu .auth_events .iter() .map(|x| Arc::from(&**x)) - .collect::>(), + .collect::>>(), create_event, room_id, &room_version_id, pub_key_map, - ) - .await; - } - - // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the - // auth events - debug!("Checking based on auth events"); - // Build map of auth events - let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); - for id in &incoming_pdu.auth_events { - let Some(auth_event) = self.services.timeline.get_pdu(id)? else { - warn!("Could not find auth event {}", id); - continue; - }; - - Self::check_room_id(room_id, &auth_event)?; - - match auth_events.entry(( - auth_event.kind.to_string().into(), - auth_event - .state_key - .clone() - .expect("all auth events have state keys"), - )) { - hash_map::Entry::Vacant(v) => { - v.insert(auth_event); - }, - hash_map::Entry::Occupied(_) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Auth event's type and state_key combination exists multiple times.", - )); - }, - } - } - - // The original create event must be in the auth events - if !matches!( - auth_events - .get(&(StateEventType::RoomCreate, String::new())) - .map(AsRef::as_ref), - Some(_) | None - ) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Incoming event refers to wrong create event.", - )); - } - - if !state_res::event_auth::auth_check( - &Self::to_room_version(&room_version_id), - &incoming_pdu, - None::, // TODO: third party invite - |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), + ), ) - .map_err(|_e| Error::BadRequest(ErrorKind::forbidden(), "Auth check failed"))? - { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Auth check failed")); + .await; + } + + // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the + // auth events + debug!("Checking based on auth events"); + // Build map of auth events + let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); + for id in &incoming_pdu.auth_events { + let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { + warn!("Could not find auth event {id}"); + continue; + }; + + Self::check_room_id(room_id, &auth_event)?; + + match auth_events.entry(( + auth_event.kind.to_string().into(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + hash_map::Entry::Vacant(v) => { + v.insert(auth_event); + }, + hash_map::Entry::Occupied(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth event's type and state_key combination exists multiple times.", + )); + }, } + } - trace!("Validation successful."); + // The original create event must be in the auth events + if !matches!( + auth_events + .get(&(StateEventType::RoomCreate, String::new())) + .map(AsRef::as_ref), + Some(_) | None + ) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Incoming event refers to wrong create event.", + )); + } - // 7. Persist the event as an outlier. - self.services - .outlier - .add_pdu_outlier(&incoming_pdu.event_id, &val)?; + let state_fetch = |ty: &'static StateEventType, sk: &str| { + let key = ty.with_state_key(sk); + ready(auth_events.get(&key)) + }; - trace!("Added pdu as outlier."); + let auth_check = state_res::event_auth::auth_check( + &Self::to_room_version(&room_version_id), + &incoming_pdu, + None, // TODO: third party invite + state_fetch, + ) + .await + .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; - Ok((Arc::new(incoming_pdu), val)) - }) + if !auth_check { + return Err!(Request(Forbidden("Auth check failed"))); + } + + trace!("Validation successful."); + + // 7. Persist the event as an outlier. 
+ self.services + .outlier + .add_pdu_outlier(&incoming_pdu.event_id, &val); + + trace!("Added pdu as outlier."); + + Ok((Arc::new(incoming_pdu), val)) } pub async fn upgrade_outlier_to_timeline_pdu( @@ -499,16 +493,22 @@ impl Service { origin: &ServerName, room_id: &RoomId, pub_key_map: &RwLock>>, ) -> Result>> { // Skip the PDU if we already have it as a timeline event - if let Ok(Some(pduid)) = self.services.timeline.get_pdu_id(&incoming_pdu.event_id) { + if let Ok(pduid) = self + .services + .timeline + .get_pdu_id(&incoming_pdu.event_id) + .await + { return Ok(Some(pduid.to_vec())); } if self .services .pdu_metadata - .is_event_soft_failed(&incoming_pdu.event_id)? + .is_event_soft_failed(&incoming_pdu.event_id) + .await { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + return Err!(Request(InvalidParam("Event has been soft failed"))); } debug!("Upgrading to timeline pdu"); @@ -545,57 +545,69 @@ impl Service { debug!("Performing auth check"); // 11. 
Check the auth of the event passes based on the state of the event - let check_result = state_res::event_auth::auth_check( + let state_fetch_state = &state_at_incoming_event; + let state_fetch = |k: &'static StateEventType, s: String| async move { + let shortstatekey = self.services.short.get_shortstatekey(k, &s).await.ok()?; + + let event_id = state_fetch_state.get(&shortstatekey)?; + self.services.timeline.get_pdu(event_id).await.ok() + }; + + let auth_check = state_res::event_auth::auth_check( &room_version, &incoming_pdu, - None::, // TODO: third party invite - |k, s| { - self.services - .short - .get_shortstatekey(&k.to_string().into(), s) - .ok() - .flatten() - .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| self.services.timeline.get_pdu(event_id).ok().flatten()) - }, + None, // TODO: third party invite + |k, s| state_fetch(k, s.to_owned()), ) - .map_err(|_e| Error::BadRequest(ErrorKind::forbidden(), "Auth check failed."))?; + .await + .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; - if !check_result { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Event has failed auth check with state at the event.", - )); + if !auth_check { + return Err!(Request(Forbidden("Event has failed auth check with state at the event."))); } debug!("Gathering auth events"); - let auth_events = self.services.state.get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - )?; + let auth_events = self + .services + .state + .get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .await?; + + let state_fetch = |k: &'static StateEventType, s: &str| { + let key = k.with_state_key(s); + ready(auth_events.get(&key).cloned()) + }; + + let auth_check = state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None, // third-party 
invite + state_fetch, + ) + .await + .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; // Soft fail check before doing state res debug!("Performing soft-fail check"); let soft_fail = { use RoomVersionId::*; - !state_res::event_auth::auth_check(&room_version, &incoming_pdu, None::, |k, s| { - auth_events.get(&(k.clone(), s.to_owned())) - }) - .map_err(|_e| Error::BadRequest(ErrorKind::forbidden(), "Auth check failed."))? + !auth_check || incoming_pdu.kind == TimelineEventType::RoomRedaction && match room_version_id { V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { if let Some(redact_id) = &incoming_pdu.redacts { - !self.services.state_accessor.user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - )? + !self + .services + .state_accessor + .user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .await? } else { false } @@ -605,12 +617,11 @@ impl Service { .map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?; if let Some(redact_id) = &content.redacts { - !self.services.state_accessor.user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - )? + !self + .services + .state_accessor + .user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .await? } else { false } @@ -627,28 +638,52 @@ impl Service { // Now we calculate the set of extremities this room has after the incoming // event has been applied. 
We start with the previous extremities (aka leaves) trace!("Calculating extremities"); - let mut extremities = self.services.state.get_forward_extremities(room_id)?; - trace!("Calculated {} extremities", extremities.len()); + let mut extremities: HashSet<_> = self + .services + .state + .get_forward_extremities(room_id) + .map(ToOwned::to_owned) + .collect() + .await; // Remove any forward extremities that are referenced by this incoming event's // prev_events + trace!( + "Calculated {} extremities; checking against {} prev_events", + extremities.len(), + incoming_pdu.prev_events.len() + ); for prev_event in &incoming_pdu.prev_events { - extremities.remove(prev_event); + extremities.remove(&(**prev_event)); } // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(self.services.pdu_metadata.is_event_referenced(room_id, id), Ok(true))); + let mut retained = HashSet::new(); + for id in &extremities { + if !self + .services + .pdu_metadata + .is_event_referenced(room_id, id) + .await + { + retained.insert(id.clone()); + } + } + + extremities.retain(|id| retained.contains(id)); debug!("Retained {} extremities. Compressing state", extremities.len()); - let state_ids_compressed = Arc::new( - state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - self.services - .state_compressor - .compress_state_event(*shortstatekey, id) - }) - .collect::>()?, - ); + + let mut state_ids_compressed = HashSet::new(); + for (shortstatekey, id) in &state_at_incoming_event { + state_ids_compressed.insert( + self.services + .state_compressor + .compress_state_event(*shortstatekey, id) + .await, + ); + } + + let state_ids_compressed = Arc::new(state_ids_compressed); if incoming_pdu.state_key.is_some() { debug!("Event is a state-event. 
Deriving new room state"); @@ -659,9 +694,11 @@ impl Service { let shortstatekey = self .services .short - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?; + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) + .await; - state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + let event_id = &incoming_pdu.event_id; + state_after.insert(shortstatekey, event_id.clone()); } let new_room_state = self @@ -673,7 +710,8 @@ impl Service { let (sstatehash, new, removed) = self .services .state_compressor - .save_state(room_id, new_room_state)?; + .save_state(room_id, new_room_state) + .await?; self.services .state @@ -698,16 +736,16 @@ impl Service { .await?; // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {:?}", incoming_pdu); + warn!("Event was soft failed: {incoming_pdu:?}"); self.services .pdu_metadata - .mark_event_soft_failed(&incoming_pdu.event_id)?; + .mark_event_soft_failed(&incoming_pdu.event_id); return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); } trace!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); + extremities.insert(incoming_pdu.event_id.clone().into()); // Now that the event has passed all auth it is added into the timeline. // We use the `state_at_event` instead of `state_after` so we accurately @@ -718,7 +756,7 @@ impl Service { .append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(|e| (**e).to_owned()).collect(), + extremities.into_iter().collect(), state_ids_compressed, soft_fail, &state_lock, @@ -742,8 +780,9 @@ impl Service { let current_sstatehash = self .services .state - .get_room_shortstatehash(room_id)? 
- .expect("every room has state"); + .get_room_shortstatehash(room_id) + .await + .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}"))))?; let current_state_ids = self .services @@ -752,7 +791,6 @@ impl Service { .await?; let fork_states = [current_state_ids, incoming_state]; - let mut auth_chain_sets = Vec::with_capacity(fork_states.len()); for state in &fork_states { auth_chain_sets.push( @@ -760,62 +798,59 @@ impl Service { .auth_chain .event_ids_iter(room_id, state.iter().map(|(_, id)| id.clone()).collect()) .await? - .collect(), + .collect::>>() + .await, ); } debug!("Loading fork states"); - let fork_states: Vec<_> = fork_states + let fork_states: Vec>> = fork_states .into_iter() - .map(|map| { - map.into_iter() + .stream() + .then(|fork_state| { + fork_state + .into_iter() + .stream() .filter_map(|(k, id)| { self.services .short .get_statekey_from_short(k) - .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .ok() + .map_ok_or_else(|_| None, move |(ty, st_key)| Some(((ty, st_key), id))) }) - .collect::>() + .collect() }) - .collect(); - - let lock = self.services.globals.stateres_mutex.lock(); + .collect() + .boxed() + .await; debug!("Resolving state"); - let state_resolve = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = self.services.timeline.get_pdu(id); - if let Err(e) = &res { - error!("Failed to fetch event: {}", e); - } - res.ok().flatten() - }); + let lock = self.services.globals.stateres_mutex.lock(); - let state = match state_resolve { - Ok(new_state) => new_state, - Err(e) => { - error!("State resolution failed: {}", e); - return Err(Error::bad_database( - "State resolution failed, either an event could not be found or deserialization", - )); - }, - }; + let event_fetch = |event_id| self.event_fetch(event_id); + let event_exists = |event_id| self.event_exists(event_id); + let state = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) + 
.await + .map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?; drop(lock); debug!("State resolution done. Compressing state"); - let new_room_state = state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; - self.services - .state_compressor - .compress_state_event(shortstatekey, &event_id) - }) - .collect::>()?; + let mut new_room_state = HashSet::new(); + for ((event_type, state_key), event_id) in state { + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key) + .await; + + let compressed = self + .services + .state_compressor + .compress_state_event(shortstatekey, &event_id) + .await; + + new_room_state.insert(compressed); + } Ok(Arc::new(new_room_state)) } @@ -827,46 +862,47 @@ impl Service { &self, incoming_pdu: &Arc, ) -> Result>>> { let prev_event = &*incoming_pdu.prev_events[0]; - let prev_event_sstatehash = self + let Ok(prev_event_sstatehash) = self .services .state_accessor - .pdu_shortstatehash(prev_event)?; - - let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some( - self.services - .state_accessor - .state_full_ids(shortstatehash) - .await, - ) - } else { - None + .pdu_shortstatehash(prev_event) + .await + else { + return Ok(None); }; - if let Some(Ok(mut state)) = state { - debug!("Using cached state"); - let prev_pdu = self + let Ok(mut state) = self + .services + .state_accessor + .state_full_ids(prev_event_sstatehash) + .await + .log_err() + else { + return Ok(None); + }; + + debug!("Using cached state"); + let prev_pdu = self + .services + .timeline + .get_pdu(prev_event) + .await + .map_err(|e| err!(Database("Could not find prev event, but we know the state: {e:?}")))?; + + if let Some(state_key) = &prev_pdu.state_key { + let shortstatekey = self .services - .timeline - .get_pdu(prev_event) - .ok() - 
.flatten() - .ok_or_else(|| Error::bad_database("Could not find prev event, but we know the state."))?; + .short + .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) + .await; - if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key)?; - - state.insert(shortstatekey, Arc::from(prev_event)); - // Now it's the state after the pdu - } - - return Ok(Some(state)); + state.insert(shortstatekey, Arc::from(prev_event)); + // Now it's the state after the pdu } - Ok(None) + debug_assert!(!state.is_empty(), "should be returning None for empty HashMap result"); + + Ok(Some(state)) } #[tracing::instrument(skip_all, name = "state")] @@ -878,15 +914,16 @@ impl Service { let mut okay = true; for prev_eventid in &incoming_pdu.prev_events { - let Ok(Some(prev_event)) = self.services.timeline.get_pdu(prev_eventid) else { + let Ok(prev_event) = self.services.timeline.get_pdu(prev_eventid).await else { okay = false; break; }; - let Ok(Some(sstatehash)) = self + let Ok(sstatehash) = self .services .state_accessor .pdu_shortstatehash(prev_eventid) + .await else { okay = false; break; @@ -901,20 +938,25 @@ impl Service { let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: HashMap<_, _> = self + let Ok(mut leaf_state) = self .services .state_accessor .state_full_ids(sstatehash) - .await?; + .await + else { + continue; + }; if let Some(state_key) = &prev_event.state_key { let shortstatekey = self .services .short - .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key)?; - leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) + .await; + + let event_id = 
&prev_event.event_id; + leaf_state.insert(shortstatekey, event_id.clone()); // Now it's the state after the pdu } @@ -922,13 +964,18 @@ impl Service { let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok((ty, st_key)) = self.services.short.get_statekey_from_short(k) { + if let Ok((ty, st_key)) = self + .services + .short + .get_statekey_from_short(k) + .await + .log_err() + { // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType state.insert((ty.to_string().into(), st_key), id.clone()); - } else { - warn!("Failed to get_statekey_from_short."); } + starting_events.push(id); } @@ -937,43 +984,40 @@ impl Service { .auth_chain .event_ids_iter(room_id, starting_events) .await? - .collect(), + .collect() + .await, ); fork_states.push(state); } let lock = self.services.globals.stateres_mutex.lock(); - let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = self.services.timeline.get_pdu(id); - if let Err(e) = &res { - error!("Failed to fetch event: {}", e); - } - res.ok().flatten() - }); + + let event_fetch = |event_id| self.event_fetch(event_id); + let event_exists = |event_id| self.event_exists(event_id); + let result = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) + .await + .map_err(|e| err!(Database(warn!(?e, "State resolution on prev events failed.")))); + drop(lock); - Ok(match result { - Ok(new_state) => Some( - new_state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; - Ok((shortstatekey, event_id)) - }) - .collect::>()?, - ), - Err(e) => { - warn!( - "State resolution on prev events failed, either an event could not be found or deserialization: {}", - e - ); - None - }, - }) + let Ok(new_state) = result else { + return Ok(None); + }; + + new_state + 
.iter() + .stream() + .then(|((event_type, state_key), event_id)| { + self.services + .short + .get_or_create_shortstatekey(event_type, state_key) + .map(move |shortstatekey| (shortstatekey, event_id.clone())) + }) + .collect() + .map(Some) + .map(Ok) + .await } /// Call /state_ids to find out what the state at this pdu is. We trust the @@ -985,7 +1029,7 @@ impl Service { pub_key_map: &RwLock>>, event_id: &EventId, ) -> Result>>> { debug!("Fetching state ids"); - match self + let res = self .services .sending .send_federation_request( @@ -996,61 +1040,57 @@ impl Service { }, ) .await - { - Ok(res) => { - debug!("Fetching state events"); - let collect = res - .pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(); + .inspect_err(|e| warn!("Fetching state for event failed: {e}"))?; - let state_vec = self - .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id, pub_key_map) - .await; + debug!("Fetching state events"); + let collect = res + .pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(); - let mut state: HashMap<_, Arc> = HashMap::with_capacity(state_vec.len()); - for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; + let state_vec = self + .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id, pub_key_map) + .boxed() + .await; - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key)?; + let mut state: HashMap<_, Arc> = HashMap::with_capacity(state_vec.len()); + for (pdu, _) in state_vec { + let state_key = pdu + .state_key + .clone() + .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; - match state.entry(shortstatekey) { - hash_map::Entry::Vacant(v) => { - v.insert(Arc::from(&*pdu.event_id)); - }, - hash_map::Entry::Occupied(_) => { - return Err(Error::bad_database( - "State event's type and 
state_key combination exists multiple times.", - )) - }, - } - } + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) + .await; - // The original create event must still be in the state - let create_shortstatekey = self - .services - .short - .get_shortstatekey(&StateEventType::RoomCreate, "")? - .expect("Room exists"); - - if state.get(&create_shortstatekey).map(AsRef::as_ref) != Some(&create_event.event_id) { - return Err(Error::bad_database("Incoming event refers to wrong create event.")); - } - - Ok(Some(state)) - }, - Err(e) => { - warn!("Fetching state for event failed: {}", e); - Err(e) - }, + match state.entry(shortstatekey) { + hash_map::Entry::Vacant(v) => { + v.insert(Arc::from(&*pdu.event_id)); + }, + hash_map::Entry::Occupied(_) => { + return Err(Error::bad_database( + "State event's type and state_key combination exists multiple times.", + )) + }, + } } + + // The original create event must still be in the state + let create_shortstatekey = self + .services + .short + .get_shortstatekey(&StateEventType::RoomCreate, "") + .await?; + + if state.get(&create_shortstatekey).map(AsRef::as_ref) != Some(&create_event.event_id) { + return Err!(Database("Incoming event refers to wrong create event.")); + } + + Ok(Some(state)) } /// Find the event and auth it. Once the event is validated (steps 1 - 8) @@ -1062,191 +1102,196 @@ impl Service { /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? 
- pub fn fetch_and_handle_outliers<'a>( - &'a self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, + pub async fn fetch_and_handle_outliers<'a>( + &self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveCanonicalJsonVec<'a> { - Box::pin(async move { - let back_off = |id| async { - match self + ) -> Vec<(Arc, Option>)> { + let back_off = |id| match self + .services + .globals + .bad_event_ratelimiter + .write() + .expect("locked") + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)), + }; + + let mut events_with_auth_events = Vec::with_capacity(events.len()); + for id in events { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { + trace!("Found {id} in db"); + events_with_auth_events.push((id, Some(local_pdu), vec![])); + continue; + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. 
+ let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); + let mut events_all = HashSet::with_capacity(todo_auth_events.len()); + let mut i: u64 = 0; + while let Some(next_id) = todo_auth_events.pop() { + if let Some((time, tries)) = self .services .globals .bad_event_ratelimiter - .write() + .read() .expect("locked") - .entry(id) + .get(&*next_id) { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - }, - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)), + // Exponential backoff + const MIN_DURATION: u64 = 5 * 60; + const MAX_DURATION: u64 = 60 * 60 * 24; + if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { + info!("Backing off from {next_id}"); + continue; + } } - }; - let mut events_with_auth_events = Vec::with_capacity(events.len()); - for id in events { - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = self.services.timeline.get_pdu(id) { - trace!("Found {} in db", id); - events_with_auth_events.push((id, Some(local_pdu), vec![])); + if events_all.contains(&next_id) { continue; } - // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. 
- let mut todo_auth_events = vec![Arc::clone(id)]; - let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); - let mut events_all = HashSet::with_capacity(todo_auth_events.len()); - let mut i: u64 = 0; - while let Some(next_id) = todo_auth_events.pop() { - if let Some((time, tries)) = self - .services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(&*next_id) - { - // Exponential backoff - const MIN_DURATION: u64 = 5 * 60; - const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { - info!("Backing off from {next_id}"); + i = i.saturating_add(1); + if i % 100 == 0 { + tokio::task::yield_now().await; + } + + if self.services.timeline.get_pdu(&next_id).await.is_ok() { + trace!("Found {next_id} in db"); + continue; + } + + debug!("Fetching {next_id} over federation."); + match self + .services + .sending + .send_federation_request( + origin, + get_event::v1::Request { + event_id: (*next_id).to_owned(), + }, + ) + .await + { + Ok(res) => { + debug!("Got {next_id} over federation"); + let Ok((calculated_event_id, value)) = + pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) + else { + back_off((*next_id).to_owned()); continue; + }; + + if calculated_event_id != *next_id { + warn!( + "Server didn't return event id we requested: requested: {next_id}, we got \ + {calculated_event_id}. 
Event: {:?}", + &res.pdu + ); } - } - if events_all.contains(&next_id) { - continue; - } - - i = i.saturating_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } - - if let Ok(Some(_)) = self.services.timeline.get_pdu(&next_id) { - trace!("Found {} in db", next_id); - continue; - } - - debug!("Fetching {} over federation.", next_id); - match self - .services - .sending - .send_federation_request( - origin, - get_event::v1::Request { - event_id: (*next_id).to_owned(), - }, - ) - .await - { - Ok(res) => { - debug!("Got {} over federation", next_id); - let Ok((calculated_event_id, value)) = - pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) - else { - back_off((*next_id).to_owned()).await; - continue; - }; - - if calculated_event_id != *next_id { - warn!( - "Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", - next_id, calculated_event_id, &res.pdu - ); - } - - if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { - for auth_event in auth_events { - if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); - } + if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { + for auth_event in auth_events { + if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); } - } else { - warn!("Auth event list invalid"); } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - }, - Err(e) => { - debug_error!("Failed to fetch event {next_id}: {e}"); - back_off((*next_id).to_owned()).await; - }, - } - } - events_with_auth_events.push((id, None, events_in_reverse_order)); - } - - // We go through all the signatures we see on the PDUs and their unresolved - // dependencies and fetch the corresponding 
signing keys - self.services - .server_keys - .fetch_required_signing_keys( - events_with_auth_events - .iter() - .flat_map(|(_id, _local_pdu, events)| events) - .map(|(_event_id, event)| event), - pub_key_map, - ) - .await - .unwrap_or_else(|e| { - warn!("Could not fetch all signatures for PDUs from {}: {:?}", origin, e); - }); - - let mut pdus = Vec::with_capacity(events_with_auth_events.len()); - for (id, local_pdu, events_in_reverse_order) in events_with_auth_events { - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Some(local_pdu) = local_pdu { - trace!("Found {} in db", id); - pdus.push((local_pdu, None)); - } - for (next_id, value) in events_in_reverse_order.iter().rev() { - if let Some((time, tries)) = self - .services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(&**next_id) - { - // Exponential backoff - const MIN_DURATION: u64 = 5 * 60; - const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { - debug!("Backing off from {next_id}"); - continue; + } else { + warn!("Auth event list invalid"); } - } - match self - .handle_outlier_pdu(origin, create_event, next_id, room_id, value.clone(), true, pub_key_map) - .await - { - Ok((pdu, json)) => { - if next_id == id { - pdus.push((pdu, Some(json))); - } - }, - Err(e) => { - warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()).await; - }, - } + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + }, + Err(e) => { + debug_error!("Failed to fetch event {next_id}: {e}"); + back_off((*next_id).to_owned()); + }, } } - pdus - }) + events_with_auth_events.push((id, None, events_in_reverse_order)); + } + + // We go through all the signatures we see on the PDUs and their unresolved + // dependencies and fetch the corresponding signing keys + self.services + 
.server_keys + .fetch_required_signing_keys( + events_with_auth_events + .iter() + .flat_map(|(_id, _local_pdu, events)| events) + .map(|(_event_id, event)| event), + pub_key_map, + ) + .await + .unwrap_or_else(|e| { + warn!("Could not fetch all signatures for PDUs from {origin}: {e:?}"); + }); + + let mut pdus = Vec::with_capacity(events_with_auth_events.len()); + for (id, local_pdu, events_in_reverse_order) in events_with_auth_events { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Some(local_pdu) = local_pdu { + trace!("Found {id} in db"); + pdus.push((local_pdu.clone(), None)); + } + + for (next_id, value) in events_in_reverse_order.into_iter().rev() { + if let Some((time, tries)) = self + .services + .globals + .bad_event_ratelimiter + .read() + .expect("locked") + .get(&*next_id) + { + // Exponential backoff + const MIN_DURATION: u64 = 5 * 60; + const MAX_DURATION: u64 = 60 * 60 * 24; + if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { + debug!("Backing off from {next_id}"); + continue; + } + } + + match Box::pin(self.handle_outlier_pdu( + origin, + create_event, + &next_id, + room_id, + value.clone(), + true, + pub_key_map, + )) + .await + { + Ok((pdu, json)) => { + if next_id == *id { + pdus.push((pdu, Some(json))); + } + }, + Err(e) => { + warn!("Authentication of event {next_id} failed: {e:?}"); + back_off(next_id.into()); + }, + } + } + } + pdus } #[allow(clippy::type_complexity)] @@ -1262,16 +1307,12 @@ impl Service { let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; - let first_pdu_in_room = self - .services - .timeline - .first_pdu_in_room(room_id)? 
- .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = self + if let Some((pdu, mut json_opt)) = self .fetch_and_handle_outliers( origin, &[prev_event_id.clone()], @@ -1280,28 +1321,29 @@ impl Service { room_version_id, pub_key_map, ) + .boxed() .await .pop() { Self::check_room_id(room_id, &pdu)?; - if amount > self.services.globals.max_fetch_prev_events() { - // Max limit reached - debug!( - "Max prev event limit reached! Limit: {}", - self.services.globals.max_fetch_prev_events() - ); + let limit = self.services.globals.max_fetch_prev_events(); + if amount > limit { + debug_warn!("Max prev event limit reached! Limit: {limit}"); graph.insert(prev_event_id.clone(), HashSet::new()); continue; } - if let Some(json) = json_opt.or_else(|| { - self.services + if json_opt.is_none() { + json_opt = self + .services .outlier .get_outlier_pdu_json(&prev_event_id) - .ok() - .flatten() - }) { + .await + .ok(); + } + + if let Some(json) = json_opt { if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { amount = amount.saturating_add(1); for prev_prev in &pdu.prev_events { @@ -1327,56 +1369,42 @@ impl Service { } } - let sorted = state_res::lexicographical_topological_sort(&graph, |event_id| { + let event_fetch = |event_id| { + let origin_server_ts = eventid_info + .get(&event_id) + .cloned() + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts); + // This return value is the key used for sorting events, // events are then sorted by power level, time, // and lexically by event_id. 
- Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - )) - }) - .map_err(|e| { - error!("Error sorting prev events: {e}"); - Error::bad_database("Error sorting prev events") - })?; + future::ok((int!(0), MilliSecondsSinceUnixEpoch(origin_server_ts))) + }; + + let sorted = state_res::lexicographical_topological_sort(&graph, &event_fetch) + .await + .map_err(|e| err!(Database(error!("Error sorting prev events: {e}"))))?; Ok((sorted, eventid_info)) } /// Returns Ok if the acl allows the server #[tracing::instrument(skip_all)] - pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { - let acl_event = if let Some(acl) = - self.services - .state_accessor - .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? - { - trace!("ACL event found: {acl:?}"); - acl - } else { - trace!("No ACL event found"); + pub async fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { + let Ok(acl_event_content) = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomServerAcl, "") + .await + .map(|c: RoomServerAclEventContent| c) + .inspect(|acl| trace!("ACL content found: {acl:?}")) + .inspect_err(|e| trace!("No ACL content found: {e:?}")) + else { return Ok(()); }; - let acl_event_content: RoomServerAclEventContent = match serde_json::from_str(acl_event.content.get()) { - Ok(content) => { - trace!("Found ACL event contents: {content:?}"); - content - }, - Err(e) => { - warn!("Invalid ACL event: {e}"); - return Ok(()); - }, - }; - if acl_event_content.allow.is_empty() { warn!("Ignoring broken ACL event (allow key is empty)"); - // Ignore broken acl events return Ok(()); } @@ -1384,16 +1412,18 @@ impl Service { trace!("server {server_name} is allowed by ACL"); Ok(()) } else { - debug!("Server {} was denied by room ACL in {}", server_name, room_id); - Err(Error::BadRequest(ErrorKind::forbidden(), 
"Server was denied by room ACL")) + debug!("Server {server_name} was denied by room ACL in {room_id}"); + Err!(Request(Forbidden("Server was denied by room ACL"))) } } fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> Result<()> { if pdu.room_id != room_id { - warn!("Found event from room {} in room {}", pdu.room_id, room_id); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has wrong room id")); + return Err!(Request(InvalidParam( + warn!(pdu_event_id = ?pdu.event_id, pdu_room_id = ?pdu.room_id, ?room_id, "Found event from room in room") + ))); } + Ok(()) } @@ -1408,4 +1438,10 @@ impl Service { fn to_room_version(room_version_id: &RoomVersionId) -> RoomVersion { RoomVersion::new(room_version_id).expect("room version is supported") } + + async fn event_exists(&self, event_id: Arc) -> bool { self.services.timeline.pdu_exists(&event_id).await } + + async fn event_fetch(&self, event_id: Arc) -> Option> { + self.services.timeline.get_pdu(&event_id).await.ok() + } } diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index a7ffe193..2de3e28e 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -3,7 +3,9 @@ use ruma::{CanonicalJsonObject, OwnedEventId, OwnedRoomId, RoomId}; use serde_json::value::RawValue as RawJsonValue; impl super::Service { - pub fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { + pub async fn parse_incoming_pdu( + &self, pdu: &RawJsonValue, + ) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { debug_warn!("Error parsing incoming event {pdu:#?}"); err!(BadServerResponse("Error parsing incoming event {e:?}")) @@ -14,7 +16,7 @@ impl super::Service { .and_then(|id| RoomId::parse(id.as_str()?).ok()) .ok_or(err!(Request(InvalidParam("Invalid 
room id in pdu"))))?; - let Ok(room_version_id) = self.services.state.get_room_version(&room_id) else { + let Ok(room_version_id) = self.services.state.get_room_version(&room_id).await else { return Err!("Server is not in room {room_id}"); }; diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs deleted file mode 100644 index 073d45f5..00000000 --- a/src/service/rooms/lazy_loading/data.rs +++ /dev/null @@ -1,65 +0,0 @@ -use std::sync::Arc; - -use conduit::Result; -use database::{Database, Map}; -use ruma::{DeviceId, RoomId, UserId}; - -pub(super) struct Data { - lazyloadedids: Arc, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - lazyloadedids: db["lazyloadedids"].clone(), - } - } - - pub(super) fn lazy_load_was_sent_before( - &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - pub(super) fn lazy_load_confirm_delivery( - &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - confirmed_user_ids: &mut dyn Iterator, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xFF); - - for ll_id in confirmed_user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - - Ok(()) - } - - pub(super) fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); - 
prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xFF); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 0a9d4cf2..e0816d3f 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,21 +1,26 @@ -mod data; - use std::{ collections::{HashMap, HashSet}, fmt::Write, sync::{Arc, Mutex}, }; -use conduit::{PduCount, Result}; +use conduit::{ + implement, + utils::{stream::TryIgnore, ReadyExt}, + PduCount, Result, +}; +use database::{Interfix, Map}; use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; -use self::data::Data; - pub struct Service { - pub lazy_load_waiting: Mutex, + lazy_load_waiting: Mutex, db: Data, } +struct Data { + lazyloadedids: Arc, +} + type LazyLoadWaiting = HashMap; type LazyLoadWaitingKey = (OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount); type LazyLoadWaitingVal = HashSet; @@ -23,8 +28,10 @@ type LazyLoadWaitingVal = HashSet; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - lazy_load_waiting: Mutex::new(HashMap::new()), - db: Data::new(args.db), + lazy_load_waiting: LazyLoadWaiting::new().into(), + db: Data { + lazyloadedids: args.db["lazyloadedids"].clone(), + }, })) } @@ -40,47 +47,60 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - #[tracing::instrument(skip(self), level = "debug")] - pub fn lazy_load_was_sent_before( - &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, ll_user: &UserId, - ) -> Result { - self.db - .lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) - } +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +#[inline] +pub async fn lazy_load_was_sent_before( + &self, user_id: &UserId, device_id: &DeviceId, 
room_id: &RoomId, ll_user: &UserId, +) -> bool { + let key = (user_id, device_id, room_id, ll_user); + self.db.lazyloadedids.qry(&key).await.is_ok() +} - #[tracing::instrument(skip(self), level = "debug")] - pub async fn lazy_load_mark_sent( - &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, lazy_load: HashSet, - count: PduCount, - ) { - self.lazy_load_waiting - .lock() - .expect("locked") - .insert((user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), count), lazy_load); - } +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn lazy_load_mark_sent( + &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, lazy_load: HashSet, count: PduCount, +) { + let key = (user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), count); - #[tracing::instrument(skip(self), level = "debug")] - pub async fn lazy_load_confirm_delivery( - &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, since: PduCount, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().expect("locked").remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - self.db - .lazy_load_confirm_delivery(user_id, device_id, room_id, &mut user_ids.iter().map(|u| &**u))?; - } else { - // Ignore - } + self.lazy_load_waiting + .lock() + .expect("locked") + .insert(key, lazy_load); +} - Ok(()) - } +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn lazy_load_confirm_delivery(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, since: PduCount) { + let key = (user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), since); - #[tracing::instrument(skip(self), level = "debug")] - pub fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) -> Result<()> { - self.db.lazy_load_reset(user_id, device_id, room_id) + let Some(user_ids) = self.lazy_load_waiting.lock().expect("locked").remove(&key) else { + return; + 
}; + + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xFF); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xFF); + + for ll_id in &user_ids { + let mut key = prefix.clone(); + key.extend_from_slice(ll_id.as_bytes()); + self.db.lazyloadedids.insert(&key, &[]); } } + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) { + let prefix = (user_id, device_id, room_id, Interfix); + self.db + .lazyloadedids + .keys_raw_prefix(&prefix) + .ignore_err() + .ready_for_each(|key| self.db.lazyloadedids.remove(key)) + .await; +} diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs deleted file mode 100644 index efe681b1..00000000 --- a/src/service/rooms/metadata/data.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::sync::Arc; - -use conduit::{error, utils, Error, Result}; -use database::Map; -use ruma::{OwnedRoomId, RoomId}; - -use crate::{rooms, Dep}; - -pub(super) struct Data { - disabledroomids: Arc, - bannedroomids: Arc, - roomid_shortroomid: Arc, - pduid_pdu: Arc, - services: Services, -} - -struct Services { - short: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - disabledroomids: db["disabledroomids"].clone(), - bannedroomids: db["bannedroomids"].clone(), - roomid_shortroomid: db["roomid_shortroomid"].clone(), - pduid_pdu: db["pduid_pdu"].clone(), - services: Services { - short: args.depend::("rooms::short"), - }, - } - } - - pub(super) fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.services.short.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - pub(super) fn iter_ids<'a>(&'a self) -> Box> + 'a> { - Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - })) - } - - #[inline] - pub(super) fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - #[inline] - pub(super) fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { - if disabled { - self.disabledroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.disabledroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[inline] - pub(super) fn is_banned(&self, room_id: &RoomId) -> Result { - Ok(self.bannedroomids.get(room_id.as_bytes())?.is_some()) - } - - #[inline] - pub(super) fn ban_room(&self, room_id: &RoomId, banned: bool) -> Result<()> { - if banned { - self.bannedroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.bannedroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - pub(super) fn list_banned_rooms<'a>(&'a self) -> Box> + 'a> { - Box::new(self.bannedroomids.iter().map( - |(room_id_bytes, _ /* non-banned rooms should not be in this table */)| { - let room_id = utils::string_from_bytes(&room_id_bytes) - .map_err(|e| { - error!("Invalid room_id bytes in bannedroomids: {e}"); - Error::bad_database("Invalid room_id in bannedroomids.") - })? 
- .try_into() - .map_err(|e| { - error!("Invalid room_id in bannedroomids: {e}"); - Error::bad_database("Invalid room_id in bannedroomids") - })?; - - Ok(room_id) - }, - )) - } -} diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 7415c53b..5d4a47c7 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,51 +1,92 @@ -mod data; - use std::sync::Arc; -use conduit::Result; -use ruma::{OwnedRoomId, RoomId}; +use conduit::{implement, utils::stream::TryIgnore, Result}; +use database::Map; +use futures::{Stream, StreamExt}; +use ruma::RoomId; -use self::data::Data; +use crate::{rooms, Dep}; pub struct Service { db: Data, + services: Services, +} + +struct Data { + disabledroomids: Arc, + bannedroomids: Arc, + roomid_shortroomid: Arc, + pduid_pdu: Arc, +} + +struct Services { + short: Dep, } impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(&args), + db: Data { + disabledroomids: args.db["disabledroomids"].clone(), + bannedroomids: args.db["bannedroomids"].clone(), + roomid_shortroomid: args.db["roomid_shortroomid"].clone(), + pduid_pdu: args.db["pduid_pdu"].clone(), + }, + services: Services { + short: args.depend::("rooms::short"), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Checks if a room exists. - #[inline] - pub fn exists(&self, room_id: &RoomId) -> Result { self.db.exists(room_id) } +#[implement(Service)] +pub async fn exists(&self, room_id: &RoomId) -> bool { + let Ok(prefix) = self.services.short.get_shortroomid(room_id).await else { + return false; + }; - #[must_use] - pub fn iter_ids<'a>(&'a self) -> Box> + 'a> { self.db.iter_ids() } + // Look for PDUs in that room. 
+ self.db + .pduid_pdu + .keys_raw_prefix(&prefix) + .ignore_err() + .next() + .await + .is_some() +} - #[inline] - pub fn is_disabled(&self, room_id: &RoomId) -> Result { self.db.is_disabled(room_id) } +#[implement(Service)] +pub fn iter_ids(&self) -> impl Stream + Send + '_ { self.db.roomid_shortroomid.keys().ignore_err() } - #[inline] - pub fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { - self.db.disable_room(room_id, disabled) - } - - #[inline] - pub fn is_banned(&self, room_id: &RoomId) -> Result { self.db.is_banned(room_id) } - - #[inline] - pub fn ban_room(&self, room_id: &RoomId, banned: bool) -> Result<()> { self.db.ban_room(room_id, banned) } - - #[inline] - #[must_use] - pub fn list_banned_rooms<'a>(&'a self) -> Box> + 'a> { - self.db.list_banned_rooms() +#[implement(Service)] +#[inline] +pub fn disable_room(&self, room_id: &RoomId, disabled: bool) { + if disabled { + self.db.disabledroomids.insert(room_id.as_bytes(), &[]); + } else { + self.db.disabledroomids.remove(room_id.as_bytes()); } } + +#[implement(Service)] +#[inline] +pub fn ban_room(&self, room_id: &RoomId, banned: bool) { + if banned { + self.db.bannedroomids.insert(room_id.as_bytes(), &[]); + } else { + self.db.bannedroomids.remove(room_id.as_bytes()); + } +} + +#[implement(Service)] +pub fn list_banned_rooms(&self) -> impl Stream + Send + '_ { self.db.bannedroomids.keys().ignore_err() } + +#[implement(Service)] +#[inline] +pub async fn is_disabled(&self, room_id: &RoomId) -> bool { self.db.disabledroomids.qry(room_id).await.is_ok() } + +#[implement(Service)] +#[inline] +pub async fn is_banned(&self, room_id: &RoomId) -> bool { self.db.bannedroomids.qry(room_id).await.is_ok() } diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs deleted file mode 100644 index aa804721..00000000 --- a/src/service/rooms/outlier/data.rs +++ /dev/null @@ -1,42 +0,0 @@ -use std::sync::Arc; - -use conduit::{Error, Result}; -use database::{Database, Map}; 
-use ruma::{CanonicalJsonObject, EventId}; - -use crate::PduEvent; - -pub(super) struct Data { - eventid_outlierpdu: Arc, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - eventid_outlierpdu: db["eventid_outlierpdu"].clone(), - } - } - - pub(super) fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - pub(super) fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - pub(super) fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 22bd2092..277b5982 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,9 +1,7 @@ -mod data; - use std::sync::Arc; -use conduit::Result; -use data::Data; +use conduit::{implement, Result}; +use database::{Deserialized, Map}; use ruma::{CanonicalJsonObject, EventId}; use crate::PduEvent; @@ -12,31 +10,48 @@ pub struct Service { db: Data, } +struct Data { + eventid_outlierpdu: Arc, +} + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(args.db), + db: Data { + eventid_outlierpdu: args.db["eventid_outlierpdu"].clone(), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Returns the pdu from the outlier tree. 
- pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.db.get_outlier_pdu_json(event_id) - } - - /// Returns the pdu from the outlier tree. - /// - /// TODO: use this? - #[allow(dead_code)] - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.db.get_outlier_pdu(event_id) } - - /// Append the PDU as an outlier. - #[tracing::instrument(skip(self, pdu), level = "debug")] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.db.add_pdu_outlier(event_id, pdu) - } +/// Returns the pdu from the outlier tree. +#[implement(Service)] +pub async fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result { + self.db + .eventid_outlierpdu + .qry(event_id) + .await + .deserialized_json() +} + +/// Returns the pdu from the outlier tree. +#[implement(Service)] +pub async fn get_pdu_outlier(&self, event_id: &EventId) -> Result { + self.db + .eventid_outlierpdu + .qry(event_id) + .await + .deserialized_json() +} + +/// Append the PDU as an outlier. 
+#[implement(Service)] +#[tracing::instrument(skip(self, pdu), level = "debug")] +pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) { + self.db.eventid_outlierpdu.insert( + event_id.as_bytes(), + &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), + ); } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index d1649da8..f2323475 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,7 +1,13 @@ use std::{mem::size_of, sync::Arc}; -use conduit::{utils, Error, PduCount, PduEvent, Result}; +use conduit::{ + result::LogErr, + utils, + utils::{stream::TryIgnore, ReadyExt}, + PduCount, PduEvent, +}; use database::Map; +use futures::{Stream, StreamExt}; use ruma::{EventId, RoomId, UserId}; use crate::{rooms, Dep}; @@ -17,8 +23,7 @@ struct Services { timeline: Dep, } -type PdusIterItem = Result<(PduCount, PduEvent)>; -type PdusIterator<'a> = Box + 'a>; +pub(super) type PdusIterItem = (PduCount, PduEvent); impl Data { pub(super) fn new(args: &crate::Args<'_>) -> Self { @@ -33,19 +38,17 @@ impl Data { } } - pub(super) fn add_relation(&self, from: u64, to: u64) -> Result<()> { + pub(super) fn add_relation(&self, from: u64, to: u64) { let mut key = to.to_be_bytes().to_vec(); key.extend_from_slice(&from.to_be_bytes()); - self.tofrom_relation.insert(&key, &[])?; - Ok(()) + self.tofrom_relation.insert(&key, &[]); } pub(super) fn relations_until<'a>( &'a self, user_id: &'a UserId, shortroomid: u64, target: u64, until: PduCount, - ) -> Result> { + ) -> impl Stream + Send + 'a + '_ { let prefix = target.to_be_bytes().to_vec(); let mut current = prefix.clone(); - let count_raw = match until { PduCount::Normal(x) => x.saturating_sub(1), PduCount::Backfilled(x) => { @@ -55,53 +58,42 @@ impl Data { }; current.extend_from_slice(&count_raw.to_be_bytes()); - Ok(Box::new( - self.tofrom_relation - .iter_from(¤t, true) - .take_while(move |(k, _)| 
k.starts_with(&prefix)) - .map(move |(tofrom, _data)| { - let from = utils::u64_from_bytes(&tofrom[(size_of::())..]) - .map_err(|_| Error::bad_database("Invalid count in tofrom_relation."))?; + self.tofrom_relation + .rev_raw_keys_from(¤t) + .ignore_err() + .ready_take_while(move |key| key.starts_with(&prefix)) + .map(|to_from| utils::u64_from_u8(&to_from[(size_of::())..])) + .filter_map(move |from| async move { + let mut pduid = shortroomid.to_be_bytes().to_vec(); + pduid.extend_from_slice(&from.to_be_bytes()); + let mut pdu = self.services.timeline.get_pdu_from_id(&pduid).await.ok()?; - let mut pduid = shortroomid.to_be_bytes().to_vec(); - pduid.extend_from_slice(&from.to_be_bytes()); + if pdu.sender != user_id { + pdu.remove_transaction_id().log_err().ok(); + } - let mut pdu = self - .services - .timeline - .get_pdu_from_id(&pduid)? - .ok_or_else(|| Error::bad_database("Pdu in tofrom_relation is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((PduCount::Normal(from), pdu)) - }), - )) + Some((PduCount::Normal(from), pdu)) + }) } - pub(super) fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + pub(super) fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; + self.referencedevents.insert(&key, &[]); } - - Ok(()) } - pub(super) fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) + pub(super) async fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> bool { + let key = (room_id, event_id); + self.referencedevents.qry(&key).await.is_ok() } - pub(super) fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - 
self.softfailedeventids.insert(event_id.as_bytes(), &[]) + pub(super) fn mark_event_soft_failed(&self, event_id: &EventId) { + self.softfailedeventids.insert(event_id.as_bytes(), &[]); } - pub(super) fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) + pub(super) async fn is_event_soft_failed(&self, event_id: &EventId) -> bool { + self.softfailedeventids.qry(event_id).await.is_ok() } } diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index d9eaf324..dbaebfbf 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,8 +1,8 @@ mod data; - use std::sync::Arc; -use conduit::{PduCount, PduEvent, Result}; +use conduit::{utils::stream::IterStream, PduCount, Result}; +use futures::StreamExt; use ruma::{ api::{client::relations::get_relating_events, Direction}, events::{relation::RelationType, TimelineEventType}, @@ -10,7 +10,7 @@ use ruma::{ }; use serde::Deserialize; -use self::data::Data; +use self::data::{Data, PdusIterItem}; use crate::{rooms, Dep}; pub struct Service { @@ -51,21 +51,19 @@ impl crate::Service for Service { impl Service { #[tracing::instrument(skip(self, from, to), level = "debug")] - pub fn add_relation(&self, from: PduCount, to: PduCount) -> Result<()> { + pub fn add_relation(&self, from: PduCount, to: PduCount) { match (from, to) { (PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t), _ => { // TODO: Relations with backfilled pdus - - Ok(()) }, } } #[allow(clippy::too_many_arguments)] - pub fn paginate_relations_with_filter( - &self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: &Option, - filter_rel_type: &Option, from: &Option, to: &Option, limit: &Option, + pub async fn paginate_relations_with_filter( + &self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: Option, + filter_rel_type: Option, from: 
Option<&String>, to: Option<&String>, limit: Option, recurse: bool, dir: Direction, ) -> Result { let from = match from { @@ -76,7 +74,7 @@ impl Service { }, }; - let to = to.as_ref().and_then(|t| PduCount::try_from_string(t).ok()); + let to = to.and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = limit @@ -92,30 +90,32 @@ impl Service { 1 }; - let relations_until = &self.relations_until(sender_user, room_id, target, from, depth)?; - let events: Vec<_> = relations_until // TODO: should be relations_after - .iter() - .filter(|(_, pdu)| { - filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) - && if let Ok(content) = - serde_json::from_str::(pdu.content.get()) - { - filter_rel_type - .as_ref() - .map_or(true, |r| &content.relates_to.rel_type == r) - } else { - false - } - }) - .take(limit) - .filter(|(_, pdu)| { - self.services - .state_accessor - .user_can_see_event(sender_user, room_id, &pdu.event_id) - .unwrap_or(false) - }) - .take_while(|(k, _)| Some(k) != to.as_ref()) // Stop at `to` - .collect(); + let relations_until: Vec = self + .relations_until(sender_user, room_id, target, from, depth) + .await?; + + // TODO: should be relations_after + let events: Vec<_> = relations_until + .into_iter() + .filter(move |(_, pdu): &PdusIterItem| { + if !filter_event_type.as_ref().map_or(true, |t| pdu.kind == *t) { + return false; + } + + let Ok(content) = serde_json::from_str::(pdu.content.get()) else { + return false; + }; + + filter_rel_type + .as_ref() + .map_or(true, |r| *r == content.relates_to.rel_type) + }) + .take(limit) + .take_while(|(k, _)| Some(*k) != to) + .stream() + .filter_map(|item| self.visibility_filter(sender_user, item)) + .collect() + .await; let next_token = events.last().map(|(count, _)| count).copied(); @@ -125,9 +125,9 @@ impl Service { .map(|(_, pdu)| pdu.to_message_like_event()) .collect(), Direction::Backward => events - .into_iter() - .rev() // relations are always most recent first - 
.map(|(_, pdu)| pdu.to_message_like_event()) + .into_iter() + .rev() // relations are always most recent first + .map(|(_, pdu)| pdu.to_message_like_event()) .collect(), }; @@ -135,68 +135,85 @@ impl Service { chunk: events_chunk, next_batch: next_token.map(|t| t.stringify()), prev_batch: Some(from.stringify()), - recursion_depth: if recurse { - Some(depth.into()) - } else { - None - }, + recursion_depth: recurse.then_some(depth.into()), }) } - pub fn relations_until<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, target: &'a EventId, until: PduCount, max_depth: u8, - ) -> Result> { - let room_id = self.services.short.get_or_create_shortroomid(room_id)?; - #[allow(unknown_lints)] - #[allow(clippy::manual_unwrap_or_default)] - let target = match self.services.timeline.get_pdu_count(target)? { - Some(PduCount::Normal(c)) => c, + async fn visibility_filter(&self, sender_user: &UserId, item: PdusIterItem) -> Option { + let (_, pdu) = &item; + + self.services + .state_accessor + .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) + .await + .then_some(item) + } + + pub async fn relations_until( + &self, user_id: &UserId, room_id: &RoomId, target: &EventId, until: PduCount, max_depth: u8, + ) -> Result> { + let room_id = self.services.short.get_or_create_shortroomid(room_id).await; + + let target = match self.services.timeline.get_pdu_count(target).await { + Ok(PduCount::Normal(c)) => c, // TODO: Support backfilled relations _ => 0, // This will result in an empty iterator }; - self.db + let mut pdus: Vec = self + .db .relations_until(user_id, room_id, target, until) - .map(|mut relations| { - let mut pdus: Vec<_> = (*relations).into_iter().filter_map(Result::ok).collect(); - let mut stack: Vec<_> = pdus.clone().iter().map(|pdu| (pdu.to_owned(), 1)).collect(); + .collect() + .await; - while let Some(stack_pdu) = stack.pop() { - let target = match stack_pdu.0 .0 { - PduCount::Normal(c) => c, - // TODO: Support backfilled relations - 
PduCount::Backfilled(_) => 0, // This will result in an empty iterator - }; + let mut stack: Vec<_> = pdus.clone().into_iter().map(|pdu| (pdu, 1)).collect(); - if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until) { - for relation in relations.flatten() { - if stack_pdu.1 < max_depth { - stack.push((relation.clone(), stack_pdu.1.saturating_add(1))); - } + while let Some(stack_pdu) = stack.pop() { + let target = match stack_pdu.0 .0 { + PduCount::Normal(c) => c, + // TODO: Support backfilled relations + PduCount::Backfilled(_) => 0, // This will result in an empty iterator + }; - pdus.push(relation); - } - } + let relations: Vec = self + .db + .relations_until(user_id, room_id, target, until) + .collect() + .await; + + for relation in relations { + if stack_pdu.1 < max_depth { + stack.push((relation.clone(), stack_pdu.1.saturating_add(1))); } - pdus.sort_by(|a, b| a.0.cmp(&b.0)); - pdus - }) + pdus.push(relation); + } + } + + pdus.sort_by(|a, b| a.0.cmp(&b.0)); + + Ok(pdus) } + #[inline] #[tracing::instrument(skip_all, level = "debug")] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - self.db.mark_as_referenced(room_id, event_ids) + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) { + self.db.mark_as_referenced(room_id, event_ids); } + #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - self.db.is_event_referenced(room_id, event_id) + pub async fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> bool { + self.db.is_event_referenced(room_id, event_id).await } + #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { self.db.mark_event_soft_failed(event_id) } + pub fn mark_event_soft_failed(&self, event_id: &EventId) { self.db.mark_event_soft_failed(event_id) } + #[inline] 
#[tracing::instrument(skip(self), level = "debug")] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { self.db.is_event_soft_failed(event_id) } + pub async fn is_event_soft_failed(&self, event_id: &EventId) -> bool { + self.db.is_event_soft_failed(event_id).await + } } diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index 0c156df3..a2c0fabc 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -1,10 +1,18 @@ use std::{mem::size_of, sync::Arc}; -use conduit::{utils, Error, Result}; -use database::Map; -use ruma::{events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, RoomId, UserId}; +use conduit::{ + utils, + utils::{stream::TryIgnore, ReadyExt}, + Error, Result, +}; +use database::{Deserialized, Map}; +use futures::{Stream, StreamExt}; +use ruma::{ + events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent}, + serde::Raw, + CanonicalJsonObject, OwnedUserId, RoomId, UserId, +}; -use super::AnySyncEphemeralRoomEventIter; use crate::{globals, Dep}; pub(super) struct Data { @@ -18,6 +26,8 @@ struct Services { globals: Dep, } +pub(super) type ReceiptItem = (OwnedUserId, u64, Raw); + impl Data { pub(super) fn new(args: &crate::Args<'_>) -> Self { let db = &args.db; @@ -31,7 +41,9 @@ impl Data { } } - pub(super) fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) -> Result<()> { + pub(super) async fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) { + type KeyVal<'a> = (&'a RoomId, u64, &'a UserId); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xFF); @@ -39,108 +51,90 @@ impl Data { last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xFF) - .next() 
- .expect("rsplit always returns an element") - == user_id.as_bytes() - }) { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } + self.readreceiptid_readreceipt + .rev_keys_from_raw(&last_possible_key) + .ignore_err() + .ready_take_while(|(r, ..): &KeyVal<'_>| *r == room_id) + .ready_filter_map(|(r, c, u): KeyVal<'_>| (u == user_id).then_some((r, c, u))) + .ready_for_each(|old: KeyVal<'_>| { + // This is the old room_latest + self.readreceiptid_readreceipt.del(&old); + }) + .await; let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes()); + room_latest_id.extend_from_slice(&self.services.globals.next_count().unwrap().to_be_bytes()); room_latest_id.push(0xFF); room_latest_id.extend_from_slice(user_id.as_bytes()); self.readreceiptid_readreceipt.insert( &room_latest_id, &serde_json::to_vec(event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) + ); } - pub(super) fn readreceipts_since<'a>(&'a self, room_id: &RoomId, since: u64) -> AnySyncEphemeralRoomEventIter<'a> { + pub(super) fn readreceipts_since<'a>( + &'a self, room_id: &'a RoomId, since: u64, + ) -> impl Stream + Send + 'a { + let after_since = since.saturating_add(1); // +1 so we don't send the event at since + let first_possible_edu = (room_id, after_since); + let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xFF); let prefix2 = prefix.clone(); - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since.saturating_add(1)).to_be_bytes()); // +1 so we don't send the event at since + self.readreceiptid_readreceipt + .stream_raw_from(&first_possible_edu) + .ignore_err() + .ready_take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(k, v)| { + let count_offset = prefix.len().saturating_add(size_of::()); + let user_id_offset = count_offset.saturating_add(1); - Box::new( - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - 
.take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count_offset = prefix.len().saturating_add(size_of::()); - let count = utils::u64_from_bytes(&k[prefix.len()..count_offset]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id_offset = count_offset.saturating_add(1); - let user_id = UserId::parse( - utils::string_from_bytes(&k[user_id_offset..]) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid bytes in db."))?, - ) + let count = utils::u64_from_bytes(&k[prefix.len()..count_offset]) + .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + + let user_id_str = utils::string_from_bytes(&k[user_id_offset..]) + .map_err(|_| Error::bad_database("Invalid readreceiptid userid bytes in db."))?; + + let user_id = UserId::parse(user_id_str) .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - let mut json = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json."))?; - json.remove("room_id"); + let mut json = serde_json::from_slice::(v) + .map_err(|_| Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json."))?; - Ok(( - user_id, - count, - Raw::from_json(serde_json::value::to_raw_value(&json).expect("json is valid raw value")), - )) - }), - ) + json.remove("room_id"); + + let event = Raw::from_json(serde_json::value::to_raw_value(&json)?); + + Ok((user_id, count, event)) + }) + .ignore_err() } - pub(super) fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { + pub(super) fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) { let mut key = room_id.as_bytes().to_vec(); key.push(0xFF); key.extend_from_slice(user_id.as_bytes()); self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; + .insert(&key, &count.to_be_bytes()); self.roomuserid_lastprivatereadupdate - .insert(&key, 
&self.services.globals.next_count()?.to_be_bytes()) + .insert(&key, &self.services.globals.next_count().unwrap().to_be_bytes()); } - pub(super) fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some( - utils::u64_from_bytes(&v).map_err(|_| Error::bad_database("Invalid private read marker bytes"))?, - )) - }) + pub(super) async fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let key = (room_id, user_id); + self.roomuserid_privateread.qry(&key).await.deserialized() } - pub(super) fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")) - }) - .transpose()? 
- .unwrap_or(0)) + pub(super) async fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + let key = (room_id, user_id); + self.roomuserid_lastprivatereadupdate + .qry(&key) + .await + .deserialized() + .unwrap_or(0) } } diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index da11e2a0..ec34361e 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -3,16 +3,17 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; use conduit::{debug, Result}; -use data::Data; +use futures::Stream; use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, - AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, + SyncEphemeralRoomEvent, }, serde::Raw, - OwnedUserId, RoomId, UserId, + RoomId, UserId, }; +use self::data::{Data, ReceiptItem}; use crate::{sending, Dep}; pub struct Service { @@ -24,9 +25,6 @@ struct Services { sending: Dep, } -type AnySyncEphemeralRoomEventIter<'a> = - Box)>> + 'a>; - impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -42,44 +40,53 @@ impl crate::Service for Service { impl Service { /// Replaces the previous read receipt. - pub fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event)?; - self.services.sending.flush_room(room_id)?; - - Ok(()) + pub async fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) { + self.db.readreceipt_update(user_id, room_id, event).await; + self.services + .sending + .flush_room(room_id) + .await + .expect("room flush failed"); } /// Returns an iterator over the most recent read_receipts in a room that /// happened after the event with id `since`. 
+ #[inline] #[tracing::instrument(skip(self), level = "debug")] pub fn readreceipts_since<'a>( - &'a self, room_id: &RoomId, since: u64, - ) -> impl Iterator)>> + 'a { + &'a self, room_id: &'a RoomId, since: u64, + ) -> impl Stream + Send + 'a { self.db.readreceipts_since(room_id, since) } /// Sets a private read marker at `count`. + #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) + pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) { + self.db.private_read_set(room_id, user_id, count); } /// Returns the private read marker. + #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.private_read_get(room_id, user_id) + pub async fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result { + self.db.private_read_get(room_id, user_id).await } /// Returns the count of the last typing update in this room. 
- pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_privateread_update(user_id, room_id) + #[inline] + pub async fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + self.db.last_privateread_update(user_id, room_id).await } } #[must_use] -pub fn pack_receipts(receipts: AnySyncEphemeralRoomEventIter<'_>) -> Raw> { +pub fn pack_receipts(receipts: I) -> Raw> +where + I: Iterator, +{ let mut json = BTreeMap::new(); - for (_user, _count, value) in receipts.flatten() { + for (_, _, value) in receipts { let receipt = serde_json::from_str::>(value.json().get()); if let Ok(value) = receipt { for (event, receipt) in value.content { diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index a0086095..de98beee 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,13 +1,12 @@ use std::sync::Arc; -use conduit::{utils, Result}; +use conduit::utils::{set, stream::TryIgnore, IterStream, ReadyExt}; use database::Map; +use futures::StreamExt; use ruma::RoomId; use crate::{rooms, Dep}; -type SearchPdusResult<'a> = Result> + 'a>, Vec)>>; - pub(super) struct Data { tokenids: Arc, services: Services, @@ -28,7 +27,7 @@ impl Data { } } - pub(super) fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { + pub(super) fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { let batch = tokenize(message_body) .map(|word| { let mut key = shortroomid.to_be_bytes().to_vec(); @@ -39,11 +38,10 @@ impl Data { }) .collect::>(); - self.tokenids - .insert_batch(batch.iter().map(database::KeyVal::from)) + self.tokenids.insert_batch(batch.iter()); } - pub(super) fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { + pub(super) fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { let batch = tokenize(message_body).map(|word| { let mut key = 
shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(word.as_bytes()); @@ -53,46 +51,53 @@ impl Data { }); for token in batch { - self.tokenids.remove(&token)?; + self.tokenids.remove(&token); } - - Ok(()) } - pub(super) fn search_pdus<'a>(&'a self, room_id: &RoomId, search_string: &str) -> SearchPdusResult<'a> { + pub(super) async fn search_pdus( + &self, room_id: &RoomId, search_string: &str, + ) -> Option<(Vec>, Vec)> { let prefix = self .services .short - .get_shortroomid(room_id)? - .expect("room exists") + .get_shortroomid(room_id) + .await + .ok()? .to_be_bytes() .to_vec(); let words: Vec<_> = tokenize(search_string).collect(); - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xFF); - let prefix3 = prefix2.clone(); + let bufs: Vec<_> = words + .clone() + .into_iter() + .stream() + .then(move |word| { + let mut prefix2 = prefix.clone(); + prefix2.extend_from_slice(word.as_bytes()); + prefix2.push(0xFF); + let prefix3 = prefix2.clone(); - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); + let mut last_possible_id = prefix2.clone(); + last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(key, _)| key[prefix3.len()..].to_vec()) - }); + self.tokenids + .rev_raw_keys_from(&last_possible_id) // Newest pdus first + .ignore_err() + .ready_take_while(move |key| key.starts_with(&prefix2)) + .map(move |key| key[prefix3.len()..].to_vec()) + .collect::>() + }) + .collect() + .await; - let Some(common_elements) = utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) else { - return Ok(None); - }; - - Ok(Some((Box::new(common_elements), words))) + Some(( + 
set::intersection(bufs.iter().map(|buf| buf.iter())) + .cloned() + .collect(), + words, + )) } } @@ -100,7 +105,7 @@ impl Data { /// /// This may be used to tokenize both message bodies (for indexing) or search /// queries (for querying). -fn tokenize(body: &str) -> impl Iterator + '_ { +fn tokenize(body: &str) -> impl Iterator + Send + '_ { body.split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) .filter(|word| word.len() <= 50) diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 8caa0ce3..80b58804 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -21,20 +21,21 @@ impl crate::Service for Service { } impl Service { + #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { - self.db.index_pdu(shortroomid, pdu_id, message_body) + pub fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { + self.db.index_pdu(shortroomid, pdu_id, message_body); } + #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { - self.db.deindex_pdu(shortroomid, pdu_id, message_body) + pub fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { + self.db.deindex_pdu(shortroomid, pdu_id, message_body); } + #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub fn search_pdus<'a>( - &'a self, room_id: &RoomId, search_string: &str, - ) -> Result> + 'a, Vec)>> { - self.db.search_pdus(room_id, search_string) + pub async fn search_pdus(&self, room_id: &RoomId, search_string: &str) -> Option<(Vec>, Vec)> { + self.db.search_pdus(room_id, search_string).await } } diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index 17fbb64e..f6a82488 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -1,7 
+1,7 @@ use std::sync::Arc; -use conduit::{utils, warn, Error, Result}; -use database::Map; +use conduit::{err, utils, Error, Result}; +use database::{Deserialized, Map}; use ruma::{events::StateEventType, EventId, RoomId}; use crate::{globals, Dep}; @@ -36,44 +36,46 @@ impl Data { } } - pub(super) fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { - let short = if let Some(shorteventid) = self.eventid_shorteventid.get(event_id.as_bytes())? { - utils::u64_from_bytes(&shorteventid).map_err(|_| Error::bad_database("Invalid shorteventid in db."))? - } else { - let shorteventid = self.services.globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - }; + pub(super) async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { + if let Ok(shorteventid) = self.eventid_shorteventid.qry(event_id).await.deserialized() { + return shorteventid; + } - Ok(short) + let shorteventid = self.services.globals.next_count().unwrap(); + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes()); + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes()); + + shorteventid } - pub(super) fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Result> { + pub(super) async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { let mut ret: Vec = Vec::with_capacity(event_ids.len()); let keys = event_ids .iter() .map(|id| id.as_bytes()) .collect::>(); + for (i, short) in self .eventid_shorteventid - .multi_get(&keys)? 
+ .multi_get(keys.iter()) .iter() .enumerate() { #[allow(clippy::single_match_else)] match short { Some(short) => ret.push( - utils::u64_from_bytes(short).map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, + utils::u64_from_bytes(short) + .map_err(|_| Error::bad_database("Invalid shorteventid in db.")) + .unwrap(), ), None => { - let short = self.services.globals.next_count()?; + let short = self.services.globals.next_count().unwrap(); self.eventid_shorteventid - .insert(keys[i], &short.to_be_bytes())?; + .insert(keys[i], &short.to_be_bytes()); self.shorteventid_eventid - .insert(&short.to_be_bytes(), keys[i])?; + .insert(&short.to_be_bytes(), keys[i]); debug_assert!(ret.len() == i, "position of result must match input"); ret.push(short); @@ -81,115 +83,85 @@ impl Data { } } - Ok(ret) + ret } - pub(super) fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result> { - let mut statekey_vec = event_type.to_string().as_bytes().to_vec(); - statekey_vec.push(0xFF); - statekey_vec.extend_from_slice(state_key.as_bytes()); + pub(super) async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { + let key = (event_type, state_key); + self.statekey_shortstatekey.qry(&key).await.deserialized() + } - let short = self - .statekey_shortstatekey - .get(&statekey_vec)? 
- .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey).map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) + pub(super) async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> u64 { + let key = (event_type.to_string(), state_key); + if let Ok(shortstatekey) = self.statekey_shortstatekey.qry(&key).await.deserialized() { + return shortstatekey; + } + + let mut key = event_type.to_string().as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(state_key.as_bytes()); + + let shortstatekey = self.services.globals.next_count().unwrap(); + self.statekey_shortstatekey + .insert(&key, &shortstatekey.to_be_bytes()); + self.shortstatekey_statekey + .insert(&shortstatekey.to_be_bytes(), &key); + + shortstatekey + } + + pub(super) async fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { + self.shorteventid_eventid + .qry(&shorteventid) + .await + .deserialized() + .map_err(|e| err!(Database("Failed to find EventId from short {shorteventid:?}: {e:?}"))) + } + + pub(super) async fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + self.shortstatekey_statekey + .qry(&shortstatekey) + .await + .deserialized() + .map_err(|e| { + err!(Database( + "Failed to find (StateEventType, state_key) from short {shortstatekey:?}: {e:?}" + )) }) - .transpose()?; - - Ok(short) - } - - pub(super) fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { - let mut statekey_vec = event_type.to_string().as_bytes().to_vec(); - statekey_vec.push(0xFF); - statekey_vec.extend_from_slice(state_key.as_bytes()); - - let short = if let Some(shortstatekey) = self.statekey_shortstatekey.get(&statekey_vec)? { - utils::u64_from_bytes(&shortstatekey).map_err(|_| Error::bad_database("Invalid shortstatekey in db."))? 
- } else { - let shortstatekey = self.services.globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey_vec, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey_vec)?; - shortstatekey - }; - - Ok(short) - } - - pub(super) fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc( - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("EventID in shorteventid_eventid is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - Ok(event_id) - } - - pub(super) fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xFF); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|e| { - warn!("Event type in shortstatekey_statekey is invalid: {}", e); - Error::bad_database("Event type in shortstatekey_statekey is invalid.") - })?); - - let state_key = utils::string_from_bytes(statekey_bytes) - .map_err(|_| Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode."))?; - - let result = (event_type, state_key); - - Ok(result) } /// Returns (shortstatehash, already_existed) - pub(super) fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { - Ok(if let Some(shortstatehash) = self.statehash_shortstatehash.get(state_hash)? 
{ - ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ) - } else { - let shortstatehash = self.services.globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - }) + pub(super) async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, bool) { + if let Ok(shortstatehash) = self + .statehash_shortstatehash + .qry(state_hash) + .await + .deserialized() + { + return (shortstatehash, true); + } + + let shortstatehash = self.services.globals.next_count().unwrap(); + self.statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes()); + + (shortstatehash, false) } - pub(super) fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + pub(super) async fn get_shortroomid(&self, room_id: &RoomId) -> Result { + self.roomid_shortroomid.qry(room_id).await.deserialized() + } + + pub(super) async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> u64 { self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid shortroomid in db."))) - .transpose() - } - - pub(super) fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { - Ok(if let Some(short) = self.roomid_shortroomid.get(room_id.as_bytes())? { - utils::u64_from_bytes(&short).map_err(|_| Error::bad_database("Invalid shortroomid in db."))? 
- } else { - let short = self.services.globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - }) + .qry(room_id) + .await + .deserialized() + .unwrap_or_else(|_| { + let short = self.services.globals.next_count().unwrap(); + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes()); + short + }) } } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index bfe0e9a0..00bb7cb1 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -22,38 +22,40 @@ impl crate::Service for Service { } impl Service { - pub fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { - self.db.get_or_create_shorteventid(event_id) + pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { + self.db.get_or_create_shorteventid(event_id).await } - pub fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Result> { - self.db.multi_get_or_create_shorteventid(event_ids) + pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { + self.db.multi_get_or_create_shorteventid(event_ids).await } - pub fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result> { - self.db.get_shortstatekey(event_type, state_key) + pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { + self.db.get_shortstatekey(event_type, state_key).await } - pub fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { - self.db.get_or_create_shortstatekey(event_type, state_key) + pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> u64 { + self.db + .get_or_create_shortstatekey(event_type, state_key) + .await } - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - self.db.get_eventid_from_short(shorteventid) + pub async fn get_eventid_from_short(&self, shorteventid: 
u64) -> Result> { + self.db.get_eventid_from_short(shorteventid).await } - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - self.db.get_statekey_from_short(shortstatekey) + pub async fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + self.db.get_statekey_from_short(shortstatekey).await } /// Returns (shortstatehash, already_existed) - pub fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { - self.db.get_or_create_shortstatehash(state_hash) + pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, bool) { + self.db.get_or_create_shortstatehash(state_hash).await } - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.db.get_shortroomid(room_id) } + pub async fn get_shortroomid(&self, room_id: &RoomId) -> Result { self.db.get_shortroomid(room_id).await } - pub fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { - self.db.get_or_create_shortroomid(room_id) + pub async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> u64 { + self.db.get_or_create_shortroomid(room_id).await } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 24d612d8..17fbf0ef 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -7,7 +7,12 @@ use std::{ sync::Arc, }; -use conduit::{checked, debug, debug_info, err, utils::math::usize_from_f64, warn, Error, Result}; +use conduit::{ + checked, debug, debug_info, err, + utils::{math::usize_from_f64, IterStream}, + Error, Result, +}; +use futures::{StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ api::{ @@ -211,12 +216,15 @@ impl Service { .as_ref() { return Ok(if let Some(cached) = cached { - if self.is_accessible_child( - current_room, - &cached.summary.join_rule, - &identifier, - &cached.summary.allowed_room_ids, - ) { + if self + .is_accessible_child( + current_room, + &cached.summary.join_rule, 
+ &identifier, + &cached.summary.allowed_room_ids, + ) + .await + { Some(SummaryAccessibility::Accessible(Box::new(cached.summary.clone()))) } else { Some(SummaryAccessibility::Inaccessible) @@ -228,7 +236,9 @@ impl Service { Ok( if let Some(children_pdus) = self.get_stripped_space_child_events(current_room).await? { - let summary = self.get_room_summary(current_room, children_pdus, &identifier); + let summary = self + .get_room_summary(current_room, children_pdus, &identifier) + .await; if let Ok(summary) = summary { self.roomid_spacehierarchy_cache.lock().await.insert( current_room.clone(), @@ -322,12 +332,15 @@ impl Service { ); } } - if self.is_accessible_child( - current_room, - &response.room.join_rule, - &Identifier::UserId(user_id), - &response.room.allowed_room_ids, - ) { + if self + .is_accessible_child( + current_room, + &response.room.join_rule, + &Identifier::UserId(user_id), + &response.room.allowed_room_ids, + ) + .await + { return Ok(Some(SummaryAccessibility::Accessible(Box::new(summary.clone())))); } @@ -358,7 +371,7 @@ impl Service { } } - fn get_room_summary( + async fn get_room_summary( &self, current_room: &OwnedRoomId, children_state: Vec>, identifier: &Identifier<'_>, ) -> Result { @@ -367,48 +380,43 @@ impl Service { let join_rule = self .services .state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")? - .map(|s| { + .room_state_get(room_id, &StateEventType::RoomJoinRules, "") + .await + .map_or(JoinRule::Invite, |s| { serde_json::from_str(s.content.get()) .map(|c: RoomJoinRulesEventContent| c.join_rule) .map_err(|e| err!(Database(error!("Invalid room join rule event in database: {e}")))) - }) - .transpose()? 
- .unwrap_or(JoinRule::Invite); + .unwrap() + }); let allowed_room_ids = self .services .state_accessor .allowed_room_ids(join_rule.clone()); - if !self.is_accessible_child(current_room, &join_rule.clone().into(), identifier, &allowed_room_ids) { + if !self + .is_accessible_child(current_room, &join_rule.clone().into(), identifier, &allowed_room_ids) + .await + { debug!("User is not allowed to see room {room_id}"); // This error will be caught later return Err(Error::BadRequest(ErrorKind::forbidden(), "User is not allowed to see the room")); } - let join_rule = join_rule.into(); - Ok(SpaceHierarchyParentSummary { canonical_alias: self .services .state_accessor .get_canonical_alias(room_id) - .unwrap_or(None), - name: self - .services - .state_accessor - .get_name(room_id) - .unwrap_or(None), + .await + .ok(), + name: self.services.state_accessor.get_name(room_id).await.ok(), num_joined_members: self .services .state_cache .room_joined_count(room_id) - .unwrap_or_default() - .unwrap_or_else(|| { - warn!("Room {room_id} has no member count"); - 0 - }) + .await + .unwrap_or(0) .try_into() .expect("user count should not be that big"), room_id: room_id.to_owned(), @@ -416,18 +424,29 @@ impl Service { .services .state_accessor .get_room_topic(room_id) - .unwrap_or(None), - world_readable: self.services.state_accessor.is_world_readable(room_id)?, - guest_can_join: self.services.state_accessor.guest_can_join(room_id)?, + .await + .ok(), + world_readable: self + .services + .state_accessor + .is_world_readable(room_id) + .await, + guest_can_join: self.services.state_accessor.guest_can_join(room_id).await, avatar_url: self .services .state_accessor - .get_avatar(room_id)? 
+ .get_avatar(room_id) + .await .into_option() .unwrap_or_default() .url, - join_rule, - room_type: self.services.state_accessor.get_room_type(room_id)?, + join_rule: join_rule.into(), + room_type: self + .services + .state_accessor + .get_room_type(room_id) + .await + .ok(), children_state, allowed_room_ids, }) @@ -474,21 +493,22 @@ impl Service { results.push(summary_to_chunk(*summary.clone())); } else { children = children - .into_iter() - .rev() - .skip_while(|(room, _)| { - if let Ok(short) = self.services.short.get_shortroomid(room) - { - short.as_ref() != short_room_ids.get(parents.len()) - } else { - false - } - }) - .collect::>() - // skip_while doesn't implement DoubleEndedIterator, which is needed for rev - .into_iter() - .rev() - .collect(); + .iter() + .rev() + .stream() + .skip_while(|(room, _)| { + self.services + .short + .get_shortroomid(room) + .map_ok(|short| Some(&short) != short_room_ids.get(parents.len())) + .unwrap_or_else(|_| false) + }) + .map(Clone::clone) + .collect::)>>() + .await + .into_iter() + .rev() + .collect(); if children.is_empty() { return Err(Error::BadRequest( @@ -531,7 +551,7 @@ impl Service { let mut short_room_ids = vec![]; for room in parents { - short_room_ids.push(self.services.short.get_or_create_shortroomid(&room)?); + short_room_ids.push(self.services.short.get_or_create_shortroomid(&room).await); } Some( @@ -554,7 +574,7 @@ impl Service { async fn get_stripped_space_child_events( &self, room_id: &RoomId, ) -> Result>>, Error> { - let Some(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id)? 
else { + let Ok(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id).await else { return Ok(None); }; @@ -562,10 +582,13 @@ impl Service { .services .state_accessor .state_full_ids(current_shortstatehash) - .await?; + .await + .map_err(|e| err!(Database("State in space not found: {e}")))?; + let mut children_pdus = Vec::new(); for (key, id) in state { - let (event_type, state_key) = self.services.short.get_statekey_from_short(key)?; + let (event_type, state_key) = self.services.short.get_statekey_from_short(key).await?; + if event_type != StateEventType::SpaceChild { continue; } @@ -573,8 +596,9 @@ impl Service { let pdu = self .services .timeline - .get_pdu(&id)? - .ok_or_else(|| Error::bad_database("Event in space state not found"))?; + .get_pdu(&id) + .await + .map_err(|e| err!(Database("Event {id:?} in space state not found: {e:?}")))?; if serde_json::from_str::(pdu.content.get()) .ok() @@ -593,7 +617,7 @@ impl Service { } /// With the given identifier, checks if a room is accessable - fn is_accessible_child( + async fn is_accessible_child( &self, current_room: &OwnedRoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>, allowed_room_ids: &Vec, ) -> bool { @@ -607,6 +631,7 @@ impl Service { .services .event_handler .acl_check(server_name, room_id) + .await .is_err() { return false; @@ -617,12 +642,11 @@ impl Service { .services .state_cache .is_joined(user_id, current_room) - .unwrap_or_default() - || self - .services - .state_cache - .is_invited(user_id, current_room) - .unwrap_or_default() + .await || self + .services + .state_cache + .is_invited(user_id, current_room) + .await { return true; } @@ -633,22 +657,12 @@ impl Service { for room in allowed_room_ids { match identifier { Identifier::UserId(user) => { - if self - .services - .state_cache - .is_joined(user, room) - .unwrap_or_default() - { + if self.services.state_cache.is_joined(user, room).await { return true; } }, Identifier::ServerName(server) => { - if self - 
.services - .state_cache - .server_in_room(server, room) - .unwrap_or_default() - { + if self.services.state_cache.server_in_room(server, room).await { return true; } }, diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 3c110afc..ccf7509a 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,34 +1,31 @@ -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; -use conduit::{utils, Error, Result}; -use database::{Database, Map}; -use ruma::{EventId, OwnedEventId, RoomId}; +use conduit::{ + utils::{stream::TryIgnore, ReadyExt}, + Result, +}; +use database::{Database, Deserialized, Interfix, Map}; +use ruma::{OwnedEventId, RoomId}; use super::RoomMutexGuard; pub(super) struct Data { shorteventid_shortstatehash: Arc, - roomid_pduleaves: Arc, roomid_shortstatehash: Arc, + pub(super) roomid_pduleaves: Arc, } impl Data { pub(super) fn new(db: &Arc) -> Self { Self { shorteventid_shortstatehash: db["shorteventid_shortstatehash"].clone(), - roomid_pduleaves: db["roomid_pduleaves"].clone(), roomid_shortstatehash: db["roomid_shortstatehash"].clone(), + roomid_pduleaves: db["roomid_pduleaves"].clone(), } } - pub(super) fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) + pub(super) async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result { + self.roomid_shortstatehash.qry(room_id).await.deserialized() } #[inline] @@ -37,53 +34,35 @@ impl Data { room_id: &RoomId, new_shortstatehash: u64, _mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) { self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) + .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes()); } - pub(super) fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()> { + pub(super) fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) { self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) + .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes()); } - pub(super) fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc( - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("EventID in roomid_pduleaves is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - pub(super) fn set_forward_extremities( + pub(super) async fn set_forward_extremities( &self, room_id: &RoomId, event_ids: Vec, _mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) { + let prefix = (room_id, Interfix); + self.roomid_pduleaves + .keys_raw_prefix(&prefix) + .ignore_err() + .ready_for_each(|key| self.roomid_pduleaves.remove(key)) + .await; + let mut prefix = room_id.as_bytes().to_vec(); 
prefix.push(0xFF); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - for event_id in event_ids { let mut key = prefix.clone(); key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + self.roomid_pduleaves.insert(&key, event_id.as_bytes()); } - - Ok(()) } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index cb219bc0..c7f6605c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -7,12 +7,14 @@ use std::{ }; use conduit::{ - utils::{calculate_hash, MutexMap, MutexMapGuard}, - warn, Error, PduEvent, Result, + err, + utils::{calculate_hash, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard}, + warn, PduEvent, Result, }; use data::Data; +use database::{Ignore, Interfix}; +use futures::{pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ - api::client::error::ErrorKind, events::{ room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, AnyStrippedStateEvent, StateEventType, TimelineEventType, @@ -81,14 +83,16 @@ impl Service { _statediffremoved: Arc>, state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { - for event_id in statediffnew.iter().filter_map(|new| { + let event_ids = statediffnew.iter().stream().filter_map(|new| { self.services .state_compressor .parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let Some(pdu) = self.services.timeline.get_pdu_json(&event_id)? 
else { + .map_ok_or_else(|_| None, |(_, event_id)| Some(event_id)) + }); + + pin_mut!(event_ids); + while let Some(event_id) = event_ids.next().await { + let Ok(pdu) = self.services.timeline.get_pdu_json(&event_id).await else { continue; }; @@ -113,15 +117,10 @@ impl Service { continue; }; - self.services.state_cache.update_membership( - room_id, - &user_id, - membership_event, - &pdu.sender, - None, - None, - false, - )?; + self.services + .state_cache + .update_membership(room_id, &user_id, membership_event, &pdu.sender, None, None, false) + .await?; }, TimelineEventType::SpaceChild => { self.services @@ -135,10 +134,9 @@ impl Service { } } - self.services.state_cache.update_joined_count(room_id)?; + self.services.state_cache.update_joined_count(room_id).await; - self.db - .set_room_state(room_id, shortstatehash, state_lock)?; + self.db.set_room_state(room_id, shortstatehash, state_lock); Ok(()) } @@ -148,12 +146,16 @@ impl Service { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
#[tracing::instrument(skip(self, state_ids_compressed), level = "debug")] - pub fn set_event_state( + pub async fn set_event_state( &self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: Arc>, ) -> Result { - let shorteventid = self.services.short.get_or_create_shorteventid(event_id)?; + let shorteventid = self + .services + .short + .get_or_create_shorteventid(event_id) + .await; - let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; + let previous_shortstatehash = self.db.get_room_shortstatehash(room_id).await; let state_hash = calculate_hash( &state_ids_compressed @@ -165,13 +167,18 @@ impl Service { let (shortstatehash, already_existed) = self .services .short - .get_or_create_shortstatehash(&state_hash)?; + .get_or_create_shortstatehash(&state_hash) + .await; if !already_existed { - let states_parents = previous_shortstatehash.map_or_else( - || Ok(Vec::new()), - |p| self.services.state_compressor.load_shortstatehash_info(p), - )?; + let states_parents = if let Ok(p) = previous_shortstatehash { + self.services + .state_compressor + .load_shortstatehash_info(p) + .await? + } else { + Vec::new() + }; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { let statediffnew: HashSet<_> = state_ids_compressed @@ -198,7 +205,7 @@ impl Service { )?; } - self.db.set_event_state(shorteventid, shortstatehash)?; + self.db.set_event_state(shorteventid, shortstatehash); Ok(shortstatehash) } @@ -208,34 +215,40 @@ impl Service { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
#[tracing::instrument(skip(self, new_pdu), level = "debug")] - pub fn append_to_state(&self, new_pdu: &PduEvent) -> Result { + pub async fn append_to_state(&self, new_pdu: &PduEvent) -> Result { let shorteventid = self .services .short - .get_or_create_shorteventid(&new_pdu.event_id)?; + .get_or_create_shorteventid(&new_pdu.event_id) + .await; - let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; + let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id).await; - if let Some(p) = previous_shortstatehash { - self.db.set_event_state(shorteventid, p)?; + if let Ok(p) = previous_shortstatehash { + self.db.set_event_state(shorteventid, p); } if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash.map_or_else( - || Ok(Vec::new()), - #[inline] - |p| self.services.state_compressor.load_shortstatehash_info(p), - )?; + let states_parents = if let Ok(p) = previous_shortstatehash { + self.services + .state_compressor + .load_shortstatehash_info(p) + .await? + } else { + Vec::new() + }; let shortstatekey = self .services .short - .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key)?; + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) + .await; let new = self .services .state_compressor - .compress_state_event(shortstatekey, &new_pdu.event_id)?; + .compress_state_event(shortstatekey, &new_pdu.event_id) + .await; let replaces = states_parents .last() @@ -276,49 +289,55 @@ impl Service { } #[tracing::instrument(skip(self, invite_event), level = "debug")] - pub fn calculate_invite_state(&self, invite_event: &PduEvent) -> Result>> { + pub async fn calculate_invite_state(&self, invite_event: &PduEvent) -> Result>> { let mut state = Vec::new(); // Add recommended events - if let Some(e) = - self.services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? 
+ if let Ok(e) = self + .services + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "") + .await { state.push(e.to_stripped_state_event()); } - if let Some(e) = - self.services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? + if let Ok(e) = self + .services + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "") + .await { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.services.state_accessor.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? + if let Ok(e) = self + .services + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomCanonicalAlias, "") + .await { state.push(e.to_stripped_state_event()); } - if let Some(e) = - self.services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? + if let Ok(e) = self + .services + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "") + .await { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.services.state_accessor.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ + if let Ok(e) = self + .services + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomName, "") + .await + { + state.push(e.to_stripped_state_event()); + } + if let Ok(e) = self + .services + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomMember, invite_event.sender.as_str()) + .await + { state.push(e.to_stripped_state_event()); } @@ -333,101 +352,108 @@ impl Service { room_id: &RoomId, shortstatehash: u64, mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.db.set_room_state(room_id, shortstatehash, mutex_lock) + ) { + self.db.set_room_state(room_id, shortstatehash, mutex_lock); } /// Returns the room's version. #[tracing::instrument(skip(self), level = "debug")] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self - .services + pub async fn get_room_version(&self, room_id: &RoomId) -> Result { + self.services .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: RoomCreateEventContent = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "No create event found"))?; - - Ok(create_event_content.room_version) + .room_state_get_content(room_id, &StateEventType::RoomCreate, "") + .await + .map(|content: RoomCreateEventContent| content.room_version) + .map_err(|e| err!(Request(NotFound("No create event found: {e:?}")))) } #[inline] - pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.db.get_room_shortstatehash(room_id) + pub async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result { + self.db.get_room_shortstatehash(room_id).await } - pub fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { - self.db.get_forward_extremities(room_id) + pub fn get_forward_extremities<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + '_ { + let prefix = (room_id, Interfix); + + self.db + .roomid_pduleaves + .keys_prefix(&prefix) + .map_ok(|(_, event_id): (Ignore, &EventId)| event_id) + .ignore_err() } - pub fn set_forward_extremities( + pub async fn set_forward_extremities( &self, room_id: &RoomId, event_ids: Vec, state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) { self.db .set_forward_extremities(room_id, event_ids, state_lock) + .await; } /// This fetches auth events from the current state. #[tracing::instrument(skip(self), level = "debug")] - pub fn get_auth_events( + pub async fn get_auth_events( &self, room_id: &RoomId, kind: &TimelineEventType, sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, ) -> Result>> { - let Some(shortstatehash) = self.get_room_shortstatehash(room_id)? 
else { + let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else { return Ok(HashMap::new()); }; - let auth_events = - state_res::auth_types_for_event(kind, sender, state_key, content).expect("content is a valid JSON object"); + let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content)?; - let mut sauthevents = auth_events - .into_iter() + let mut sauthevents: HashMap<_, _> = auth_events + .iter() + .stream() .filter_map(|(event_type, state_key)| { self.services .short - .get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) + .get_shortstatekey(event_type, state_key) + .map_ok(move |s| (s, (event_type, state_key))) + .map(Result::ok) }) - .collect::>(); + .collect() + .await; let full_state = self .services .state_compressor - .load_shortstatehash_info(shortstatehash)? + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| { + err!(Database( + "Missing shortstatehash info for {room_id:?} at {shortstatehash:?}: {e:?}" + )) + })? 
.pop() .expect("there is always one layer") .1; - Ok(full_state - .iter() - .filter_map(|compressed| { - self.services - .state_compressor - .parse_compressed_state_event(compressed) - .ok() - }) - .filter_map(|(shortstatekey, event_id)| sauthevents.remove(&shortstatekey).map(|k| (k, event_id))) - .filter_map(|(k, event_id)| { - self.services - .timeline - .get_pdu(&event_id) - .ok() - .flatten() - .map(|pdu| (k, pdu)) - }) - .collect()) + let mut ret = HashMap::new(); + for compressed in full_state.iter() { + let Ok((shortstatekey, event_id)) = self + .services + .state_compressor + .parse_compressed_state_event(compressed) + .await + else { + continue; + }; + + let Some((ty, state_key)) = sauthevents.remove(&shortstatekey) else { + continue; + }; + + let Ok(pdu) = self.services.timeline.get_pdu(&event_id).await else { + continue; + }; + + ret.insert((ty.to_owned(), state_key.to_owned()), pdu); + } + + Ok(ret) } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 4c85148d..79a98325 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,7 +1,8 @@ use std::{collections::HashMap, sync::Arc}; -use conduit::{utils, Error, PduEvent, Result}; -use database::Map; +use conduit::{err, PduEvent, Result}; +use database::{Deserialized, Map}; +use futures::TryFutureExt; use ruma::{events::StateEventType, EventId, RoomId}; use crate::{rooms, Dep}; @@ -39,17 +40,22 @@ impl Data { let full_state = self .services .state_compressor - .load_shortstatehash_info(shortstatehash)? + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| err!(Database("Missing state IDs: {e}")))? 
.pop() .expect("there is always one layer") .1; + let mut result = HashMap::new(); let mut i: u8 = 0; for compressed in full_state.iter() { let parsed = self .services .state_compressor - .parse_compressed_state_event(compressed)?; + .parse_compressed_state_event(compressed) + .await?; + result.insert(parsed.0, parsed.1); i = i.wrapping_add(1); @@ -57,6 +63,7 @@ impl Data { tokio::task::yield_now().await; } } + Ok(result) } @@ -67,7 +74,8 @@ impl Data { let full_state = self .services .state_compressor - .load_shortstatehash_info(shortstatehash)? + .load_shortstatehash_info(shortstatehash) + .await? .pop() .expect("there is always one layer") .1; @@ -78,18 +86,13 @@ impl Data { let (_, eventid) = self .services .state_compressor - .parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.services.timeline.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); + .parse_compressed_state_event(compressed) + .await?; + + if let Ok(pdu) = self.services.timeline.get_pdu(&eventid).await { + if let Some(state_key) = pdu.state_key.as_ref() { + result.insert((pdu.kind.to_string().into(), state_key.clone()), pdu); + } } i = i.wrapping_add(1); @@ -101,61 +104,63 @@ impl Data { Ok(result) } - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). #[allow(clippy::unused_self)] - pub(super) fn state_get_id( + pub(super) async fn state_get_id( &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - let Some(shortstatekey) = self + ) -> Result> { + let shortstatekey = self .services .short - .get_shortstatekey(event_type, state_key)? 
- else { - return Ok(None); - }; + .get_shortstatekey(event_type, state_key) + .await?; + let full_state = self .services .state_compressor - .load_shortstatehash_info(shortstatehash)? + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? .pop() .expect("there is always one layer") .1; - Ok(full_state + + let compressed = full_state .iter() .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.services - .state_compressor - .parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) + .ok_or(err!(Database("No shortstatekey in compressed state")))?; + + self.services + .state_compressor + .parse_compressed_state_event(compressed) + .map_ok(|(_, id)| id) + .map_err(|e| { + err!(Database(error!( + ?event_type, + ?state_key, + ?shortstatekey, + "Failed to parse compressed: {e:?}" + ))) + }) + .await } - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub(super) fn state_get( + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). + pub(super) async fn state_get( &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.services.timeline.get_pdu(&event_id)) + ) -> Result> { + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id| async move { self.services.timeline.get_pdu(&event_id).await }) + .await } /// Returns the state hash for this pdu. - pub(super) fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { + pub(super) async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash bytes in shorteventid_shortstatehash") - }) - }) - .transpose() - }) + .qry(event_id) + .and_then(|shorteventid| self.shorteventid_shortstatehash.qry(&shorteventid)) + .await + .deserialized() } /// Returns the full room state. @@ -163,34 +168,33 @@ impl Data { pub(super) async fn room_state_full( &self, room_id: &RoomId, ) -> Result>> { - if let Some(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_full(shortstatehash)) + .map_err(|e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .await } - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub(super) fn room_state_get_id( + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). + pub(super) async fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } + ) -> Result> { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) + .await } - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub(super) fn room_state_get( + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). + pub(super) async fn room_state_get( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id)? 
{ - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } + ) -> Result> { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 58fa31b3..4c28483c 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -6,8 +6,13 @@ use std::{ sync::{Arc, Mutex as StdMutex, Mutex}, }; -use conduit::{err, error, pdu::PduBuilder, utils::math::usize_from_f64, warn, Error, PduEvent, Result}; -use data::Data; +use conduit::{ + err, error, + pdu::PduBuilder, + utils::{math::usize_from_f64, ReadyExt}, + Error, PduEvent, Result, +}; +use futures::StreamExt; use lru_cache::LruCache; use ruma::{ events::{ @@ -31,8 +36,10 @@ use ruma::{ EventEncryptionAlgorithm, EventId, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; +use serde::Deserialize; use serde_json::value::to_raw_value; +use self::data::Data; use crate::{rooms, rooms::state::RoomMutexGuard, Dep}; pub struct Service { @@ -99,54 +106,58 @@ impl Service { /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] - pub fn state_get_id( + pub async fn state_get_id( &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - self.db.state_get_id(shortstatehash, event_type, state_key) + ) -> Result> { + self.db + .state_get_id(shortstatehash, event_type, state_key) + .await } /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). 
#[inline] - pub fn state_get( + pub async fn state_get( &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - self.db.state_get(shortstatehash, event_type, state_key) + ) -> Result> { + self.db + .state_get(shortstatehash, event_type, state_key) + .await } /// Get membership for given user in state - fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result { - self.state_get(shortstatehash, &StateEventType::RoomMember, user_id.as_str())? - .map_or(Ok(MembershipState::Leave), |s| { + async fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> MembershipState { + self.state_get(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_or(MembershipState::Leave, |s| { serde_json::from_str(s.content.get()) .map(|c: RoomMemberEventContent| c.membership) .map_err(|_| Error::bad_database("Invalid room membership event in database.")) + .unwrap() }) } /// The user was a joined member at this state (potentially in the past) #[inline] - fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id) - .is_ok_and(|s| s == MembershipState::Join) - // Return sensible default, i.e. - // false + async fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id).await == MembershipState::Join } /// The user was an invited or joined room member at this state (potentially /// in the past) #[inline] - fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id) - .is_ok_and(|s| s == MembershipState::Join || s == MembershipState::Invite) - // Return sensible default, i.e. 
false + async fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { + let s = self.user_membership(shortstatehash, user_id).await; + s == MembershipState::Join || s == MembershipState::Invite } /// Whether a server is allowed to see an event through federation, based on /// the room's history_visibility at that event's state. #[tracing::instrument(skip(self, origin, room_id, event_id))] - pub fn server_can_see_event(&self, origin: &ServerName, room_id: &RoomId, event_id: &EventId) -> Result { - let Some(shortstatehash) = self.pdu_shortstatehash(event_id)? else { + pub async fn server_can_see_event( + &self, origin: &ServerName, room_id: &RoomId, event_id: &EventId, + ) -> Result { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { return Ok(true); }; @@ -160,8 +171,9 @@ impl Service { } let history_visibility = self - .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? - .map_or(Ok(HistoryVisibility::Shared), |s| { + .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) .map_err(|e| { @@ -171,25 +183,28 @@ impl Service { ); Error::bad_database("Invalid history visibility event in database.") }) - }) - .unwrap_or(HistoryVisibility::Shared); + .unwrap() + }); - let mut current_server_members = self + let current_server_members = self .services .state_cache .room_members(room_id) - .filter_map(Result::ok) - .filter(|member| member.server_name() == origin); + .ready_filter(|member| member.server_name() == origin); let visibility = match history_visibility { HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny - current_server_members.any(|member| self.user_was_invited(shortstatehash, &member)) + 
current_server_members + .any(|member| self.user_was_invited(shortstatehash, member)) + .await }, HistoryVisibility::Joined => { // Allow if any member on requested server was joined, else deny - current_server_members.any(|member| self.user_was_joined(shortstatehash, &member)) + current_server_members + .any(|member| self.user_was_joined(shortstatehash, member)) + .await }, _ => { error!("Unknown history visibility {history_visibility}"); @@ -208,9 +223,9 @@ impl Service { /// Whether a user is allowed to see an event, based on /// the room's history_visibility at that event's state. #[tracing::instrument(skip(self, user_id, room_id, event_id))] - pub fn user_can_see_event(&self, user_id: &UserId, room_id: &RoomId, event_id: &EventId) -> Result { - let Some(shortstatehash) = self.pdu_shortstatehash(event_id)? else { - return Ok(true); + pub async fn user_can_see_event(&self, user_id: &UserId, room_id: &RoomId, event_id: &EventId) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; }; if let Some(visibility) = self @@ -219,14 +234,15 @@ impl Service { .unwrap() .get_mut(&(user_id.to_owned(), shortstatehash)) { - return Ok(*visibility); + return *visibility; } - let currently_member = self.services.state_cache.is_joined(user_id, room_id)?; + let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; let history_visibility = self - .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? 
- .map_or(Ok(HistoryVisibility::Shared), |s| { + .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) .map_err(|e| { @@ -236,19 +252,19 @@ impl Service { ); Error::bad_database("Invalid history visibility event in database.") }) - }) - .unwrap_or(HistoryVisibility::Shared); + .unwrap() + }); let visibility = match history_visibility { HistoryVisibility::WorldReadable => true, HistoryVisibility::Shared => currently_member, HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, user_id) + self.user_was_invited(shortstatehash, user_id).await }, HistoryVisibility::Joined => { // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, user_id) + self.user_was_joined(shortstatehash, user_id).await }, _ => { error!("Unknown history visibility {history_visibility}"); @@ -261,17 +277,18 @@ impl Service { .unwrap() .insert((user_id.to_owned(), shortstatehash), visibility); - Ok(visibility) + visibility } /// Whether a user is allowed to see an event, based on /// the room's history_visibility at that event's state. #[tracing::instrument(skip(self, user_id, room_id))] - pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let currently_member = self.services.state_cache.is_joined(user_id, room_id)?; + pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { + let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; let history_visibility = self - .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? 
+ .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "") + .await .map_or(Ok(HistoryVisibility::Shared), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) @@ -285,11 +302,13 @@ impl Service { }) .unwrap_or(HistoryVisibility::Shared); - Ok(currently_member || history_visibility == HistoryVisibility::WorldReadable) + currently_member || history_visibility == HistoryVisibility::WorldReadable } /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.db.pdu_shortstatehash(event_id) } + pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + self.db.pdu_shortstatehash(event_id).await + } /// Returns the full room state. #[tracing::instrument(skip(self), level = "debug")] @@ -300,47 +319,61 @@ impl Service { /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_get_id( + pub async fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - self.db.room_state_get_id(room_id, event_type, state_key) + ) -> Result> { + self.db + .room_state_get_id(room_id, event_type, state_key) + .await } /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_get( + pub async fn room_state_get( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - self.db.room_state_get(room_id, event_type, state_key) + ) -> Result> { + self.db.room_state_get(room_id, event_type, state_key).await } - pub fn get_name(&self, room_id: &RoomId) -> Result> { - self.room_state_get(room_id, &StateEventType::RoomName, "")? 
- .map_or(Ok(None), |s| { - Ok(serde_json::from_str(s.content.get()).map_or_else(|_| None, |c: RoomNameEventContent| Some(c.name))) - }) + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). + pub async fn room_state_get_content( + &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + ) -> Result + where + T: for<'de> Deserialize<'de> + Send, + { + use serde_json::from_str; + + self.room_state_get(room_id, event_type, state_key) + .await + .and_then(|event| from_str::(event.content.get()).map_err(Into::into)) } - pub fn get_avatar(&self, room_id: &RoomId) -> Result> { - self.room_state_get(room_id, &StateEventType::RoomAvatar, "")? - .map_or(Ok(ruma::JsOption::Undefined), |s| { + pub async fn get_name(&self, room_id: &RoomId) -> Result { + self.room_state_get_content(room_id, &StateEventType::RoomName, "") + .await + .map(|c: RoomNameEventContent| c.name) + } + + pub async fn get_avatar(&self, room_id: &RoomId) -> ruma::JsOption { + self.room_state_get(room_id, &StateEventType::RoomAvatar, "") + .await + .map_or(ruma::JsOption::Undefined, |s| { serde_json::from_str(s.content.get()) .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) + .unwrap() }) } - pub fn get_member(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? 
- .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map_err(|_| Error::bad_database("Invalid room member event in database.")) - }) + pub async fn get_member(&self, room_id: &RoomId, user_id: &UserId) -> Result { + self.room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str()) + .await } - pub fn user_can_invite( + pub async fn user_can_invite( &self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &RoomMutexGuard, - ) -> Result { + ) -> bool { let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite)) .expect("Event content always serializes"); @@ -353,122 +386,101 @@ impl Service { timestamp: None, }; - Ok(self - .services + self.services .timeline .create_hash_and_sign_event(new_event, sender, room_id, state_lock) - .is_ok()) + .await + .is_ok() } /// Checks if guests are able to view room content without joining - pub fn is_world_readable(&self, room_id: &RoomId) -> Result { - self.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? - .map_or(Ok(false), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| { - c.history_visibility == HistoryVisibility::WorldReadable - }) - .map_err(|e| { - error!( - "Invalid room history visibility event in database for room {room_id}, assuming not world \ - readable: {e} " - ); - Error::bad_database("Invalid room history visibility event in database.") - }) - }) + pub async fn is_world_readable(&self, room_id: &RoomId) -> bool { + self.room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") + .await + .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility == HistoryVisibility::WorldReadable) + .unwrap_or(false) } /// Checks if guests are able to join a given room - pub fn guest_can_join(&self, room_id: &RoomId) -> Result { - self.room_state_get(room_id, &StateEventType::RoomGuestAccess, "")? 
- .map_or(Ok(false), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomGuestAccessEventContent| c.guest_access == GuestAccess::CanJoin) - .map_err(|_| Error::bad_database("Invalid room guest access event in database.")) - }) + pub async fn guest_can_join(&self, room_id: &RoomId) -> bool { + self.room_state_get_content(room_id, &StateEventType::RoomGuestAccess, "") + .await + .map(|c: RoomGuestAccessEventContent| c.guest_access == GuestAccess::CanJoin) + .unwrap_or(false) } /// Gets the primary alias from canonical alias event - pub fn get_canonical_alias(&self, room_id: &RoomId) -> Result, Error> { - self.room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomCanonicalAliasEventContent| c.alias) - .map_err(|_| Error::bad_database("Invalid canonical alias event in database.")) + pub async fn get_canonical_alias(&self, room_id: &RoomId) -> Result { + self.room_state_get_content(room_id, &StateEventType::RoomCanonicalAlias, "") + .await + .and_then(|c: RoomCanonicalAliasEventContent| { + c.alias + .ok_or_else(|| err!(Request(NotFound("No alias found in event content.")))) }) } /// Gets the room topic - pub fn get_room_topic(&self, room_id: &RoomId) -> Result, Error> { - self.room_state_get(room_id, &StateEventType::RoomTopic, "")? 
- .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomTopicEventContent| Some(c.topic)) - .map_err(|e| { - error!("Invalid room topic event in database for room {room_id}: {e}"); - Error::bad_database("Invalid room topic event in database.") - }) - }) + pub async fn get_room_topic(&self, room_id: &RoomId) -> Result { + self.room_state_get_content(room_id, &StateEventType::RoomTopic, "") + .await + .map(|c: RoomTopicEventContent| c.topic) } /// Checks if a given user can redact a given event /// /// If federation is true, it allows redaction events from any user of the /// same server as the original event sender - pub fn user_can_redact( + pub async fn user_can_redact( &self, redacts: &EventId, sender: &UserId, room_id: &RoomId, federation: bool, ) -> Result { - self.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? - .map_or_else( - || { - // Falling back on m.room.create to judge power level - if let Some(pdu) = self.room_state_get(room_id, &StateEventType::RoomCreate, "")? 
{ - Ok(pdu.sender == sender - || if let Ok(Some(pdu)) = self.services.timeline.get_pdu(redacts) { - pdu.sender == sender - } else { - false - }) + if let Ok(event) = self + .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") + .await + { + let Ok(event) = serde_json::from_str(event.content.get()) + .map(|content: RoomPowerLevelsEventContent| content.into()) + .map(|event: RoomPowerLevels| event) + else { + return Ok(false); + }; + + Ok(event.user_can_redact_event_of_other(sender) + || event.user_can_redact_own_event(sender) + && if let Ok(pdu) = self.services.timeline.get_pdu(redacts).await { + if federation { + pdu.sender.server_name() == sender.server_name() + } else { + pdu.sender == sender + } } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } - }, - |event| { - serde_json::from_str(event.content.get()) - .map(|content: RoomPowerLevelsEventContent| content.into()) - .map(|event: RoomPowerLevels| { - event.user_can_redact_event_of_other(sender) - || event.user_can_redact_own_event(sender) - && if let Ok(Some(pdu)) = self.services.timeline.get_pdu(redacts) { - if federation { - pdu.sender.server_name() == sender.server_name() - } else { - pdu.sender == sender - } - } else { - false - } - }) - .map_err(|_| Error::bad_database("Invalid m.room.power_levels event in database")) - }, - ) + false + }) + } else { + // Falling back on m.room.create to judge power level + if let Ok(pdu) = self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + Ok(pdu.sender == sender + || if let Ok(pdu) = self.services.timeline.get_pdu(redacts).await { + pdu.sender == sender + } else { + false + }) + } else { + Err(Error::bad_database( + "No m.room.power_levels or m.room.create events in database for room", + )) + } + } } /// Returns the join rule (`SpaceRoomJoinRule`) for a given room - pub fn get_join_rule(&self, room_id: &RoomId) -> Result<(SpaceRoomJoinRule, Vec), Error> { - Ok(self - 
.room_state_get(room_id, &StateEventType::RoomJoinRules, "")? - .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomJoinRulesEventContent| { - (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) - }) - .map_err(|e| err!(Database(error!("Invalid room join rule event in database: {e}")))) - }) - .transpose()? - .unwrap_or((SpaceRoomJoinRule::Invite, vec![]))) + pub async fn get_join_rule(&self, room_id: &RoomId) -> Result<(SpaceRoomJoinRule, Vec)> { + self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") + .await + .map(|c: RoomJoinRulesEventContent| (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule))) + .or_else(|_| Ok((SpaceRoomJoinRule::Invite, vec![]))) } /// Returns an empty vec if not a restricted room @@ -487,25 +499,21 @@ impl Service { room_ids } - pub fn get_room_type(&self, room_id: &RoomId) -> Result> { - Ok(self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .map(|s| { - serde_json::from_str::(s.content.get()) - .map_err(|e| err!(Database(error!("Invalid room create event in database: {e}")))) + pub async fn get_room_type(&self, room_id: &RoomId) -> Result { + self.room_state_get_content(room_id, &StateEventType::RoomCreate, "") + .await + .and_then(|content: RoomCreateEventContent| { + content + .room_type + .ok_or_else(|| err!(Request(NotFound("No type found in event content")))) }) - .transpose()? - .and_then(|e| e.room_type)) } /// Gets the room's encryption algorithm if `m.room.encryption` state event /// is found - pub fn get_room_encryption(&self, room_id: &RoomId) -> Result> { - self.room_state_get(room_id, &StateEventType::RoomEncryption, "")? 
- .map_or(Ok(None), |s| { - serde_json::from_str::(s.content.get()) - .map(|content| Some(content.algorithm)) - .map_err(|e| err!(Database(error!("Invalid room encryption event in database: {e}")))) - }) + pub async fn get_room_encryption(&self, room_id: &RoomId) -> Result { + self.room_state_get_content(room_id, &StateEventType::RoomEncryption, "") + .await + .map(|content: RoomEncryptionEventContent| content.algorithm) } } diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 19c73ea1..38e504f6 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,43 +1,42 @@ use std::{ - collections::{HashMap, HashSet}, + collections::HashMap, sync::{Arc, RwLock}, }; -use conduit::{utils, Error, Result}; -use database::Map; -use itertools::Itertools; +use conduit::{utils, utils::stream::TryIgnore, Error, Result}; +use database::{Deserialized, Interfix, Map}; +use futures::{Stream, StreamExt}; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, - OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + OwnedRoomId, RoomId, UserId, }; -use crate::{appservice::RegistrationInfo, globals, users, Dep}; +use crate::{globals, Dep}; -type StrippedStateEventIter<'a> = Box>)>> + 'a>; -type AnySyncStateEventIter<'a> = Box>)>> + 'a>; type AppServiceInRoomCache = RwLock>>; +type StrippedStateEventItem = (OwnedRoomId, Vec>); +type SyncStateEventItem = (OwnedRoomId, Vec>); pub(super) struct Data { pub(super) appservice_in_room_cache: AppServiceInRoomCache, - roomid_invitedcount: Arc, - roomid_inviteviaservers: Arc, - roomid_joinedcount: Arc, - roomserverids: Arc, - roomuserid_invitecount: Arc, - roomuserid_joined: Arc, - roomuserid_leftcount: Arc, - roomuseroncejoinedids: Arc, - serverroomids: Arc, - userroomid_invitestate: Arc, - userroomid_joined: Arc, - userroomid_leftstate: Arc, + pub(super) roomid_invitedcount: Arc, + pub(super) roomid_inviteviaservers: Arc, + 
pub(super) roomid_joinedcount: Arc, + pub(super) roomserverids: Arc, + pub(super) roomuserid_invitecount: Arc, + pub(super) roomuserid_joined: Arc, + pub(super) roomuserid_leftcount: Arc, + pub(super) roomuseroncejoinedids: Arc, + pub(super) serverroomids: Arc, + pub(super) userroomid_invitestate: Arc, + pub(super) userroomid_joined: Arc, + pub(super) userroomid_leftstate: Arc, services: Services, } struct Services { globals: Dep, - users: Dep, } impl Data { @@ -59,19 +58,18 @@ impl Data { userroomid_leftstate: db["userroomid_leftstate"].clone(), services: Services { globals: args.depend::("globals"), - users: args.depend::("users"), }, } } - pub(super) fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + pub(super) fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); - self.roomuseroncejoinedids.insert(&userroom_id, &[]) + self.roomuseroncejoinedids.insert(&userroom_id, &[]); } - pub(super) fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + pub(super) fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { let roomid = room_id.as_bytes().to_vec(); let mut roomuser_id = roomid.clone(); @@ -82,64 +80,17 @@ impl Data { userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; + self.userroomid_joined.insert(&userroom_id, &[]); + self.roomuserid_joined.insert(&roomuser_id, &[]); + self.userroomid_invitestate.remove(&userroom_id); + self.roomuserid_invitecount.remove(&roomuser_id); + self.userroomid_leftstate.remove(&userroom_id); + 
self.roomuserid_leftcount.remove(&roomuser_id); - self.roomid_inviteviaservers.remove(&roomid)?; - - Ok(()) + self.roomid_inviteviaservers.remove(&roomid); } - pub(super) fn mark_as_invited( - &self, user_id: &UserId, room_id: &RoomId, last_state: Option>>, - invite_via: Option>, - ) -> Result<()> { - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xFF); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()).expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &self.services.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - if let Some(servers) = invite_via { - let mut prev_servers = self - .servers_invite_via(room_id) - .filter_map(Result::ok) - .collect_vec(); - #[allow(clippy::redundant_clone)] // this is a necessary clone? 
- prev_servers.append(servers.clone().as_mut()); - let servers = prev_servers.iter().rev().unique().rev().collect_vec(); - - let servers = servers - .iter() - .map(|server| server.as_bytes()) - .collect_vec() - .join(&[0xFF][..]); - - self.roomid_inviteviaservers - .insert(room_id.as_bytes(), &servers)?; - } - - Ok(()) - } - - pub(super) fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + pub(super) fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { let roomid = room_id.as_bytes().to_vec(); let mut roomuser_id = roomid.clone(); @@ -153,115 +104,20 @@ impl Data { self.userroomid_leftstate.insert( &userroom_id, &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO + ); // TODO self.roomuserid_leftcount - .insert(&roomuser_id, &self.services.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; + .insert(&roomuser_id, &self.services.globals.next_count().unwrap().to_be_bytes()); + self.userroomid_joined.remove(&userroom_id); + self.roomuserid_joined.remove(&roomuser_id); + self.userroomid_invitestate.remove(&userroom_id); + self.roomuserid_invitecount.remove(&roomuser_id); - self.roomid_inviteviaservers.remove(&roomid)?; - - Ok(()) - } - - pub(super) fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(Result::ok) { - joined_servers.insert(joined.server_name().to_owned()); - joinedcount = joinedcount.saturating_add(1); - } - - for _invited in self.room_members_invited(room_id).filter_map(Result::ok) { - invitedcount = invitedcount.saturating_add(1); - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - 
.insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - for old_joined_server in self.room_servers(room_id).filter_map(Result::ok) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xFF); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xFF); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xFF); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xFF); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, appservice), level = "debug")] - pub(super) fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.registration.id)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else { - let bridge_user_id = UserId::parse_with_server_name( - appservice.registration.sender_localpart.as_str(), - self.services.globals.server_name(), - ) - .ok(); - - let in_room = bridge_user_id.map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self - .room_members(room_id) - .any(|userid| userid.map_or(false, |userid| appservice.users.is_match(userid.as_str()))); - - self.appservice_in_room_cache - 
.write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.registration.id.clone(), in_room); - - Ok(in_room) - } + self.roomid_inviteviaservers.remove(&roomid); } /// Makes a user forget a room. #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { + pub(super) fn forget(&self, room_id: &RoomId, user_id: &UserId) { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -270,397 +126,69 @@ impl Data { roomuser_id.push(0xFF); roomuser_id.extend_from_slice(user_id.as_bytes()); - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn room_servers<'a>( - &'a self, room_id: &RoomId, - ) -> Box> + 'a> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - - Box::new(self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - })) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we - /// know). 
- #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn server_rooms<'a>( - &'a self, server: &ServerName, - ) -> Box> + 'a> { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xFF); - - Box::new(self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - })) - } - - /// Returns an iterator of all joined members of a room. - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn room_members<'a>( - &'a self, room_id: &RoomId, - ) -> Box> + Send + 'a> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - - Box::new(self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - })) - } - - /// Returns an iterator of all our local users in the room, even if they're - /// deactivated/guests - pub(super) fn local_users_in_room<'a>(&'a self, room_id: &RoomId) -> Box + 'a> { - Box::new( - self.room_members(room_id) - .filter_map(Result::ok) - .filter(|user| self.services.globals.user_is_local(user)), - ) - } - - /// Returns an iterator of all our local joined users in a room who are - /// active (not deactivated, not guest) - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn active_local_users_in_room<'a>( - &'a self, room_id: &RoomId, - ) -> Box + 'a> { - Box::new( - self.local_users_in_room(room_id) - .filter(|user| !self.services.users.is_deactivated(user).unwrap_or(true)), - ) - } - - /// Returns the 
number of users which are currently in a room - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| utils::u64_from_bytes(&b).map_err(|_| Error::bad_database("Invalid joinedcount in db."))) - .transpose() - } - - /// Returns the number of users which are currently invited to a room - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| utils::u64_from_bytes(&b).map_err(|_| Error::bad_database("Invalid joinedcount in db."))) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn room_useroncejoined<'a>( - &'a self, room_id: &RoomId, - ) -> Box> + 'a> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - - Box::new( - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }), - ) - } - - /// Returns an iterator over all invited members of a room. 
- #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn room_members_invited<'a>( - &'a self, room_id: &RoomId, - ) -> Box> + 'a> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - - Box::new( - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }), - ) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some( - utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid invitecount in db."))?, - )) - }) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid leftcount in db."))) - .transpose() - } - - /// Returns an iterator over all rooms this user joined. 
- #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn rooms_joined(&self, user_id: &UserId) -> Box> + '_> { - Box::new( - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }), - ) + self.userroomid_leftstate.remove(&userroom_id); + self.roomuserid_leftcount.remove(&roomuser_id); } /// Returns an iterator over all rooms a user was invited to. - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn rooms_invited<'a>(&'a self, user_id: &UserId) -> StrippedStateEventIter<'a> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - - Box::new( - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }), - ) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn invite_state( - &self, user_id: &UserId, room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); - + #[inline] + pub(super) fn rooms_invited<'a>( + &'a self, user_id: &'a UserId, + ) -> impl Stream + Send + 'a { + let prefix = (user_id, Interfix); self.userroomid_invitestate - .get(&key)? 
- .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + .stream_raw_prefix(&prefix) + .ignore_err() + .map(|(key, val)| { + let room_id = key.rsplit(|&b| b == 0xFF).next().unwrap(); + let room_id = utils::string_from_bytes(room_id).unwrap(); + let room_id = RoomId::parse(room_id).unwrap(); + let state = serde_json::from_slice(val) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate.")) + .unwrap(); - Ok(state) + (room_id, state) }) - .transpose() } #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn left_state( + pub(super) async fn invite_state( &self, user_id: &UserId, room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); + ) -> Result>> { + let key = (user_id, room_id); + self.userroomid_invitestate + .qry(&key) + .await + .deserialized_json() + } + #[tracing::instrument(skip(self), level = "debug")] + pub(super) async fn left_state( + &self, user_id: &UserId, room_id: &RoomId, + ) -> Result>> { + let key = (user_id, room_id); self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() + .qry(&key) + .await + .deserialized_json() } /// Returns an iterator over all rooms a user left. 
- #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn rooms_left<'a>(&'a self, user_id: &UserId) -> AnySyncStateEventIter<'a> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); + #[inline] + pub(super) fn rooms_left<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + let prefix = (user_id, Interfix); + self.userroomid_leftstate + .stream_raw_prefix(&prefix) + .ignore_err() + .map(|(key, val)| { + let room_id = key.rsplit(|&b| b == 0xFF).next().unwrap(); + let room_id = utils::string_from_bytes(room_id).unwrap(); + let room_id = RoomId::parse(room_id).unwrap(); + let state = serde_json::from_slice(val) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate.")) + .unwrap(); - Box::new( - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }), - ) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - 
} - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn servers_invite_via<'a>( - &'a self, room_id: &RoomId, - ) -> Box> + 'a> { - let key = room_id.as_bytes().to_vec(); - - Box::new( - self.roomid_inviteviaservers - .scan_prefix(key) - .map(|(_, servers)| { - ServerName::parse( - utils::string_from_bytes( - servers - .rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomid_inviteviaservers is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomid_inviteviaservers is invalid.")) - }), - ) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn add_servers_invite_via(&self, room_id: &RoomId, servers: &[OwnedServerName]) -> Result<()> { - let mut prev_servers = self - .servers_invite_via(room_id) - .filter_map(Result::ok) - .collect_vec(); - prev_servers.extend(servers.to_owned()); - prev_servers.sort_unstable(); - prev_servers.dedup(); - - let servers = prev_servers - .iter() - .map(|server| server.as_bytes()) - .collect_vec() - .join(&[0xFF][..]); - - self.roomid_inviteviaservers - .insert(room_id.as_bytes(), &servers)?; - - Ok(()) + (room_id, state) + }) } } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs 
index 71899ceb..ce5b024b 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,9 +1,15 @@ mod data; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; -use conduit::{err, error, warn, Error, Result}; +use conduit::{ + err, + utils::{stream::TryIgnore, ReadyExt}, + warn, Result, +}; use data::Data; +use database::{Deserialized, Ignore, Interfix}; +use futures::{Stream, StreamExt}; use itertools::Itertools; use ruma::{ events::{ @@ -18,7 +24,7 @@ use ruma::{ }, int, serde::Raw, - OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; use crate::{account_data, appservice::RegistrationInfo, globals, rooms, users, Dep}; @@ -55,7 +61,7 @@ impl Service { /// Update current membership data. #[tracing::instrument(skip(self, last_state))] #[allow(clippy::too_many_arguments)] - pub fn update_membership( + pub async fn update_membership( &self, room_id: &RoomId, user_id: &UserId, membership_event: RoomMemberEventContent, sender: &UserId, last_state: Option>>, invite_via: Option>, update_joined_count: bool, @@ -68,7 +74,7 @@ impl Service { // update #[allow(clippy::collapsible_if)] if !self.services.globals.user_is_local(user_id) { - if !self.services.users.exists(user_id)? { + if !self.services.users.exists(user_id).await { self.services.users.create(user_id, None)?; } @@ -100,17 +106,17 @@ impl Service { match &membership { MembershipState::Join => { // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { + if !self.once_joined(user_id, room_id).await { // Add the user ID to the join list then - self.db.mark_as_once_joined(user_id, room_id)?; + self.db.mark_as_once_joined(user_id, room_id); // Check if the room has a predecessor - if let Some(predecessor) = self + if let Ok(Some(predecessor)) = self .services .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
- .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) + .room_state_get_content(room_id, &StateEventType::RoomCreate, "") + .await + .map(|content: RoomCreateEventContent| content.predecessor) { // Copy user settings from predecessor to the current room: // - Push rules @@ -138,32 +144,33 @@ impl Service { // .ok(); // Copy old tags to new room - if let Some(tag_event) = self + if let Ok(tag_event) = self .services .account_data - .get(Some(&predecessor.room_id), user_id, RoomAccountDataEventType::Tag)? - .map(|event| { + .get(Some(&predecessor.room_id), user_id, RoomAccountDataEventType::Tag) + .await + .and_then(|event| { serde_json::from_str(event.get()) .map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}")))) }) { self.services .account_data - .update(Some(room_id), user_id, RoomAccountDataEventType::Tag, &tag_event?) + .update(Some(room_id), user_id, RoomAccountDataEventType::Tag, &tag_event) + .await .ok(); }; // Copy direct chat flag - if let Some(direct_event) = self + if let Ok(mut direct_event) = self .services .account_data - .get(None, user_id, GlobalAccountDataEventType::Direct.to_string().into())? 
- .map(|event| { + .get(None, user_id, GlobalAccountDataEventType::Direct.to_string().into()) + .await + .and_then(|event| { serde_json::from_str::(event.get()) .map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}")))) }) { - let mut direct_event = direct_event?; let mut room_ids_updated = false; - for room_ids in direct_event.content.0.values_mut() { if room_ids.iter().any(|r| r == &predecessor.room_id) { room_ids.push(room_id.to_owned()); @@ -172,18 +179,21 @@ impl Service { } if room_ids_updated { - self.services.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &serde_json::to_value(&direct_event).expect("to json always works"), - )?; + self.services + .account_data + .update( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + &serde_json::to_value(&direct_event).expect("to json always works"), + ) + .await?; } }; } } - self.db.mark_as_joined(user_id, room_id)?; + self.db.mark_as_joined(user_id, room_id); }, MembershipState::Invite => { // We want to know if the sender is ignored by the receiver @@ -196,12 +206,12 @@ impl Service { GlobalAccountDataEventType::IgnoredUserList .to_string() .into(), - )? - .map(|event| { + ) + .await + .and_then(|event| { serde_json::from_str::(event.get()) .map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}")))) }) - .transpose()? 
.map_or(false, |ignored| { ignored .content @@ -214,194 +224,282 @@ impl Service { return Ok(()); } - self.db - .mark_as_invited(user_id, room_id, last_state, invite_via)?; + self.mark_as_invited(user_id, room_id, last_state, invite_via) + .await; }, MembershipState::Leave | MembershipState::Ban => { - self.db.mark_as_left(user_id, room_id)?; + self.db.mark_as_left(user_id, room_id); }, _ => {}, } if update_joined_count { - self.update_joined_count(room_id)?; + self.update_joined_count(room_id).await; } Ok(()) } - #[tracing::instrument(skip(self, room_id), level = "debug")] - pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { self.db.update_joined_count(room_id) } - #[tracing::instrument(skip(self, room_id, appservice), level = "debug")] - pub fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result { - self.db.appservice_in_room(room_id, appservice) + pub async fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> bool { + let maybe = self + .db + .appservice_in_room_cache + .read() + .unwrap() + .get(room_id) + .and_then(|map| map.get(&appservice.registration.id)) + .copied(); + + if let Some(b) = maybe { + b + } else { + let bridge_user_id = UserId::parse_with_server_name( + appservice.registration.sender_localpart.as_str(), + self.services.globals.server_name(), + ) + .ok(); + + let in_room = if let Some(id) = &bridge_user_id { + self.is_joined(id, room_id).await + } else { + false + }; + + let in_room = in_room + || self + .room_members(room_id) + .ready_any(|userid| appservice.users.is_match(userid.as_str())) + .await; + + self.db + .appservice_in_room_cache + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default() + .insert(appservice.registration.id.clone(), in_room); + + in_room + } } /// Direct DB function to directly mark a user as left. It is not /// recommended to use this directly. 
You most likely should use /// `update_membership` instead #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - self.db.mark_as_left(user_id, room_id) - } + pub fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { self.db.mark_as_left(user_id, room_id); } /// Direct DB function to directly mark a user as joined. It is not /// recommended to use this directly. You most likely should use /// `update_membership` instead #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - self.db.mark_as_joined(user_id, room_id) - } + pub fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { self.db.mark_as_joined(user_id, room_id); } /// Makes a user forget a room. #[tracing::instrument(skip(self), level = "debug")] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { self.db.forget(room_id, user_id) } + pub fn forget(&self, room_id: &RoomId, user_id: &UserId) { self.db.forget(room_id, user_id); } /// Returns an iterator of all servers participating in this room. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn room_servers(&self, room_id: &RoomId) -> impl Iterator> + '_ { - self.db.room_servers(room_id) + pub fn room_servers<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomserverids + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, server): (Ignore, &ServerName)| server) } #[tracing::instrument(skip(self), level = "debug")] - pub fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result { - self.db.server_in_room(server, room_id) + pub async fn server_in_room<'a>(&'a self, server: &'a ServerName, room_id: &'a RoomId) -> bool { + let key = (server, room_id); + self.db.serverroomids.qry(&key).await.is_ok() } /// Returns an iterator of all rooms a server participates in (as far as we /// know). #[tracing::instrument(skip(self), level = "debug")] - pub fn server_rooms(&self, server: &ServerName) -> impl Iterator> + '_ { - self.db.server_rooms(server) + pub fn server_rooms<'a>(&'a self, server: &'a ServerName) -> impl Stream + Send + 'a { + let prefix = (server, Interfix); + self.db + .serverroomids + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, room_id): (Ignore, &RoomId)| room_id) } /// Returns true if server can see user by sharing at least one room. #[tracing::instrument(skip(self), level = "debug")] - pub fn server_sees_user(&self, server: &ServerName, user_id: &UserId) -> Result { - Ok(self - .server_rooms(server) - .filter_map(Result::ok) - .any(|room_id: OwnedRoomId| self.is_joined(user_id, &room_id).unwrap_or(false))) + pub async fn server_sees_user(&self, server: &ServerName, user_id: &UserId) -> bool { + self.server_rooms(server) + .any(|room_id| self.is_joined(user_id, room_id)) + .await } /// Returns true if user_a and user_b share at least one room. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> Result { + pub async fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> bool { // Minimize number of point-queries by iterating user with least nr rooms - let (a, b) = if self.rooms_joined(user_a).count() < self.rooms_joined(user_b).count() { + let (a, b) = if self.rooms_joined(user_a).count().await < self.rooms_joined(user_b).count().await { (user_a, user_b) } else { (user_b, user_a) }; - Ok(self - .rooms_joined(a) - .filter_map(Result::ok) - .any(|room_id| self.is_joined(b, &room_id).unwrap_or(false))) + self.rooms_joined(a) + .any(|room_id| self.is_joined(b, room_id)) + .await } - /// Returns an iterator over all joined members of a room. + /// Returns an iterator of all joined members of a room. #[tracing::instrument(skip(self), level = "debug")] - pub fn room_members(&self, room_id: &RoomId) -> impl Iterator> + Send + '_ { - self.db.room_members(room_id) + pub fn room_members<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuserid_joined + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) } /// Returns the number of users which are currently in a room #[tracing::instrument(skip(self), level = "debug")] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { self.db.room_joined_count(room_id) } + pub async fn room_joined_count(&self, room_id: &RoomId) -> Result { + self.db.roomid_joinedcount.qry(room_id).await.deserialized() + } #[tracing::instrument(skip(self), level = "debug")] /// Returns an iterator of all our local users in the room, even if they're /// deactivated/guests - pub fn local_users_in_room<'a>(&'a self, room_id: &RoomId) -> impl Iterator + 'a { - self.db.local_users_in_room(room_id) + pub fn local_users_in_room<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + self.room_members(room_id) + 
.ready_filter(|user| self.services.globals.user_is_local(user)) } #[tracing::instrument(skip(self), level = "debug")] /// Returns an iterator of all our local joined users in a room who are /// active (not deactivated, not guest) - pub fn active_local_users_in_room<'a>(&'a self, room_id: &RoomId) -> impl Iterator + 'a { - self.db.active_local_users_in_room(room_id) + pub fn active_local_users_in_room<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + self.local_users_in_room(room_id) + .filter(|user| self.services.users.is_active(user)) } /// Returns the number of users which are currently invited to a room #[tracing::instrument(skip(self), level = "debug")] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { self.db.room_invited_count(room_id) } + pub async fn room_invited_count(&self, room_id: &RoomId) -> Result { + self.db + .roomid_invitedcount + .qry(room_id) + .await + .deserialized() + } /// Returns an iterator over all User IDs who ever joined a room. #[tracing::instrument(skip(self), level = "debug")] - pub fn room_useroncejoined(&self, room_id: &RoomId) -> impl Iterator> + '_ { - self.db.room_useroncejoined(room_id) + pub fn room_useroncejoined<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuseroncejoinedids + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) } /// Returns an iterator over all invited members of a room. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator> + '_ { - self.db.room_members_invited(room_id) + pub fn room_members_invited<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuserid_invitecount + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) } #[tracing::instrument(skip(self), level = "debug")] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.get_invite_count(room_id, user_id) + pub async fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let key = (room_id, user_id); + self.db + .roomuserid_invitecount + .qry(&key) + .await + .deserialized() } #[tracing::instrument(skip(self), level = "debug")] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.get_left_count(room_id, user_id) + pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let key = (room_id, user_id); + self.db.roomuserid_leftcount.qry(&key).await.deserialized() } /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_joined(&self, user_id: &UserId) -> impl Iterator> + '_ { - self.db.rooms_joined(user_id) + pub fn rooms_joined(&self, user_id: &UserId) -> impl Stream + Send { + self.db + .userroomid_joined + .keys_prefix(user_id) + .ignore_err() + .map(|(_, room_id): (Ignore, &RoomId)| room_id) } /// Returns an iterator over all rooms a user was invited to. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_invited( - &self, user_id: &UserId, - ) -> impl Iterator>)>> + '_ { + pub fn rooms_invited<'a>( + &'a self, user_id: &'a UserId, + ) -> impl Stream>)> + Send + 'a { self.db.rooms_invited(user_id) } #[tracing::instrument(skip(self), level = "debug")] - pub fn invite_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>>> { - self.db.invite_state(user_id, room_id) + pub async fn invite_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>> { + self.db.invite_state(user_id, room_id).await } #[tracing::instrument(skip(self), level = "debug")] - pub fn left_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>>> { - self.db.left_state(user_id, room_id) + pub async fn left_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>> { + self.db.left_state(user_id, room_id).await } /// Returns an iterator over all rooms a user left. #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_left( - &self, user_id: &UserId, - ) -> impl Iterator>)>> + '_ { + pub fn rooms_left<'a>( + &'a self, user_id: &'a UserId, + ) -> impl Stream>)> + Send + 'a { self.db.rooms_left(user_id) } #[tracing::instrument(skip(self), level = "debug")] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.once_joined(user_id, room_id) + pub async fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> bool { + let key = (user_id, room_id); + self.db.roomuseroncejoinedids.qry(&key).await.is_ok() } #[tracing::instrument(skip(self), level = "debug")] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { self.db.is_joined(user_id, room_id) } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.is_invited(user_id, room_id) + pub async fn is_joined<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { + let key = (user_id, room_id); + 
self.db.userroomid_joined.qry(&key).await.is_ok() } #[tracing::instrument(skip(self), level = "debug")] - pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { self.db.is_left(user_id, room_id) } + pub async fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> bool { + let key = (user_id, room_id); + self.db.userroomid_invitestate.qry(&key).await.is_ok() + } #[tracing::instrument(skip(self), level = "debug")] - pub fn servers_invite_via(&self, room_id: &RoomId) -> impl Iterator> + '_ { - self.db.servers_invite_via(room_id) + pub async fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> bool { + let key = (user_id, room_id); + self.db.userroomid_leftstate.qry(&key).await.is_ok() + } + + #[tracing::instrument(skip(self), level = "debug")] + pub fn servers_invite_via<'a>(&'a self, room_id: &RoomId) -> impl Stream + Send + 'a { + self.db + .roomid_inviteviaservers + .stream_prefix(room_id) + .ignore_err() + .map(|(_, servers): (Ignore, Vec<&ServerName>)| &**(servers.last().expect("at least one servername"))) } /// Gets up to three servers that are likely to be in the room in the @@ -409,37 +507,27 @@ impl Service { /// /// See #[tracing::instrument(skip(self))] - pub fn servers_route_via(&self, room_id: &RoomId) -> Result> { + pub async fn servers_route_via(&self, room_id: &RoomId) -> Result> { let most_powerful_user_server = self .services .state_accessor - .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? 
- .map(|pdu| { - serde_json::from_str(pdu.content.get()).map(|conent: RoomPowerLevelsEventContent| { - conent - .users - .iter() - .max_by_key(|(_, power)| *power) - .and_then(|x| { - if x.1 >= &int!(50) { - Some(x) - } else { - None - } - }) - .map(|(user, _power)| user.server_name().to_owned()) - }) + .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") + .await + .map(|content: RoomPowerLevelsEventContent| { + content + .users + .iter() + .max_by_key(|(_, power)| *power) + .and_then(|x| (x.1 >= &int!(50)).then_some(x)) + .map(|(user, _power)| user.server_name().to_owned()) }) - .transpose() - .map_err(|e| { - error!("Invalid power levels event content in database: {e}"); - Error::bad_database("Invalid power levels event content in database") - })? - .flatten(); + .map_err(|e| err!(Database(error!(?e, "Invalid power levels event content in database."))))?; let mut servers: Vec = self .room_members(room_id) - .filter_map(Result::ok) + .collect::>() + .await + .iter() .counts_by(|user| user.server_name().to_owned()) .iter() .sorted_by_key(|(_, users)| *users) @@ -468,4 +556,139 @@ impl Service { .expect("locked") .clear(); } + + pub async fn update_joined_count(&self, room_id: &RoomId) { + let mut joinedcount = 0_u64; + let mut invitedcount = 0_u64; + let mut joined_servers = HashSet::new(); + + self.room_members(room_id) + .ready_for_each(|joined| { + joined_servers.insert(joined.server_name().to_owned()); + joinedcount = joinedcount.saturating_add(1); + }) + .await; + + invitedcount = invitedcount.saturating_add( + self.room_members_invited(room_id) + .count() + .await + .try_into() + .unwrap_or(0), + ); + + self.db + .roomid_joinedcount + .insert(room_id.as_bytes(), &joinedcount.to_be_bytes()); + + self.db + .roomid_invitedcount + .insert(room_id.as_bytes(), &invitedcount.to_be_bytes()); + + self.room_servers(room_id) + .ready_for_each(|old_joined_server| { + if !joined_servers.remove(old_joined_server) { + // Server not in room anymore + let 
mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xFF); + roomserver_id.extend_from_slice(old_joined_server.as_bytes()); + + let mut serverroom_id = old_joined_server.as_bytes().to_vec(); + serverroom_id.push(0xFF); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.db.roomserverids.remove(&roomserver_id); + self.db.serverroomids.remove(&serverroom_id); + } + }) + .await; + + // Now only new servers are in joined_servers anymore + for server in joined_servers { + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xFF); + roomserver_id.extend_from_slice(server.as_bytes()); + + let mut serverroom_id = server.as_bytes().to_vec(); + serverroom_id.push(0xFF); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.db.roomserverids.insert(&roomserver_id, &[]); + self.db.serverroomids.insert(&serverroom_id, &[]); + } + + self.db + .appservice_in_room_cache + .write() + .unwrap() + .remove(room_id); + } + + pub async fn mark_as_invited( + &self, user_id: &UserId, room_id: &RoomId, last_state: Option>>, + invite_via: Option>, + ) { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xFF); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.db.userroomid_invitestate.insert( + &userroom_id, + &serde_json::to_vec(&last_state.unwrap_or_default()).expect("state to bytes always works"), + ); + self.db + .roomuserid_invitecount + .insert(&roomuser_id, &self.services.globals.next_count().unwrap().to_be_bytes()); + self.db.userroomid_joined.remove(&userroom_id); + self.db.roomuserid_joined.remove(&roomuser_id); + self.db.userroomid_leftstate.remove(&userroom_id); + self.db.roomuserid_leftcount.remove(&roomuser_id); + + if let Some(servers) = invite_via { + let mut prev_servers = self + .servers_invite_via(room_id) + .map(ToOwned::to_owned) + .collect::>() 
+ .await; + #[allow(clippy::redundant_clone)] // this is a necessary clone? + prev_servers.append(servers.clone().as_mut()); + let servers = prev_servers.iter().rev().unique().rev().collect_vec(); + + let servers = servers + .iter() + .map(|server| server.as_bytes()) + .collect_vec() + .join(&[0xFF][..]); + + self.db + .roomid_inviteviaservers + .insert(room_id.as_bytes(), &servers); + } + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn add_servers_invite_via(&self, room_id: &RoomId, servers: &[OwnedServerName]) { + let mut prev_servers = self + .servers_invite_via(room_id) + .map(ToOwned::to_owned) + .collect::>() + .await; + prev_servers.extend(servers.to_owned()); + prev_servers.sort_unstable(); + prev_servers.dedup(); + + let servers = prev_servers + .iter() + .map(|server| server.as_bytes()) + .collect_vec() + .join(&[0xFF][..]); + + self.db + .roomid_inviteviaservers + .insert(room_id.as_bytes(), &servers); + } } diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index 33773001..9a9f70a2 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, mem::size_of, sync::Arc}; -use conduit::{checked, utils, Error, Result}; +use conduit::{err, expected, utils, Result}; use database::{Database, Map}; use super::CompressedStateEvent; @@ -22,11 +22,13 @@ impl Data { } } - pub(super) fn get_statediff(&self, shortstatehash: u64) -> Result { + pub(super) async fn get_statediff(&self, shortstatehash: u64) -> Result { let value = self .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; + .qry(&shortstatehash) + .await + .map_err(|e| err!(Database("Failed to find StateDiff from short {shortstatehash:?}: {e}")))?; + let parent = utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); let parent = if parent != 0 { Some(parent) @@ -40,10 +42,10 @@ impl Data { let stride = size_of::(); let mut i = stride; - while let Some(v) = value.get(i..checked!(i + 2 * stride)?) { + while let Some(v) = value.get(i..expected!(i + 2 * stride)) { if add_mode && v.starts_with(&0_u64.to_be_bytes()) { add_mode = false; - i = checked!(i + stride)?; + i = expected!(i + stride); continue; } if add_mode { @@ -51,7 +53,7 @@ impl Data { } else { removed.insert(v.try_into().expect("we checked the size above")); } - i = checked!(i + 2 * stride)?; + i = expected!(i + 2 * stride); } Ok(StateDiff { @@ -61,7 +63,7 @@ impl Data { }) } - pub(super) fn save_statediff(&self, shortstatehash: u64, diff: &StateDiff) -> Result<()> { + pub(super) fn save_statediff(&self, shortstatehash: u64, diff: &StateDiff) { let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); for new in diff.added.iter() { value.extend_from_slice(&new[..]); @@ -75,6 +77,6 @@ impl Data { } self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value) + .insert(&shortstatehash.to_be_bytes(), &value); } } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 2550774e..cd3f2f73 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -27,14 +27,12 @@ type StateInfoLruCache = Mutex< >, >; -type ShortStateInfoResult = Result< - Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed - )>, ->; +type ShortStateInfoResult = Vec<( + u64, // sstatehash + Arc>, // full state + Arc>, // added + Arc>, // removed +)>; type ParentStatesVec = Vec<( u64, // sstatehash @@ -43,7 +41,7 @@ type 
ParentStatesVec = Vec<( Arc>, // removed )>; -type HashSetCompressStateEvent = Result<(u64, Arc>, Arc>)>; +type HashSetCompressStateEvent = (u64, Arc>, Arc>); pub type CompressedStateEvent = [u8; 2 * size_of::()]; pub struct Service { @@ -86,12 +84,11 @@ impl crate::Service for Service { impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and /// removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self), level = "debug")] - pub fn load_shortstatehash_info(&self, shortstatehash: u64) -> ShortStateInfoResult { + pub async fn load_shortstatehash_info(&self, shortstatehash: u64) -> Result { if let Some(r) = self .stateinfo_cache .lock() - .unwrap() + .expect("locked") .get_mut(&shortstatehash) { return Ok(r.clone()); @@ -101,11 +98,11 @@ impl Service { parent, added, removed, - } = self.db.get_statediff(shortstatehash)?; + } = self.db.get_statediff(shortstatehash).await?; if let Some(parent) = parent { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = (*response.last().unwrap().1).clone(); + let mut response = Box::pin(self.load_shortstatehash_info(parent)).await?; + let mut state = (*response.last().expect("at least one response").1).clone(); state.extend(added.iter().copied()); let removed = (*removed).clone(); for r in &removed { @@ -116,7 +113,7 @@ impl Service { self.stateinfo_cache .lock() - .unwrap() + .expect("locked") .insert(shortstatehash, response.clone()); Ok(response) @@ -124,33 +121,42 @@ impl Service { let response = vec![(shortstatehash, added.clone(), added, removed)]; self.stateinfo_cache .lock() - .unwrap() + .expect("locked") .insert(shortstatehash, response.clone()); + Ok(response) } } - pub fn compress_state_event(&self, shortstatekey: u64, event_id: &EventId) -> Result { + pub async fn compress_state_event(&self, shortstatekey: u64, event_id: &EventId) -> CompressedStateEvent { let mut v = shortstatekey.to_be_bytes().to_vec(); 
v.extend_from_slice( &self .services .short - .get_or_create_shorteventid(event_id)? + .get_or_create_shorteventid(event_id) + .await .to_be_bytes(), ); - Ok(v.try_into().expect("we checked the size above")) + + v.try_into().expect("we checked the size above") } /// Returns shortstatekey, event id #[inline] - pub fn parse_compressed_state_event(&self, compressed_event: &CompressedStateEvent) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]).expect("bytes have right length"), - self.services.short.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]).expect("bytes have right length"), - )?, - )) + pub async fn parse_compressed_state_event( + &self, compressed_event: &CompressedStateEvent, + ) -> Result<(u64, Arc)> { + use utils::u64_from_u8; + + let shortstatekey = u64_from_u8(&compressed_event[0..size_of::()]); + let event_id = self + .services + .short + .get_eventid_from_short(u64_from_u8(&compressed_event[size_of::()..])) + .await?; + + Ok((shortstatekey, event_id)) } /// Creates a new shortstatehash that often is just a diff to an already @@ -227,7 +233,7 @@ impl Service { added: statediffnew, removed: statediffremoved, }, - )?; + ); return Ok(()); }; @@ -280,7 +286,7 @@ impl Service { added: statediffnew, removed: statediffremoved, }, - )?; + ); } Ok(()) @@ -288,10 +294,15 @@ impl Service { /// Returns the new shortstatehash, and the state diff from the previous /// room state - pub fn save_state( + pub async fn save_state( &self, room_id: &RoomId, new_state_ids_compressed: Arc>, - ) -> HashSetCompressStateEvent { - let previous_shortstatehash = self.services.state.get_room_shortstatehash(room_id)?; + ) -> Result { + let previous_shortstatehash = self + .services + .state + .get_room_shortstatehash(room_id) + .await + .ok(); let state_hash = utils::calculate_hash( &new_state_ids_compressed @@ -303,14 +314,18 @@ impl Service { let (new_shortstatehash, already_existed) = self .services .short - 
.get_or_create_shortstatehash(&state_hash)?; + .get_or_create_shortstatehash(&state_hash) + .await; if Some(new_shortstatehash) == previous_shortstatehash { return Ok((new_shortstatehash, Arc::new(HashSet::new()), Arc::new(HashSet::new()))); } - let states_parents = - previous_shortstatehash.map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + let states_parents = if let Some(p) = previous_shortstatehash { + self.load_shortstatehash_info(p).await.unwrap_or_default() + } else { + ShortStateInfoResult::new() + }; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { let statediffnew: HashSet<_> = new_state_ids_compressed diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs index fb279a00..f50b812c 100644 --- a/src/service/rooms/threads/data.rs +++ b/src/service/rooms/threads/data.rs @@ -1,13 +1,18 @@ use std::{mem::size_of, sync::Arc}; -use conduit::{checked, utils, Error, PduEvent, Result}; -use database::Map; +use conduit::{ + checked, + result::LogErr, + utils, + utils::{stream::TryIgnore, ReadyExt}, + PduEvent, Result, +}; +use database::{Deserialized, Map}; +use futures::{Stream, StreamExt}; use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; use crate::{rooms, Dep}; -type PduEventIterResult<'a> = Result> + 'a>>; - pub(super) struct Data { threadid_userids: Arc, services: Services, @@ -30,38 +35,37 @@ impl Data { } } - pub(super) fn threads_until<'a>( + pub(super) async fn threads_until<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: u64, _include: &'a IncludeThreads, - ) -> PduEventIterResult<'a> { + ) -> Result + Send + 'a> { let prefix = self .services .short - .get_shortroomid(room_id)? - .expect("room exists") + .get_shortroomid(room_id) + .await? 
.to_be_bytes() .to_vec(); let mut current = prefix.clone(); current.extend_from_slice(&(checked!(until - 1)?).to_be_bytes()); - Ok(Box::new( - self.threadid_userids - .iter_from(¤t, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pduid, _users)| { - let count = utils::u64_from_bytes(&pduid[(size_of::())..]) - .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; - let mut pdu = self - .services - .timeline - .get_pdu_from_id(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid reference in threadid_userids"))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((count, pdu)) - }), - )) + let stream = self + .threadid_userids + .rev_raw_keys_from(¤t) + .ignore_err() + .ready_take_while(move |key| key.starts_with(&prefix)) + .map(|pduid| (utils::u64_from_u8(&pduid[(size_of::())..]), pduid)) + .filter_map(move |(count, pduid)| async move { + let mut pdu = self.services.timeline.get_pdu_from_id(pduid).await.ok()?; + + if pdu.sender != user_id { + pdu.remove_transaction_id().log_err().ok(); + } + + Some((count, pdu)) + }); + + Ok(stream) } pub(super) fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()> { @@ -71,28 +75,12 @@ impl Data { .collect::>() .join(&[0xFF][..]); - self.threadid_userids.insert(root_id, &users)?; + self.threadid_userids.insert(root_id, &users); Ok(()) } - pub(super) fn get_participants(&self, root_id: &[u8]) -> Result>> { - if let Some(users) = self.threadid_userids.get(root_id)? 
{ - Ok(Some( - users - .split(|b| *b == 0xFF) - .map(|bytes| { - UserId::parse( - utils::string_from_bytes(bytes) - .map_err(|_| Error::bad_database("Invalid UserId bytes in threadid_userids."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in threadid_userids.")) - }) - .filter_map(Result::ok) - .collect(), - )) - } else { - Ok(None) - } + pub(super) async fn get_participants(&self, root_id: &[u8]) -> Result> { + self.threadid_userids.qry(root_id).await.deserialized() } } diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index ae51cd0f..2eafe5d5 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -2,12 +2,12 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduit::{Error, PduEvent, Result}; +use conduit::{err, PduEvent, Result}; use data::Data; +use futures::Stream; use ruma::{ - api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads}, - events::relation::BundledThread, - uint, CanonicalJsonValue, EventId, RoomId, UserId, + api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, CanonicalJsonValue, + EventId, RoomId, UserId, }; use serde_json::json; @@ -36,30 +36,35 @@ impl crate::Service for Service { } impl Service { - pub fn threads_until<'a>( + pub async fn threads_until<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: u64, include: &'a IncludeThreads, - ) -> Result> + 'a> { - self.db.threads_until(user_id, room_id, until, include) + ) -> Result + Send + 'a> { + self.db + .threads_until(user_id, room_id, until, include) + .await } - pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { + pub async fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { let root_id = self .services .timeline - .get_pdu_id(root_event_id)? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Invalid event id in thread message"))?; + .get_pdu_id(root_event_id) + .await + .map_err(|e| err!(Request(InvalidParam("Invalid event_id in thread message: {e:?}"))))?; let root_pdu = self .services .timeline - .get_pdu_from_id(&root_id)? - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found"))?; + .get_pdu_from_id(&root_id) + .await + .map_err(|e| err!(Request(InvalidParam("Thread root not found: {e:?}"))))?; let mut root_pdu_json = self .services .timeline - .get_pdu_json_from_id(&root_id)? - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found"))?; + .get_pdu_json_from_id(&root_id) + .await + .map_err(|e| err!(Request(InvalidParam("Thread root pdu not found: {e:?}"))))?; if let CanonicalJsonValue::Object(unsigned) = root_pdu_json .entry("unsigned".to_owned()) @@ -103,11 +108,12 @@ impl Service { self.services .timeline - .replace_pdu(&root_id, &root_pdu_json, &root_pdu)?; + .replace_pdu(&root_id, &root_pdu_json, &root_pdu) + .await?; } let mut users = Vec::new(); - if let Some(userids) = self.db.get_participants(&root_id)? 
{ + if let Ok(userids) = self.db.get_participants(&root_id).await { users.extend_from_slice(&userids); } else { users.push(root_pdu.sender); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 2f0c8f25..cd746be4 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,12 +1,20 @@ use std::{ collections::{hash_map, HashMap}, mem::size_of, - sync::{Arc, Mutex}, + sync::Arc, }; -use conduit::{checked, error, utils, Error, PduCount, PduEvent, Result}; -use database::{Database, Map}; -use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId}; +use conduit::{ + err, expected, + result::{LogErr, NotFound}, + utils, + utils::{stream::TryIgnore, u64_from_u8, ReadyExt}, + Err, PduCount, PduEvent, Result, +}; +use database::{Database, Deserialized, KeyVal, Map}; +use futures::{FutureExt, Stream, StreamExt}; +use ruma::{CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId}; +use tokio::sync::Mutex; use crate::{rooms, Dep}; @@ -25,8 +33,7 @@ struct Services { short: Dep, } -type PdusIterItem = Result<(PduCount, PduEvent)>; -type PdusIterator<'a> = Box + 'a>; +pub type PdusIterItem = (PduCount, PduEvent); type LastTimelineCountCache = Mutex>; impl Data { @@ -46,23 +53,20 @@ impl Data { } } - pub(super) fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + pub(super) async fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache .lock() - .expect("locked") + .await .entry(room_id.to_owned()) { hash_map::Entry::Vacant(v) => { if let Some(last_count) = self - .pdus_until(sender_user, room_id, PduCount::max())? - .find_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) { + .pdus_until(sender_user, room_id, PduCount::max()) + .await? 
+ .next() + .await + { Ok(*v.insert(last_count.0)) } else { Ok(PduCount::Normal(0)) @@ -73,232 +77,215 @@ impl Data { } /// Returns the `count` of this pdu's id. - pub(super) fn get_pdu_count(&self, event_id: &EventId) -> Result> { + pub(super) async fn get_pdu_count(&self, event_id: &EventId) -> Result { self.eventid_pduid - .get(event_id.as_bytes())? + .qry(event_id) + .await .map(|pdu_id| pdu_count(&pdu_id)) - .transpose() } /// Returns the json of a pdu. - pub(super) fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.get_non_outlier_pdu_json(event_id)?.map_or_else( - || { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))) - .transpose() - }, - |x| Ok(Some(x)), - ) + pub(super) async fn get_pdu_json(&self, event_id: &EventId) -> Result { + if let Ok(pdu) = self.get_non_outlier_pdu_json(event_id).await { + return Ok(pdu); + } + + self.eventid_outlierpdu + .qry(event_id) + .await + .deserialized_json() } /// Returns the json of a pdu. - pub(super) fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))) - .transpose() + pub(super) async fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result { + let pduid = self.get_pdu_id(event_id).await?; + + self.pduid_pdu.qry(&pduid).await.deserialized_json() } /// Returns the pdu's id. #[inline] - pub(super) fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) + pub(super) async fn get_pdu_id(&self, event_id: &EventId) -> Result> { + self.eventid_pduid.qry(event_id).await } /// Returns the pdu directly from `eventid_pduid` only. 
- pub(super) fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))) - .transpose() + pub(super) async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { + let pduid = self.get_pdu_id(event_id).await?; + + self.pduid_pdu.qry(&pduid).await.deserialized_json() + } + + /// Like get_non_outlier_pdu(), but without the expense of fetching and + /// parsing the PduEvent + pub(super) async fn non_outlier_pdu_exists(&self, event_id: &EventId) -> Result<()> { + let pduid = self.get_pdu_id(event_id).await?; + + self.pduid_pdu.qry(&pduid).await?; + + Ok(()) } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub(super) fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(pdu) = self - .get_non_outlier_pdu(event_id)? - .map_or_else( - || { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))) - .transpose() - }, - |x| Ok(Some(x)), - )? 
- .map(Arc::new) - { - Ok(Some(pdu)) - } else { - Ok(None) + pub(super) async fn get_pdu(&self, event_id: &EventId) -> Result> { + if let Ok(pdu) = self.get_non_outlier_pdu(event_id).await { + return Ok(Arc::new(pdu)); } + + self.eventid_outlierpdu + .qry(event_id) + .await + .deserialized_json() + .map(Arc::new) + } + + /// Like get_non_outlier_pdu(), but without the expense of fetching and + /// parsing the PduEvent + pub(super) async fn outlier_pdu_exists(&self, event_id: &EventId) -> Result<()> { + self.eventid_outlierpdu.qry(event_id).await?; + + Ok(()) + } + + /// Like get_pdu(), but without the expense of fetching and parsing the data + pub(super) async fn pdu_exists(&self, event_id: &EventId) -> bool { + let non_outlier = self.non_outlier_pdu_exists(event_id).map(|res| res.is_ok()); + let outlier = self.outlier_pdu_exists(event_id).map(|res| res.is_ok()); + + //TODO: parallelize + non_outlier.await || outlier.await } /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub(super) fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + pub(super) async fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result { + self.pduid_pdu.qry(pdu_id).await.deserialized_json() } /// Returns the pdu as a `BTreeMap`. 
- pub(super) fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + pub(super) async fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result { + self.pduid_pdu.qry(pdu_id).await.deserialized_json() } - pub(super) fn append_pdu( - &self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64, - ) -> Result<()> { + pub(super) async fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) { self.pduid_pdu.insert( pdu_id, &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), - )?; + ); self.lasttimelinecount_cache .lock() - .expect("locked") + .await .insert(pdu.room_id.clone(), PduCount::Normal(count)); - self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - Ok(()) + self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id); + self.eventid_outlierpdu.remove(pdu.event_id.as_bytes()); } - pub(super) fn prepend_backfill_pdu( - &self, pdu_id: &[u8], event_id: &EventId, json: &CanonicalJsonObject, - ) -> Result<()> { + pub(super) fn prepend_backfill_pdu(&self, pdu_id: &[u8], event_id: &EventId, json: &CanonicalJsonObject) { self.pduid_pdu.insert( pdu_id, &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), - )?; + ); - self.eventid_pduid.insert(event_id.as_bytes(), pdu_id)?; - self.eventid_outlierpdu.remove(event_id.as_bytes())?; - - Ok(()) + self.eventid_pduid.insert(event_id.as_bytes(), pdu_id); + self.eventid_outlierpdu.remove(event_id.as_bytes()); } /// Removes a pdu and creates a new one with the same id. 
- pub(super) fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, _pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - } else { - return Err(Error::BadRequest(ErrorKind::NotFound, "PDU does not exist.")); + pub(super) async fn replace_pdu( + &self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, _pdu: &PduEvent, + ) -> Result<()> { + if self.pduid_pdu.qry(pdu_id).await.is_not_found() { + return Err!(Request(NotFound("PDU does not exist."))); } + let pdu = serde_json::to_vec(pdu_json)?; + self.pduid_pdu.insert(pdu_id, &pdu); + Ok(()) } /// Returns an iterator over all events and their tokens in a room that /// happened before the event with id `until` in reverse-chronological /// order. - pub(super) fn pdus_until(&self, user_id: &UserId, room_id: &RoomId, until: PduCount) -> Result> { - let (prefix, current) = self.count_to_id(room_id, until, 1, true)?; + pub(super) async fn pdus_until<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, + ) -> Result + Send + 'a> { + let (prefix, current) = self.count_to_id(room_id, until, 1, true).await?; + let stream = self + .pduid_pdu + .rev_raw_stream_from(¤t) + .ignore_err() + .ready_take_while(move |(key, _)| key.starts_with(&prefix)) + .map(move |item| Self::each_pdu(item, user_id)); - let user_id = user_id.to_owned(); - - Ok(Box::new( - self.pduid_pdu - .iter_from(¤t, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - pdu.add_age()?; - let count = pdu_count(&pdu_id)?; - Ok((count, pdu)) - }), - )) + Ok(stream) } - pub(super) fn pdus_after(&self, user_id: &UserId, room_id: &RoomId, from: PduCount) -> Result> { - let (prefix, current) 
= self.count_to_id(room_id, from, 1, false)?; + pub(super) async fn pdus_after<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, from: PduCount, + ) -> Result + Send + 'a> { + let (prefix, current) = self.count_to_id(room_id, from, 1, false).await?; + let stream = self + .pduid_pdu + .raw_stream_from(¤t) + .ignore_err() + .ready_take_while(move |(key, _)| key.starts_with(&prefix)) + .map(move |item| Self::each_pdu(item, user_id)); - let user_id = user_id.to_owned(); + Ok(stream) + } - Ok(Box::new( - self.pduid_pdu - .iter_from(¤t, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - pdu.add_age()?; - let count = pdu_count(&pdu_id)?; - Ok((count, pdu)) - }), - )) + fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: &UserId) -> PdusIterItem { + let mut pdu = + serde_json::from_slice::(pdu).expect("PduEvent in pduid_pdu database column is invalid JSON"); + + if pdu.sender != user_id { + pdu.remove_transaction_id().log_err().ok(); + } + + pdu.add_age().log_err().ok(); + let count = pdu_count(pdu_id); + + (count, pdu) } pub(super) fn increment_notification_counts( &self, room_id: &RoomId, notifies: Vec, highlights: Vec, - ) -> Result<()> { - let mut notifies_batch = Vec::new(); - let mut highlights_batch = Vec::new(); + ) { + let _cork = self.db.cork(); + for user in notifies { let mut userroom_id = user.as_bytes().to_vec(); userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); - notifies_batch.push(userroom_id); + increment(&self.userroomid_notificationcount, &userroom_id); } + for user in highlights { let mut userroom_id = user.as_bytes().to_vec(); userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); - highlights_batch.push(userroom_id); + increment(&self.userroomid_highlightcount, &userroom_id); } - - 
self.userroomid_notificationcount - .increment_batch(notifies_batch.iter().map(Vec::as_slice))?; - self.userroomid_highlightcount - .increment_batch(highlights_batch.iter().map(Vec::as_slice))?; - Ok(()) } - pub(super) fn count_to_id( + pub(super) async fn count_to_id( &self, room_id: &RoomId, count: PduCount, offset: u64, subtract: bool, ) -> Result<(Vec, Vec)> { let prefix = self .services .short - .get_shortroomid(room_id)? - .ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))? + .get_shortroomid(room_id) + .await + .map_err(|e| err!(Request(NotFound("Room {room_id:?} not found: {e:?}"))))? .to_be_bytes() .to_vec(); + let mut pdu_id = prefix.clone(); // +1 so we don't send the base event let count_raw = match count { @@ -326,17 +313,23 @@ impl Data { } /// Returns the `count` of this pdu's id. -pub(super) fn pdu_count(pdu_id: &[u8]) -> Result { - let stride = size_of::(); - let pdu_id_len = pdu_id.len(); - let last_u64 = utils::u64_from_bytes(&pdu_id[checked!(pdu_id_len - stride)?..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?; - let second_last_u64 = - utils::u64_from_bytes(&pdu_id[checked!(pdu_id_len - 2 * stride)?..checked!(pdu_id_len - stride)?]); +pub(super) fn pdu_count(pdu_id: &[u8]) -> PduCount { + const STRIDE: usize = size_of::(); - if matches!(second_last_u64, Ok(0)) { - Ok(PduCount::Backfilled(u64::MAX.saturating_sub(last_u64))) + let pdu_id_len = pdu_id.len(); + let last_u64 = u64_from_u8(&pdu_id[expected!(pdu_id_len - STRIDE)..]); + let second_last_u64 = u64_from_u8(&pdu_id[expected!(pdu_id_len - 2 * STRIDE)..expected!(pdu_id_len - STRIDE)]); + + if second_last_u64 == 0 { + PduCount::Backfilled(u64::MAX.saturating_sub(last_u64)) } else { - Ok(PduCount::Normal(last_u64)) + PduCount::Normal(last_u64) } } + +//TODO: this is an ABA +fn increment(db: &Arc, key: &[u8]) { + let old = db.get(key); + let new = utils::increment(old.ok().as_deref()); + db.insert(key, &new); +} diff --git 
a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 04d9559d..5360d2c9 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,19 +1,20 @@ mod data; use std::{ + cmp, collections::{BTreeMap, HashSet}, fmt::Write, sync::Arc, }; use conduit::{ - debug, error, info, + debug, err, error, info, pdu::{EventHash, PduBuilder, PduCount, PduEvent}, utils, - utils::{MutexMap, MutexMapGuard}, - validated, warn, Error, Result, Server, + utils::{stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, + validated, warn, Err, Error, Result, Server, }; -use itertools::Itertools; +use futures::{future, future::ready, Future, Stream, StreamExt, TryStreamExt}; use ruma::{ api::{client::error::ErrorKind, federation}, canonical_json::to_canonical_value, @@ -39,6 +40,7 @@ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use tokio::sync::RwLock; use self::data::Data; +pub use self::data::PdusIterItem; use crate::{ account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, rooms::state_compressor::CompressedStateEvent, sending, server_keys, Dep, @@ -129,6 +131,7 @@ impl crate::Service for Service { } fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + /* let lasttimelinecount_cache = self .db .lasttimelinecount_cache @@ -136,6 +139,7 @@ impl crate::Service for Service { .expect("locked") .len(); writeln!(out, "lasttimelinecount_cache: {lasttimelinecount_cache}")?; + */ let mutex_insert = self.mutex_insert.len(); writeln!(out, "insert_mutex: {mutex_insert}")?; @@ -144,11 +148,13 @@ impl crate::Service for Service { } fn clear_cache(&self) { + /* self.db .lasttimelinecount_cache .lock() .expect("locked") .clear(); + */ } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } @@ -156,28 +162,32 @@ impl crate::Service for Service { impl Service { #[tracing::instrument(skip(self), level = "debug")] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> 
Result>> { - self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? + pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result> { + self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id) + .await? .next() - .map(|o| o.map(|(_, p)| Arc::new(p))) - .transpose() + .await + .map(|(_, p)| Arc::new(p)) + .ok_or_else(|| err!(Request(NotFound("No PDU found in room")))) } #[tracing::instrument(skip(self), level = "debug")] - pub fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - self.all_pdus(user_id!("@placeholder:conduwuit.placeholder"), room_id)? - .last() - .map(|o| o.map(|(_, p)| Arc::new(p))) - .transpose() + pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result> { + self.pdus_until(user_id!("@placeholder:conduwuit.placeholder"), room_id, PduCount::max()) + .await? + .next() + .await + .map(|(_, p)| Arc::new(p)) + .ok_or_else(|| err!(Request(NotFound("No PDU found in room")))) } #[tracing::instrument(skip(self), level = "debug")] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - self.db.last_timeline_count(sender_user, room_id) + pub async fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + self.db.last_timeline_count(sender_user, room_id).await } /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.db.get_pdu_count(event_id) } + pub async fn get_pdu_count(&self, event_id: &EventId) -> Result { self.db.get_pdu_count(event_id).await } // TODO Is this the same as the function above? /* @@ -203,49 +213,56 @@ impl Service { */ /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.db.get_pdu_json(event_id) + pub async fn get_pdu_json(&self, event_id: &EventId) -> Result { + self.db.get_pdu_json(event_id).await } /// Returns the json of a pdu. 
#[inline] - pub fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.db.get_non_outlier_pdu_json(event_id) + pub async fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result { + self.db.get_non_outlier_pdu_json(event_id).await } /// Returns the pdu's id. #[inline] - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.db.get_pdu_id(event_id) + pub async fn get_pdu_id(&self, event_id: &EventId) -> Result> { + self.db.get_pdu_id(event_id).await } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. #[inline] - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.db.get_non_outlier_pdu(event_id) + pub async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { + self.db.get_non_outlier_pdu(event_id).await } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { self.db.get_pdu(event_id) } + pub async fn get_pdu(&self, event_id: &EventId) -> Result> { self.db.get_pdu(event_id).await } + + /// Checks if pdu exists + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + pub fn pdu_exists<'a>(&'a self, event_id: &'a EventId) -> impl Future + Send + 'a { + self.db.pdu_exists(event_id) + } /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.db.get_pdu_from_id(pdu_id) } + pub async fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result { self.db.get_pdu_from_id(pdu_id).await } /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.db.get_pdu_json_from_id(pdu_id) + pub async fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result { + self.db.get_pdu_json_from_id(pdu_id).await } /// Removes a pdu and creates a new one with the same id. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()> { - self.db.replace_pdu(pdu_id, pdu_json, pdu) + pub async fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()> { + self.db.replace_pdu(pdu_id, pdu_json, pdu).await } /// Creates a new persisted data unit and adds it to a room. @@ -268,8 +285,9 @@ impl Service { let shortroomid = self .services .short - .get_shortroomid(&pdu.room_id)? - .expect("room exists"); + .get_shortroomid(&pdu.room_id) + .await + .map_err(|_| err!(Database("Room does not exist")))?; // Make unsigned fields correct. This is not properly documented in the spec, // but state events need to have previous content in the unsigned field, so @@ -279,17 +297,17 @@ impl Service { .entry("unsigned".to_owned()) .or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default())) { - if let Some(shortstatehash) = self + if let Ok(shortstatehash) = self .services .state_accessor .pdu_shortstatehash(&pdu.event_id) - .unwrap() + .await { - if let Some(prev_state) = self + if let Ok(prev_state) = self .services .state_accessor .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() + .await { unsigned.insert( "prev_content".to_owned(), @@ -318,10 +336,12 @@ impl Service { // We must keep track of all events that have been referenced. 
self.services .pdu_metadata - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + .mark_as_referenced(&pdu.room_id, &pdu.prev_events); + self.services .state - .set_forward_extremities(&pdu.room_id, leaves, state_lock)?; + .set_forward_extremities(&pdu.room_id, leaves, state_lock) + .await; let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; @@ -330,17 +350,17 @@ impl Service { // appending fails self.services .read_receipt - .private_read_set(&pdu.room_id, &pdu.sender, count1)?; + .private_read_set(&pdu.room_id, &pdu.sender, count1); self.services .user - .reset_notification_counts(&pdu.sender, &pdu.room_id)?; + .reset_notification_counts(&pdu.sender, &pdu.room_id); - let count2 = self.services.globals.next_count()?; + let count2 = self.services.globals.next_count().unwrap(); let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&count2.to_be_bytes()); // Insert pdu - self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?; + self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2).await; drop(insert_lock); @@ -348,12 +368,9 @@ impl Service { let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? 
+ .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") + .await + .map_err(|_| err!(Database("invalid m.room.power_levels event"))) .unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); @@ -365,7 +382,9 @@ impl Service { .services .state_cache .active_local_users_in_room(&pdu.room_id) - .collect_vec(); + .map(ToOwned::to_owned) + .collect::>() + .await; if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { @@ -386,23 +405,20 @@ impl Service { let rules_for_user = self .services .account_data - .get(None, user, GlobalAccountDataEventType::PushRules.to_string().into())? - .map(|event| { - serde_json::from_str::(event.get()).map_err(|e| { - warn!("Invalid push rules event in db for user ID {user}: {e}"); - Error::bad_database("Invalid push rules event in db.") - }) - }) - .transpose()? - .map_or_else(|| Ruleset::server_default(user), |ev: PushRulesEvent| ev.content.global); + .get(None, user, GlobalAccountDataEventType::PushRules.to_string().into()) + .await + .and_then(|event| serde_json::from_str::(event.get()).map_err(Into::into)) + .map_err(|e| err!(Database(warn!(?user, ?e, "Invalid push rules event in db for user")))) + .map_or_else(|_| Ruleset::server_default(user), |ev: PushRulesEvent| ev.content.global); let mut highlight = false; let mut notify = false; - for action in - self.services - .pusher - .get_actions(user, &rules_for_user, &power_levels, &sync_pdu, &pdu.room_id)? + for action in self + .services + .pusher + .get_actions(user, &rules_for_user, &power_levels, &sync_pdu, &pdu.room_id) + .await? 
{ match action { Action::Notify => notify = true, @@ -421,31 +437,36 @@ impl Service { highlights.push(user.clone()); } - for push_key in self.services.pusher.get_pushkeys(user) { - self.services - .sending - .send_pdu_push(&pdu_id, user, push_key?)?; - } + self.services + .pusher + .get_pushkeys(user) + .ready_for_each(|push_key| { + self.services + .sending + .send_pdu_push(&pdu_id, user, push_key.to_owned()) + .expect("TODO: replace with future"); + }) + .await; } self.db - .increment_notification_counts(&pdu.room_id, notifies, highlights)?; + .increment_notification_counts(&pdu.room_id, notifies, highlights); match pdu.kind { TimelineEventType::RoomRedaction => { use RoomVersionId::*; - let room_version_id = self.services.state.get_room_version(&pdu.room_id)?; + let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; match room_version_id { V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { if let Some(redact_id) = &pdu.redacts { - if self.services.state_accessor.user_can_redact( - redact_id, - &pdu.sender, - &pdu.room_id, - false, - )? { - self.redact_pdu(redact_id, pdu, shortroomid)?; + if self + .services + .state_accessor + .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) + .await? + { + self.redact_pdu(redact_id, pdu, shortroomid).await?; } } }, @@ -457,13 +478,13 @@ impl Service { })?; if let Some(redact_id) = &content.redacts { - if self.services.state_accessor.user_can_redact( - redact_id, - &pdu.sender, - &pdu.room_id, - false, - )? { - self.redact_pdu(redact_id, pdu, shortroomid)?; + if self + .services + .state_accessor + .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) + .await? 
+ { + self.redact_pdu(redact_id, pdu, shortroomid).await?; } } }, @@ -492,7 +513,7 @@ impl Service { let invite_state = match content.membership { MembershipState::Invite => { - let state = self.services.state.calculate_invite_state(pdu)?; + let state = self.services.state.calculate_invite_state(pdu).await?; Some(state) }, _ => None, @@ -500,15 +521,18 @@ impl Service { // Update our membership info, we do this here incase a user is invited // and immediately leaves we need the DB to record the invite event for auth - self.services.state_cache.update_membership( - &pdu.room_id, - &target_user_id, - content, - &pdu.sender, - invite_state, - None, - true, - )?; + self.services + .state_cache + .update_membership( + &pdu.room_id, + &target_user_id, + content, + &pdu.sender, + invite_state, + None, + true, + ) + .await?; } }, TimelineEventType::RoomMessage => { @@ -516,9 +540,7 @@ impl Service { .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - self.services - .search - .index_pdu(shortroomid, &pdu_id, &body)?; + self.services.search.index_pdu(shortroomid, &pdu_id, &body); if self.services.admin.is_admin_command(pdu, &body).await { self.services @@ -531,10 +553,10 @@ impl Service { } if let Ok(content) = serde_json::from_str::(pdu.content.get()) { - if let Some(related_pducount) = self.get_pdu_count(&content.relates_to.event_id)? { + if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await { self.services .pdu_metadata - .add_relation(PduCount::Normal(count2), related_pducount)?; + .add_relation(PduCount::Normal(count2), related_pducount); } } @@ -545,14 +567,17 @@ impl Service { } => { // We need to do it again here, because replies don't have // event_id as a top level field - if let Some(related_pducount) = self.get_pdu_count(&in_reply_to.event_id)? 
{ + if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await { self.services .pdu_metadata - .add_relation(PduCount::Normal(count2), related_pducount)?; + .add_relation(PduCount::Normal(count2), related_pducount); } }, Relation::Thread(thread) => { - self.services.threads.add_to_thread(&thread.event_id, pdu)?; + self.services + .threads + .add_to_thread(&thread.event_id, pdu) + .await?; }, _ => {}, // TODO: Aggregate other types } @@ -562,7 +587,8 @@ impl Service { if self .services .state_cache - .appservice_in_room(&pdu.room_id, appservice)? + .appservice_in_room(&pdu.room_id, appservice) + .await { self.services .sending @@ -596,15 +622,14 @@ impl Service { .as_ref() .map_or(false, |state_key| users.is_match(state_key)) }; - let matching_aliases = |aliases: &NamespaceRegex| { + let matching_aliases = |aliases: NamespaceRegex| { self.services .alias .local_aliases_for_room(&pdu.room_id) - .filter_map(Result::ok) - .any(|room_alias| aliases.is_match(room_alias.as_str())) + .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) }; - if matching_aliases(&appservice.aliases) + if matching_aliases(appservice.aliases.clone()).await || appservice.rooms.is_match(pdu.room_id.as_str()) || matching_users(&appservice.users) { @@ -617,7 +642,7 @@ impl Service { Ok(pdu_id) } - pub fn create_hash_and_sign_event( + pub async fn create_hash_and_sign_event( &self, pdu_builder: PduBuilder, sender: &UserId, @@ -636,47 +661,59 @@ impl Service { let prev_events: Vec<_> = self .services .state - .get_forward_extremities(room_id)? 
- .into_iter() + .get_forward_extremities(room_id) .take(20) - .collect(); + .map(Arc::from) + .collect() + .await; // If there was no create event yet, assume we are creating a room - let room_version_id = self.services.state.get_room_version(room_id).or_else(|_| { - if event_type == TimelineEventType::RoomCreate { - let content = serde_json::from_str::(content.get()) - .expect("Invalid content in RoomCreate pdu."); - Ok(content.room_version) - } else { - Err(Error::InconsistentRoomState( - "non-create event for room of unknown version", - room_id.to_owned(), - )) - } - })?; + let room_version_id = self + .services + .state + .get_room_version(room_id) + .await + .or_else(|_| { + if event_type == TimelineEventType::RoomCreate { + let content = serde_json::from_str::(content.get()) + .expect("Invalid content in RoomCreate pdu."); + Ok(content.room_version) + } else { + Err(Error::InconsistentRoomState( + "non-create event for room of unknown version", + room_id.to_owned(), + )) + } + })?; let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - let auth_events = - self.services - .state - .get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; + let auth_events = self + .services + .state + .get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content) + .await?; // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) + .stream() + .map(Ok) + .and_then(|event_id| self.get_pdu(event_id)) + .and_then(|pdu| future::ok(pdu.depth)) + .ignore_err() + .ready_fold(uint!(0), cmp::max) + .await .saturating_add(uint!(1)); let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.services - .state_accessor - .room_state_get(room_id, &event_type.to_string().into(), state_key)? 
+ if let Ok(prev_pdu) = self + .services + .state_accessor + .room_state_get(room_id, &event_type.to_string().into(), state_key) + .await { unsigned.insert( "prev_content".to_owned(), @@ -727,19 +764,22 @@ impl Service { signatures: None, }; + let auth_fetch = |k: &StateEventType, s: &str| { + let key = (k.clone(), s.to_owned()); + ready(auth_events.get(&key)) + }; + let auth_check = state_res::auth_check( &room_version, &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), + None, // TODO: third_party_invite + auth_fetch, ) - .map_err(|e| { - error!("Auth check failed: {:?}", e); - Error::BadRequest(ErrorKind::forbidden(), "Auth check failed.") - })?; + .await + .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; if !auth_check { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Event is not authorized.")); + return Err!(Request(Forbidden("Event is not authorized."))); } // Hash and sign @@ -795,7 +835,8 @@ impl Service { let _shorteventid = self .services .short - .get_or_create_shorteventid(&pdu.event_id)?; + .get_or_create_shorteventid(&pdu.event_id) + .await; Ok((pdu, pdu_json)) } @@ -811,108 +852,117 @@ impl Service { room_id: &RoomId, state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; - if let Some(admin_room) = self.services.admin.get_admin_room()? 
{ - if admin_room == room_id { - match pdu.event_type() { - TimelineEventType::RoomEncryption => { - warn!("Encryption is not allowed in the admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Encryption is not allowed in the admins room", - )); - }, - TimelineEventType::RoomMember => { - let target = pdu - .state_key() - .filter(|v| v.starts_with('@')) - .unwrap_or(sender.as_str()); - let server_user = &self.services.globals.server_user.to_string(); + let (pdu, pdu_json) = self + .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) + .await?; - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu"))?; + if self.services.admin.is_admin_room(&pdu.room_id).await { + match pdu.event_type() { + TimelineEventType::RoomEncryption => { + warn!("Encryption is not allowed in the admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Encryption is not allowed in the admins room", + )); + }, + TimelineEventType::RoomMember => { + let target = pdu + .state_key() + .filter(|v| v.starts_with('@')) + .unwrap_or(sender.as_str()); + let server_user = &self.services.globals.server_user.to_string(); - if content.membership == MembershipState::Leave { - if target == server_user { - warn!("Server user cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server user cannot leave from admins room.", - )); - } + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu"))?; - let count = self - .services - .state_cache - .room_members(room_id) - .filter_map(Result::ok) - .filter(|m| self.services.globals.server_is_ours(m.server_name()) && m != target) - .count(); - if count < 2 { - warn!("Last admin cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Last admin cannot leave from admins room.", - )); - } + if content.membership == 
MembershipState::Leave { + if target == server_user { + warn!("Server user cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server user cannot leave from admins room.", + )); } - if content.membership == MembershipState::Ban && pdu.state_key().is_some() { - if target == server_user { - warn!("Server user cannot be banned in admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server user cannot be banned in admins room.", - )); - } + let count = self + .services + .state_cache + .room_members(&pdu.room_id) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .ready_filter(|user| *user != target) + .boxed() + .count() + .await; - let count = self - .services - .state_cache - .room_members(room_id) - .filter_map(Result::ok) - .filter(|m| self.services.globals.server_is_ours(m.server_name()) && m != target) - .count(); - if count < 2 { - warn!("Last admin cannot be banned in admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Last admin cannot be banned in admins room.", - )); - } + if count < 2 { + warn!("Last admin cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Last admin cannot leave from admins room.", + )); } - }, - _ => {}, - } + } + + if content.membership == MembershipState::Ban && pdu.state_key().is_some() { + if target == server_user { + warn!("Server user cannot be banned in admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server user cannot be banned in admins room.", + )); + } + + let count = self + .services + .state_cache + .room_members(&pdu.room_id) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .ready_filter(|user| *user != target) + .boxed() + .count() + .await; + + if count < 2 { + warn!("Last admin cannot be banned in admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Last admin cannot be banned in admins room.", + )); + } + 
} + }, + _ => {}, } } // If redaction event is not authorized, do not append it to the timeline if pdu.kind == TimelineEventType::RoomRedaction { use RoomVersionId::*; - match self.services.state.get_room_version(&pdu.room_id)? { + match self.services.state.get_room_version(&pdu.room_id).await? { V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { if let Some(redact_id) = &pdu.redacts { if !self .services .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false)? + .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) + .await? { - return Err(Error::BadRequest(ErrorKind::forbidden(), "User cannot redact this event.")); + return Err!(Request(Forbidden("User cannot redact this event."))); } }; }, _ => { let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?; + .map_err(|e| err!(Database("Invalid content in redaction pdu: {e:?}")))?; if let Some(redact_id) = &content.redacts { if !self .services .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false)? + .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) + .await? { - return Err(Error::BadRequest(ErrorKind::forbidden(), "User cannot redact this event.")); + return Err!(Request(Forbidden("User cannot redact this event."))); } } }, @@ -922,7 +972,7 @@ impl Service { // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. 
- let statehashid = self.services.state.append_to_state(&pdu)?; + let statehashid = self.services.state.append_to_state(&pdu).await?; let pdu_id = self .append_pdu( @@ -939,14 +989,15 @@ impl Service { // in time where events in the current room state do not exist self.services .state - .set_room_state(room_id, statehashid, state_lock)?; + .set_room_state(&pdu.room_id, statehashid, state_lock); let mut servers: HashSet = self .services .state_cache - .room_servers(room_id) - .filter_map(Result::ok) - .collect(); + .room_servers(&pdu.room_id) + .map(ToOwned::to_owned) + .collect() + .await; // In case we are kicking or banning a user, we need to inform their server of // the change @@ -966,7 +1017,8 @@ impl Service { self.services .sending - .send_pdu_servers(servers.into_iter(), &pdu_id)?; + .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) + .await?; Ok(pdu.event_id) } @@ -988,15 +1040,19 @@ impl Service { // fail. self.services .state - .set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed)?; + .set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed) + .await?; if soft_fail { self.services .pdu_metadata - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + .mark_as_referenced(&pdu.room_id, &pdu.prev_events); + self.services .state - .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)?; + .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) + .await; + return Ok(None); } @@ -1009,71 +1065,71 @@ impl Service { /// Returns an iterator over all PDUs in a room. 
#[inline] - pub fn all_pdus<'a>( - &'a self, user_id: &UserId, room_id: &RoomId, - ) -> Result> + 'a> { - self.pdus_after(user_id, room_id, PduCount::min()) + pub async fn all_pdus<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, + ) -> Result + Send + 'a> { + self.pdus_after(user_id, room_id, PduCount::min()).await } /// Returns an iterator over all events and their tokens in a room that /// happened before the event with id `until` in reverse-chronological /// order. #[tracing::instrument(skip(self), level = "debug")] - pub fn pdus_until<'a>( - &'a self, user_id: &UserId, room_id: &RoomId, until: PduCount, - ) -> Result> + 'a> { - self.db.pdus_until(user_id, room_id, until) + pub async fn pdus_until<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, + ) -> Result + Send + 'a> { + self.db.pdus_until(user_id, room_id, until).await } /// Returns an iterator over all events and their token in a room that /// happened after the event with id `from` in chronological order. #[tracing::instrument(skip(self), level = "debug")] - pub fn pdus_after<'a>( - &'a self, user_id: &UserId, room_id: &RoomId, from: PduCount, - ) -> Result> + 'a> { - self.db.pdus_after(user_id, room_id, from) + pub async fn pdus_after<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, from: PduCount, + ) -> Result + Send + 'a> { + self.db.pdus_after(user_id, room_id, from).await } /// Replace a PDU with the redacted form. #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent, shortroomid: u64) -> Result<()> { + pub async fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent, shortroomid: u64) -> Result<()> { // TODO: Don't reserialize, keep original json - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; + let Ok(pdu_id) = self.get_pdu_id(event_id).await else { + // If event does not exist, just noop + return Ok(()); + }; - if let Ok(content) = serde_json::from_str::(pdu.content.get()) { - if let Some(body) = content.body { - self.services - .search - .deindex_pdu(shortroomid, &pdu_id, &body)?; - } + let mut pdu = self + .get_pdu_from_id(&pdu_id) + .await + .map_err(|e| err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))))?; + + if let Ok(content) = serde_json::from_str::(pdu.content.get()) { + if let Some(body) = content.body { + self.services + .search + .deindex_pdu(shortroomid, &pdu_id, &body); } - - let room_version_id = self.services.state.get_room_version(&pdu.room_id)?; - - pdu.redact(room_version_id, reason)?; - - self.replace_pdu( - &pdu_id, - &utils::to_canonical_object(&pdu).map_err(|e| { - error!("Failed to convert PDU to canonical JSON: {}", e); - Error::bad_database("Failed to convert PDU to canonical JSON.") - })?, - &pdu, - )?; } - // If event does not exist, just noop - Ok(()) + + let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; + + pdu.redact(room_version_id, reason)?; + + let obj = utils::to_canonical_object(&pdu) + .map_err(|e| err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON"))))?; + + self.replace_pdu(&pdu_id, &obj, &pdu).await } #[tracing::instrument(skip(self))] pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { let first_pdu = self - .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? + .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id) + .await? 
.next() - .expect("Room is not empty")?; + .await + .expect("Room is not empty"); if first_pdu.0 < from { // No backfill required, there are still events between them @@ -1083,17 +1139,18 @@ impl Service { let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor - .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? + .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") + .await .map(|ev| { serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + .unwrap() }) - .transpose()? .unwrap_or_default(); let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| { if level > &power_levels.users_default && !self.services.globals.user_is_local(user_id) { - Some(user_id.server_name().to_owned()) + Some(user_id.server_name()) } else { None } @@ -1103,34 +1160,43 @@ impl Service { .services .alias .local_aliases_for_room(room_id) - .filter_map(|alias| { - alias - .ok() - .filter(|alias| !self.services.globals.server_is_ours(alias.server_name())) - .map(|alias| alias.server_name().to_owned()) + .ready_filter_map(|alias| { + self.services + .globals + .server_is_ours(alias.server_name()) + .then_some(alias.server_name()) }); - let servers = room_mods + let mut servers = room_mods + .stream() .chain(room_alias_servers) - .chain(self.services.server.config.trusted_servers.clone()) - .filter(|server_name| { - if self.services.globals.server_is_ours(server_name) { - return false; - } - + .map(ToOwned::to_owned) + .chain( + self.services + .server + .config + .trusted_servers + .iter() + .map(ToOwned::to_owned) + .stream(), + ) + .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)) + .filter_map(|server_name| async move { self.services .state_cache - .server_in_room(server_name, room_id) - .unwrap_or(false) - }); + .server_in_room(&server_name, room_id) + .await + .then_some(server_name) + }) + .boxed(); - for backfill_server in servers { + while 
let Some(ref backfill_server) = servers.next().await { info!("Asking {backfill_server} for backfill"); let response = self .services .sending .send_federation_request( - &backfill_server, + backfill_server, federation::backfill::get_backfill::v1::Request { room_id: room_id.to_owned(), v: vec![first_pdu.1.event_id.as_ref().to_owned()], @@ -1142,7 +1208,7 @@ impl Service { Ok(response) => { let pub_key_map = RwLock::new(BTreeMap::new()); for pdu in response.pdus { - if let Err(e) = self.backfill_pdu(&backfill_server, pdu, &pub_key_map).await { + if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await { warn!("Failed to add backfilled pdu in room {room_id}: {e}"); } } @@ -1163,7 +1229,7 @@ impl Service { &self, origin: &ServerName, pdu: Box, pub_key_map: &RwLock>>, ) -> Result<()> { - let (event_id, value, room_id) = self.services.event_handler.parse_incoming_pdu(&pdu)?; + let (event_id, value, room_id) = self.services.event_handler.parse_incoming_pdu(&pdu).await?; // Lock so we cannot backfill the same pdu twice at the same time let mutex_lock = self @@ -1174,7 +1240,7 @@ impl Service { .await; // Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = self.get_pdu_id(&event_id)? { + if let Ok(pdu_id) = self.get_pdu_id(&event_id).await { let pdu_id = pdu_id.to_vec(); debug!("We already know {event_id} at {pdu_id:?}"); return Ok(()); @@ -1190,36 +1256,38 @@ impl Service { .handle_incoming_pdu(origin, &room_id, &event_id, value, false, pub_key_map) .await?; - let value = self.get_pdu_json(&event_id)?.expect("We just created it"); - let pdu = self.get_pdu(&event_id)?.expect("We just created it"); + let value = self + .get_pdu_json(&event_id) + .await + .expect("We just created it"); + let pdu = self.get_pdu(&event_id).await.expect("We just created it"); let shortroomid = self .services .short - .get_shortroomid(&room_id)? 
+ .get_shortroomid(&room_id) + .await .expect("room exists"); let insert_lock = self.mutex_insert.lock(&room_id).await; let max = u64::MAX; - let count = self.services.globals.next_count()?; + let count = self.services.globals.next_count().unwrap(); let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&0_u64.to_be_bytes()); pdu_id.extend_from_slice(&(validated!(max - count)).to_be_bytes()); // Insert pdu - self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?; + self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value); drop(insert_lock); if pdu.kind == TimelineEventType::RoomMessage { let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + .map_err(|e| err!(Database("Invalid content in pdu: {e:?}")))?; if let Some(body) = content.body { - self.services - .search - .index_pdu(shortroomid, &pdu_id, &body)?; + self.services.search.index_pdu(shortroomid, &pdu_id, &body); } } drop(mutex_lock); diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index 3cf1cdd5..bcfce616 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -46,7 +46,7 @@ impl Service { /// Sets a user as typing until the timeout timestamp is reached or /// roomtyping_remove is called. 
pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { - debug_info!("typing started {:?} in {:?} timeout:{:?}", user_id, room_id, timeout); + debug_info!("typing started {user_id:?} in {room_id:?} timeout:{timeout:?}"); // update clients self.typing .write() @@ -54,17 +54,19 @@ impl Service { .entry(room_id.to_owned()) .or_default() .insert(user_id.to_owned(), timeout); + self.last_typing_update .write() .await .insert(room_id.to_owned(), self.services.globals.next_count()?); + if self.typing_update_sender.send(room_id.to_owned()).is_err() { trace!("receiver found what it was looking for and is no longer interested"); } // update federation if self.services.globals.user_is_local(user_id) { - self.federation_send(room_id, user_id, true)?; + self.federation_send(room_id, user_id, true).await?; } Ok(()) @@ -72,7 +74,7 @@ impl Service { /// Removes a user from typing before the timeout is reached. pub async fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - debug_info!("typing stopped {:?} in {:?}", user_id, room_id); + debug_info!("typing stopped {user_id:?} in {room_id:?}"); // update clients self.typing .write() @@ -80,31 +82,31 @@ impl Service { .entry(room_id.to_owned()) .or_default() .remove(user_id); + self.last_typing_update .write() .await .insert(room_id.to_owned(), self.services.globals.next_count()?); + if self.typing_update_sender.send(room_id.to_owned()).is_err() { trace!("receiver found what it was looking for and is no longer interested"); } // update federation if self.services.globals.user_is_local(user_id) { - self.federation_send(room_id, user_id, false)?; + self.federation_send(room_id, user_id, false).await?; } Ok(()) } - pub async fn wait_for_update(&self, room_id: &RoomId) -> Result<()> { + pub async fn wait_for_update(&self, room_id: &RoomId) { let mut receiver = self.typing_update_sender.subscribe(); while let Ok(next) = receiver.recv().await { if next == room_id { break; } 
} - - Ok(()) } /// Makes sure that typing events with old timestamps get removed. @@ -123,30 +125,30 @@ impl Service { removable.push(user.clone()); } } - - drop(typing); }; if !removable.is_empty() { let typing = &mut self.typing.write().await; let room = typing.entry(room_id.to_owned()).or_default(); for user in &removable { - debug_info!("typing timeout {:?} in {:?}", &user, room_id); + debug_info!("typing timeout {user:?} in {room_id:?}"); room.remove(user); } + // update clients self.last_typing_update .write() .await .insert(room_id.to_owned(), self.services.globals.next_count()?); + if self.typing_update_sender.send(room_id.to_owned()).is_err() { trace!("receiver found what it was looking for and is no longer interested"); } // update federation - for user in removable { - if self.services.globals.user_is_local(&user) { - self.federation_send(room_id, &user, false)?; + for user in &removable { + if self.services.globals.user_is_local(user) { + self.federation_send(room_id, user, false).await?; } } } @@ -183,7 +185,7 @@ impl Service { }) } - fn federation_send(&self, room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> { + async fn federation_send(&self, room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> { debug_assert!( self.services.globals.user_is_local(user_id), "tried to broadcast typing status of remote user", @@ -197,7 +199,8 @@ impl Service { self.services .sending - .send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing"))?; + .send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing")) + .await?; Ok(()) } diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index c7131615..d4d9874c 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,8 +1,9 @@ use std::sync::Arc; -use conduit::{utils, Error, Result}; -use database::Map; -use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; +use conduit::Result; +use 
database::{Deserialized, Map}; +use futures::{Stream, StreamExt}; +use ruma::{RoomId, UserId}; use crate::{globals, rooms, Dep}; @@ -11,13 +12,13 @@ pub(super) struct Data { userroomid_highlightcount: Arc, roomuserid_lastnotificationread: Arc, roomsynctoken_shortstatehash: Arc, - userroomid_joined: Arc, services: Services, } struct Services { globals: Dep, short: Dep, + state_cache: Dep, } impl Data { @@ -28,15 +29,15 @@ impl Data { userroomid_highlightcount: db["userroomid_highlightcount"].clone(), roomuserid_lastnotificationread: db["userroomid_highlightcount"].clone(), //< NOTE: known bug from conduit roomsynctoken_shortstatehash: db["roomsynctoken_shortstatehash"].clone(), - userroomid_joined: db["userroomid_joined"].clone(), services: Services { globals: args.depend::("globals"), short: args.depend::("rooms::short"), + state_cache: args.depend::("rooms::state_cache"), }, } } - pub(super) fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + pub(super) fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -45,128 +46,73 @@ impl Data { roomuser_id.extend_from_slice(user_id.as_bytes()); self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; + .insert(&userroom_id, &0_u64.to_be_bytes()); self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; + .insert(&userroom_id, &0_u64.to_be_bytes()); self.roomuserid_lastnotificationread - .insert(&roomuser_id, &self.services.globals.next_count()?.to_be_bytes())?; - - Ok(()) + .insert(&roomuser_id, &self.services.globals.next_count().unwrap().to_be_bytes()); } - pub(super) fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - + pub(super) 
async fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + let key = (user_id, room_id); self.userroomid_notificationcount - .get(&userroom_id)? - .map_or(Ok(0), |bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) + .qry(&key) + .await + .deserialized() + .unwrap_or(0) } - pub(super) fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - + pub(super) async fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + let key = (user_id, room_id); self.userroomid_highlightcount - .get(&userroom_id)? - .map_or(Ok(0), |bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) + .qry(&key) + .await + .deserialized() + .unwrap_or(0) } - pub(super) fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastnotificationread - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")) - }) - .transpose()? - .unwrap_or(0)) + pub(super) async fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + let key = (room_id, user_id); + self.roomuserid_lastnotificationread + .qry(&key) + .await + .deserialized() + .unwrap_or(0) } - pub(super) fn associate_token_shortstatehash( - &self, room_id: &RoomId, token: u64, shortstatehash: u64, - ) -> Result<()> { + pub(super) async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) { let shortroomid = self .services .short - .get_shortroomid(room_id)? 
+ .get_shortroomid(room_id) + .await .expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) + .insert(&key, &shortstatehash.to_be_bytes()); } - pub(super) fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self - .services - .short - .get_shortroomid(room_id)? - .expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); + pub(super) async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { + let shortroomid = self.services.short.get_shortroomid(room_id).await?; + let key: &[u64] = &[shortroomid, token]; self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash")) - }) - .transpose() + .qry(key) + .await + .deserialized() } + //TODO: optimize; replace point-queries with dual iteration pub(super) fn get_shared_rooms<'a>( - &'a self, users: Vec, - ) -> Result> + 'a>> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xFF) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - .saturating_add(1); // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(Result::ok) - }); - - // We use the default compare function because keys are sorted correctly (not - // reversed) - Ok(Box::new( - utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse( - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid RoomId bytes in userroomid_joined"))?, - ) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - }), - )) + &'a self, user_a: &'a UserId, user_b: &'a UserId, + ) -> impl Stream + Send + 'a { + self.services + .state_cache + .rooms_joined(user_a) + .filter(|room_id| self.services.state_cache.is_joined(user_b, room_id)) } } diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 93d38470..d9d90ecf 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -3,7 +3,8 @@ mod data; use std::sync::Arc; use conduit::Result; -use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; +use futures::{pin_mut, Stream, StreamExt}; +use ruma::{RoomId, UserId}; use self::data::Data; @@ -22,32 +23,49 @@ impl crate::Service for Service { } impl Service { - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - self.db.reset_notification_counts(user_id, room_id) + #[inline] + pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) { + self.db.reset_notification_counts(user_id, room_id); } - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.notification_count(user_id, room_id) + #[inline] + pub async fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + self.db.notification_count(user_id, room_id).await } - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - 
self.db.highlight_count(user_id, room_id) + #[inline] + pub async fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + self.db.highlight_count(user_id, room_id).await } - pub fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_notification_read(user_id, room_id) + #[inline] + pub async fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + self.db.last_notification_read(user_id, room_id).await } - pub fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) -> Result<()> { + #[inline] + pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) { self.db .associate_token_shortstatehash(room_id, token, shortstatehash) + .await; } - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - self.db.get_token_shortstatehash(room_id, token) + #[inline] + pub async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { + self.db.get_token_shortstatehash(room_id, token).await } - pub fn get_shared_rooms(&self, users: Vec) -> Result> + '_> { - self.db.get_shared_rooms(users) + #[inline] + pub fn get_shared_rooms<'a>( + &'a self, user_a: &'a UserId, user_b: &'a UserId, + ) -> impl Stream + Send + 'a { + self.db.get_shared_rooms(user_a, user_b) + } + + pub async fn has_shared_rooms<'a>(&'a self, user_a: &'a UserId, user_b: &'a UserId) -> bool { + let get_shared_rooms = self.get_shared_rooms(user_a, user_b); + + pin_mut!(get_shared_rooms); + get_shared_rooms.next().await.is_some() } } diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 6c8e2544..b96f9a03 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,14 +1,21 @@ use std::sync::Arc; -use conduit::{utils, Error, Result}; -use database::{Database, Map}; +use conduit::{ + utils, + utils::{stream::TryIgnore, ReadyExt}, + Error, Result, +}; +use 
database::{Database, Deserialized, Map}; +use futures::{Stream, StreamExt}; use ruma::{ServerName, UserId}; use super::{Destination, SendingEvent}; use crate::{globals, Dep}; -type OutgoingSendingIter<'a> = Box, Destination, SendingEvent)>> + 'a>; -type SendingEventIter<'a> = Box, SendingEvent)>> + 'a>; +pub(super) type OutgoingItem = (Key, SendingEvent, Destination); +pub(super) type SendingItem = (Key, SendingEvent); +pub(super) type QueueItem = (Key, SendingEvent); +pub(super) type Key = Vec; pub struct Data { servercurrentevent_data: Arc, @@ -36,85 +43,34 @@ impl Data { } } - #[inline] - pub fn active_requests(&self) -> OutgoingSendingIter<'_> { - Box::new( - self.servercurrentevent_data - .iter() - .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(k, e)| (key, k, e))), - ) - } + pub(super) fn delete_active_request(&self, key: &[u8]) { self.servercurrentevent_data.remove(key); } - #[inline] - pub fn active_requests_for<'a>(&'a self, destination: &Destination) -> SendingEventIter<'a> { + pub(super) async fn delete_all_active_requests_for(&self, destination: &Destination) { let prefix = destination.get_prefix(); - Box::new( - self.servercurrentevent_data - .scan_prefix(prefix) - .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(_, e)| (key, e))), - ) + self.servercurrentevent_data + .raw_keys_prefix(&prefix) + .ignore_err() + .ready_for_each(|key| self.servercurrentevent_data.remove(key)) + .await; } - pub(super) fn delete_active_request(&self, key: &[u8]) -> Result<()> { self.servercurrentevent_data.remove(key) } - - pub(super) fn delete_all_active_requests_for(&self, destination: &Destination) -> Result<()> { + pub(super) async fn delete_all_requests_for(&self, destination: &Destination) { let prefix = destination.get_prefix(); - for (key, _) in self.servercurrentevent_data.scan_prefix(prefix) { - self.servercurrentevent_data.remove(&key)?; - } + self.servercurrentevent_data + .raw_keys_prefix(&prefix) + .ignore_err() + .ready_for_each(|key| 
self.servercurrentevent_data.remove(key)) + .await; - Ok(()) - } - - pub(super) fn delete_all_requests_for(&self, destination: &Destination) -> Result<()> { - let prefix = destination.get_prefix(); - for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { - self.servercurrentevent_data.remove(&key).unwrap(); - } - - for (key, _) in self.servernameevent_data.scan_prefix(prefix) { - self.servernameevent_data.remove(&key).unwrap(); - } - - Ok(()) - } - - pub(super) fn queue_requests(&self, requests: &[(&Destination, SendingEvent)]) -> Result>> { - let mut batch = Vec::new(); - let mut keys = Vec::new(); - for (destination, event) in requests { - let mut key = destination.get_prefix(); - if let SendingEvent::Pdu(value) = &event { - key.extend_from_slice(value); - } else { - key.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes()); - } - let value = if let SendingEvent::Edu(value) = &event { - &**value - } else { - &[] - }; - batch.push((key.clone(), value.to_owned())); - keys.push(key); - } self.servernameevent_data - .insert_batch(batch.iter().map(database::KeyVal::from))?; - Ok(keys) + .raw_keys_prefix(&prefix) + .ignore_err() + .ready_for_each(|key| self.servernameevent_data.remove(key)) + .await; } - pub fn queued_requests<'a>( - &'a self, destination: &Destination, - ) -> Box)>> + 'a> { - let prefix = destination.get_prefix(); - return Box::new( - self.servernameevent_data - .scan_prefix(prefix) - .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))), - ); - } - - pub(super) fn mark_as_active(&self, events: &[(SendingEvent, Vec)]) -> Result<()> { - for (e, key) in events { + pub(super) fn mark_as_active(&self, events: &[QueueItem]) { + for (key, e) in events { if key.is_empty() { continue; } @@ -124,29 +80,87 @@ impl Data { } else { &[] }; - self.servercurrentevent_data.insert(key, value)?; - self.servernameevent_data.remove(key)?; + self.servercurrentevent_data.insert(key, value); + 
self.servernameevent_data.remove(key); + } + } + + #[inline] + pub fn active_requests(&self) -> impl Stream + Send + '_ { + self.servercurrentevent_data + .raw_stream() + .ignore_err() + .map(|(key, val)| { + let (dest, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); + + (key.to_vec(), event, dest) + }) + } + + #[inline] + pub fn active_requests_for<'a>(&'a self, destination: &Destination) -> impl Stream + Send + 'a { + let prefix = destination.get_prefix(); + self.servercurrentevent_data + .stream_raw_prefix(&prefix) + .ignore_err() + .map(|(key, val)| { + let (_, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); + + (key.to_vec(), event) + }) + } + + pub(super) fn queue_requests(&self, requests: &[(&SendingEvent, &Destination)]) -> Vec> { + let mut batch = Vec::new(); + let mut keys = Vec::new(); + for (event, destination) in requests { + let mut key = destination.get_prefix(); + if let SendingEvent::Pdu(value) = &event { + key.extend_from_slice(value); + } else { + key.extend_from_slice(&self.services.globals.next_count().unwrap().to_be_bytes()); + } + let value = if let SendingEvent::Edu(value) = &event { + &**value + } else { + &[] + }; + batch.push((key.clone(), value.to_owned())); + keys.push(key); } - Ok(()) + self.servernameevent_data.insert_batch(batch.iter()); + keys } - pub(super) fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) -> Result<()> { - self.servername_educount - .insert(server_name.as_bytes(), &last_count.to_be_bytes()) - } + pub fn queued_requests<'a>(&'a self, destination: &Destination) -> impl Stream + Send + 'a { + let prefix = destination.get_prefix(); + self.servernameevent_data + .stream_raw_prefix(&prefix) + .ignore_err() + .map(|(key, val)| { + let (_, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); - pub fn get_latest_educount(&self, server_name: &ServerName) -> Result { - self.servername_educount - 
.get(server_name.as_bytes())? - .map_or(Ok(0), |bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) + (key.to_vec(), event) }) } + + pub(super) fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) { + self.servername_educount + .insert(server_name.as_bytes(), &last_count.to_be_bytes()); + } + + pub async fn get_latest_educount(&self, server_name: &ServerName) -> u64 { + self.servername_educount + .qry(server_name) + .await + .deserialized() + .unwrap_or(0) + } } #[tracing::instrument(skip(key), level = "debug")] -fn parse_servercurrentevent(key: &[u8], value: Vec) -> Result<(Destination, SendingEvent)> { +fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, SendingEvent)> { // Appservices start with a plus Ok::<_, Error>(if key.starts_with(b"+") { let mut parts = key[1..].splitn(2, |&b| b == 0xFF); @@ -164,7 +178,7 @@ fn parse_servercurrentevent(key: &[u8], value: Vec) -> Result<(Destination, if value.is_empty() { SendingEvent::Pdu(event.to_vec()) } else { - SendingEvent::Edu(value) + SendingEvent::Edu(value.to_vec()) }, ) } else if key.starts_with(b"$") { @@ -192,7 +206,7 @@ fn parse_servercurrentevent(key: &[u8], value: Vec) -> Result<(Destination, SendingEvent::Pdu(event.to_vec()) } else { // I'm pretty sure this should never be called - SendingEvent::Edu(value) + SendingEvent::Edu(value.to_vec()) }, ) } else { @@ -214,7 +228,7 @@ fn parse_servercurrentevent(key: &[u8], value: Vec) -> Result<(Destination, if value.is_empty() { SendingEvent::Pdu(event.to_vec()) } else { - SendingEvent::Edu(value) + SendingEvent::Edu(value.to_vec()) }, ) }) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b90ea361..e3582f2e 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -7,10 +7,11 @@ mod sender; use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; -use conduit::{err, warn, Result, Server}; +use conduit::{err, 
utils::ReadyExt, warn, Result, Server}; +use futures::{future::ready, Stream, StreamExt, TryStreamExt}; use ruma::{ api::{appservice::Registration, OutgoingRequest}, - OwnedServerName, RoomId, ServerName, UserId, + RoomId, ServerName, UserId, }; use tokio::sync::Mutex; @@ -104,7 +105,7 @@ impl Service { let dest = Destination::Push(user.to_owned(), pushkey); let event = SendingEvent::Pdu(pdu_id.to_owned()); let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(&[(&dest, event.clone())])?; + let keys = self.db.queue_requests(&[(&event, &dest)]); self.dispatch(Msg { dest, event, @@ -117,7 +118,7 @@ impl Service { let dest = Destination::Appservice(appservice_id); let event = SendingEvent::Pdu(pdu_id); let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(&[(&dest, event.clone())])?; + let keys = self.db.queue_requests(&[(&event, &dest)]); self.dispatch(Msg { dest, event, @@ -126,30 +127,31 @@ impl Service { } #[tracing::instrument(skip(self, room_id, pdu_id), level = "debug")] - pub fn send_pdu_room(&self, room_id: &RoomId, pdu_id: &[u8]) -> Result<()> { + pub async fn send_pdu_room(&self, room_id: &RoomId, pdu_id: &[u8]) -> Result<()> { let servers = self .services .state_cache .room_servers(room_id) - .filter_map(Result::ok) - .filter(|server_name| !self.services.globals.server_is_ours(server_name)); + .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)); - self.send_pdu_servers(servers, pdu_id) + self.send_pdu_servers(servers, pdu_id).await } #[tracing::instrument(skip(self, servers, pdu_id), level = "debug")] - pub fn send_pdu_servers>(&self, servers: I, pdu_id: &[u8]) -> Result<()> { - let requests = servers - .into_iter() - .map(|server| (Destination::Normal(server), SendingEvent::Pdu(pdu_id.to_owned()))) - .collect::>(); + pub async fn send_pdu_servers<'a, S>(&self, servers: S, pdu_id: &[u8]) -> Result<()> + where + S: Stream + Send + 'a, + { let _cork = self.db.db.cork(); - let keys = 
self.db.queue_requests( - &requests - .iter() - .map(|(o, e)| (o, e.clone())) - .collect::>(), - )?; + let requests = servers + .map(|server| (Destination::Normal(server.into()), SendingEvent::Pdu(pdu_id.into()))) + .collect::>() + .await; + + let keys = self + .db + .queue_requests(&requests.iter().map(|(o, e)| (e, o)).collect::>()); + for ((dest, event), queue_id) in requests.into_iter().zip(keys) { self.dispatch(Msg { dest, @@ -166,7 +168,7 @@ impl Service { let dest = Destination::Normal(server.to_owned()); let event = SendingEvent::Edu(serialized); let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(&[(&dest, event.clone())])?; + let keys = self.db.queue_requests(&[(&event, &dest)]); self.dispatch(Msg { dest, event, @@ -175,30 +177,30 @@ impl Service { } #[tracing::instrument(skip(self, room_id, serialized), level = "debug")] - pub fn send_edu_room(&self, room_id: &RoomId, serialized: Vec) -> Result<()> { + pub async fn send_edu_room(&self, room_id: &RoomId, serialized: Vec) -> Result<()> { let servers = self .services .state_cache .room_servers(room_id) - .filter_map(Result::ok) - .filter(|server_name| !self.services.globals.server_is_ours(server_name)); + .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)); - self.send_edu_servers(servers, serialized) + self.send_edu_servers(servers, serialized).await } #[tracing::instrument(skip(self, servers, serialized), level = "debug")] - pub fn send_edu_servers>(&self, servers: I, serialized: Vec) -> Result<()> { - let requests = servers - .into_iter() - .map(|server| (Destination::Normal(server), SendingEvent::Edu(serialized.clone()))) - .collect::>(); + pub async fn send_edu_servers<'a, S>(&self, servers: S, serialized: Vec) -> Result<()> + where + S: Stream + Send + 'a, + { let _cork = self.db.db.cork(); - let keys = self.db.queue_requests( - &requests - .iter() - .map(|(o, e)| (o, e.clone())) - .collect::>(), - )?; + let requests = servers + .map(|server| 
(Destination::Normal(server.to_owned()), SendingEvent::Edu(serialized.clone()))) + .collect::>() + .await; + + let keys = self + .db + .queue_requests(&requests.iter().map(|(o, e)| (e, o)).collect::>()); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { self.dispatch(Msg { @@ -212,29 +214,33 @@ impl Service { } #[tracing::instrument(skip(self, room_id), level = "debug")] - pub fn flush_room(&self, room_id: &RoomId) -> Result<()> { + pub async fn flush_room(&self, room_id: &RoomId) -> Result<()> { let servers = self .services .state_cache .room_servers(room_id) - .filter_map(Result::ok) - .filter(|server_name| !self.services.globals.server_is_ours(server_name)); + .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)); - self.flush_servers(servers) + self.flush_servers(servers).await } #[tracing::instrument(skip(self, servers), level = "debug")] - pub fn flush_servers>(&self, servers: I) -> Result<()> { - let requests = servers.into_iter().map(Destination::Normal); - for dest in requests { - self.dispatch(Msg { - dest, - event: SendingEvent::Flush, - queue_id: Vec::::new(), - })?; - } - - Ok(()) + pub async fn flush_servers<'a, S>(&self, servers: S) -> Result<()> + where + S: Stream + Send + 'a, + { + servers + .map(ToOwned::to_owned) + .map(Destination::Normal) + .map(Ok) + .try_for_each(|dest| { + ready(self.dispatch(Msg { + dest, + event: SendingEvent::Flush, + queue_id: Vec::::new(), + })) + }) + .await } #[tracing::instrument(skip_all, name = "request")] @@ -263,11 +269,10 @@ impl Service { /// Cleanup event data /// Used for instance after we remove an appservice registration #[tracing::instrument(skip(self), level = "debug")] - pub fn cleanup_events(&self, appservice_id: String) -> Result<()> { + pub async fn cleanup_events(&self, appservice_id: String) { self.db - .delete_all_requests_for(&Destination::Appservice(appservice_id))?; - - Ok(()) + .delete_all_requests_for(&Destination::Appservice(appservice_id)) + .await; 
} fn dispatch(&self, msg: Msg) -> Result<()> { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 206bf92b..4db9922a 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -7,18 +7,15 @@ use std::{ use base64::{engine::general_purpose, Engine as _}; use conduit::{ - debug, debug_warn, error, trace, - utils::{calculate_hash, math::continue_exponential_backoff_secs}, + debug, debug_warn, err, trace, + utils::{calculate_hash, math::continue_exponential_backoff_secs, ReadyExt}, warn, Error, Result, }; -use federation::transactions::send_transaction_message; -use futures_util::{future::BoxFuture, stream::FuturesUnordered, StreamExt}; +use futures::{future::BoxFuture, pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; use ruma::{ - api::federation::{ - self, - transactions::edu::{ - DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap, - }, + api::federation::transactions::{ + edu::{DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap}, + send_transaction_message, }, device_id, events::{push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, @@ -28,7 +25,7 @@ use ruma::{ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use tokio::time::sleep_until; -use super::{appservice, Destination, Msg, SendingEvent, Service}; +use super::{appservice, data::QueueItem, Destination, Msg, SendingEvent, Service}; #[derive(Debug)] enum TransactionStatus { @@ -50,20 +47,20 @@ const CLEANUP_TIMEOUT_MS: u64 = 3500; impl Service { #[tracing::instrument(skip_all, name = "sender")] pub(super) async fn sender(&self) -> Result<()> { - let receiver = self.receiver.lock().await; - let mut futures: SendingFutures<'_> = FuturesUnordered::new(); let mut statuses: CurTransactionStatus = CurTransactionStatus::new(); + let mut futures: SendingFutures<'_> = 
FuturesUnordered::new(); + let receiver = self.receiver.lock().await; - self.initial_requests(&futures, &mut statuses); + self.initial_requests(&mut futures, &mut statuses).await; loop { debug_assert!(!receiver.is_closed(), "channel error"); tokio::select! { request = receiver.recv_async() => match request { - Ok(request) => self.handle_request(request, &futures, &mut statuses), + Ok(request) => self.handle_request(request, &mut futures, &mut statuses).await, Err(_) => break, }, Some(response) = futures.next() => { - self.handle_response(response, &futures, &mut statuses); + self.handle_response(response, &mut futures, &mut statuses).await; }, } } @@ -72,18 +69,16 @@ impl Service { Ok(()) } - fn handle_response<'a>( - &'a self, response: SendingResult, futures: &SendingFutures<'a>, statuses: &mut CurTransactionStatus, + async fn handle_response<'a>( + &'a self, response: SendingResult, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus, ) { match response { - Ok(dest) => self.handle_response_ok(&dest, futures, statuses), - Err((dest, e)) => Self::handle_response_err(dest, futures, statuses, &e), + Ok(dest) => self.handle_response_ok(&dest, futures, statuses).await, + Err((dest, e)) => Self::handle_response_err(dest, statuses, &e), }; } - fn handle_response_err( - dest: Destination, _futures: &SendingFutures<'_>, statuses: &mut CurTransactionStatus, e: &Error, - ) { + fn handle_response_err(dest: Destination, statuses: &mut CurTransactionStatus, e: &Error) { debug!(dest = ?dest, "{e:?}"); statuses.entry(dest).and_modify(|e| { *e = match e { @@ -94,39 +89,40 @@ impl Service { }); } - fn handle_response_ok<'a>( - &'a self, dest: &Destination, futures: &SendingFutures<'a>, statuses: &mut CurTransactionStatus, + #[allow(clippy::needless_pass_by_ref_mut)] + async fn handle_response_ok<'a>( + &'a self, dest: &Destination, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus, ) { let _cork = self.db.db.cork(); - self.db - 
.delete_all_active_requests_for(dest) - .expect("all active requests deleted"); + self.db.delete_all_active_requests_for(dest).await; // Find events that have been added since starting the last request let new_events = self .db .queued_requests(dest) - .filter_map(Result::ok) .take(DEQUEUE_LIMIT) - .collect::>(); + .collect::>() + .await; // Insert any pdus we found if !new_events.is_empty() { - self.db - .mark_as_active(&new_events) - .expect("marked as active"); - let new_events_vec = new_events.into_iter().map(|(event, _)| event).collect(); - futures.push(Box::pin(self.send_events(dest.clone(), new_events_vec))); + self.db.mark_as_active(&new_events); + + let new_events_vec = new_events.into_iter().map(|(_, event)| event).collect(); + futures.push(self.send_events(dest.clone(), new_events_vec).boxed()); } else { statuses.remove(dest); } } - fn handle_request<'a>(&'a self, msg: Msg, futures: &SendingFutures<'a>, statuses: &mut CurTransactionStatus) { - let iv = vec![(msg.event, msg.queue_id)]; - if let Ok(Some(events)) = self.select_events(&msg.dest, iv, statuses) { + #[allow(clippy::needless_pass_by_ref_mut)] + async fn handle_request<'a>( + &'a self, msg: Msg, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus, + ) { + let iv = vec![(msg.queue_id, msg.event)]; + if let Ok(Some(events)) = self.select_events(&msg.dest, iv, statuses).await { if !events.is_empty() { - futures.push(Box::pin(self.send_events(msg.dest, events))); + futures.push(self.send_events(msg.dest, events).boxed()); } else { statuses.remove(&msg.dest); } @@ -142,7 +138,7 @@ impl Service { tokio::select! 
{ () = sleep_until(deadline.into()) => break, response = futures.next() => match response { - Some(response) => self.handle_response(response, futures, statuses), + Some(response) => self.handle_response(response, futures, statuses).await, None => return, } } @@ -151,16 +147,17 @@ impl Service { debug_warn!("Leaving with {} unfinished requests...", futures.len()); } - fn initial_requests<'a>(&'a self, futures: &SendingFutures<'a>, statuses: &mut CurTransactionStatus) { + #[allow(clippy::needless_pass_by_ref_mut)] + async fn initial_requests<'a>(&'a self, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus) { let keep = usize::try_from(self.server.config.startup_netburst_keep).unwrap_or(usize::MAX); let mut txns = HashMap::>::new(); - for (key, dest, event) in self.db.active_requests().filter_map(Result::ok) { + let mut active = self.db.active_requests().boxed(); + + while let Some((key, event, dest)) = active.next().await { let entry = txns.entry(dest.clone()).or_default(); if self.server.config.startup_netburst_keep >= 0 && entry.len() >= keep { - warn!("Dropping unsent event {:?} {:?}", dest, String::from_utf8_lossy(&key)); - self.db - .delete_active_request(&key) - .expect("active request deleted"); + warn!("Dropping unsent event {dest:?} {:?}", String::from_utf8_lossy(&key)); + self.db.delete_active_request(&key); } else { entry.push(event); } @@ -169,16 +166,16 @@ impl Service { for (dest, events) in txns { if self.server.config.startup_netburst && !events.is_empty() { statuses.insert(dest.clone(), TransactionStatus::Running); - futures.push(Box::pin(self.send_events(dest.clone(), events))); + futures.push(self.send_events(dest.clone(), events).boxed()); } } } #[tracing::instrument(skip_all, level = "debug")] - fn select_events( + async fn select_events( &self, dest: &Destination, - new_events: Vec<(SendingEvent, Vec)>, // Events we want to send: event and full key + new_events: Vec, // Events we want to send: event and full key statuses: 
&mut CurTransactionStatus, ) -> Result>> { let (allow, retry) = self.select_events_current(dest.clone(), statuses)?; @@ -195,8 +192,8 @@ impl Service { if retry { self.db .active_requests_for(dest) - .filter_map(Result::ok) - .for_each(|(_, e)| events.push(e)); + .ready_for_each(|(_, e)| events.push(e)) + .await; return Ok(Some(events)); } @@ -204,17 +201,17 @@ impl Service { // Compose the next transaction let _cork = self.db.db.cork(); if !new_events.is_empty() { - self.db.mark_as_active(&new_events)?; - for (e, _) in new_events { + self.db.mark_as_active(&new_events); + for (_, e) in new_events { events.push(e); } } // Add EDU's into the transaction if let Destination::Normal(server_name) = dest { - if let Ok((select_edus, last_count)) = self.select_edus(server_name) { + if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { events.extend(select_edus.into_iter().map(SendingEvent::Edu)); - self.db.set_latest_educount(server_name, last_count)?; + self.db.set_latest_educount(server_name, last_count); } } @@ -248,26 +245,32 @@ impl Service { } #[tracing::instrument(skip_all, level = "debug")] - fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { + async fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { // u64: count of last edu - let since = self.db.get_latest_educount(server_name)?; + let since = self.db.get_latest_educount(server_name).await; let mut events = Vec::new(); let mut max_edu_count = since; let mut device_list_changes = HashSet::new(); - for room_id in self.services.state_cache.server_rooms(server_name) { - let room_id = room_id?; + let server_rooms = self.services.state_cache.server_rooms(server_name); + + pin_mut!(server_rooms); + while let Some(room_id) = server_rooms.next().await { // Look for device list updates in this room device_list_changes.extend( self.services .users - .keys_changed(room_id.as_ref(), since, None) - .filter_map(Result::ok) - .filter(|user_id| 
self.services.globals.user_is_local(user_id)), + .keys_changed(room_id.as_str(), since, None) + .ready_filter(|user_id| self.services.globals.user_is_local(user_id)) + .map(ToOwned::to_owned) + .collect::>() + .await, ); if self.server.config.allow_outgoing_read_receipts - && !self.select_edus_receipts(&room_id, since, &mut max_edu_count, &mut events)? + && !self + .select_edus_receipts(room_id, since, &mut max_edu_count, &mut events) + .await? { break; } @@ -290,19 +293,22 @@ impl Service { } if self.server.config.allow_outgoing_presence { - self.select_edus_presence(server_name, since, &mut max_edu_count, &mut events)?; + self.select_edus_presence(server_name, since, &mut max_edu_count, &mut events) + .await?; } Ok((events, max_edu_count)) } /// Look for presence - fn select_edus_presence( + async fn select_edus_presence( &self, server_name: &ServerName, since: u64, max_edu_count: &mut u64, events: &mut Vec>, ) -> Result { - // Look for presence updates for this server + let presence_since = self.services.presence.presence_since(since); + + pin_mut!(presence_since); let mut presence_updates = Vec::new(); - for (user_id, count, presence_bytes) in self.services.presence.presence_since(since) { + while let Some((user_id, count, presence_bytes)) = presence_since.next().await { *max_edu_count = cmp::max(count, *max_edu_count); if !self.services.globals.user_is_local(&user_id) { @@ -312,7 +318,8 @@ impl Service { if !self .services .state_cache - .server_sees_user(server_name, &user_id)? 
+ .server_sees_user(server_name, &user_id) + .await { continue; } @@ -320,7 +327,9 @@ impl Service { let presence_event = self .services .presence - .from_json_bytes_to_event(&presence_bytes, &user_id)?; + .from_json_bytes_to_event(&presence_bytes, &user_id) + .await?; + presence_updates.push(PresenceUpdate { user_id, presence: presence_event.content.presence, @@ -346,32 +355,33 @@ impl Service { } /// Look for read receipts in this room - fn select_edus_receipts( + async fn select_edus_receipts( &self, room_id: &RoomId, since: u64, max_edu_count: &mut u64, events: &mut Vec>, ) -> Result { - for r in self + let receipts = self .services .read_receipt - .readreceipts_since(room_id, since) - { - let (user_id, count, read_receipt) = r?; - *max_edu_count = cmp::max(count, *max_edu_count); + .readreceipts_since(room_id, since); + pin_mut!(receipts); + while let Some((user_id, count, read_receipt)) = receipts.next().await { + *max_edu_count = cmp::max(count, *max_edu_count); if !self.services.globals.user_is_local(&user_id) { continue; } let event = serde_json::from_str(read_receipt.json().get()) .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; + let federation_event = if let AnySyncEphemeralRoomEvent::Receipt(r) = event { let mut read = BTreeMap::new(); - let (event_id, mut receipt) = r .content .0 .into_iter() .next() .expect("we only use one event per read receipt"); + let receipt = receipt .remove(&ReceiptType::Read) .expect("our read receipts always set this") @@ -427,24 +437,17 @@ impl Service { async fn send_events_dest_appservice( &self, dest: &Destination, id: &str, events: Vec, ) -> SendingResult { - let mut pdu_jsons = Vec::new(); + let Some(appservice) = self.services.appservice.get_registration(id).await else { + return Err((dest.clone(), err!(Database(warn!(?id, "Missing appservice registration"))))); + }; + let mut pdu_jsons = Vec::new(); for event in &events { match event { SendingEvent::Pdu(pdu_id) => { - pdu_jsons.push( - 
self.services - .timeline - .get_pdu_from_id(pdu_id) - .map_err(|e| (dest.clone(), e))? - .ok_or_else(|| { - ( - dest.clone(), - Error::bad_database("[Appservice] Event in servernameevent_data not found in db."), - ) - })? - .to_room_event(), - ); + if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { + pdu_jsons.push(pdu.to_room_event()); + } }, SendingEvent::Edu(_) | SendingEvent::Flush => { // Appservices don't need EDUs (?) and flush only; @@ -453,32 +456,24 @@ impl Service { } } + let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEvent::Edu(b) | SendingEvent::Pdu(b) => &**b, + SendingEvent::Flush => &[], + }) + .collect::>(), + )); + //debug_assert!(!pdu_jsons.is_empty(), "sending empty transaction"); let client = &self.services.client.appservice; match appservice::send_request( client, - self.services - .appservice - .get_registration(id) - .await - .ok_or_else(|| { - ( - dest.clone(), - Error::bad_database("[Appservice] Could not load registration from db."), - ) - })?, + appservice, ruma::api::appservice::event::push_events::v1::Request { events: pdu_jsons, - txn_id: (&*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEvent::Edu(b) | SendingEvent::Pdu(b) => &**b, - SendingEvent::Flush => &[], - }) - .collect::>(), - ))) - .into(), + txn_id: txn_id.into(), ephemeral: Vec::new(), to_device: Vec::new(), }, @@ -494,23 +489,17 @@ impl Service { async fn send_events_dest_push( &self, dest: &Destination, userid: &OwnedUserId, pushkey: &str, events: Vec, ) -> SendingResult { - let mut pdus = Vec::new(); + let Ok(pusher) = self.services.pusher.get_pusher(userid, pushkey).await else { + return Err((dest.clone(), err!(Database(error!(?userid, ?pushkey, "Missing pusher"))))); + }; + let mut pdus = Vec::new(); for event in &events { match event { SendingEvent::Pdu(pdu_id) => { - pdus.push( - self.services - .timeline - 
.get_pdu_from_id(pdu_id) - .map_err(|e| (dest.clone(), e))? - .ok_or_else(|| { - ( - dest.clone(), - Error::bad_database("[Push] Event in servernameevent_data not found in db."), - ) - })?, - ); + if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { + pdus.push(pdu); + } }, SendingEvent::Edu(_) | SendingEvent::Flush => { // Push gateways don't need EDUs (?) and flush only; @@ -529,28 +518,22 @@ impl Service { } } - let Some(pusher) = self - .services - .pusher - .get_pusher(userid, pushkey) - .map_err(|e| (dest.clone(), e))? - else { - continue; - }; - let rules_for_user = self .services .account_data .get(None, userid, GlobalAccountDataEventType::PushRules.to_string().into()) - .unwrap_or_default() - .and_then(|event| serde_json::from_str::(event.get()).ok()) - .map_or_else(|| push::Ruleset::server_default(userid), |ev: PushRulesEvent| ev.content.global); + .await + .and_then(|event| serde_json::from_str::(event.get()).map_err(Into::into)) + .map_or_else( + |_| push::Ruleset::server_default(userid), + |ev: PushRulesEvent| ev.content.global, + ); let unread: UInt = self .services .user .notification_count(userid, &pdu.room_id) - .map_err(|e| (dest.clone(), e))? + .await .try_into() .expect("notification count can't go that high"); @@ -559,7 +542,6 @@ impl Service { .pusher .send_push_notice(userid, unread, &pusher, rules_for_user, &pdu) .await - .map(|_response| dest.clone()) .map_err(|e| (dest.clone(), e)); } @@ -586,21 +568,11 @@ impl Service { for event in &events { match event { // TODO: check room version and remove event_id if needed - SendingEvent::Pdu(pdu_id) => pdu_jsons.push( - self.convert_to_outgoing_federation_event( - self.services - .timeline - .get_pdu_json_from_id(pdu_id) - .map_err(|e| (dest.clone(), e))? 
- .ok_or_else(|| { - error!(?dest, ?server, ?pdu_id, "event not found"); - ( - dest.clone(), - Error::bad_database("[Normal] Event in servernameevent_data not found in db."), - ) - })?, - ), - ), + SendingEvent::Pdu(pdu_id) => { + if let Ok(pdu) = self.services.timeline.get_pdu_json_from_id(pdu_id).await { + pdu_jsons.push(self.convert_to_outgoing_federation_event(pdu).await); + } + }, SendingEvent::Edu(edu) => { if let Ok(raw) = serde_json::from_slice(edu) { edu_jsons.push(raw); @@ -647,7 +619,7 @@ impl Service { } /// This does not return a full `Pdu` it is only to satisfy ruma's types. - pub fn convert_to_outgoing_federation_event(&self, mut pdu_json: CanonicalJsonObject) -> Box { + pub async fn convert_to_outgoing_federation_event(&self, mut pdu_json: CanonicalJsonObject) -> Box { if let Some(unsigned) = pdu_json .get_mut("unsigned") .and_then(|val| val.as_object_mut()) @@ -660,7 +632,7 @@ impl Service { .get("room_id") .and_then(|val| RoomId::parse(val.as_str()?).ok()) { - match self.services.state.get_room_version(&room_id) { + match self.services.state.get_room_version(&room_id).await { Ok(room_version_id) => match room_version_id { RoomVersionId::V1 | RoomVersionId::V2 => {}, _ => _ = pdu_json.remove("event_id"), diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index a565e500..ae2b8c3c 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -5,7 +5,7 @@ use std::{ }; use conduit::{debug, debug_error, debug_warn, err, error, info, trace, warn, Err, Result}; -use futures_util::{stream::FuturesUnordered, StreamExt}; +use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::federation::{ discovery::{ @@ -179,7 +179,8 @@ impl Service { let result: BTreeMap<_, _> = self .services .globals - .verify_keys_for(origin)? + .verify_keys_for(origin) + .await? 
.into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(); @@ -236,7 +237,8 @@ impl Service { .services .globals .db - .add_signing_key(&k.server_name, k.clone())? + .add_signing_key(&k.server_name, k.clone()) + .await .into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect::>(); @@ -283,7 +285,8 @@ impl Service { .services .globals .db - .add_signing_key(&origin, key)? + .add_signing_key(&origin, key) + .await .into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(); @@ -384,7 +387,8 @@ impl Service { let mut result: BTreeMap<_, _> = self .services .globals - .verify_keys_for(origin)? + .verify_keys_for(origin) + .await? .into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(); @@ -431,7 +435,8 @@ impl Service { self.services .globals .db - .add_signing_key(origin, k.clone())?; + .add_signing_key(origin, k.clone()) + .await; result.extend( k.verify_keys .into_iter() @@ -462,7 +467,8 @@ impl Service { self.services .globals .db - .add_signing_key(origin, server_key.clone())?; + .add_signing_key(origin, server_key.clone()) + .await; result.extend( server_key @@ -495,7 +501,8 @@ impl Service { self.services .globals .db - .add_signing_key(origin, server_key.clone())?; + .add_signing_key(origin, server_key.clone()) + .await; result.extend( server_key @@ -545,7 +552,8 @@ impl Service { self.services .globals .db - .add_signing_key(origin, k.clone())?; + .add_signing_key(origin, k.clone()) + .await; result.extend( k.verify_keys .into_iter() diff --git a/src/service/services.rs b/src/service/services.rs index 3aa095b8..da22fb2d 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -14,7 +14,7 @@ use crate::{ manager::Manager, media, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, - transaction_ids, uiaa, updates, users, + sync, transaction_ids, uiaa, updates, users, }; pub struct Services { @@ -32,6 +32,7 @@ pub struct Services { pub rooms: rooms::Service, pub sending: Arc, pub 
server_keys: Arc, + pub sync: Arc, pub transaction_ids: Arc, pub uiaa: Arc, pub updates: Arc, @@ -96,6 +97,7 @@ impl Services { }, sending: build!(sending::Service), server_keys: build!(server_keys::Service), + sync: build!(sync::Service), transaction_ids: build!(transaction_ids::Service), uiaa: build!(uiaa::Service), updates: build!(updates::Service), diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs new file mode 100644 index 00000000..1bf4610f --- /dev/null +++ b/src/service/sync/mod.rs @@ -0,0 +1,233 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::{Arc, Mutex, Mutex as StdMutex}, +}; + +use conduit::Result; +use ruma::{ + api::client::sync::sync_events::{ + self, + v4::{ExtensionsConfig, SyncRequestList}, + }, + OwnedDeviceId, OwnedRoomId, OwnedUserId, +}; + +pub struct Service { + connections: DbConnections, +} + +struct SlidingSyncCache { + lists: BTreeMap, + subscriptions: BTreeMap, + known_rooms: BTreeMap>, // For every room, the roomsince number + extensions: ExtensionsConfig, +} + +type DbConnections = Mutex>; +type DbConnectionsKey = (OwnedUserId, OwnedDeviceId, String); +type DbConnectionsVal = Arc>; + +impl crate::Service for Service { + fn build(_args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + connections: StdMutex::new(BTreeMap::new()), + })) + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +impl Service { + pub fn remembered(&self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String) -> bool { + self.connections + .lock() + .unwrap() + .contains_key(&(user_id, device_id, conn_id)) + } + + pub fn forget_sync_request_connection(&self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String) { + self.connections + .lock() + .expect("locked") + .remove(&(user_id, device_id, conn_id)); + } + + pub fn update_sync_request_with_cache( + &self, user_id: OwnedUserId, device_id: OwnedDeviceId, request: &mut sync_events::v4::Request, + ) -> BTreeMap> { + let 
Some(conn_id) = request.conn_id.clone() else { + return BTreeMap::new(); + }; + + let mut cache = self.connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + for (list_id, list) in &mut request.lists { + if let Some(cached_list) = cached.lists.get(list_id) { + if list.sort.is_empty() { + list.sort.clone_from(&cached_list.sort); + }; + if list.room_details.required_state.is_empty() { + list.room_details + .required_state + .clone_from(&cached_list.room_details.required_state); + }; + list.room_details.timeline_limit = list + .room_details + .timeline_limit + .or(cached_list.room_details.timeline_limit); + list.include_old_rooms = list + .include_old_rooms + .clone() + .or_else(|| cached_list.include_old_rooms.clone()); + match (&mut list.filters, cached_list.filters.clone()) { + (Some(list_filters), Some(cached_filters)) => { + list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); + if list_filters.spaces.is_empty() { + list_filters.spaces = cached_filters.spaces; + } + list_filters.is_encrypted = list_filters.is_encrypted.or(cached_filters.is_encrypted); + list_filters.is_invite = list_filters.is_invite.or(cached_filters.is_invite); + if list_filters.room_types.is_empty() { + list_filters.room_types = cached_filters.room_types; + } + if list_filters.not_room_types.is_empty() { + list_filters.not_room_types = cached_filters.not_room_types; + } + list_filters.room_name_like = list_filters + .room_name_like + .clone() + .or(cached_filters.room_name_like); + if list_filters.tags.is_empty() { + list_filters.tags = cached_filters.tags; + } + if list_filters.not_tags.is_empty() { + list_filters.not_tags = 
cached_filters.not_tags; + } + }, + (_, Some(cached_filters)) => list.filters = Some(cached_filters), + (Some(list_filters), _) => list.filters = Some(list_filters.clone()), + (..) => {}, + } + if list.bump_event_types.is_empty() { + list.bump_event_types + .clone_from(&cached_list.bump_event_types); + }; + } + cached.lists.insert(list_id.clone(), list.clone()); + } + + cached + .subscriptions + .extend(request.room_subscriptions.clone()); + request + .room_subscriptions + .extend(cached.subscriptions.clone()); + + request.extensions.e2ee.enabled = request + .extensions + .e2ee + .enabled + .or(cached.extensions.e2ee.enabled); + + request.extensions.to_device.enabled = request + .extensions + .to_device + .enabled + .or(cached.extensions.to_device.enabled); + + request.extensions.account_data.enabled = request + .extensions + .account_data + .enabled + .or(cached.extensions.account_data.enabled); + request.extensions.account_data.lists = request + .extensions + .account_data + .lists + .clone() + .or_else(|| cached.extensions.account_data.lists.clone()); + request.extensions.account_data.rooms = request + .extensions + .account_data + .rooms + .clone() + .or_else(|| cached.extensions.account_data.rooms.clone()); + + cached.extensions = request.extensions.clone(); + + cached.known_rooms.clone() + } + + pub fn update_sync_subscriptions( + &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, + subscriptions: BTreeMap, + ) { + let mut cache = self.connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + cached.subscriptions = subscriptions; + } + + pub fn update_sync_known_rooms( + &self, user_id: OwnedUserId, 
device_id: OwnedDeviceId, conn_id: String, list_id: String, + new_cached_rooms: BTreeSet, globalsince: u64, + ) { + let mut cache = self.connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + for (roomid, lastsince) in cached + .known_rooms + .entry(list_id.clone()) + .or_default() + .iter_mut() + { + if !new_cached_rooms.contains(roomid) { + *lastsince = 0; + } + } + let list = cached.known_rooms.entry(list_id).or_default(); + for roomid in new_cached_rooms { + list.insert(roomid, globalsince); + } + } +} diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs deleted file mode 100644 index 791b46f0..00000000 --- a/src/service/transaction_ids/data.rs +++ /dev/null @@ -1,44 +0,0 @@ -use std::sync::Arc; - -use conduit::Result; -use database::{Database, Map}; -use ruma::{DeviceId, TransactionId, UserId}; - -pub struct Data { - userdevicetxnid_response: Arc, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - userdevicetxnid_response: db["userdevicetxnid_response"].clone(), - } - } - - pub(super) fn add_txnid( - &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, data: &[u8], - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default()); - key.push(0xFF); - key.extend_from_slice(txn_id.as_bytes()); - - self.userdevicetxnid_response.insert(&key, data)?; - - Ok(()) - } - - pub(super) fn existing_txnid( - &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, - ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); 
- key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default()); - key.push(0xFF); - key.extend_from_slice(txn_id.as_bytes()); - - // If there's no entry, this is a new transaction - self.userdevicetxnid_response.get(&key) - } -} diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 78e6337f..72f60adb 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,35 +1,45 @@ -mod data; - use std::sync::Arc; -use conduit::Result; -use data::Data; +use conduit::{implement, Result}; +use database::{Handle, Map}; use ruma::{DeviceId, TransactionId, UserId}; pub struct Service { - pub db: Data, + db: Data, +} + +struct Data { + userdevicetxnid_response: Arc, } impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(args.db), + db: Data { + userdevicetxnid_response: args.db["userdevicetxnid_response"].clone(), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - pub fn add_txnid( - &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, data: &[u8], - ) -> Result<()> { - self.db.add_txnid(user_id, device_id, txn_id, data) - } +#[implement(Service)] +pub fn add_txnid(&self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, data: &[u8]) { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default()); + key.push(0xFF); + key.extend_from_slice(txn_id.as_bytes()); - pub fn existing_txnid( - &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, - ) -> Result>> { - self.db.existing_txnid(user_id, device_id, txn_id) - } + self.db.userdevicetxnid_response.insert(&key, data); +} + +// If there's no entry, this is a new transaction +#[implement(Service)] +pub async fn existing_txnid( + &self, user_id: &UserId, device_id: Option<&DeviceId>, 
txn_id: &TransactionId, +) -> Result> { + let key = (user_id, device_id, txn_id); + self.db.userdevicetxnid_response.qry(&key).await } diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs deleted file mode 100644 index ce071da0..00000000 --- a/src/service/uiaa/data.rs +++ /dev/null @@ -1,87 +0,0 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, RwLock}, -}; - -use conduit::{Error, Result}; -use database::{Database, Map}; -use ruma::{ - api::client::{error::ErrorKind, uiaa::UiaaInfo}, - CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, -}; - -pub struct Data { - userdevicesessionid_uiaarequest: RwLock>, - userdevicesessionid_uiaainfo: Arc, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), - userdevicesessionid_uiaainfo: db["userdevicesessionid_uiaainfo"].clone(), - } - } - - pub(super) fn set_uiaa_request( - &self, user_id: &UserId, device_id: &DeviceId, session: &str, request: &CanonicalJsonValue, - ) -> Result<()> { - self.userdevicesessionid_uiaarequest - .write() - .unwrap() - .insert( - (user_id.to_owned(), device_id.to_owned(), session.to_owned()), - request.to_owned(), - ); - - Ok(()) - } - - pub(super) fn get_uiaa_request( - &self, user_id: &UserId, device_id: &DeviceId, session: &str, - ) -> Option { - self.userdevicesessionid_uiaarequest - .read() - .unwrap() - .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) - .map(ToOwned::to_owned) - } - - pub(super) fn update_uiaa_session( - &self, user_id: &UserId, device_id: &DeviceId, session: &str, uiaainfo: Option<&UiaaInfo>, - ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xFF); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xFF); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - if let Some(uiaainfo) = uiaainfo { - self.userdevicesessionid_uiaainfo.insert( - 
&userdevicesessionid, - &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), - )?; - } else { - self.userdevicesessionid_uiaainfo - .remove(&userdevicesessionid)?; - } - - Ok(()) - } - - pub(super) fn get_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: &str) -> Result { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xFF); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xFF); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - serde_json::from_slice( - &self - .userdevicesessionid_uiaainfo - .get(&userdevicesessionid)? - .ok_or(Error::BadRequest(ErrorKind::forbidden(), "UIAA session does not exist."))?, - ) - .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) - } -} diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 6041bbd3..7e231514 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,174 +1,243 @@ -mod data; +use std::{ + collections::BTreeMap, + sync::{Arc, RwLock}, +}; -use std::sync::Arc; - -use conduit::{error, utils, utils::hash, Error, Result, Server}; -use data::Data; +use conduit::{ + err, error, implement, utils, + utils::{hash, string::EMPTY}, + Error, Result, Server, +}; +use database::{Deserialized, Map}; use ruma::{ api::client::{ error::ErrorKind, uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, }, - CanonicalJsonValue, DeviceId, UserId, + CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; use crate::{globals, users, Dep}; -pub const SESSION_ID_LENGTH: usize = 32; - pub struct Service { - server: Arc, + userdevicesessionid_uiaarequest: RwLock, + db: Data, services: Services, - pub db: Data, } struct Services { + server: Arc, globals: Dep, users: Dep, } +struct Data { + userdevicesessionid_uiaainfo: Arc, +} + +type RequestMap = BTreeMap; +type RequestKey = (OwnedUserId, OwnedDeviceId, String); + +pub const 
SESSION_ID_LENGTH: usize = 32; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - server: args.server.clone(), + userdevicesessionid_uiaarequest: RwLock::new(RequestMap::new()), + db: Data { + userdevicesessionid_uiaainfo: args.db["userdevicesessionid_uiaainfo"].clone(), + }, services: Services { + server: args.server.clone(), globals: args.depend::("globals"), users: args.depend::("users"), }, - db: Data::new(args.db), })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Creates a new Uiaa session. Make sure the session token is unique. - pub fn create( - &self, user_id: &UserId, device_id: &DeviceId, uiaainfo: &UiaaInfo, json_body: &CanonicalJsonValue, - ) -> Result<()> { - self.db.set_uiaa_request( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), /* TODO: better session error handling (why - * is it optional in ruma?) */ - json_body, - )?; - self.db.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), - Some(uiaainfo), - ) +/// Creates a new Uiaa session. Make sure the session token is unique. +#[implement(Service)] +pub fn create(&self, user_id: &UserId, device_id: &DeviceId, uiaainfo: &UiaaInfo, json_body: &CanonicalJsonValue) { + // TODO: better session error handling (why is uiaainfo.session optional in + // ruma?) + self.set_uiaa_request( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session should be set"), + json_body, + ); + + self.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session should be set"), + Some(uiaainfo), + ); +} + +#[implement(Service)] +pub async fn try_auth( + &self, user_id: &UserId, device_id: &DeviceId, auth: &AuthData, uiaainfo: &UiaaInfo, +) -> Result<(bool, UiaaInfo)> { + let mut uiaainfo = if let Some(session) = auth.session() { + self.get_uiaa_session(user_id, device_id, session).await? 
+ } else { + uiaainfo.clone() + }; + + if uiaainfo.session.is_none() { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); } - pub fn try_auth( - &self, user_id: &UserId, device_id: &DeviceId, auth: &AuthData, uiaainfo: &UiaaInfo, - ) -> Result<(bool, UiaaInfo)> { - let mut uiaainfo = auth.session().map_or_else( - || Ok(uiaainfo.clone()), - |session| self.db.get_uiaa_session(user_id, device_id, session), - )?; + match auth { + // Find out what the user completed + AuthData::Password(Password { + identifier, + password, + #[cfg(feature = "element_hacks")] + user, + .. + }) => { + #[cfg(feature = "element_hacks")] + let username = if let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier { + username + } else if let Some(username) = user { + username + } else { + return Err(Error::BadRequest(ErrorKind::Unrecognized, "Identifier type not recognized.")); + }; - if uiaainfo.session.is_none() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - } + #[cfg(not(feature = "element_hacks"))] + let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier + else { + return Err(Error::BadRequest(ErrorKind::Unrecognized, "Identifier type not recognized.")); + }; - match auth { - // Find out what the user completed - AuthData::Password(Password { - identifier, - password, - #[cfg(feature = "element_hacks")] - user, - .. 
- }) => { - #[cfg(feature = "element_hacks")] - let username = if let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier { - username - } else if let Some(username) = user { - username - } else { - return Err(Error::BadRequest(ErrorKind::Unrecognized, "Identifier type not recognized.")); - }; + let user_id = UserId::parse_with_server_name(username.clone(), self.services.globals.server_name()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; - #[cfg(not(feature = "element_hacks"))] - let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier - else { - return Err(Error::BadRequest(ErrorKind::Unrecognized, "Identifier type not recognized.")); - }; - - let user_id = UserId::parse_with_server_name(username.clone(), self.services.globals.server_name()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; - - // Check if password is correct - if let Some(hash) = self.services.users.password_hash(&user_id)? { - let hash_matches = hash::verify_password(password, &hash).is_ok(); - if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { - kind: ErrorKind::forbidden(), - message: "Invalid username or password.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - } - - // Password was correct! 
Let's add it to `completed` - uiaainfo.completed.push(AuthType::Password); - }, - AuthData::RegistrationToken(t) => { - if Some(t.token.trim()) == self.server.config.registration_token.as_deref() { - uiaainfo.completed.push(AuthType::RegistrationToken); - } else { + // Check if password is correct + if let Ok(hash) = self.services.users.password_hash(&user_id).await { + let hash_matches = hash::verify_password(password, &hash).is_ok(); + if !hash_matches { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { kind: ErrorKind::forbidden(), - message: "Invalid registration token.".to_owned(), + message: "Invalid username or password.".to_owned(), }); return Ok((false, uiaainfo)); } - }, - AuthData::Dummy(_) => { - uiaainfo.completed.push(AuthType::Dummy); - }, - k => error!("type not supported: {:?}", k), - } - - // Check if a flow now succeeds - let mut completed = false; - 'flows: for flow in &mut uiaainfo.flows { - for stage in &flow.stages { - if !uiaainfo.completed.contains(stage) { - continue 'flows; - } } - // We didn't break, so this flow succeeded! - completed = true; - } - if !completed { - self.db.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - Some(&uiaainfo), - )?; - return Ok((false, uiaainfo)); - } + // Password was correct! Let's add it to `completed` + uiaainfo.completed.push(AuthType::Password); + }, + AuthData::RegistrationToken(t) => { + if Some(t.token.trim()) == self.services.server.config.registration_token.as_deref() { + uiaainfo.completed.push(AuthType::RegistrationToken); + } else { + uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { + kind: ErrorKind::forbidden(), + message: "Invalid registration token.".to_owned(), + }); + return Ok((false, uiaainfo)); + } + }, + AuthData::Dummy(_) => { + uiaainfo.completed.push(AuthType::Dummy); + }, + k => error!("type not supported: {:?}", k), + } - // UIAA was successful! 
Remove this session and return true - self.db.update_uiaa_session( + // Check if a flow now succeeds + let mut completed = false; + 'flows: for flow in &mut uiaainfo.flows { + for stage in &flow.stages { + if !uiaainfo.completed.contains(stage) { + continue 'flows; + } + } + // We didn't break, so this flow succeeded! + completed = true; + } + + if !completed { + self.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session is always set"), - None, - )?; - Ok((true, uiaainfo)) + Some(&uiaainfo), + ); + + return Ok((false, uiaainfo)); } - #[must_use] - pub fn get_uiaa_request( - &self, user_id: &UserId, device_id: &DeviceId, session: &str, - ) -> Option { - self.db.get_uiaa_request(user_id, device_id, session) + // UIAA was successful! Remove this session and return true + self.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session is always set"), + None, + ); + + Ok((true, uiaainfo)) +} + +#[implement(Service)] +fn set_uiaa_request(&self, user_id: &UserId, device_id: &DeviceId, session: &str, request: &CanonicalJsonValue) { + let key = (user_id.to_owned(), device_id.to_owned(), session.to_owned()); + self.userdevicesessionid_uiaarequest + .write() + .expect("locked for writing") + .insert(key, request.to_owned()); +} + +#[implement(Service)] +pub fn get_uiaa_request( + &self, user_id: &UserId, device_id: Option<&DeviceId>, session: &str, +) -> Option { + let key = ( + user_id.to_owned(), + device_id.unwrap_or_else(|| EMPTY.into()).to_owned(), + session.to_owned(), + ); + + self.userdevicesessionid_uiaarequest + .read() + .expect("locked for reading") + .get(&key) + .cloned() +} + +#[implement(Service)] +fn update_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: &str, uiaainfo: Option<&UiaaInfo>) { + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xFF); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xFF); 
+ userdevicesessionid.extend_from_slice(session.as_bytes()); + + if let Some(uiaainfo) = uiaainfo { + self.db.userdevicesessionid_uiaainfo.insert( + &userdevicesessionid, + &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), + ); + } else { + self.db + .userdevicesessionid_uiaainfo + .remove(&userdevicesessionid); } } + +#[implement(Service)] +async fn get_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: &str) -> Result { + let key = (user_id, device_id, session); + self.db + .userdevicesessionid_uiaainfo + .qry(&key) + .await + .deserialized_json() + .map_err(|_| err!(Request(Forbidden("UIAA session does not exist.")))) +} diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index 3c69b243..4e16e22b 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -1,19 +1,22 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduit::{debug, err, info, utils, warn, Error, Result}; -use database::Map; +use conduit::{debug, info, warn, Result}; +use database::{Deserialized, Map}; use ruma::events::room::message::RoomMessageEventContent; use serde::Deserialize; -use tokio::{sync::Notify, time::interval}; +use tokio::{ + sync::Notify, + time::{interval, MissedTickBehavior}, +}; use crate::{admin, client, globals, Dep}; pub struct Service { - services: Services, - db: Arc, - interrupt: Notify, interval: Duration, + interrupt: Notify, + db: Arc, + services: Services, } struct Services { @@ -22,12 +25,12 @@ struct Services { globals: Dep, } -#[derive(Deserialize)] +#[derive(Debug, Deserialize)] struct CheckForUpdatesResponse { updates: Vec, } -#[derive(Deserialize)] +#[derive(Debug, Deserialize)] struct CheckForUpdatesResponseEntry { id: u64, date: String, @@ -42,33 +45,38 @@ const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u"; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { + interval: Duration::from_secs(CHECK_FOR_UPDATES_INTERVAL), 
+ interrupt: Notify::new(), + db: args.db["global"].clone(), services: Services { globals: args.depend::("globals"), admin: args.depend::("admin"), client: args.depend::("client"), }, - db: args.db["global"].clone(), - interrupt: Notify::new(), - interval: Duration::from_secs(CHECK_FOR_UPDATES_INTERVAL), })) } + #[tracing::instrument(skip_all, name = "updates", level = "trace")] async fn worker(self: Arc) -> Result<()> { if !self.services.globals.allow_check_for_updates() { debug!("Disabling update check"); return Ok(()); } + let mut i = interval(self.interval); + i.set_missed_tick_behavior(MissedTickBehavior::Delay); loop { tokio::select! { - () = self.interrupt.notified() => return Ok(()), + () = self.interrupt.notified() => break, _ = i.tick() => (), } - if let Err(e) = self.handle_updates().await { + if let Err(e) = self.check().await { warn!(%e, "Failed to check for updates"); } } + + Ok(()) } fn interrupt(&self) { self.interrupt.notify_waiters(); } @@ -77,52 +85,52 @@ impl crate::Service for Service { } impl Service { - #[tracing::instrument(skip_all)] - async fn handle_updates(&self) -> Result<()> { + #[tracing::instrument(skip_all, level = "trace")] + async fn check(&self) -> Result<()> { let response = self .services .client .default .get(CHECK_FOR_UPDATES_URL) .send() + .await? + .text() .await?; - let response = serde_json::from_str::(&response.text().await?) - .map_err(|e| err!("Bad check for updates response: {e}"))?; - - let mut last_update_id = self.last_check_for_updates_id()?; - for update in response.updates { - last_update_id = last_update_id.max(update.id); - if update.id > self.last_check_for_updates_id()? 
{ - info!("{:#}", update.message); - self.services - .admin - .send_message(RoomMessageEventContent::text_markdown(format!( - "### the following is a message from the conduwuit puppy\n\nit was sent on `{}`:\n\n@room: {}", - update.date, update.message - ))) - .await; + let response = serde_json::from_str::(&response)?; + for update in &response.updates { + if update.id > self.last_check_for_updates_id().await { + self.handle(update).await; + self.update_check_for_updates_id(update.id); } } - self.update_check_for_updates_id(last_update_id)?; Ok(()) } + async fn handle(&self, update: &CheckForUpdatesResponseEntry) { + info!("{} {:#}", update.date, update.message); + self.services + .admin + .send_message(RoomMessageEventContent::text_markdown(format!( + "### the following is a message from the conduwuit puppy\n\nit was sent on `{}`:\n\n@room: {}", + update.date, update.message + ))) + .await + .ok(); + } + #[inline] - pub fn update_check_for_updates_id(&self, id: u64) -> Result<()> { + pub fn update_check_for_updates_id(&self, id: u64) { self.db - .insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes())?; - - Ok(()) + .insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes()); } - pub fn last_check_for_updates_id(&self) -> Result { + pub async fn last_check_for_updates_id(&self) -> u64 { self.db - .get(LAST_CHECK_FOR_UPDATES_COUNT)? 
- .map_or(Ok(0_u64), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("last check for updates count has invalid bytes.")) - }) + .qry(LAST_CHECK_FOR_UPDATES_COUNT) + .await + .deserialized() + .unwrap_or(0_u64) } } diff --git a/src/service/users/data.rs b/src/service/users/data.rs deleted file mode 100644 index 70ff12e3..00000000 --- a/src/service/users/data.rs +++ /dev/null @@ -1,1098 +0,0 @@ -use std::{collections::BTreeMap, mem::size_of, sync::Arc}; - -use conduit::{debug_info, err, utils, warn, Err, Error, Result, Server}; -use database::Map; -use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, StateEventType}, - serde::Raw, - uint, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId, - OwnedMxcUri, OwnedUserId, UInt, UserId, -}; - -use crate::{globals, rooms, users::clean_signatures, Dep}; - -pub struct Data { - keychangeid_userid: Arc, - keyid_key: Arc, - onetimekeyid_onetimekeys: Arc, - openidtoken_expiresatuserid: Arc, - todeviceid_events: Arc, - token_userdeviceid: Arc, - userdeviceid_metadata: Arc, - userdeviceid_token: Arc, - userfilterid_filter: Arc, - userid_avatarurl: Arc, - userid_blurhash: Arc, - userid_devicelistversion: Arc, - userid_displayname: Arc, - userid_lastonetimekeyupdate: Arc, - userid_masterkeyid: Arc, - userid_password: Arc, - userid_selfsigningkeyid: Arc, - userid_usersigningkeyid: Arc, - useridprofilekey_value: Arc, - services: Services, -} - -struct Services { - server: Arc, - globals: Dep, - state_cache: Dep, - state_accessor: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - keychangeid_userid: db["keychangeid_userid"].clone(), - keyid_key: db["keyid_key"].clone(), - onetimekeyid_onetimekeys: db["onetimekeyid_onetimekeys"].clone(), - openidtoken_expiresatuserid: 
db["openidtoken_expiresatuserid"].clone(), - todeviceid_events: db["todeviceid_events"].clone(), - token_userdeviceid: db["token_userdeviceid"].clone(), - userdeviceid_metadata: db["userdeviceid_metadata"].clone(), - userdeviceid_token: db["userdeviceid_token"].clone(), - userfilterid_filter: db["userfilterid_filter"].clone(), - userid_avatarurl: db["userid_avatarurl"].clone(), - userid_blurhash: db["userid_blurhash"].clone(), - userid_devicelistversion: db["userid_devicelistversion"].clone(), - userid_displayname: db["userid_displayname"].clone(), - userid_lastonetimekeyupdate: db["userid_lastonetimekeyupdate"].clone(), - userid_masterkeyid: db["userid_masterkeyid"].clone(), - userid_password: db["userid_password"].clone(), - userid_selfsigningkeyid: db["userid_selfsigningkeyid"].clone(), - userid_usersigningkeyid: db["userid_usersigningkeyid"].clone(), - useridprofilekey_value: db["useridprofilekey_value"].clone(), - services: Services { - server: args.server.clone(), - globals: args.depend::("globals"), - state_cache: args.depend::("rooms::state_cache"), - state_accessor: args.depend::("rooms::state_accessor"), - }, - } - } - - /// Check if a user has an account on this homeserver. - #[inline] - pub(super) fn exists(&self, user_id: &UserId) -> Result { - Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) - } - - /// Check if account is deactivated - pub(super) fn is_deactivated(&self, user_id: &UserId) -> Result { - Ok(self - .userid_password - .get(user_id.as_bytes())? - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "User does not exist."))? - .is_empty()) - } - - /// Returns the number of users registered on this server. - #[inline] - pub(super) fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } - - /// Find out which user an access token belongs to. - pub(super) fn find_from_token(&self, token: &str) -> Result> { - self.token_userdeviceid - .get(token.as_bytes())? 
- .map_or(Ok(None), |bytes| { - let mut parts = bytes.split(|&b| b == 0xFF); - let user_bytes = parts - .next() - .ok_or_else(|| err!(Database("User ID in token_userdeviceid is invalid.")))?; - let device_bytes = parts - .next() - .ok_or_else(|| err!(Database("Device ID in token_userdeviceid is invalid.")))?; - - Ok(Some(( - UserId::parse( - utils::string_from_bytes(user_bytes) - .map_err(|e| err!(Database("User ID in token_userdeviceid is invalid unicode. {e}")))?, - ) - .map_err(|e| err!(Database("User ID in token_userdeviceid is invalid. {e}")))?, - utils::string_from_bytes(device_bytes) - .map_err(|e| err!(Database("Device ID in token_userdeviceid is invalid. {e}")))?, - ))) - }) - } - - /// Returns an iterator over all users on this homeserver. - pub fn iter<'a>(&'a self) -> Box> + 'a> { - Box::new(self.userid_password.iter().map(|(bytes, _)| { - UserId::parse( - utils::string_from_bytes(&bytes) - .map_err(|e| err!(Database("User ID in userid_password is invalid unicode. {e}")))?, - ) - .map_err(|e| err!(Database("User ID in userid_password is invalid. {e}"))) - })) - } - - /// Returns a list of local users as list of usernames. - /// - /// A user account is considered `local` if the length of it's password is - /// greater then zero. - pub(super) fn list_local_users(&self) -> Result> { - let users: Vec = self - .userid_password - .iter() - .filter_map(|(username, pw)| get_username_with_valid_password(&username, &pw)) - .collect(); - Ok(users) - } - - /// Returns the password hash for the given user. - pub(super) fn password_hash(&self, user_id: &UserId) -> Result> { - self.userid_password - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Password hash in db is not valid string.") - })?)) - }) - } - - /// Hash and set the user's password to the Argon2 hash - pub(super) fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - if let Some(password) = password { - if let Ok(hash) = utils::hash::password(password) { - self.userid_password - .insert(user_id.as_bytes(), hash.as_bytes())?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Password does not meet the requirements.", - )) - } - } else { - self.userid_password.insert(user_id.as_bytes(), b"")?; - Ok(()) - } - } - - /// Returns the displayname of a user on this homeserver. - pub(super) fn displayname(&self, user_id: &UserId) -> Result> { - self.userid_displayname - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some( - utils::string_from_bytes(&bytes) - .map_err(|e| err!(Database("Displayname in db is invalid. {e}")))?, - )) - }) - } - - /// Sets a new displayname or removes it if displayname is None. You still - /// need to nofify all rooms of this change. - pub(super) fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { - if let Some(displayname) = displayname { - self.userid_displayname - .insert(user_id.as_bytes(), displayname.as_bytes())?; - } else { - self.userid_displayname.remove(user_id.as_bytes())?; - } - - Ok(()) - } - - /// Get the `avatar_url` of a user. - pub(super) fn avatar_url(&self, user_id: &UserId) -> Result> { - self.userid_avatarurl - .get(user_id.as_bytes())? - .map(|bytes| { - let s_bytes = utils::string_from_bytes(&bytes) - .map_err(|e| err!(Database(warn!("Avatar URL in db is invalid: {e}"))))?; - let mxc_uri: OwnedMxcUri = s_bytes.into(); - Ok(mxc_uri) - }) - .transpose() - } - - /// Sets a new avatar_url or removes it if avatar_url is None. 
- pub(super) fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { - if let Some(avatar_url) = avatar_url { - self.userid_avatarurl - .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; - } else { - self.userid_avatarurl.remove(user_id.as_bytes())?; - } - - Ok(()) - } - - /// Get the blurhash of a user. - pub(super) fn blurhash(&self, user_id: &UserId) -> Result> { - self.userid_blurhash - .get(user_id.as_bytes())? - .map(|bytes| { - utils::string_from_bytes(&bytes).map_err(|e| err!(Database("Avatar URL in db is invalid. {e}"))) - }) - .transpose() - } - - /// Gets a specific user profile key - pub(super) fn profile_key(&self, user_id: &UserId, profile_key: &str) -> Result> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(profile_key.as_bytes()); - - self.useridprofilekey_value - .get(&key)? - .map_or(Ok(None), |bytes| Ok(Some(serde_json::from_slice(&bytes).unwrap()))) - } - - /// Gets all the user's profile keys and values in an iterator - pub(super) fn all_profile_keys<'a>( - &'a self, user_id: &UserId, - ) -> Box> + 'a + Send> { - let prefix = user_id.as_bytes().to_vec(); - - Box::new( - self.useridprofilekey_value - .scan_prefix(prefix) - .map(|(key, value)| { - let profile_key_name = utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .ok_or_else(|| err!(Database("Profile key in db is invalid")))?, - ) - .map_err(|e| err!(Database("Profile key in db is invalid. {e}")))?; - - let profile_key_value = serde_json::from_slice(&value) - .map_err(|e| err!(Database("Profile key in db is invalid. 
{e}")))?; - - Ok((profile_key_name, profile_key_value)) - }), - ) - } - - /// Sets a new profile key value, removes the key if value is None - pub(super) fn set_profile_key( - &self, user_id: &UserId, profile_key: &str, profile_key_value: Option, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(profile_key.as_bytes()); - - // TODO: insert to the stable MSC4175 key when it's stable - if let Some(value) = profile_key_value { - let value = serde_json::to_vec(&value).unwrap(); - - self.useridprofilekey_value.insert(&key, &value) - } else { - self.useridprofilekey_value.remove(&key) - } - } - - /// Get the timezone of a user. - pub(super) fn timezone(&self, user_id: &UserId) -> Result> { - // first check the unstable prefix - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(b"us.cloke.msc4175.tz"); - - let value = self - .useridprofilekey_value - .get(&key)? - .map(|bytes| utils::string_from_bytes(&bytes).map_err(|e| err!(Database("Timezone in db is invalid. {e}")))) - .transpose() - .unwrap(); - - // TODO: transparently migrate unstable key usage to the stable key once MSC4133 - // and MSC4175 are stable, likely a remove/insert in this block - if value.is_none() || value.as_ref().is_some_and(String::is_empty) { - // check the stable prefix - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(b"m.tz"); - - return self - .useridprofilekey_value - .get(&key)? - .map(|bytes| { - utils::string_from_bytes(&bytes).map_err(|e| err!(Database("Timezone in db is invalid. {e}"))) - }) - .transpose(); - } - - Ok(value) - } - - /// Sets a new timezone or removes it if timezone is None. 
- pub(super) fn set_timezone(&self, user_id: &UserId, timezone: Option) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(b"us.cloke.msc4175.tz"); - - // TODO: insert to the stable MSC4175 key when it's stable - if let Some(timezone) = timezone { - self.useridprofilekey_value - .insert(&key, timezone.as_bytes())?; - } else { - self.useridprofilekey_value.remove(&key)?; - } - - Ok(()) - } - - /// Sets a new avatar_url or removes it if avatar_url is None. - pub(super) fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { - if let Some(blurhash) = blurhash { - self.userid_blurhash - .insert(user_id.as_bytes(), blurhash.as_bytes())?; - } else { - self.userid_blurhash.remove(user_id.as_bytes())?; - } - - Ok(()) - } - - /// Adds a new device to a user. - pub(super) fn create_device( - &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, - client_ip: Option, - ) -> Result<()> { - // This method should never be called for nonexistent users. We shouldn't assert - // though... - if !self.exists(user_id)? { - warn!("Called create_device for non-existent user {} in database", user_id); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "User does not exist.")); - } - - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(&Device { - device_id: device_id.into(), - display_name: initial_device_display_name, - last_seen_ip: client_ip, - last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), - }) - .expect("Device::to_string never fails."), - )?; - - self.set_token(user_id, device_id, token)?; - - Ok(()) - } - - /// Removes a device from a user. 
- pub(super) fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Remove tokens - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.userdeviceid_token.remove(&userdeviceid)?; - self.token_userdeviceid.remove(&old_token)?; - } - - // Remove todevice events - let mut prefix = userdeviceid.clone(); - prefix.push(0xFF); - - for (key, _) in self.todeviceid_events.scan_prefix(prefix) { - self.todeviceid_events.remove(&key)?; - } - - // TODO: Remove onetimekeys - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.remove(&userdeviceid)?; - - Ok(()) - } - - /// Returns an iterator over all device ids of this user. - pub(super) fn all_device_ids<'a>( - &'a self, user_id: &UserId, - ) -> Box> + 'a> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - // All devices have metadata - Box::new( - self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes - .rsplit(|&b| b == 0xFF) - .next() - .ok_or_else(|| err!(Database("UserDevice ID in db is invalid.")))?, - ) - .map_err(|e| err!(Database("Device ID in userdeviceid_metadata is invalid. {e}")))? - .into()) - }), - ) - } - - /// Replaces the access token of one device. - pub(super) fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // should not be None, but we shouldn't assert either lol... - if self.userdeviceid_metadata.get(&userdeviceid)?.is_none() { - return Err!(Database(error!( - "User {user_id:?} does not exist or device ID {device_id:?} has no metadata." 
- ))); - } - - // Remove old token - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.token_userdeviceid.remove(&old_token)?; - // It will be removed from userdeviceid_token by the insert later - } - - // Assign token to user device combination - self.userdeviceid_token - .insert(&userdeviceid, token.as_bytes())?; - self.token_userdeviceid - .insert(token.as_bytes(), &userdeviceid)?; - - Ok(()) - } - - pub(super) fn add_one_time_key( - &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, - one_time_key_value: &Raw, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(device_id.as_bytes()); - - // All devices have metadata - // Only existing devices should be able to call this, but we shouldn't assert - // either... - if self.userdeviceid_metadata.get(&key)?.is_none() { - return Err!(Database(error!( - "User {user_id:?} does not exist or device ID {device_id:?} has no metadata." - ))); - } - - key.push(0xFF); - // TODO: Use DeviceKeyId::to_string when it's available (and update everything, - // because there are no wrapping quotation marks anymore) - key.extend_from_slice( - serde_json::to_string(one_time_key_key) - .expect("DeviceKeyId::to_string always works") - .as_bytes(), - ); - - self.onetimekeyid_onetimekeys.insert( - &key, - &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), - )?; - - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &self.services.globals.next_count()?.to_be_bytes())?; - - Ok(()) - } - - pub(super) fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self.userid_lastonetimekeyupdate - .get(user_id.as_bytes())? - .map_or(Ok(0), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|e| err!(Database("Count in roomid_lastroomactiveupdate is invalid. 
{e}"))) - }) - } - - pub(super) fn take_one_time_key( - &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); - prefix.push(b'"'); // Annoying quotation mark - prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); - prefix.push(b':'); - - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &self.services.globals.next_count()?.to_be_bytes())?; - - self.onetimekeyid_onetimekeys - .scan_prefix(prefix) - .next() - .map(|(key, value)| { - self.onetimekeyid_onetimekeys.remove(&key)?; - - Ok(( - serde_json::from_slice( - key.rsplit(|&b| b == 0xFF) - .next() - .ok_or_else(|| err!(Database("OneTimeKeyId in db is invalid.")))?, - ) - .map_err(|e| err!(Database("OneTimeKeyId in db is invalid. {e}")))?, - serde_json::from_slice(&value).map_err(|e| err!(Database("OneTimeKeys in db are invalid. {e}")))?, - )) - }) - .transpose() - } - - pub(super) fn count_one_time_keys( - &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - let mut counts = BTreeMap::new(); - - for algorithm in self - .onetimekeyid_onetimekeys - .scan_prefix(userdeviceid) - .map(|(bytes, _)| { - Ok::<_, Error>( - serde_json::from_slice::( - bytes - .rsplit(|&b| b == 0xFF) - .next() - .ok_or_else(|| err!(Database("OneTimeKey ID in db is invalid.")))?, - ) - .map_err(|e| err!(Database("DeviceKeyId in db is invalid. {e}")))? 
- .algorithm(), - ) - }) { - let count: &mut UInt = counts.entry(algorithm?).or_default(); - *count = count.saturating_add(uint!(1)); - } - - Ok(counts) - } - - pub(super) fn add_device_keys( - &self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw, - ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.keyid_key.insert( - &userdeviceid, - &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), - )?; - - self.mark_device_key_update(user_id)?; - - Ok(()) - } - - pub(super) fn add_cross_signing_keys( - &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, - user_signing_key: &Option>, notify: bool, - ) -> Result<()> { - // TODO: Check signatures - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - - let (master_key_key, _) = Self::parse_master_key(user_id, master_key)?; - - self.keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes())?; - - self.userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key)?; - - // Self-signing key - if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key"))? 
- .keys - .into_values(); - - let self_signing_key_id = self_signing_key_ids - .next() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Self signing key contained no key."))?; - - if self_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained more than one key.", - )); - } - - let mut self_signing_key_key = prefix.clone(); - self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); - - self.keyid_key - .insert(&self_signing_key_key, self_signing_key.json().get().as_bytes())?; - - self.userid_selfsigningkeyid - .insert(user_id.as_bytes(), &self_signing_key_key)?; - } - - // User-signing key - if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key"))? - .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids - .next() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "User signing key contained no key."))?; - - if user_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained more than one key.", - )); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); - - self.keyid_key - .insert(&user_signing_key_key, user_signing_key.json().get().as_bytes())?; - - self.userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key)?; - } - - if notify { - self.mark_device_key_update(user_id)?; - } - - Ok(()) - } - - pub(super) fn sign_key( - &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - ) -> Result<()> { - let mut key = target_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(key_id.as_bytes()); - - let mut cross_signing_key: serde_json::Value = serde_json::from_slice( - &self - .keyid_key - .get(&key)? 
- .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Tried to sign nonexistent key."))?, - ) - .map_err(|e| err!(Database("key in keyid_key is invalid. {e}")))?; - - let signatures = cross_signing_key - .get_mut("signatures") - .ok_or_else(|| err!(Database("key in keyid_key has no signatures field.")))? - .as_object_mut() - .ok_or_else(|| err!(Database("key in keyid_key has invalid signatures field.")))? - .entry(sender_id.to_string()) - .or_insert_with(|| serde_json::Map::new().into()); - - signatures - .as_object_mut() - .ok_or_else(|| err!(Database("signatures in keyid_key for a user is invalid.")))? - .insert(signature.0, signature.1.into()); - - self.keyid_key.insert( - &key, - &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), - )?; - - self.mark_device_key_update(target_id)?; - - Ok(()) - } - - pub(super) fn keys_changed<'a>( - &'a self, user_or_room_id: &str, from: u64, to: Option, - ) -> Box> + 'a> { - let mut prefix = user_or_room_id.as_bytes().to_vec(); - prefix.push(0xFF); - - let mut start = prefix.clone(); - start.extend_from_slice(&(from.saturating_add(1)).to_be_bytes()); - - let to = to.unwrap_or(u64::MAX); - - Box::new( - self.keychangeid_userid - .iter_from(&start, false) - .take_while(move |(k, _)| { - k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xFF).nth(1) { - if let Ok(c) = utils::u64_from_bytes(current) { - c <= to - } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); - false - } - } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); - false - } - }) - .map(|(_, bytes)| { - UserId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") - })?, - ) - .map_err(|e| err!(Database("User ID in devicekeychangeid_userid is invalid. 
{e}"))) - }), - ) - } - - pub(super) fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { - let count = self.services.globals.next_count()?.to_be_bytes(); - for room_id in self - .services - .state_cache - .rooms_joined(user_id) - .filter_map(Result::ok) - { - // Don't send key updates to unencrypted rooms - if self - .services - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? - .is_none() - { - continue; - } - - let mut key = room_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(&count); - - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - } - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(&count); - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - - Ok(()) - } - - pub(super) fn get_device_keys(&self, user_id: &UserId, device_id: &DeviceId) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(device_id.as_bytes()); - - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some( - serde_json::from_slice(&bytes).map_err(|e| err!(Database("DeviceKeys in db are invalid. 
{e}")))?, - )) - }) - } - - pub(super) fn parse_master_key( - user_id: &UserId, master_key: &Raw, - ) -> Result<(Vec, CrossSigningKey)> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - - let master_key = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))?; - let mut master_key_ids = master_key.keys.values(); - let master_key_id = master_key_ids - .next() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Master key contained no key."))?; - if master_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained more than one key.", - )); - } - let mut master_key_key = prefix.clone(); - master_key_key.extend_from_slice(master_key_id.as_bytes()); - Ok((master_key_key, master_key)) - } - - pub(super) fn get_key( - &self, key: &[u8], sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|e| err!(Database("CrossSigningKey in db is invalid. {e}")))?; - clean_signatures(&mut cross_signing_key, sender_user, user_id, allowed_signatures)?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key).expect("Value to RawValue serialization"), - ))) - }) - } - - pub(super) fn get_master_key( - &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.userid_masterkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| self.get_key(&key, sender_user, user_id, allowed_signatures)) - } - - pub(super) fn get_self_signing_key( - &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.userid_selfsigningkeyid - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |key| self.get_key(&key, sender_user, user_id, allowed_signatures)) - } - - pub(super) fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { - self.userid_usersigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some( - serde_json::from_slice(&bytes) - .map_err(|e| err!(Database("CrossSigningKey in db is invalid. {e}")))?, - )) - }) - }) - } - - pub(super) fn add_to_device_event( - &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, - content: serde_json::Value, - ) -> Result<()> { - let mut key = target_user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(target_device_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes()); - - let mut json = serde_json::Map::new(); - json.insert("type".to_owned(), event_type.to_owned().into()); - json.insert("sender".to_owned(), sender.to_string().into()); - json.insert("content".to_owned(), content); - - let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); - - self.todeviceid_events.insert(&key, &value)?; - - Ok(()) - } - - pub(super) fn get_to_device_events( - &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result>> { - let mut events = Vec::new(); - - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); - - for (_, value) in self.todeviceid_events.scan_prefix(prefix) { - events.push( - serde_json::from_slice(&value) - .map_err(|e| err!(Database("Event in todeviceid_events is invalid. 
{e}")))?, - ); - } - - Ok(events) - } - - pub(super) fn remove_to_device_events(&self, user_id: &UserId, device_id: &DeviceId, until: u64) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); - - let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); - - for (key, _) in self - .todeviceid_events - .iter_from(&last, true) // this includes last - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes(&key[key.len().saturating_sub(size_of::())..key.len()]) - .map_err(|e| err!(Database("ToDeviceId has invalid count bytes. {e}")))?, - )) - }) - .filter_map(Result::ok) - .take_while(|&(_, count)| count <= until) - { - self.todeviceid_events.remove(&key)?; - } - - Ok(()) - } - - pub(super) fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: &Device) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Only existing devices should be able to call this, but we shouldn't assert - // either... - if self.userdeviceid_metadata.get(&userdeviceid)?.is_none() { - warn!( - "Called update_device_metadata for a non-existent user \"{}\" and/or device ID \"{}\" with no \ - metadata in database", - user_id, device_id - ); - return Err(Error::bad_database( - "User does not exist or device ID has no metadata in database.", - )); - } - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(device).expect("Device::to_string always works"), - )?; - - Ok(()) - } - - /// Get device metadata. 
- pub(super) fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.userdeviceid_metadata - .get(&userdeviceid)? - .map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("Metadata in userdeviceid_metadata is invalid.") - })?)) - }) - } - - pub(super) fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.userid_devicelistversion - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|e| err!(Database("Invalid devicelistversion in db. {e}"))) - .map(Some) - }) - } - - pub(super) fn all_devices_metadata<'a>( - &'a self, user_id: &UserId, - ) -> Box> + 'a> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - - Box::new( - self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes) - .map_err(|e| err!(Database("Device in userdeviceid_metadata is invalid. {e}"))) - }), - ) - } - - /// Creates a new sync filter. Returns the filter id. - pub(super) fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { - let filter_id = utils::random_string(4); - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(filter_id.as_bytes()); - - self.userfilterid_filter - .insert(&key, &serde_json::to_vec(&filter).expect("filter is valid json"))?; - - Ok(filter_id) - } - - pub(super) fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(filter_id.as_bytes()); - - let raw = self.userfilterid_filter.get(&key)?; - - if let Some(raw) = raw { - serde_json::from_slice(&raw).map_err(|e| err!(Database("Invalid filter event in db. 
{e}"))) - } else { - Ok(None) - } - } - - /// Creates an OpenID token, which can be used to prove that a user has - /// access to an account (primarily for integrations) - pub(super) fn create_openid_token(&self, user_id: &UserId, token: &str) -> Result { - use std::num::Saturating as Sat; - - let expires_in = self.services.server.config.openid_token_ttl; - let expires_at = Sat(utils::millis_since_unix_epoch()) + Sat(expires_in) * Sat(1000); - - let mut value = expires_at.0.to_be_bytes().to_vec(); - value.extend_from_slice(user_id.as_bytes()); - - self.openidtoken_expiresatuserid - .insert(token.as_bytes(), value.as_slice())?; - - Ok(expires_in) - } - - /// Find out which user an OpenID access token belongs to. - pub(super) fn find_from_openid_token(&self, token: &str) -> Result { - let Some(value) = self.openidtoken_expiresatuserid.get(token.as_bytes())? else { - return Err(Error::BadRequest(ErrorKind::Unauthorized, "OpenID token is unrecognised")); - }; - - let (expires_at_bytes, user_bytes) = value.split_at(0_u64.to_be_bytes().len()); - - let expires_at = u64::from_be_bytes( - expires_at_bytes - .try_into() - .map_err(|e| err!(Database("expires_at in openid_userid is invalid u64. {e}")))?, - ); - - if expires_at < utils::millis_since_unix_epoch() { - debug_info!("OpenID token is expired, removing"); - self.openidtoken_expiresatuserid.remove(token.as_bytes())?; - - return Err(Error::BadRequest(ErrorKind::Unauthorized, "OpenID token is expired")); - } - - UserId::parse( - utils::string_from_bytes(user_bytes) - .map_err(|e| err!(Database("User ID in openid_userid is invalid unicode. {e}")))?, - ) - .map_err(|e| err!(Database("User ID in openid_userid is invalid. {e}"))) - } -} - -/// Will only return with Some(username) if the password was not empty and the -/// username could be successfully parsed. -/// If `utils::string_from_bytes`(...) returns an error that username will be -/// skipped and the error will be logged. 
-pub(super) fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option { - // A valid password is not empty - if password.is_empty() { - None - } else { - match utils::string_from_bytes(username) { - Ok(u) => Some(u), - Err(e) => { - warn!("Failed to parse username while calling get_local_users(): {}", e.to_string()); - None - }, - } - } -} diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 80897b5f..9a058ba9 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,552 +1,984 @@ -mod data; +use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; -use std::{ - collections::{BTreeMap, BTreeSet}, - mem, - sync::{Arc, Mutex, Mutex as StdMutex}, +use conduit::{ + debug_warn, err, utils, + utils::{stream::TryIgnore, string::Unquoted, ReadyExt, TryReadyExt}, + warn, Err, Error, Result, Server, }; - -use conduit::{Error, Result}; +use database::{Deserialized, Ignore, Interfix, Map}; +use futures::{pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; use ruma::{ - api::client::{ - device::Device, - filter::FilterDefinition, - sync::sync_events::{ - self, - v4::{ExtensionsConfig, SyncRequestList}, - }, - }, + api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::AnyToDeviceEvent, + events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, OwnedRoomId, OwnedUserId, - UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId, + OwnedMxcUri, OwnedUserId, UInt, UserId, }; -use self::data::Data; -use crate::{admin, rooms, Dep}; +use crate::{admin, globals, rooms, Dep}; pub struct Service { - connections: DbConnections, - pub db: Data, services: Services, + db: Data, } struct Services { + server: Arc, admin: Dep, + globals: Dep, + state_accessor: Dep, state_cache: Dep, } +struct 
Data { + keychangeid_userid: Arc, + keyid_key: Arc, + onetimekeyid_onetimekeys: Arc, + openidtoken_expiresatuserid: Arc, + todeviceid_events: Arc, + token_userdeviceid: Arc, + userdeviceid_metadata: Arc, + userdeviceid_token: Arc, + userfilterid_filter: Arc, + userid_avatarurl: Arc, + userid_blurhash: Arc, + userid_devicelistversion: Arc, + userid_displayname: Arc, + userid_lastonetimekeyupdate: Arc, + userid_masterkeyid: Arc, + userid_password: Arc, + userid_selfsigningkeyid: Arc, + userid_usersigningkeyid: Arc, + useridprofilekey_value: Arc, +} + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - connections: StdMutex::new(BTreeMap::new()), - db: Data::new(&args), services: Services { + server: args.server.clone(), admin: args.depend::("admin"), + globals: args.depend::("globals"), + state_accessor: args.depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), }, + db: Data { + keychangeid_userid: args.db["keychangeid_userid"].clone(), + keyid_key: args.db["keyid_key"].clone(), + onetimekeyid_onetimekeys: args.db["onetimekeyid_onetimekeys"].clone(), + openidtoken_expiresatuserid: args.db["openidtoken_expiresatuserid"].clone(), + todeviceid_events: args.db["todeviceid_events"].clone(), + token_userdeviceid: args.db["token_userdeviceid"].clone(), + userdeviceid_metadata: args.db["userdeviceid_metadata"].clone(), + userdeviceid_token: args.db["userdeviceid_token"].clone(), + userfilterid_filter: args.db["userfilterid_filter"].clone(), + userid_avatarurl: args.db["userid_avatarurl"].clone(), + userid_blurhash: args.db["userid_blurhash"].clone(), + userid_devicelistversion: args.db["userid_devicelistversion"].clone(), + userid_displayname: args.db["userid_displayname"].clone(), + userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(), + userid_masterkeyid: args.db["userid_masterkeyid"].clone(), + userid_password: args.db["userid_password"].clone(), + 
userid_selfsigningkeyid: args.db["userid_selfsigningkeyid"].clone(), + userid_usersigningkeyid: args.db["userid_usersigningkeyid"].clone(), + useridprofilekey_value: args.db["useridprofilekey_value"].clone(), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -type DbConnections = Mutex>; -type DbConnectionsKey = (OwnedUserId, OwnedDeviceId, String); -type DbConnectionsVal = Arc>; - -struct SlidingSyncCache { - lists: BTreeMap, - subscriptions: BTreeMap, - known_rooms: BTreeMap>, // For every room, the roomsince number - extensions: ExtensionsConfig, -} - impl Service { - /// Check if a user has an account on this homeserver. - #[inline] - pub fn exists(&self, user_id: &UserId) -> Result { self.db.exists(user_id) } - - pub fn remembered(&self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String) -> bool { - self.connections - .lock() - .unwrap() - .contains_key(&(user_id, device_id, conn_id)) - } - - pub fn forget_sync_request_connection(&self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String) { - self.connections - .lock() - .unwrap() - .remove(&(user_id, device_id, conn_id)); - } - - pub fn update_sync_request_with_cache( - &self, user_id: OwnedUserId, device_id: OwnedDeviceId, request: &mut sync_events::v4::Request, - ) -> BTreeMap> { - let Some(conn_id) = request.conn_id.clone() else { - return BTreeMap::new(); - }; - - let mut cache = self.connections.lock().unwrap(); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - for (list_id, list) in &mut request.lists { - if let Some(cached_list) = cached.lists.get(list_id) { - if list.sort.is_empty() { - list.sort.clone_from(&cached_list.sort); - }; - if 
list.room_details.required_state.is_empty() { - list.room_details - .required_state - .clone_from(&cached_list.room_details.required_state); - }; - list.room_details.timeline_limit = list - .room_details - .timeline_limit - .or(cached_list.room_details.timeline_limit); - list.include_old_rooms = list - .include_old_rooms - .clone() - .or_else(|| cached_list.include_old_rooms.clone()); - match (&mut list.filters, cached_list.filters.clone()) { - (Some(list_filters), Some(cached_filters)) => { - list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); - if list_filters.spaces.is_empty() { - list_filters.spaces = cached_filters.spaces; - } - list_filters.is_encrypted = list_filters.is_encrypted.or(cached_filters.is_encrypted); - list_filters.is_invite = list_filters.is_invite.or(cached_filters.is_invite); - if list_filters.room_types.is_empty() { - list_filters.room_types = cached_filters.room_types; - } - if list_filters.not_room_types.is_empty() { - list_filters.not_room_types = cached_filters.not_room_types; - } - list_filters.room_name_like = list_filters - .room_name_like - .clone() - .or(cached_filters.room_name_like); - if list_filters.tags.is_empty() { - list_filters.tags = cached_filters.tags; - } - if list_filters.not_tags.is_empty() { - list_filters.not_tags = cached_filters.not_tags; - } - }, - (_, Some(cached_filters)) => list.filters = Some(cached_filters), - (Some(list_filters), _) => list.filters = Some(list_filters.clone()), - (..) 
=> {}, - } - if list.bump_event_types.is_empty() { - list.bump_event_types - .clone_from(&cached_list.bump_event_types); - }; - } - cached.lists.insert(list_id.clone(), list.clone()); - } - - cached - .subscriptions - .extend(request.room_subscriptions.clone()); - request - .room_subscriptions - .extend(cached.subscriptions.clone()); - - request.extensions.e2ee.enabled = request - .extensions - .e2ee - .enabled - .or(cached.extensions.e2ee.enabled); - - request.extensions.to_device.enabled = request - .extensions - .to_device - .enabled - .or(cached.extensions.to_device.enabled); - - request.extensions.account_data.enabled = request - .extensions - .account_data - .enabled - .or(cached.extensions.account_data.enabled); - request.extensions.account_data.lists = request - .extensions - .account_data - .lists - .clone() - .or_else(|| cached.extensions.account_data.lists.clone()); - request.extensions.account_data.rooms = request - .extensions - .account_data - .rooms - .clone() - .or_else(|| cached.extensions.account_data.rooms.clone()); - - cached.extensions = request.extensions.clone(); - - cached.known_rooms.clone() - } - - pub fn update_sync_subscriptions( - &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, - subscriptions: BTreeMap, - ) { - let mut cache = self.connections.lock().unwrap(); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - cached.subscriptions = subscriptions; - } - - pub fn update_sync_known_rooms( - &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, list_id: String, - new_cached_rooms: BTreeSet, globalsince: u64, - ) { - let mut cache = self.connections.lock().unwrap(); - let cached = Arc::clone( - cache 
- .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - for (roomid, lastsince) in cached - .known_rooms - .entry(list_id.clone()) - .or_default() - .iter_mut() - { - if !new_cached_rooms.contains(roomid) { - *lastsince = 0; - } - } - let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); - } - } - - /// Check if account is deactivated - pub fn is_deactivated(&self, user_id: &UserId) -> Result { self.db.is_deactivated(user_id) } - /// Check if a user is an admin - pub fn is_admin(&self, user_id: &UserId) -> Result { - if let Some(admin_room_id) = self.services.admin.get_admin_room()? { - self.services.state_cache.is_joined(user_id, &admin_room_id) - } else { - Ok(false) - } - } + #[inline] + pub async fn is_admin(&self, user_id: &UserId) -> bool { self.services.admin.user_is_admin(user_id).await } /// Create a new user account on this homeserver. #[inline] pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.db.set_password(user_id, password)?; - Ok(()) - } - - /// Returns the number of users registered on this server. - #[inline] - pub fn count(&self) -> Result { self.db.count() } - - /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result> { - self.db.find_from_token(token) - } - - /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator> + '_ { self.db.iter() } - - /// Returns a list of local users as list of usernames. - /// - /// A user account is considered `local` if the length of it's password is - /// greater then zero. 
- pub fn list_local_users(&self) -> Result> { self.db.list_local_users() } - - /// Returns the password hash for the given user. - pub fn password_hash(&self, user_id: &UserId) -> Result> { self.db.password_hash(user_id) } - - /// Hash and set the user's password to the Argon2 hash - #[inline] - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.db.set_password(user_id, password) - } - - /// Returns the displayname of a user on this homeserver. - pub fn displayname(&self, user_id: &UserId) -> Result> { self.db.displayname(user_id) } - - /// Sets a new displayname or removes it if displayname is None. You still - /// need to nofify all rooms of this change. - pub async fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { - self.db.set_displayname(user_id, displayname) - } - - /// Get the avatar_url of a user. - pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.db.avatar_url(user_id) } - - /// Sets a new avatar_url or removes it if avatar_url is None. - pub async fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { - self.db.set_avatar_url(user_id, avatar_url) - } - - /// Get the blurhash of a user. 
- pub fn blurhash(&self, user_id: &UserId) -> Result> { self.db.blurhash(user_id) } - - pub fn timezone(&self, user_id: &UserId) -> Result> { self.db.timezone(user_id) } - - /// Gets a specific user profile key - pub fn profile_key(&self, user_id: &UserId, profile_key: &str) -> Result> { - self.db.profile_key(user_id, profile_key) - } - - /// Gets all the user's profile keys and values in an iterator - pub fn all_profile_keys<'a>( - &'a self, user_id: &UserId, - ) -> Box> + 'a + Send> { - self.db.all_profile_keys(user_id) - } - - /// Sets a new profile key value, removes the key if value is None - pub fn set_profile_key( - &self, user_id: &UserId, profile_key: &str, profile_key_value: Option, - ) -> Result<()> { - self.db - .set_profile_key(user_id, profile_key, profile_key_value) - } - - /// Sets a new tz or removes it if tz is None. - pub async fn set_timezone(&self, user_id: &UserId, tz: Option) -> Result<()> { - self.db.set_timezone(user_id, tz) - } - - /// Sets a new blurhash or removes it if blurhash is None. - pub async fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { - self.db.set_blurhash(user_id, blurhash) - } - - /// Adds a new device to a user. - pub fn create_device( - &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, - client_ip: Option, - ) -> Result<()> { - self.db - .create_device(user_id, device_id, token, initial_device_display_name, client_ip) - } - - /// Removes a device from a user. - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - self.db.remove_device(user_id, device_id) - } - - /// Returns an iterator over all device ids of this user. - pub fn all_device_ids<'a>(&'a self, user_id: &UserId) -> impl Iterator> + 'a { - self.db.all_device_ids(user_id) - } - - /// Replaces the access token of one device. 
- #[inline] - pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - self.db.set_token(user_id, device_id, token) - } - - pub fn add_one_time_key( - &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, - one_time_key_value: &Raw, - ) -> Result<()> { - self.db - .add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) - } - - // TODO: use this ? - #[allow(dead_code)] - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self.db.last_one_time_keys_update(user_id) - } - - pub fn take_one_time_key( - &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result)>> { - self.db.take_one_time_key(user_id, device_id, key_algorithm) - } - - pub fn count_one_time_keys( - &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { - self.db.count_one_time_keys(user_id, device_id) - } - - pub fn add_device_keys(&self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw) -> Result<()> { - self.db.add_device_keys(user_id, device_id, device_keys) - } - - pub fn add_cross_signing_keys( - &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, - user_signing_key: &Option>, notify: bool, - ) -> Result<()> { - self.db - .add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key, notify) - } - - pub fn sign_key( - &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - ) -> Result<()> { - self.db.sign_key(target_id, key_id, signature, sender_id) - } - - pub fn keys_changed<'a>( - &'a self, user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator> + 'a { - self.db.keys_changed(user_or_room_id, from, to) - } - - #[inline] - pub fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { self.db.mark_device_key_update(user_id) } - - pub fn get_device_keys(&self, user_id: &UserId, device_id: &DeviceId) -> Result>> { - self.db.get_device_keys(user_id, 
device_id) - } - - #[inline] - pub fn parse_master_key( - &self, user_id: &UserId, master_key: &Raw, - ) -> Result<(Vec, CrossSigningKey)> { - Data::parse_master_key(user_id, master_key) - } - - #[inline] - pub fn get_key( - &self, key: &[u8], sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.db - .get_key(key, sender_user, user_id, allowed_signatures) - } - - pub fn get_master_key( - &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.db - .get_master_key(sender_user, user_id, allowed_signatures) - } - - pub fn get_self_signing_key( - &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.db - .get_self_signing_key(sender_user, user_id, allowed_signatures) - } - - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { - self.db.get_user_signing_key(user_id) - } - - pub fn add_to_device_event( - &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, - content: serde_json::Value, - ) -> Result<()> { - self.db - .add_to_device_event(sender, target_user_id, target_device_id, event_type, content) - } - - pub fn get_to_device_events(&self, user_id: &UserId, device_id: &DeviceId) -> Result>> { - self.db.get_to_device_events(user_id, device_id) - } - - pub fn remove_to_device_events(&self, user_id: &UserId, device_id: &DeviceId, until: u64) -> Result<()> { - self.db.remove_to_device_events(user_id, device_id, until) - } - - pub fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: &Device) -> Result<()> { - self.db.update_device_metadata(user_id, device_id, device) - } - - /// Get device metadata. 
- pub fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) -> Result> { - self.db.get_device_metadata(user_id, device_id) - } - - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.db.get_devicelist_version(user_id) - } - - pub fn all_devices_metadata<'a>(&'a self, user_id: &UserId) -> impl Iterator> + 'a { - self.db.all_devices_metadata(user_id) + self.set_password(user_id, password) } /// Deactivate account - pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { + pub async fn deactivate_account(&self, user_id: &UserId) -> Result<()> { // Remove all associated devices - for device_id in self.all_device_ids(user_id) { - self.remove_device(user_id, &device_id?)?; - } + self.all_device_ids(user_id) + .for_each(|device_id| self.remove_device(user_id, device_id)) + .await; // Set the password to "" to indicate a deactivated account. Hashes will never // result in an empty string, so the user will not be able to log in again. // Systems like changing the password without logging in should check if the // account is deactivated. - self.db.set_password(user_id, None)?; + self.set_password(user_id, None)?; // TODO: Unhook 3PID Ok(()) } - /// Creates a new sync filter. Returns the filter id. - pub fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { - self.db.create_filter(user_id, filter) + /// Check if a user has an account on this homeserver. 
+ #[inline] + pub async fn exists(&self, user_id: &UserId) -> bool { self.db.userid_password.qry(user_id).await.is_ok() } + + /// Check if account is deactivated + pub async fn is_deactivated(&self, user_id: &UserId) -> Result { + self.db + .userid_password + .qry(user_id) + .map_ok(|val| val.is_empty()) + .map_err(|_| err!(Request(NotFound("User does not exist.")))) + .await } - pub fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result> { - self.db.get_filter(user_id, filter_id) + /// Check if account is active, infallible + pub async fn is_active(&self, user_id: &UserId) -> bool { !self.is_deactivated(user_id).await.unwrap_or(true) } + + /// Check if account is active, infallible + pub async fn is_active_local(&self, user_id: &UserId) -> bool { + self.services.globals.user_is_local(user_id) && self.is_active(user_id).await + } + + /// Returns the number of users registered on this server. + #[inline] + pub async fn count(&self) -> usize { self.db.userid_password.count().await } + + /// Find out which user an access token belongs to. + pub async fn find_from_token(&self, token: &str) -> Result<(OwnedUserId, OwnedDeviceId)> { + self.db.token_userdeviceid.qry(token).await.deserialized() + } + + /// Returns an iterator over all users on this homeserver (offered for + /// compatibility) + #[allow(clippy::iter_without_into_iter, clippy::iter_not_returning_iterator)] + pub fn iter(&self) -> impl Stream + Send + '_ { self.stream().map(ToOwned::to_owned) } + + /// Returns an iterator over all users on this homeserver. + pub fn stream(&self) -> impl Stream + Send { self.db.userid_password.keys().ignore_err() } + + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password is + /// greater then zero. 
+ pub fn list_local_users(&self) -> impl Stream + Send + '_ { + self.db + .userid_password + .stream() + .ignore_err() + .ready_filter_map(|(u, p): (&UserId, &[u8])| (!p.is_empty()).then_some(u)) + } + + /// Returns the password hash for the given user. + pub async fn password_hash(&self, user_id: &UserId) -> Result { + self.db.userid_password.qry(user_id).await.deserialized() + } + + /// Hash and set the user's password to the Argon2 hash + pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + if let Some(password) = password { + if let Ok(hash) = utils::hash::password(password) { + self.db + .userid_password + .insert(user_id.as_bytes(), hash.as_bytes()); + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Password does not meet the requirements.", + )) + } + } else { + self.db.userid_password.insert(user_id.as_bytes(), b""); + Ok(()) + } + } + + /// Returns the displayname of a user on this homeserver. + pub async fn displayname(&self, user_id: &UserId) -> Result { + self.db.userid_displayname.qry(user_id).await.deserialized() + } + + /// Sets a new displayname or removes it if displayname is None. You still + /// need to nofify all rooms of this change. + pub fn set_displayname(&self, user_id: &UserId, displayname: Option) { + if let Some(displayname) = displayname { + self.db + .userid_displayname + .insert(user_id.as_bytes(), displayname.as_bytes()); + } else { + self.db.userid_displayname.remove(user_id.as_bytes()); + } + } + + /// Get the `avatar_url` of a user. + pub async fn avatar_url(&self, user_id: &UserId) -> Result { + self.db.userid_avatarurl.qry(user_id).await.deserialized() + } + + /// Sets a new avatar_url or removes it if avatar_url is None. 
+ pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) { + if let Some(avatar_url) = avatar_url { + self.db + .userid_avatarurl + .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes()); + } else { + self.db.userid_avatarurl.remove(user_id.as_bytes()); + } + } + + /// Get the blurhash of a user. + pub async fn blurhash(&self, user_id: &UserId) -> Result { + self.db.userid_blurhash.qry(user_id).await.deserialized() + } + + /// Sets a new avatar_url or removes it if avatar_url is None. + pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) { + if let Some(blurhash) = blurhash { + self.db + .userid_blurhash + .insert(user_id.as_bytes(), blurhash.as_bytes()); + } else { + self.db.userid_blurhash.remove(user_id.as_bytes()); + } + } + + /// Adds a new device to a user. + pub async fn create_device( + &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, + client_ip: Option, + ) -> Result<()> { + // This method should never be called for nonexistent users. We shouldn't assert + // though... + if !self.exists(user_id).await { + warn!("Called create_device for non-existent user {} in database", user_id); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "User does not exist.")); + } + + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + increment(&self.db.userid_devicelistversion, user_id.as_bytes()); + + self.db.userdeviceid_metadata.insert( + &userdeviceid, + &serde_json::to_vec(&Device { + device_id: device_id.into(), + display_name: initial_device_display_name, + last_seen_ip: client_ip, + last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), + }) + .expect("Device::to_string never fails."), + ); + + self.set_token(user_id, device_id, token).await?; + + Ok(()) + } + + /// Removes a device from a user. 
+ pub async fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) { + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + // Remove tokens + if let Ok(old_token) = self.db.userdeviceid_token.qry(&userdeviceid).await { + self.db.userdeviceid_token.remove(&userdeviceid); + self.db.token_userdeviceid.remove(&old_token); + } + + // Remove todevice events + let prefix = (user_id, device_id, Interfix); + self.db + .todeviceid_events + .keys_raw_prefix(&prefix) + .ignore_err() + .ready_for_each(|key| self.db.todeviceid_events.remove(key)) + .await; + + // TODO: Remove onetimekeys + + increment(&self.db.userid_devicelistversion, user_id.as_bytes()); + + self.db.userdeviceid_metadata.remove(&userdeviceid); + } + + /// Returns an iterator over all device ids of this user. + pub fn all_device_ids<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + let prefix = (user_id, Interfix); + self.db + .userdeviceid_metadata + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, device_id): (Ignore, &DeviceId)| device_id) + } + + /// Replaces the access token of one device. + pub async fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + let key = (user_id, device_id); + // should not be None, but we shouldn't assert either lol... + if self.db.userdeviceid_metadata.qry(&key).await.is_err() { + return Err!(Database(error!( + ?user_id, + ?device_id, + "User does not exist or device has no metadata." 
+ ))); + } + + // Remove old token + if let Ok(old_token) = self.db.userdeviceid_token.qry(&key).await { + self.db.token_userdeviceid.remove(&old_token); + // It will be removed from userdeviceid_token by the insert later + } + + // Assign token to user device combination + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + self.db + .userdeviceid_token + .insert(&userdeviceid, token.as_bytes()); + self.db + .token_userdeviceid + .insert(token.as_bytes(), &userdeviceid); + + Ok(()) + } + + pub async fn add_one_time_key( + &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, + one_time_key_value: &Raw, + ) -> Result<()> { + // All devices have metadata + // Only existing devices should be able to call this, but we shouldn't assert + // either... + let key = (user_id, device_id); + if self.db.userdeviceid_metadata.qry(&key).await.is_err() { + return Err!(Database(error!( + ?user_id, + ?device_id, + "User does not exist or device has no metadata." 
+ ))); + } + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(device_id.as_bytes()); + key.push(0xFF); + // TODO: Use DeviceKeyId::to_string when it's available (and update everything, + // because there are no wrapping quotation marks anymore) + key.extend_from_slice( + serde_json::to_string(one_time_key_key) + .expect("DeviceKeyId::to_string always works") + .as_bytes(), + ); + + self.db.onetimekeyid_onetimekeys.insert( + &key, + &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), + ); + + self.db + .userid_lastonetimekeyupdate + .insert(user_id.as_bytes(), &self.services.globals.next_count()?.to_be_bytes()); + + Ok(()) + } + + pub async fn last_one_time_keys_update(&self, user_id: &UserId) -> u64 { + self.db + .userid_lastonetimekeyupdate + .qry(user_id) + .await + .deserialized() + .unwrap_or(0) + } + + pub async fn take_one_time_key( + &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, + ) -> Result<(OwnedDeviceKeyId, Raw)> { + self.db + .userid_lastonetimekeyupdate + .insert(user_id.as_bytes(), &self.services.globals.next_count()?.to_be_bytes()); + + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xFF); + prefix.push(b'"'); // Annoying quotation mark + prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); + prefix.push(b':'); + + let one_time_key = self + .db + .onetimekeyid_onetimekeys + .raw_stream_prefix(&prefix) + .ignore_err() + .map(|(key, val)| { + self.db.onetimekeyid_onetimekeys.remove(key); + + let key = key + .rsplit(|&b| b == 0xFF) + .next() + .ok_or_else(|| err!(Database("OneTimeKeyId in db is invalid."))) + .unwrap(); + + let key = serde_json::from_slice(key) + .map_err(|e| err!(Database("OneTimeKeyId in db is invalid. {e}"))) + .unwrap(); + + let val = serde_json::from_slice(val) + .map_err(|e| err!(Database("OneTimeKeys in db are invalid. 
{e}"))) + .unwrap(); + + (key, val) + }) + .next() + .await; + + one_time_key.ok_or_else(|| err!(Request(NotFound("No one-time-key found")))) + } + + pub async fn count_one_time_keys( + &self, user_id: &UserId, device_id: &DeviceId, + ) -> BTreeMap { + type KeyVal<'a> = ((Ignore, Ignore, &'a Unquoted), Ignore); + + let mut algorithm_counts = BTreeMap::::new(); + let query = (user_id, device_id); + self.db + .onetimekeyid_onetimekeys + .stream_prefix(&query) + .ignore_err() + .ready_for_each(|((Ignore, Ignore, device_key_id), Ignore): KeyVal<'_>| { + let device_key_id: &DeviceKeyId = device_key_id + .as_str() + .try_into() + .expect("Invalid DeviceKeyID in database"); + + let count: &mut UInt = algorithm_counts + .entry(device_key_id.algorithm()) + .or_default(); + + *count = count.saturating_add(1_u32.into()); + }) + .await; + + algorithm_counts + } + + pub async fn add_device_keys(&self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw) { + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + self.db.keyid_key.insert( + &userdeviceid, + &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), + ); + + self.mark_device_key_update(user_id).await; + } + + pub async fn add_cross_signing_keys( + &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, + user_signing_key: &Option>, notify: bool, + ) -> Result<()> { + // TODO: Check signatures + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + + let (master_key_key, _) = parse_master_key(user_id, master_key)?; + + self.db + .keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes()); + + self.db + .userid_masterkeyid + .insert(user_id.as_bytes(), &master_key_key); + + // Self-signing key + if let Some(self_signing_key) = self_signing_key { + let mut self_signing_key_ids = self_signing_key + .deserialize() + .map_err(|e| err!(Request(InvalidParam("Invalid self 
signing key: {e:?}"))))? + .keys + .into_values(); + + let self_signing_key_id = self_signing_key_ids + .next() + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Self signing key contained no key."))?; + + if self_signing_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Self signing key contained more than one key.", + )); + } + + let mut self_signing_key_key = prefix.clone(); + self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); + + self.db + .keyid_key + .insert(&self_signing_key_key, self_signing_key.json().get().as_bytes()); + + self.db + .userid_selfsigningkeyid + .insert(user_id.as_bytes(), &self_signing_key_key); + } + + // User-signing key + if let Some(user_signing_key) = user_signing_key { + let mut user_signing_key_ids = user_signing_key + .deserialize() + .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? + .keys + .into_values(); + + let user_signing_key_id = user_signing_key_ids + .next() + .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; + + if user_signing_key_ids.next().is_some() { + return Err!(Request(InvalidParam("User signing key contained more than one key."))); + } + + let mut user_signing_key_key = prefix; + user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); + + self.db + .keyid_key + .insert(&user_signing_key_key, user_signing_key.json().get().as_bytes()); + + self.db + .userid_usersigningkeyid + .insert(user_id.as_bytes(), &user_signing_key_key); + } + + if notify { + self.mark_device_key_update(user_id).await; + } + + Ok(()) + } + + pub async fn sign_key( + &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, + ) -> Result<()> { + let key = (target_id, key_id); + + let mut cross_signing_key: serde_json::Value = self + .db + .keyid_key + .qry(&key) + .await + .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key."))))? 
+ .deserialized_json() + .map_err(|e| err!(Database("key in keyid_key is invalid. {e:?}")))?; + + let signatures = cross_signing_key + .get_mut("signatures") + .ok_or_else(|| err!(Database("key in keyid_key has no signatures field.")))? + .as_object_mut() + .ok_or_else(|| err!(Database("key in keyid_key has invalid signatures field.")))? + .entry(sender_id.to_string()) + .or_insert_with(|| serde_json::Map::new().into()); + + signatures + .as_object_mut() + .ok_or_else(|| err!(Database("signatures in keyid_key for a user is invalid.")))? + .insert(signature.0, signature.1.into()); + + let mut key = target_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(key_id.as_bytes()); + self.db.keyid_key.insert( + &key, + &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), + ); + + self.mark_device_key_update(target_id).await; + + Ok(()) + } + + pub fn keys_changed<'a>( + &'a self, user_or_room_id: &'a str, from: u64, to: Option, + ) -> impl Stream + Send + 'a { + type KeyVal<'a> = ((&'a str, u64), &'a UserId); + + let to = to.unwrap_or(u64::MAX); + let start = (user_or_room_id, from.saturating_add(1)); + self.db + .keychangeid_userid + .stream_from(&start) + .ignore_err() + .ready_take_while(move |((prefix, count), _): &KeyVal<'_>| *prefix == user_or_room_id && *count <= to) + .map(|((..), user_id): KeyVal<'_>| user_id) + } + + pub async fn mark_device_key_update(&self, user_id: &UserId) { + let count = self.services.globals.next_count().unwrap().to_be_bytes(); + let rooms_joined = self.services.state_cache.rooms_joined(user_id); + pin_mut!(rooms_joined); + while let Some(room_id) = rooms_joined.next().await { + // Don't send key updates to unencrypted rooms + if self + .services + .state_accessor + .room_state_get(room_id, &StateEventType::RoomEncryption, "") + .await + .is_err() + { + continue; + } + + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(&count); + + 
self.db.keychangeid_userid.insert(&key, user_id.as_bytes()); + } + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(&count); + self.db.keychangeid_userid.insert(&key, user_id.as_bytes()); + } + + pub async fn get_device_keys<'a>(&'a self, user_id: &'a UserId, device_id: &DeviceId) -> Result> { + let key_id = (user_id, device_id); + self.db.keyid_key.qry(&key_id).await.deserialized_json() + } + + pub async fn get_key( + &self, key_id: &[u8], sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &F, + ) -> Result> + where + F: Fn(&UserId) -> bool + Send + Sync, + { + let key = self + .db + .keyid_key + .qry(key_id) + .await + .deserialized_json::()?; + + let cleaned = clean_signatures(key, sender_user, user_id, allowed_signatures)?; + let raw_value = serde_json::value::to_raw_value(&cleaned)?; + Ok(Raw::from_json(raw_value)) + } + + pub async fn get_master_key( + &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &F, + ) -> Result> + where + F: Fn(&UserId) -> bool + Send + Sync, + { + let key_id = self.db.userid_masterkeyid.qry(user_id).await?; + + self.get_key(&key_id, sender_user, user_id, allowed_signatures) + .await + } + + pub async fn get_self_signing_key( + &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &F, + ) -> Result> + where + F: Fn(&UserId) -> bool + Send + Sync, + { + let key_id = self.db.userid_selfsigningkeyid.qry(user_id).await?; + + self.get_key(&key_id, sender_user, user_id, allowed_signatures) + .await + } + + pub async fn get_user_signing_key(&self, user_id: &UserId) -> Result> { + let key_id = self.db.userid_usersigningkeyid.qry(user_id).await?; + + self.db.keyid_key.qry(&*key_id).await.deserialized_json() + } + + pub async fn add_to_device_event( + &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, + content: serde_json::Value, + ) { + let mut key = target_user_id.as_bytes().to_vec(); + 
key.push(0xFF); + key.extend_from_slice(target_device_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(&self.services.globals.next_count().unwrap().to_be_bytes()); + + let mut json = serde_json::Map::new(); + json.insert("type".to_owned(), event_type.to_owned().into()); + json.insert("sender".to_owned(), sender.to_string().into()); + json.insert("content".to_owned(), content); + + let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); + + self.db.todeviceid_events.insert(&key, &value); + } + + pub fn get_to_device_events<'a>( + &'a self, user_id: &'a UserId, device_id: &'a DeviceId, + ) -> impl Stream> + Send + 'a { + let prefix = (user_id, device_id, Interfix); + self.db + .todeviceid_events + .stream_raw_prefix(&prefix) + .ready_and_then(|(_, val)| serde_json::from_slice(val).map_err(Into::into)) + .ignore_err() + } + + pub async fn remove_to_device_events(&self, user_id: &UserId, device_id: &DeviceId, until: u64) { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xFF); + + let mut last = prefix.clone(); + last.extend_from_slice(&until.to_be_bytes()); + + self.db + .todeviceid_events + .rev_raw_keys_from(&last) // this includes last + .ignore_err() + .ready_take_while(move |key| key.starts_with(&prefix)) + .map(|key| { + let len = key.len(); + let start = len.saturating_sub(size_of::()); + let count = utils::u64_from_u8(&key[start..len]); + (key, count) + }) + .ready_take_while(move |(_, count)| *count <= until) + .ready_for_each(|(key, _)| self.db.todeviceid_events.remove(&key)) + .boxed() + .await; + } + + pub async fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: &Device) -> Result<()> { + // Only existing devices should be able to call this, but we shouldn't assert + // either... 
+ let key = (user_id, device_id); + if self.db.userdeviceid_metadata.qry(&key).await.is_err() { + return Err!(Database(error!( + ?user_id, + ?device_id, + "Called update_device_metadata for a non-existent user and/or device" + ))); + } + + increment(&self.db.userid_devicelistversion, user_id.as_bytes()); + + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + self.db.userdeviceid_metadata.insert( + &userdeviceid, + &serde_json::to_vec(device).expect("Device::to_string always works"), + ); + + Ok(()) + } + + /// Get device metadata. + pub async fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) -> Result { + self.db + .userdeviceid_metadata + .qry(&(user_id, device_id)) + .await + .deserialized_json() + } + + pub async fn get_devicelist_version(&self, user_id: &UserId) -> Result { + self.db + .userid_devicelistversion + .qry(user_id) + .await + .deserialized() + } + + pub fn all_devices_metadata<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + self.db + .userdeviceid_metadata + .stream_raw_prefix(&(user_id, Interfix)) + .ready_and_then(|(_, val)| serde_json::from_slice::(val).map_err(Into::into)) + .ignore_err() + } + + /// Creates a new sync filter. Returns the filter id. 
+ pub fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> String { + let filter_id = utils::random_string(4); + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(filter_id.as_bytes()); + + self.db + .userfilterid_filter + .insert(&key, &serde_json::to_vec(&filter).expect("filter is valid json")); + + filter_id + } + + pub async fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result { + self.db + .userfilterid_filter + .qry(&(user_id, filter_id)) + .await + .deserialized_json() } /// Creates an OpenID token, which can be used to prove that a user has /// access to an account (primarily for integrations) pub fn create_openid_token(&self, user_id: &UserId, token: &str) -> Result { - self.db.create_openid_token(user_id, token) + use std::num::Saturating as Sat; + + let expires_in = self.services.server.config.openid_token_ttl; + let expires_at = Sat(utils::millis_since_unix_epoch()) + Sat(expires_in) * Sat(1000); + + let mut value = expires_at.0.to_be_bytes().to_vec(); + value.extend_from_slice(user_id.as_bytes()); + + self.db + .openidtoken_expiresatuserid + .insert(token.as_bytes(), value.as_slice()); + + Ok(expires_in) } /// Find out which user an OpenID access token belongs to. - pub fn find_from_openid_token(&self, token: &str) -> Result { self.db.find_from_openid_token(token) } + pub async fn find_from_openid_token(&self, token: &str) -> Result { + let Ok(value) = self.db.openidtoken_expiresatuserid.qry(token).await else { + return Err!(Request(Unauthorized("OpenID token is unrecognised"))); + }; + + let (expires_at_bytes, user_bytes) = value.split_at(0_u64.to_be_bytes().len()); + let expires_at = u64::from_be_bytes( + expires_at_bytes + .try_into() + .map_err(|e| err!(Database("expires_at in openid_userid is invalid u64. 
{e}")))?, + ); + + if expires_at < utils::millis_since_unix_epoch() { + debug_warn!("OpenID token is expired, removing"); + self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + + return Err!(Request(Unauthorized("OpenID token is expired"))); + } + + let user_string = utils::string_from_bytes(user_bytes) + .map_err(|e| err!(Database("User ID in openid_userid is invalid unicode. {e}")))?; + + UserId::parse(user_string).map_err(|e| err!(Database("User ID in openid_userid is invalid. {e}"))) + } + + /// Gets a specific user profile key + pub async fn profile_key(&self, user_id: &UserId, profile_key: &str) -> Result { + let key = (user_id, profile_key); + self.db + .useridprofilekey_value + .qry(&key) + .await + .deserialized() + } + + /// Gets all the user's profile keys and values in an iterator + pub fn all_profile_keys<'a>( + &'a self, user_id: &'a UserId, + ) -> impl Stream + 'a + Send { + type KeyVal = ((Ignore, String), serde_json::Value); + + let prefix = (user_id, Interfix); + self.db + .useridprofilekey_value + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, key), val): KeyVal| (key, val)) + } + + /// Sets a new profile key value, removes the key if value is None + pub fn set_profile_key(&self, user_id: &UserId, profile_key: &str, profile_key_value: Option) { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(profile_key.as_bytes()); + + // TODO: insert to the stable MSC4175 key when it's stable + if let Some(value) = profile_key_value { + let value = serde_json::to_vec(&value).unwrap(); + + self.db.useridprofilekey_value.insert(&key, &value); + } else { + self.db.useridprofilekey_value.remove(&key); + } + } + + /// Get the timezone of a user. + pub async fn timezone(&self, user_id: &UserId) -> Result { + // TODO: transparently migrate unstable key usage to the stable key once MSC4133 + // and MSC4175 are stable, likely a remove/insert in this block. 
+ + // first check the unstable prefix then check the stable prefix + let unstable_key = (user_id, "us.cloke.msc4175.tz"); + let stable_key = (user_id, "m.tz"); + self.db + .useridprofilekey_value + .qry(&unstable_key) + .or_else(|_| self.db.useridprofilekey_value.qry(&stable_key)) + .await + .deserialized() + } + + /// Sets a new timezone or removes it if timezone is None. + pub fn set_timezone(&self, user_id: &UserId, timezone: Option) { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(b"us.cloke.msc4175.tz"); + + // TODO: insert to the stable MSC4175 key when it's stable + if let Some(timezone) = timezone { + self.db + .useridprofilekey_value + .insert(&key, timezone.as_bytes()); + } else { + self.db.useridprofilekey_value.remove(&key); + } + } +} + +pub fn parse_master_key(user_id: &UserId, master_key: &Raw) -> Result<(Vec, CrossSigningKey)> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + + let master_key = master_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))?; + let mut master_key_ids = master_key.keys.values(); + let master_key_id = master_key_ids + .next() + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Master key contained no key."))?; + if master_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Master key contained more than one key.", + )); + } + let mut master_key_key = prefix.clone(); + master_key_key.extend_from_slice(master_key_id.as_bytes()); + Ok((master_key_key, master_key)) } /// Ensure that a user only sees signatures from themselves and the target user -pub fn clean_signatures bool>( - cross_signing_key: &mut serde_json::Value, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: F, -) -> Result<(), Error> { +fn clean_signatures( + mut cross_signing_key: serde_json::Value, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &F, +) -> Result +where + F: 
Fn(&UserId) -> bool + Send + Sync, +{ if let Some(signatures) = cross_signing_key .get_mut("signatures") .and_then(|v| v.as_object_mut()) @@ -563,5 +995,12 @@ pub fn clean_signatures bool>( } } - Ok(()) + Ok(cross_signing_key) +} + +//TODO: this is an ABA +fn increment(db: &Arc, key: &[u8]) { + let old = db.get(key); + let new = utils::increment(old.ok().as_deref()); + db.insert(key, &new); } From 4776fe66c4a9d5cbb0153e8ff23009d21ed5010e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 28 Sep 2024 15:14:48 +0000 Subject: [PATCH 0013/1248] handle serde_json for deserialized() Signed-off-by: Jason Volk --- src/database/de.rs | 94 ++++++++++++++++++++------- src/database/deserialized.rs | 14 ---- src/database/handle.rs | 28 -------- src/service/account_data/mod.rs | 2 +- src/service/appservice/data.rs | 2 +- src/service/globals/data.rs | 5 +- src/service/key_backups/mod.rs | 12 +--- src/service/pusher/mod.rs | 2 +- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/state_cache/data.rs | 10 +-- src/service/rooms/timeline/data.rs | 15 ++--- src/service/uiaa/mod.rs | 2 +- src/service/users/mod.rs | 12 ++-- 13 files changed, 95 insertions(+), 107 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index 8ce25aa3..a5d2c127 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -58,10 +58,15 @@ impl<'de> Deserializer<'de> { } #[inline] - fn record_trail(&mut self) -> &'de [u8] { - let record = &self.buf[self.pos..]; - self.inc_pos(record.len()); - record + fn record_next_peek_byte(&self) -> Option { + let started = self.pos != 0; + let buf = &self.buf[self.pos..]; + debug_assert!( + !started || buf[0] == Self::SEP, + "Missing expected record separator at current position" + ); + + buf.get::(started.into()).copied() } #[inline] @@ -75,6 +80,13 @@ impl<'de> Deserializer<'de> { self.inc_pos(started.into()); } + #[inline] + fn record_trail(&mut self) -> &'de [u8] { + let record = &self.buf[self.pos..]; + self.inc_pos(record.len()); + record + } + 
#[inline] fn inc_pos(&mut self, n: usize) { self.pos = self.pos.saturating_add(n); @@ -85,13 +97,6 @@ impl<'de> Deserializer<'de> { impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { type Error = Error; - fn deserialize_map(self, _visitor: V) -> Result - where - V: Visitor<'de>, - { - unimplemented!("deserialize Map not implemented") - } - fn deserialize_seq(self, visitor: V) -> Result where V: Visitor<'de>, @@ -113,13 +118,23 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_seq(self) } - fn deserialize_struct( - self, _name: &'static str, _fields: &'static [&'static str], _visitor: V, - ) -> Result + fn deserialize_map(self, visitor: V) -> Result where V: Visitor<'de>, { - unimplemented!("deserialize Struct not implemented") + let input = self.record_next(); + let mut d = serde_json::Deserializer::from_slice(input); + d.deserialize_map(visitor).map_err(Into::into) + } + + fn deserialize_struct(self, name: &'static str, fields: &'static [&'static str], visitor: V) -> Result + where + V: Visitor<'de>, + { + let input = self.record_next(); + let mut d = serde_json::Deserializer::from_slice(input); + d.deserialize_struct(name, fields, visitor) + .map_err(Into::into) } fn deserialize_unit_struct(self, name: &'static str, visitor: V) -> Result @@ -134,11 +149,14 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_unit() } - fn deserialize_newtype_struct(self, _name: &'static str, _visitor: V) -> Result + fn deserialize_newtype_struct(self, name: &'static str, visitor: V) -> Result where V: Visitor<'de>, { - unimplemented!("deserialize Newtype Struct not implemented") + match name { + "$serde_json::private::RawValue" => visitor.visit_map(self), + _ => visitor.visit_newtype_struct(self), + } } fn deserialize_enum( @@ -228,19 +246,31 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn deserialize_unit>(self, _visitor: V) -> Result { - 
unimplemented!("deserialize Unit Struct not implemented") + unimplemented!("deserialize Unit not implemented") } - fn deserialize_identifier>(self, _visitor: V) -> Result { - unimplemented!("deserialize Identifier not implemented") + // this only used for $serde_json::private::RawValue at this time; see MapAccess + fn deserialize_identifier>(self, visitor: V) -> Result { + let input = "$serde_json::private::RawValue"; + visitor.visit_borrowed_str(input) } fn deserialize_ignored_any>(self, _visitor: V) -> Result { unimplemented!("deserialize Ignored Any not implemented") } - fn deserialize_any>(self, _visitor: V) -> Result { - unimplemented!("deserialize any not implemented") + fn deserialize_any>(self, visitor: V) -> Result { + debug_assert_eq!( + conduit::debug::type_name::(), + "serde_json::value::de::::deserialize::ValueVisitor", + "deserialize_any: type not expected" + ); + + match self.record_next_peek_byte() { + Some(b'{') => self.deserialize_map(visitor), + _ => self.deserialize_str(visitor), + } } } @@ -259,3 +289,23 @@ impl<'a, 'de: 'a> de::SeqAccess<'de> for &'a mut Deserializer<'de> { seed.deserialize(&mut **self).map(Some) } } + +// this only used for $serde_json::private::RawValue at this time. 
our db +// schema doesn't have its own map format; we use json for that anyway +impl<'a, 'de: 'a> de::MapAccess<'de> for &'a mut Deserializer<'de> { + type Error = Error; + + fn next_key_seed(&mut self, seed: K) -> Result> + where + K: DeserializeSeed<'de>, + { + seed.deserialize(&mut **self).map(Some) + } + + fn next_value_seed(&mut self, seed: V) -> Result + where + V: DeserializeSeed<'de>, + { + seed.deserialize(&mut **self) + } +} diff --git a/src/database/deserialized.rs b/src/database/deserialized.rs index 7da112d5..a59b2ce5 100644 --- a/src/database/deserialized.rs +++ b/src/database/deserialized.rs @@ -9,11 +9,6 @@ pub trait Deserialized { F: FnOnce(T) -> U, T: for<'de> Deserialize<'de>; - fn map_json(self, f: F) -> Result - where - F: FnOnce(T) -> U, - T: for<'de> Deserialize<'de>; - #[inline] fn deserialized(self) -> Result where @@ -22,13 +17,4 @@ pub trait Deserialized { { self.map_de(identity::) } - - #[inline] - fn deserialized_json(self) -> Result - where - T: for<'de> Deserialize<'de>, - Self: Sized, - { - self.map_json(identity::) - } } diff --git a/src/database/handle.rs b/src/database/handle.rs index 89d87137..0d4bd02e 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -48,15 +48,6 @@ impl AsRef for Handle<'_> { } impl Deserialized for Result> { - #[inline] - fn map_json(self, f: F) -> Result - where - F: FnOnce(T) -> U, - T: for<'de> Deserialize<'de>, - { - self?.map_json(f) - } - #[inline] fn map_de(self, f: F) -> Result where @@ -68,15 +59,6 @@ impl Deserialized for Result> { } impl<'a> Deserialized for Result<&'a Handle<'a>> { - #[inline] - fn map_json(self, f: F) -> Result - where - F: FnOnce(T) -> U, - T: for<'de> Deserialize<'de>, - { - self.and_then(|handle| handle.map_json(f)) - } - #[inline] fn map_de(self, f: F) -> Result where @@ -88,16 +70,6 @@ impl<'a> Deserialized for Result<&'a Handle<'a>> { } impl<'a> Deserialized for &'a Handle<'a> { - fn map_json(self, f: F) -> Result - where - F: FnOnce(T) -> U, - T: for<'de> 
Deserialize<'de>, - { - serde_json::from_slice::(self.as_ref()) - .map_err(Into::into) - .map(f) - } - fn map_de(self, f: F) -> Result where F: FnOnce(T) -> U, diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index b4eb143d..4f00cff1 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -108,7 +108,7 @@ pub async fn get( .qry(&key) .and_then(|roomuserdataid| self.db.roomuserdataid_accountdata.qry(&roomuserdataid)) .await - .deserialized_json() + .deserialized() } /// Returns all changes to the account data that happened after `since`. diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index d5fa5476..f31c5e63 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -40,7 +40,7 @@ impl Data { self.id_appserviceregistrations .qry(id) .await - .deserialized_json() + .deserialized() .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) } diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 3286e40c..76f97944 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -305,10 +305,7 @@ impl Data { } pub async fn signing_keys_for(&self, origin: &ServerName) -> Result { - self.server_signingkeys - .qry(origin) - .await - .deserialized_json() + self.server_signingkeys.qry(origin).await.deserialized() } pub async fn database_version(&self) -> u64 { self.global.qry("version").await.deserialized().unwrap_or(0) } diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 12712e79..decf32f7 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -166,11 +166,7 @@ pub async fn get_latest_backup(&self, user_id: &UserId) -> Result<(String, Raw Result> { let key = (user_id, version); - self.db - .backupid_algorithm - .qry(&key) - .await - .deserialized_json() + self.db.backupid_algorithm.qry(&key).await.deserialized() } #[implement(Service)] @@ -278,11 
+274,7 @@ pub async fn get_session( ) -> Result> { let key = (user_id, version, room_id, session_id); - self.db - .backupkeyid_backup - .qry(&key) - .await - .deserialized_json() + self.db.backupkeyid_backup.qry(&key).await.deserialized() } #[implement(Service)] diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 44ff1945..8d8b553f 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -90,7 +90,7 @@ impl Service { .senderkey_pusher .qry(&senderkey) .await - .deserialized_json() + .deserialized() } pub async fn get_pushers(&self, sender: &UserId) -> Vec { diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 277b5982..4c9225ae 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -33,7 +33,7 @@ pub async fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result Result { .eventid_outlierpdu .qry(event_id) .await - .deserialized_json() + .deserialized() } /// Append the PDU as an outlier. diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 38e504f6..f3ccaf10 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -156,10 +156,7 @@ impl Data { &self, user_id: &UserId, room_id: &RoomId, ) -> Result>> { let key = (user_id, room_id); - self.userroomid_invitestate - .qry(&key) - .await - .deserialized_json() + self.userroomid_invitestate.qry(&key).await.deserialized() } #[tracing::instrument(skip(self), level = "debug")] @@ -167,10 +164,7 @@ impl Data { &self, user_id: &UserId, room_id: &RoomId, ) -> Result>> { let key = (user_id, room_id); - self.userroomid_leftstate - .qry(&key) - .await - .deserialized_json() + self.userroomid_leftstate.qry(&key).await.deserialized() } /// Returns an iterator over all rooms a user left. 
diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index cd746be4..314dcb9f 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -90,17 +90,14 @@ impl Data { return Ok(pdu); } - self.eventid_outlierpdu - .qry(event_id) - .await - .deserialized_json() + self.eventid_outlierpdu.qry(event_id).await.deserialized() } /// Returns the json of a pdu. pub(super) async fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result { let pduid = self.get_pdu_id(event_id).await?; - self.pduid_pdu.qry(&pduid).await.deserialized_json() + self.pduid_pdu.qry(&pduid).await.deserialized() } /// Returns the pdu's id. @@ -113,7 +110,7 @@ impl Data { pub(super) async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { let pduid = self.get_pdu_id(event_id).await?; - self.pduid_pdu.qry(&pduid).await.deserialized_json() + self.pduid_pdu.qry(&pduid).await.deserialized() } /// Like get_non_outlier_pdu(), but without the expense of fetching and @@ -137,7 +134,7 @@ impl Data { self.eventid_outlierpdu .qry(event_id) .await - .deserialized_json() + .deserialized() .map(Arc::new) } @@ -162,12 +159,12 @@ impl Data { /// /// This does __NOT__ check the outliers `Tree`. pub(super) async fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result { - self.pduid_pdu.qry(pdu_id).await.deserialized_json() + self.pduid_pdu.qry(pdu_id).await.deserialized() } /// Returns the pdu as a `BTreeMap`. 
pub(super) async fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result { - self.pduid_pdu.qry(pdu_id).await.deserialized_json() + self.pduid_pdu.qry(pdu_id).await.deserialized() } pub(super) async fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) { diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 7e231514..0415bfc2 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -238,6 +238,6 @@ async fn get_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session .userdevicesessionid_uiaainfo .qry(&key) .await - .deserialized_json() + .deserialized() .map_err(|_| err!(Request(Forbidden("UIAA session does not exist.")))) } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 9a058ba9..ca37ed9d 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -577,7 +577,7 @@ impl Service { .qry(&key) .await .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key."))))? - .deserialized_json() + .deserialized() .map_err(|e| err!(Database("key in keyid_key is invalid. 
{e:?}")))?; let signatures = cross_signing_key @@ -652,7 +652,7 @@ impl Service { pub async fn get_device_keys<'a>(&'a self, user_id: &'a UserId, device_id: &DeviceId) -> Result> { let key_id = (user_id, device_id); - self.db.keyid_key.qry(&key_id).await.deserialized_json() + self.db.keyid_key.qry(&key_id).await.deserialized() } pub async fn get_key( @@ -666,7 +666,7 @@ impl Service { .keyid_key .qry(key_id) .await - .deserialized_json::()?; + .deserialized::()?; let cleaned = clean_signatures(key, sender_user, user_id, allowed_signatures)?; let raw_value = serde_json::value::to_raw_value(&cleaned)?; @@ -700,7 +700,7 @@ impl Service { pub async fn get_user_signing_key(&self, user_id: &UserId) -> Result> { let key_id = self.db.userid_usersigningkeyid.qry(user_id).await?; - self.db.keyid_key.qry(&*key_id).await.deserialized_json() + self.db.keyid_key.qry(&*key_id).await.deserialized() } pub async fn add_to_device_event( @@ -791,7 +791,7 @@ impl Service { .userdeviceid_metadata .qry(&(user_id, device_id)) .await - .deserialized_json() + .deserialized() } pub async fn get_devicelist_version(&self, user_id: &UserId) -> Result { @@ -830,7 +830,7 @@ impl Service { .userfilterid_filter .qry(&(user_id, filter_id)) .await - .deserialized_json() + .deserialized() } /// Creates an OpenID token, which can be used to prove that a user has From 3f7ec4221d89767e5bf0ff3e2a64c847a8dce264 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 25 Sep 2024 03:52:28 +0000 Subject: [PATCH 0014/1248] minor auth_chain optimizations/cleanup Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 47 ++++++++-------- src/api/server/event_auth.rs | 4 +- src/api/server/send_join.rs | 7 ++- src/api/server/state.rs | 4 +- src/api/server/state_ids.rs | 4 +- src/service/rooms/auth_chain/data.rs | 77 ++++++++++++++------------ src/service/rooms/auth_chain/mod.rs | 56 +++++++++---------- src/service/rooms/event_handler/mod.rs | 44 ++++++++------- 8 files changed, 125 insertions(+), 118 deletions(-) 
diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 65c9bc71..350e08c6 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -27,33 +27,32 @@ pub(super) async fn echo(&self, message: Vec) -> Result) -> Result { - let event_id = Arc::::from(event_id); - if let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await { - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else { + return Ok(RoomMessageEventContent::notice_plain("Event not found.")); + }; - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let start = Instant::now(); - let count = self - .services - .rooms - .auth_chain - .event_ids_iter(room_id, vec![event_id]) - .await? - .count() - .await; + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let elapsed = start.elapsed(); - Ok(RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {count} in {elapsed:?}" - ))) - } else { - Ok(RoomMessageEventContent::text_plain("Event not found.")) - } + let start = Instant::now(); + let count = self + .services + .rooms + .auth_chain + .event_ids_iter(room_id, &[&event_id]) + .await? 
+ .count() + .await; + + let elapsed = start.elapsed(); + Ok(RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {count} in {elapsed:?}" + ))) } #[admin_command] diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 6ec00b50..8307a4ad 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::borrow::Borrow; use axum::extract::State; use conduit::{Error, Result}; @@ -57,7 +57,7 @@ pub(crate) async fn get_event_authorization_route( let auth_chain = services .rooms .auth_chain - .event_ids_iter(room_id, vec![Arc::from(&*body.event_id)]) + .event_ids_iter(room_id, &[body.event_id.borrow()]) .await? .filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() }) .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 639fcafd..f9257690 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -1,6 +1,6 @@ #![allow(deprecated)] -use std::collections::BTreeMap; +use std::{borrow::Borrow, collections::BTreeMap}; use axum::extract::State; use conduit::{err, pdu::gen_event_id_canonical_json, utils::IterStream, warn, Error, Result}; @@ -11,7 +11,7 @@ use ruma::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, }, - CanonicalJsonValue, OwnedServerName, OwnedUserId, RoomId, ServerName, + CanonicalJsonValue, EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use service::Services; @@ -196,10 +196,11 @@ async fn create_join_event( .try_collect() .await?; + let starting_events: Vec<&EventId> = state_ids.values().map(Borrow::borrow).collect(); let auth_chain = services .rooms .auth_chain - .event_ids_iter(room_id, state_ids.values().cloned().collect()) + .event_ids_iter(room_id, &starting_events) .await? 
.map(Ok) .and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) diff --git a/src/api/server/state.rs b/src/api/server/state.rs index 37a14a3f..3a27cd0a 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::borrow::Borrow; use axum::extract::State; use conduit::{err, result::LogErr, utils::IterStream, Err, Result}; @@ -63,7 +63,7 @@ pub(crate) async fn get_room_state_route( let auth_chain = services .rooms .auth_chain - .event_ids_iter(&body.room_id, vec![Arc::from(&*body.event_id)]) + .event_ids_iter(&body.room_id, &[body.event_id.borrow()]) .await? .map(Ok) .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await }) diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 95ca65aa..b026abf1 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::borrow::Borrow; use axum::extract::State; use conduit::{err, Err}; @@ -55,7 +55,7 @@ pub(crate) async fn get_room_state_ids_route( let auth_chain_ids = services .rooms .auth_chain - .event_ids_iter(&body.room_id, vec![Arc::from(&*body.event_id)]) + .event_ids_iter(&body.room_id, &[body.event_id.borrow()]) .await? 
.map(|id| (*id).to_owned()) .collect() diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 3d00374e..5c9dbda8 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduit::{utils, utils::math::usize_from_f64, Result}; +use conduit::{err, utils, utils::math::usize_from_f64, Err, Result}; use database::Map; use lru_cache::LruCache; @@ -24,54 +24,63 @@ impl Data { } } - pub(super) async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>> { + pub(super) async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { + debug_assert!(!key.is_empty(), "auth_chain key must not be empty"); + // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); + if let Some(result) = self + .auth_chain_cache + .lock() + .expect("cache locked") + .get_mut(key) + { + return Ok(Arc::clone(result)); } // We only save auth chains for single events in the db - if key.len() == 1 { - // Check DB cache - let chain = self.shorteventid_authchain.qry(&key[0]).await.map(|chain| { - chain - .chunks_exact(size_of::()) - .map(utils::u64_from_u8) - .collect::>() - }); - - if let Ok(chain) = chain { - // Cache in RAM - self.auth_chain_cache - .lock() - .expect("locked") - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } + if key.len() != 1 { + return Err!(Request(NotFound("auth_chain not cached"))); } - Ok(None) + // Check database + let chain = self + .shorteventid_authchain + .qry(&key[0]) + .await + .map_err(|_| err!(Request(NotFound("auth_chain not found"))))?; + + let chain = chain + .chunks_exact(size_of::()) + .map(utils::u64_from_u8) + .collect::>(); + + // Cache in RAM + self.auth_chain_cache + .lock() + .expect("cache locked") + .insert(vec![key[0]], Arc::clone(&chain)); + + Ok(chain) } - pub(super) fn cache_auth_chain(&self, key: Vec, 
auth_chain: Arc<[u64]>) -> Result<()> { + pub(super) fn cache_auth_chain(&self, key: Vec, auth_chain: Arc<[u64]>) { + debug_assert!(!key.is_empty(), "auth_chain key must not be empty"); + // Only persist single events in db if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &auth_chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - ); + let key = key[0].to_be_bytes(); + let val = auth_chain + .iter() + .flat_map(|s| s.to_be_bytes().to_vec()) + .collect::>(); + + self.shorteventid_authchain.insert(&key, &val); } // Cache in RAM self.auth_chain_cache .lock() - .expect("locked") + .expect("cache locked") .insert(key, auth_chain); - - Ok(()) } } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 7bc239d7..eae13b74 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -37,25 +37,18 @@ impl crate::Service for Service { } impl Service { - pub async fn event_ids_iter<'a>( - &'a self, room_id: &RoomId, starting_events_: Vec>, - ) -> Result> + Send + 'a> { - let mut starting_events: Vec<&EventId> = Vec::with_capacity(starting_events_.len()); - for starting_event in &starting_events_ { - starting_events.push(starting_event); - } + pub async fn event_ids_iter( + &self, room_id: &RoomId, starting_events: &[&EventId], + ) -> Result> + Send + '_> { + let chain = self.get_auth_chain(room_id, starting_events).await?; + let iter = chain.into_iter().stream().filter_map(|sid| { + self.services + .short + .get_eventid_from_short(sid) + .map(Result::ok) + }); - Ok(self - .get_auth_chain(room_id, &starting_events) - .await? 
- .into_iter() - .stream() - .filter_map(|sid| { - self.services - .short - .get_eventid_from_short(sid) - .map(Result::ok) - })) + Ok(iter) } #[tracing::instrument(skip_all, name = "auth_chain")] @@ -93,7 +86,7 @@ impl Service { } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = self.get_cached_eventid_authchain(&chunk_key).await? { + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { trace!("Found cache entry for whole chunk"); full_auth_chain.extend(cached.iter().copied()); hits = hits.saturating_add(1); @@ -104,13 +97,13 @@ impl Service { let mut misses2: usize = 0; let mut chunk_cache = Vec::with_capacity(chunk.len()); for (sevent_id, event_id) in chunk { - if let Some(cached) = self.get_cached_eventid_authchain(&[sevent_id]).await? { + if let Ok(cached) = self.get_cached_eventid_authchain(&[sevent_id]).await { trace!(?event_id, "Found cache entry for event"); chunk_cache.extend(cached.iter().copied()); hits2 = hits2.saturating_add(1); } else { let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain(vec![sevent_id], &auth_chain)?; + self.cache_auth_chain(vec![sevent_id], &auth_chain); chunk_cache.extend(auth_chain.iter()); misses2 = misses2.saturating_add(1); debug!( @@ -125,7 +118,7 @@ impl Service { chunk_cache.sort_unstable(); chunk_cache.dedup(); - self.cache_auth_chain_vec(chunk_key, &chunk_cache)?; + self.cache_auth_chain_vec(chunk_key, &chunk_cache); full_auth_chain.extend(chunk_cache.iter()); misses = misses.saturating_add(1); debug!( @@ -163,11 +156,11 @@ impl Service { Ok(pdu) => { if pdu.room_id != room_id { return Err!(Request(Forbidden( - "auth event {event_id:?} for incorrect room {} which is not {}", + "auth event {event_id:?} for incorrect room {} which is not {room_id}", pdu.room_id, - room_id ))); } + for auth_event in &pdu.auth_events { let sauthevent = self .services @@ -187,20 +180,21 @@ impl Service { Ok(found) } - pub async 
fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>> { + #[inline] + pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { self.db.get_cached_eventid_authchain(key).await } #[tracing::instrument(skip(self), level = "debug")] - pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) -> Result<()> { - self.db - .cache_auth_chain(key, auth_chain.iter().copied().collect::>()) + pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { + let val = auth_chain.iter().copied().collect::>(); + self.db.cache_auth_chain(key, val); } #[tracing::instrument(skip(self), level = "debug")] - pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &Vec) -> Result<()> { - self.db - .cache_auth_chain(key, auth_chain.iter().copied().collect::>()) + pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &Vec) { + let val = auth_chain.iter().copied().collect::>(); + self.db.cache_auth_chain(key, val); } pub fn get_cache_usage(&self) -> (usize, usize) { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 07d6e4db..57b87706 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,6 +1,7 @@ mod parse_incoming_pdu; use std::{ + borrow::Borrow, collections::{hash_map, BTreeMap, HashMap, HashSet}, fmt::Write, sync::{Arc, RwLock as StdRwLock}, @@ -773,6 +774,7 @@ impl Service { Ok(pdu_id) } + #[tracing::instrument(skip_all, name = "resolve")] pub async fn resolve_state( &self, room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap>, ) -> Result>> { @@ -793,14 +795,17 @@ impl Service { let fork_states = [current_state_ids, incoming_state]; let mut auth_chain_sets = Vec::with_capacity(fork_states.len()); for state in &fork_states { - auth_chain_sets.push( - self.services - .auth_chain - .event_ids_iter(room_id, state.iter().map(|(_, id)| id.clone()).collect()) - .await? 
- .collect::>>() - .await, - ); + let starting_events: Vec<&EventId> = state.values().map(Borrow::borrow).collect(); + + let auth_chain = self + .services + .auth_chain + .event_ids_iter(room_id, &starting_events) + .await? + .collect::>>() + .await; + + auth_chain_sets.push(auth_chain); } debug!("Loading fork states"); @@ -962,12 +967,11 @@ impl Service { let mut state = StateMap::with_capacity(leaf_state.len()); let mut starting_events = Vec::with_capacity(leaf_state.len()); - - for (k, id) in leaf_state { + for (k, id) in &leaf_state { if let Ok((ty, st_key)) = self .services .short - .get_statekey_from_short(k) + .get_statekey_from_short(*k) .await .log_err() { @@ -976,18 +980,18 @@ impl Service { state.insert((ty.to_string().into(), st_key), id.clone()); } - starting_events.push(id); + starting_events.push(id.borrow()); } - auth_chain_sets.push( - self.services - .auth_chain - .event_ids_iter(room_id, starting_events) - .await? - .collect() - .await, - ); + let auth_chain = self + .services + .auth_chain + .event_ids_iter(room_id, &starting_events) + .await? 
+ .collect() + .await; + auth_chain_sets.push(auth_chain); fork_states.push(state); } From 4496cf2d5b08780fd2d2b32c31c2c0b38bf010e7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 21 Sep 2024 16:28:46 -0400 Subject: [PATCH 0015/1248] add missing await to first admin room creation Signed-off-by: strawberry --- src/service/admin/create.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 7b090aa0..3dd5aea3 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -30,7 +30,11 @@ use crate::Services; pub async fn create_admin_room(services: &Services) -> Result<()> { let room_id = RoomId::new(services.globals.server_name()); - let _short_id = services.rooms.short.get_or_create_shortroomid(&room_id); + let _short_id = services + .rooms + .short + .get_or_create_shortroomid(&room_id) + .await; let state_lock = services.rooms.state.mutex.lock(&room_id).await; From 5192927a5342cffd9a7284bad3eb2c4b4819c674 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 29 Sep 2024 07:37:43 +0000 Subject: [PATCH 0016/1248] split remaining map suites Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 10 -- src/database/map.rs | 163 ++--------------------------- src/database/map/get.rs | 82 +++++++++++++++ src/database/map/insert.rs | 52 +++++++++ src/database/map/remove.rs | 44 ++++++++ src/service/globals/data.rs | 28 +++-- src/service/rooms/short/data.rs | 2 +- src/service/rooms/timeline/data.rs | 2 +- src/service/users/mod.rs | 2 +- 9 files changed, 205 insertions(+), 180 deletions(-) create mode 100644 src/database/map/get.rs create mode 100644 src/database/map/insert.rs create mode 100644 src/database/map/remove.rs diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index b1ea3709..fef83395 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -35,13 +35,3 @@ pub use self::{ #[inline] pub fn exchange(state: &mut T, source: T) -> T { 
std::mem::replace(state, source) } - -#[must_use] -pub fn generate_keypair() -> Vec { - let mut value = rand::string(8).as_bytes().to_vec(); - value.push(0xFF); - value.extend_from_slice( - &ruma::signatures::Ed25519KeyPair::generate().expect("Ed25519KeyPair generation always works (?)"), - ); - value -} diff --git a/src/database/map.rs b/src/database/map.rs index a3cf32d4..cac20d6a 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,7 +1,10 @@ mod count; +mod get; +mod insert; mod keys; mod keys_from; mod keys_prefix; +mod remove; mod rev_keys; mod rev_keys_from; mod rev_keys_prefix; @@ -18,23 +21,14 @@ use std::{ fmt, fmt::{Debug, Display}, future::Future, - io::Write, pin::Pin, sync::Arc, }; -use conduit::{err, Result}; -use futures::future; -use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteBatchWithTransaction, WriteOptions}; -use serde::Serialize; +use conduit::Result; +use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteOptions}; -use crate::{ - keyval::{OwnedKey, OwnedVal}, - ser, - util::{map_err, or_else}, - watchers::Watchers, - Engine, Handle, -}; +use crate::{watchers::Watchers, Engine}; pub struct Map { name: String, @@ -57,146 +51,6 @@ impl Map { })) } - #[tracing::instrument(skip(self), fields(%self), level = "trace")] - pub fn del(&self, key: &K) - where - K: Serialize + ?Sized + Debug, - { - let mut buf = Vec::::with_capacity(64); - self.bdel(key, &mut buf); - } - - #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] - pub fn bdel(&self, key: &K, buf: &mut B) - where - K: Serialize + ?Sized + Debug, - B: Write + AsRef<[u8]>, - { - let key = ser::serialize(buf, key).expect("failed to serialize deletion key"); - self.remove(&key); - } - - #[tracing::instrument(level = "trace")] - pub fn remove(&self, key: &K) - where - K: AsRef<[u8]> + ?Sized + Debug, - { - let write_options = &self.write_options; - self.db - .db - .delete_cf_opt(&self.cf(), key, write_options) - .or_else(or_else) - 
.expect("database remove error"); - - if !self.db.corked() { - self.db.flush().expect("database flush error"); - } - } - - #[tracing::instrument(skip(self, value), fields(%self), level = "trace")] - pub fn insert(&self, key: &K, value: &V) - where - K: AsRef<[u8]> + ?Sized + Debug, - V: AsRef<[u8]> + ?Sized, - { - let write_options = &self.write_options; - self.db - .db - .put_cf_opt(&self.cf(), key, value, write_options) - .or_else(or_else) - .expect("database insert error"); - - if !self.db.corked() { - self.db.flush().expect("database flush error"); - } - - self.watchers.wake(key.as_ref()); - } - - #[tracing::instrument(skip(self), fields(%self), level = "trace")] - pub fn insert_batch<'a, I, K, V>(&'a self, iter: I) - where - I: Iterator + Send + Debug, - K: AsRef<[u8]> + Sized + Debug + 'a, - V: AsRef<[u8]> + Sized + 'a, - { - let mut batch = WriteBatchWithTransaction::::default(); - for (key, val) in iter { - batch.put_cf(&self.cf(), key.as_ref(), val.as_ref()); - } - - let write_options = &self.write_options; - self.db - .db - .write_opt(batch, write_options) - .or_else(or_else) - .expect("database insert batch error"); - - if !self.db.corked() { - self.db.flush().expect("database flush error"); - } - } - - #[tracing::instrument(skip(self), fields(%self), level = "trace")] - pub fn qry(&self, key: &K) -> impl Future>> + Send - where - K: Serialize + ?Sized + Debug, - { - let mut buf = Vec::::with_capacity(64); - self.bqry(key, &mut buf) - } - - #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] - pub fn bqry(&self, key: &K, buf: &mut B) -> impl Future>> + Send - where - K: Serialize + ?Sized + Debug, - B: Write + AsRef<[u8]>, - { - let key = ser::serialize(buf, key).expect("failed to serialize query key"); - let val = self.get(key); - future::ready(val) - } - - #[tracing::instrument(skip(self), fields(%self), level = "trace")] - pub fn get(&self, key: &K) -> Result> - where - K: AsRef<[u8]> + ?Sized + Debug, - { - self.db - .db - 
.get_pinned_cf_opt(&self.cf(), key, &self.read_options) - .map_err(map_err)? - .map(Handle::from) - .ok_or(err!(Request(NotFound("Not found in database")))) - } - - #[tracing::instrument(skip(self), fields(%self), level = "trace")] - pub fn multi_get<'a, I, K>(&self, keys: I) -> Vec> - where - I: Iterator + ExactSizeIterator + Send + Debug, - K: AsRef<[u8]> + Sized + Debug + 'a, - { - // Optimization can be `true` if key vector is pre-sorted **by the column - // comparator**. - const SORTED: bool = false; - - let mut ret: Vec> = Vec::with_capacity(keys.len()); - let read_options = &self.read_options; - for res in self - .db - .db - .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) - { - match res { - Ok(Some(res)) => ret.push(Some((*res).to_vec())), - Ok(None) => ret.push(None), - Err(e) => or_else(e).expect("database multiget error"), - } - } - - ret - } - #[inline] pub fn watch_prefix<'a, K>(&'a self, prefix: &K) -> Pin + Send + 'a>> where @@ -230,10 +84,7 @@ fn open(db: &Arc, name: &str) -> Result> { let bounded_ptr = Arc::into_raw(bounded_arc); let cf_ptr = bounded_ptr.cast::(); - // SAFETY: After thorough contemplation this appears to be the best solution, - // even by a significant margin. - // - // BACKGROUND: Column family handles out of RocksDB are basic pointers and can + // SAFETY: Column family handles out of RocksDB are basic pointers and can // be invalidated: 1. when the database closes. 2. when the column is dropped or // closed. 
rust_rocksdb wraps this for us by storing handles in their own // `RwLock` map and returning an Arc>` to diff --git a/src/database/map/get.rs b/src/database/map/get.rs new file mode 100644 index 00000000..b4d6a6ea --- /dev/null +++ b/src/database/map/get.rs @@ -0,0 +1,82 @@ +use std::{convert::AsRef, fmt::Debug, future::Future, io::Write}; + +use conduit::{err, implement, Result}; +use futures::future::ready; +use serde::Serialize; + +use crate::{ + keyval::{OwnedKey, OwnedVal}, + ser, + util::{map_err, or_else}, + Handle, +}; + +#[implement(super::Map)] +pub fn qry(&self, key: &K) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = Vec::::with_capacity(64); + self.bqry(key, &mut buf) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] +pub fn bqry(&self, key: &K, buf: &mut B) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, + B: Write + AsRef<[u8]>, +{ + let key = ser::serialize(buf, key).expect("failed to serialize query key"); + self.get(key) +} + +#[implement(super::Map)] +pub fn get(&self, key: &K) -> impl Future>> + Send +where + K: AsRef<[u8]> + ?Sized + Debug, +{ + ready(self.get_blocking(key)) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] +pub fn get_blocking(&self, key: &K) -> Result> +where + K: AsRef<[u8]> + ?Sized + Debug, +{ + self.db + .db + .get_pinned_cf_opt(&self.cf(), key, &self.read_options) + .map_err(map_err)? + .map(Handle::from) + .ok_or(err!(Request(NotFound("Not found in database")))) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, keys), fields(%self), level = "trace")] +pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> Vec> +where + I: Iterator + ExactSizeIterator + Send + Debug, + K: AsRef<[u8]> + Sized + Debug + 'a, +{ + // Optimization can be `true` if key vector is pre-sorted **by the column + // comparator**. 
+ const SORTED: bool = false; + + let mut ret: Vec> = Vec::with_capacity(keys.len()); + let read_options = &self.read_options; + for res in self + .db + .db + .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) + { + match res { + Ok(Some(res)) => ret.push(Some((*res).to_vec())), + Ok(None) => ret.push(None), + Err(e) => or_else(e).expect("database multiget error"), + } + } + + ret +} diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs new file mode 100644 index 00000000..953c9c94 --- /dev/null +++ b/src/database/map/insert.rs @@ -0,0 +1,52 @@ +use std::{convert::AsRef, fmt::Debug}; + +use conduit::implement; +use rocksdb::WriteBatchWithTransaction; + +use crate::util::or_else; + +#[implement(super::Map)] +#[tracing::instrument(skip(self, value), fields(%self), level = "trace")] +pub fn insert(&self, key: &K, value: &V) +where + K: AsRef<[u8]> + ?Sized + Debug, + V: AsRef<[u8]> + ?Sized, +{ + let write_options = &self.write_options; + self.db + .db + .put_cf_opt(&self.cf(), key, value, write_options) + .or_else(or_else) + .expect("database insert error"); + + if !self.db.corked() { + self.db.flush().expect("database flush error"); + } + + self.watchers.wake(key.as_ref()); +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, iter), fields(%self), level = "trace")] +pub fn insert_batch<'a, I, K, V>(&'a self, iter: I) +where + I: Iterator + Send + Debug, + K: AsRef<[u8]> + Sized + Debug + 'a, + V: AsRef<[u8]> + Sized + 'a, +{ + let mut batch = WriteBatchWithTransaction::::default(); + for (key, val) in iter { + batch.put_cf(&self.cf(), key.as_ref(), val.as_ref()); + } + + let write_options = &self.write_options; + self.db + .db + .write_opt(batch, write_options) + .or_else(or_else) + .expect("database insert batch error"); + + if !self.db.corked() { + self.db.flush().expect("database flush error"); + } +} diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs new file mode 100644 index 00000000..fcf7587e --- 
/dev/null +++ b/src/database/map/remove.rs @@ -0,0 +1,44 @@ +use std::{convert::AsRef, fmt::Debug, io::Write}; + +use conduit::implement; +use serde::Serialize; + +use crate::{ser, util::or_else}; + +#[implement(super::Map)] +pub fn del(&self, key: &K) +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = Vec::::with_capacity(64); + self.bdel(key, &mut buf); +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] +pub fn bdel(&self, key: &K, buf: &mut B) +where + K: Serialize + ?Sized + Debug, + B: Write + AsRef<[u8]>, +{ + let key = ser::serialize(buf, key).expect("failed to serialize deletion key"); + self.remove(key); +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] +pub fn remove(&self, key: &K) +where + K: AsRef<[u8]> + ?Sized + Debug, +{ + let write_options = &self.write_options; + self.db + .db + .delete_cf_opt(&self.cf(), key, write_options) + .or_else(or_else) + .expect("database remove error"); + + if !self.db.corked() { + self.db.flush().expect("database flush error"); + } +} diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 76f97944..5332f07d 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, RwLock}, }; -use conduit::{trace, utils, Error, Result, Server}; +use conduit::{trace, utils, utils::rand, Error, Result, Server}; use database::{Database, Deserialized, Map}; use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; use ruma::{ @@ -102,7 +102,7 @@ impl Data { fn stored_count(global: &Arc) -> Result { global - .get(COUNTER) + .get_blocking(COUNTER) .as_deref() .map_or(Ok(0_u64), utils::u64_from_bytes) } @@ -206,17 +206,23 @@ impl Data { } pub fn load_keypair(&self) -> Result { - let keypair_bytes = self.global.get(b"keypair").map_or_else( - |_| { - let keypair = utils::generate_keypair(); - self.global.insert(b"keypair", &keypair); - 
Ok::<_, Error>(keypair) - }, - |val| Ok(val.to_vec()), - )?; + let generate = |_| { + let keypair = Ed25519KeyPair::generate().expect("Ed25519KeyPair generation always works (?)"); + + let mut value = rand::string(8).as_bytes().to_vec(); + value.push(0xFF); + value.extend_from_slice(&keypair); + + self.global.insert(b"keypair", &value); + value + }; + + let keypair_bytes: Vec = self + .global + .get_blocking(b"keypair") + .map_or_else(generate, Into::into); let mut parts = keypair_bytes.splitn(2, |&b| b == 0xFF); - utils::string_from_bytes( // 1. version parts diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index f6a82488..fff3f2d6 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -59,7 +59,7 @@ impl Data { for (i, short) in self .eventid_shorteventid - .multi_get(keys.iter()) + .get_batch_blocking(keys.iter()) .iter() .enumerate() { diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 314dcb9f..1f9dad1d 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -326,7 +326,7 @@ pub(super) fn pdu_count(pdu_id: &[u8]) -> PduCount { //TODO: this is an ABA fn increment(db: &Arc, key: &[u8]) { - let old = db.get(key); + let old = db.get_blocking(key); let new = utils::increment(old.ok().as_deref()); db.insert(key, &new); } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index ca37ed9d..fa8c41b6 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1000,7 +1000,7 @@ where //TODO: this is an ABA fn increment(db: &Arc, key: &[u8]) { - let old = db.get(key); + let old = db.get_blocking(key); let new = utils::increment(old.ok().as_deref()); db.insert(key, &new); } From 0e8ae1e13e601c572336e38d1b020eb6a6aafe0d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 29 Sep 2024 12:49:24 +0000 Subject: [PATCH 0017/1248] add ArrayVec-backed serialized query overload; doc comments Signed-off-by: Jason Volk --- 
Cargo.lock | 1 + src/database/Cargo.toml | 1 + src/database/de.rs | 2 +- src/database/handle.rs | 28 ++++++++++++++++------------ src/database/map/get.rs | 23 +++++++++++++++++++++++ src/database/map/remove.rs | 10 ++++++++++ 6 files changed, 52 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08e0498a..043d9704 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -726,6 +726,7 @@ dependencies = [ name = "conduit_database" version = "0.4.7" dependencies = [ + "arrayvec", "conduit_core", "const-str", "futures", diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index b5eb7612..0e718aa7 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -35,6 +35,7 @@ zstd_compression = [ ] [dependencies] +arrayvec.workspace = true conduit-core.workspace = true const-str.workspace = true futures.workspace = true diff --git a/src/database/de.rs b/src/database/de.rs index a5d2c127..fc36560d 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -195,7 +195,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn deserialize_u8>(self, _visitor: V) -> Result { - unimplemented!("deserialize u8 not implemented") + unimplemented!("deserialize u8 not implemented; try dereferencing the Handle for [u8] access instead") } fn deserialize_u16>(self, _visitor: V) -> Result { diff --git a/src/database/handle.rs b/src/database/handle.rs index 0d4bd02e..daee224d 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -35,18 +35,6 @@ impl Serialize for Handle<'_> { } } -impl Deref for Handle<'_> { - type Target = Slice; - - #[inline] - fn deref(&self) -> &Self::Target { &self.val } -} - -impl AsRef for Handle<'_> { - #[inline] - fn as_ref(&self) -> &Slice { &self.val } -} - impl Deserialized for Result> { #[inline] fn map_de(self, f: F) -> Result @@ -78,3 +66,19 @@ impl<'a> Deserialized for &'a Handle<'a> { deserialize_val(self.as_ref()).map(f) } } + +impl From> for Vec { + fn from(handle: Handle<'_>) -> Self { 
handle.deref().to_vec() } +} + +impl Deref for Handle<'_> { + type Target = Slice; + + #[inline] + fn deref(&self) -> &Self::Target { &self.val } +} + +impl AsRef for Handle<'_> { + #[inline] + fn as_ref(&self) -> &Slice { &self.val } +} diff --git a/src/database/map/get.rs b/src/database/map/get.rs index b4d6a6ea..71489402 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,5 +1,6 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write}; +use arrayvec::ArrayVec; use conduit::{err, implement, Result}; use futures::future::ready; use serde::Serialize; @@ -11,6 +12,9 @@ use crate::{ Handle, }; +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into an allocated buffer to perform +/// the query. #[implement(super::Map)] pub fn qry(&self, key: &K) -> impl Future>> + Send where @@ -20,6 +24,20 @@ where self.bqry(key, &mut buf) } +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into a fixed-sized buffer to perform +/// the query. The maximum size is supplied as const generic parameter. +#[implement(super::Map)] +pub fn aqry(&self, key: &K) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = ArrayVec::::new(); + self.bqry(key, &mut buf) +} + +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into a user-supplied Writer. #[implement(super::Map)] #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] pub fn bqry(&self, key: &K, buf: &mut B) -> impl Future>> + Send @@ -31,6 +49,8 @@ where self.get(key) } +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is referenced directly to perform the query. 
#[implement(super::Map)] pub fn get(&self, key: &K) -> impl Future>> + Send where @@ -39,6 +59,9 @@ where ready(self.get_blocking(key)) } +/// Fetch a value from the database into cache, returning a reference-handle. +/// The key is referenced directly to perform the query. This is a thread- +/// blocking call. #[implement(super::Map)] #[tracing::instrument(skip(self, key), fields(%self), level = "trace")] pub fn get_blocking(&self, key: &K) -> Result> diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index fcf7587e..10bb2ff0 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -1,5 +1,6 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; +use arrayvec::ArrayVec; use conduit::implement; use serde::Serialize; @@ -14,6 +15,15 @@ where self.bdel(key, &mut buf); } +#[implement(super::Map)] +pub fn adel(&self, key: &K) +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = ArrayVec::::new(); + self.bdel(key, &mut buf); +} + #[implement(super::Map)] #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] pub fn bdel(&self, key: &K, buf: &mut B) From c569881b0853245dea0f8704342d6cfa6c465edb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 29 Sep 2024 13:13:09 +0000 Subject: [PATCH 0018/1248] merge rooms/short Data w/ Service; optimize queries Signed-off-by: Jason Volk --- src/service/account_data/mod.rs | 2 +- src/service/appservice/data.rs | 2 +- src/service/globals/data.rs | 12 +- src/service/globals/migrations.rs | 6 +- src/service/media/data.rs | 2 +- src/service/presence/data.rs | 6 +- src/service/rooms/alias/mod.rs | 6 +- src/service/rooms/directory/mod.rs | 2 +- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/short/data.rs | 167 --------------- src/service/rooms/short/mod.rs | 232 +++++++++++++++++---- src/service/rooms/state/data.rs | 2 +- src/service/rooms/state_accessor/data.rs | 4 +- src/service/rooms/state_cache/mod.rs | 8 +- src/service/rooms/state_compressor/data.rs | 4 +- 
src/service/rooms/timeline/data.rs | 22 +- src/service/sending/data.rs | 6 +- src/service/users/mod.rs | 32 +-- 18 files changed, 257 insertions(+), 262 deletions(-) delete mode 100644 src/service/rooms/short/data.rs diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 4f00cff1..482229e7 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -106,7 +106,7 @@ pub async fn get( self.db .roomusertype_roomuserdataid .qry(&key) - .and_then(|roomuserdataid| self.db.roomuserdataid_accountdata.qry(&roomuserdataid)) + .and_then(|roomuserdataid| self.db.roomuserdataid_accountdata.get(&roomuserdataid)) .await .deserialized() } diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index f31c5e63..4eb9d09e 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -38,7 +38,7 @@ impl Data { pub async fn get_registration(&self, id: &str) -> Result { self.id_appserviceregistrations - .qry(id) + .get(id) .await .deserialized() .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 5332f07d..57a295d9 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -260,7 +260,7 @@ impl Data { &self, origin: &ServerName, new_keys: ServerSigningKeys, ) -> BTreeMap { // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.qry(origin).await; + let signingkeys = self.server_signingkeys.get(origin).await; let mut keys = signingkeys .and_then(|keys| serde_json::from_slice(&keys).map_err(Into::into)) @@ -311,10 +311,16 @@ impl Data { } pub async fn signing_keys_for(&self, origin: &ServerName) -> Result { - self.server_signingkeys.qry(origin).await.deserialized() + self.server_signingkeys.get(origin).await.deserialized() } - pub async fn database_version(&self) -> u64 { self.global.qry("version").await.deserialized().unwrap_or(0) } + pub async 
fn database_version(&self) -> u64 { + self.global + .get(b"version") + .await + .deserialized() + .unwrap_or(0) + } #[inline] pub fn bump_database_version(&self, new_version: u64) -> Result<()> { diff --git a/src/service/globals/migrations.rs b/src/service/globals/migrations.rs index c7a73230..469159fc 100644 --- a/src/service/globals/migrations.rs +++ b/src/service/globals/migrations.rs @@ -99,14 +99,14 @@ async fn migrate(services: &Services) -> Result<()> { db_lt_13(services).await?; } - if db["global"].qry("feat_sha256_media").await.is_not_found() { + if db["global"].get(b"feat_sha256_media").await.is_not_found() { media::migrations::migrate_sha256_media(services).await?; } else if config.media_startup_check { media::migrations::checkup_sha256_media(services).await?; } if db["global"] - .qry("fix_bad_double_separator_in_state_cache") + .get(b"fix_bad_double_separator_in_state_cache") .await .is_not_found() { @@ -114,7 +114,7 @@ async fn migrate(services: &Services) -> Result<()> { } if db["global"] - .qry("retroactively_fix_bad_data_from_roomuserid_joined") + .get(b"retroactively_fix_bad_data_from_roomuserid_joined") .await .is_not_found() { diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 29d562cc..248e9e1d 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -253,7 +253,7 @@ impl Data { } pub(super) async fn get_url_preview(&self, url: &str) -> Result { - let values = self.url_previews.qry(url).await?; + let values = self.url_previews.get(url).await?; let mut values = values.split(|&b| b == 0xFF); diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 0c3f3d31..9c9d0ae3 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -39,12 +39,12 @@ impl Data { pub async fn get_presence(&self, user_id: &UserId) -> Result<(u64, PresenceEvent)> { let count = self .userid_presenceid - .qry(user_id) + .get(user_id) .await .deserialized::()?; let key = presenceid_key(count, 
user_id); - let bytes = self.presenceid_presence.qry(&key).await?; + let bytes = self.presenceid_presence.get(&key).await?; let event = Presence::from_json_bytes(&bytes)? .to_presence_event(user_id, &self.services.users) .await; @@ -127,7 +127,7 @@ impl Data { pub(super) async fn remove_presence(&self, user_id: &UserId) { let Ok(count) = self .userid_presenceid - .qry(user_id) + .get(user_id) .await .deserialized::() else { diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 6b81a221..1d44cd2d 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -94,7 +94,7 @@ impl Service { } let alias = alias.alias(); - let Ok(room_id) = self.db.alias_roomid.qry(&alias).await else { + let Ok(room_id) = self.db.alias_roomid.get(&alias).await else { return Err!(Request(NotFound("Alias does not exist or is invalid."))); }; @@ -151,7 +151,7 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub async fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result { - self.db.alias_roomid.qry(alias.alias()).await.deserialized() + self.db.alias_roomid.get(alias.alias()).await.deserialized() } #[tracing::instrument(skip(self), level = "debug")] @@ -219,7 +219,7 @@ impl Service { } async fn who_created_alias(&self, alias: &RoomAliasId) -> Result { - self.db.alias_userid.qry(alias.alias()).await.deserialized() + self.db.alias_userid.get(alias.alias()).await.deserialized() } async fn resolve_appservice_alias(&self, room_alias: &RoomAliasId) -> Result> { diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 3585205d..5666a91a 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -32,7 +32,7 @@ pub fn set_public(&self, room_id: &RoomId) { self.db.publicroomids.insert(room_i pub fn set_not_public(&self, room_id: &RoomId) { self.db.publicroomids.remove(room_id.as_bytes()); } #[implement(Service)] -pub async fn is_public_room(&self, room_id: 
&RoomId) -> bool { self.db.publicroomids.qry(room_id).await.is_ok() } +pub async fn is_public_room(&self, room_id: &RoomId) -> bool { self.db.publicroomids.get(room_id).await.is_ok() } #[implement(Service)] pub fn public_rooms(&self) -> impl Stream + Send { diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 4c9225ae..b9d04263 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -31,7 +31,7 @@ impl crate::Service for Service { pub async fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result { self.db .eventid_outlierpdu - .qry(event_id) + .get(event_id) .await .deserialized() } @@ -41,7 +41,7 @@ pub async fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result Result { self.db .eventid_outlierpdu - .qry(event_id) + .get(event_id) .await .deserialized() } diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs deleted file mode 100644 index fff3f2d6..00000000 --- a/src/service/rooms/short/data.rs +++ /dev/null @@ -1,167 +0,0 @@ -use std::sync::Arc; - -use conduit::{err, utils, Error, Result}; -use database::{Deserialized, Map}; -use ruma::{events::StateEventType, EventId, RoomId}; - -use crate::{globals, Dep}; - -pub(super) struct Data { - eventid_shorteventid: Arc, - shorteventid_eventid: Arc, - statekey_shortstatekey: Arc, - shortstatekey_statekey: Arc, - roomid_shortroomid: Arc, - statehash_shortstatehash: Arc, - services: Services, -} - -struct Services { - globals: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - eventid_shorteventid: db["eventid_shorteventid"].clone(), - shorteventid_eventid: db["shorteventid_eventid"].clone(), - statekey_shortstatekey: db["statekey_shortstatekey"].clone(), - shortstatekey_statekey: db["shortstatekey_statekey"].clone(), - roomid_shortroomid: db["roomid_shortroomid"].clone(), - statehash_shortstatehash: db["statehash_shortstatehash"].clone(), - services: Services { - 
globals: args.depend::("globals"), - }, - } - } - - pub(super) async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { - if let Ok(shorteventid) = self.eventid_shorteventid.qry(event_id).await.deserialized() { - return shorteventid; - } - - let shorteventid = self.services.globals.next_count().unwrap(); - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes()); - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes()); - - shorteventid - } - - pub(super) async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { - let mut ret: Vec = Vec::with_capacity(event_ids.len()); - let keys = event_ids - .iter() - .map(|id| id.as_bytes()) - .collect::>(); - - for (i, short) in self - .eventid_shorteventid - .get_batch_blocking(keys.iter()) - .iter() - .enumerate() - { - #[allow(clippy::single_match_else)] - match short { - Some(short) => ret.push( - utils::u64_from_bytes(short) - .map_err(|_| Error::bad_database("Invalid shorteventid in db.")) - .unwrap(), - ), - None => { - let short = self.services.globals.next_count().unwrap(); - self.eventid_shorteventid - .insert(keys[i], &short.to_be_bytes()); - self.shorteventid_eventid - .insert(&short.to_be_bytes(), keys[i]); - - debug_assert!(ret.len() == i, "position of result must match input"); - ret.push(short); - }, - } - } - - ret - } - - pub(super) async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { - let key = (event_type, state_key); - self.statekey_shortstatekey.qry(&key).await.deserialized() - } - - pub(super) async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> u64 { - let key = (event_type.to_string(), state_key); - if let Ok(shortstatekey) = self.statekey_shortstatekey.qry(&key).await.deserialized() { - return shortstatekey; - } - - let mut key = event_type.to_string().as_bytes().to_vec(); - key.push(0xFF); - 
key.extend_from_slice(state_key.as_bytes()); - - let shortstatekey = self.services.globals.next_count().unwrap(); - self.statekey_shortstatekey - .insert(&key, &shortstatekey.to_be_bytes()); - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &key); - - shortstatekey - } - - pub(super) async fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - self.shorteventid_eventid - .qry(&shorteventid) - .await - .deserialized() - .map_err(|e| err!(Database("Failed to find EventId from short {shorteventid:?}: {e:?}"))) - } - - pub(super) async fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - self.shortstatekey_statekey - .qry(&shortstatekey) - .await - .deserialized() - .map_err(|e| { - err!(Database( - "Failed to find (StateEventType, state_key) from short {shortstatekey:?}: {e:?}" - )) - }) - } - - /// Returns (shortstatehash, already_existed) - pub(super) async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, bool) { - if let Ok(shortstatehash) = self - .statehash_shortstatehash - .qry(state_hash) - .await - .deserialized() - { - return (shortstatehash, true); - } - - let shortstatehash = self.services.globals.next_count().unwrap(); - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes()); - - (shortstatehash, false) - } - - pub(super) async fn get_shortroomid(&self, room_id: &RoomId) -> Result { - self.roomid_shortroomid.qry(room_id).await.deserialized() - } - - pub(super) async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> u64 { - self.roomid_shortroomid - .qry(room_id) - .await - .deserialized() - .unwrap_or_else(|_| { - let short = self.services.globals.next_count().unwrap(); - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes()); - short - }) - } -} diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 00bb7cb1..66da3948 100644 --- a/src/service/rooms/short/mod.rs +++ 
b/src/service/rooms/short/mod.rs @@ -1,61 +1,215 @@ -mod data; - use std::sync::Arc; -use conduit::Result; +use conduit::{err, implement, utils, Error, Result}; +use database::{Deserialized, Map}; use ruma::{events::StateEventType, EventId, RoomId}; -use self::data::Data; +use crate::{globals, Dep}; pub struct Service { db: Data, + services: Services, +} + +struct Data { + eventid_shorteventid: Arc, + shorteventid_eventid: Arc, + statekey_shortstatekey: Arc, + shortstatekey_statekey: Arc, + roomid_shortroomid: Arc, + statehash_shortstatehash: Arc, +} + +struct Services { + globals: Dep, } impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(&args), + db: Data { + eventid_shorteventid: args.db["eventid_shorteventid"].clone(), + shorteventid_eventid: args.db["shorteventid_eventid"].clone(), + statekey_shortstatekey: args.db["statekey_shortstatekey"].clone(), + shortstatekey_statekey: args.db["shortstatekey_statekey"].clone(), + roomid_shortroomid: args.db["roomid_shortroomid"].clone(), + statehash_shortstatehash: args.db["statehash_shortstatehash"].clone(), + }, + services: Services { + globals: args.depend::("globals"), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { - self.db.get_or_create_shorteventid(event_id).await +#[implement(Service)] +pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { + if let Ok(shorteventid) = self + .db + .eventid_shorteventid + .get(event_id) + .await + .deserialized() + { + return shorteventid; } - pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { - self.db.multi_get_or_create_shorteventid(event_ids).await - } + let shorteventid = self.services.globals.next_count().unwrap(); + self.db + .eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes()); + self.db + 
.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes()); - pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { - self.db.get_shortstatekey(event_type, state_key).await - } - - pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> u64 { - self.db - .get_or_create_shortstatekey(event_type, state_key) - .await - } - - pub async fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - self.db.get_eventid_from_short(shorteventid).await - } - - pub async fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - self.db.get_statekey_from_short(shortstatekey).await - } - - /// Returns (shortstatehash, already_existed) - pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, bool) { - self.db.get_or_create_shortstatehash(state_hash).await - } - - pub async fn get_shortroomid(&self, room_id: &RoomId) -> Result { self.db.get_shortroomid(room_id).await } - - pub async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> u64 { - self.db.get_or_create_shortroomid(room_id).await - } + shorteventid +} + +#[implement(Service)] +pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { + let mut ret: Vec = Vec::with_capacity(event_ids.len()); + let keys = event_ids + .iter() + .map(|id| id.as_bytes()) + .collect::>(); + + for (i, short) in self + .db + .eventid_shorteventid + .get_batch_blocking(keys.iter()) + .iter() + .enumerate() + { + match short { + Some(short) => ret.push( + utils::u64_from_bytes(short) + .map_err(|_| Error::bad_database("Invalid shorteventid in db.")) + .unwrap(), + ), + None => { + let short = self.services.globals.next_count().unwrap(); + self.db + .eventid_shorteventid + .insert(keys[i], &short.to_be_bytes()); + self.db + .shorteventid_eventid + .insert(&short.to_be_bytes(), keys[i]); + + debug_assert!(ret.len() == i, "position of result must 
match input"); + ret.push(short); + }, + } + } + + ret +} + +#[implement(Service)] +pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { + let key = (event_type, state_key); + self.db + .statekey_shortstatekey + .qry(&key) + .await + .deserialized() +} + +#[implement(Service)] +pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> u64 { + let key = (event_type.to_string(), state_key); + if let Ok(shortstatekey) = self + .db + .statekey_shortstatekey + .qry(&key) + .await + .deserialized() + { + return shortstatekey; + } + + let mut key = event_type.to_string().as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(state_key.as_bytes()); + + let shortstatekey = self.services.globals.next_count().unwrap(); + self.db + .statekey_shortstatekey + .insert(&key, &shortstatekey.to_be_bytes()); + self.db + .shortstatekey_statekey + .insert(&shortstatekey.to_be_bytes(), &key); + + shortstatekey +} + +#[implement(Service)] +pub async fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { + const BUFSIZE: usize = size_of::(); + + self.db + .shorteventid_eventid + .aqry::(&shorteventid) + .await + .deserialized() + .map_err(|e| err!(Database("Failed to find EventId from short {shorteventid:?}: {e:?}"))) +} + +#[implement(Service)] +pub async fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + const BUFSIZE: usize = size_of::(); + + self.db + .shortstatekey_statekey + .aqry::(&shortstatekey) + .await + .deserialized() + .map_err(|e| { + err!(Database( + "Failed to find (StateEventType, state_key) from short {shortstatekey:?}: {e:?}" + )) + }) +} + +/// Returns (shortstatehash, already_existed) +#[implement(Service)] +pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, bool) { + if let Ok(shortstatehash) = self + .db + .statehash_shortstatehash + .get(state_hash) + .await + .deserialized() + { + return 
(shortstatehash, true); + } + + let shortstatehash = self.services.globals.next_count().unwrap(); + self.db + .statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes()); + + (shortstatehash, false) +} + +#[implement(Service)] +pub async fn get_shortroomid(&self, room_id: &RoomId) -> Result { + self.db.roomid_shortroomid.qry(room_id).await.deserialized() +} + +#[implement(Service)] +pub async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> u64 { + self.db + .roomid_shortroomid + .get(room_id) + .await + .deserialized() + .unwrap_or_else(|_| { + let short = self.services.globals.next_count().unwrap(); + self.db + .roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes()); + short + }) } diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index ccf7509a..3072e3c6 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -25,7 +25,7 @@ impl Data { } pub(super) async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result { - self.roomid_shortstatehash.qry(room_id).await.deserialized() + self.roomid_shortstatehash.get(room_id).await.deserialized() } #[inline] diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 79a98325..adc26f00 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -157,8 +157,8 @@ impl Data { /// Returns the state hash for this pdu. 
pub(super) async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { self.eventid_shorteventid - .qry(event_id) - .and_then(|shorteventid| self.shorteventid_shortstatehash.qry(&shorteventid)) + .get(event_id) + .and_then(|shorteventid| self.shorteventid_shortstatehash.get(&shorteventid)) .await .deserialized() } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index ce5b024b..eedff861 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -435,10 +435,10 @@ impl Service { /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_joined(&self, user_id: &UserId) -> impl Stream + Send { + pub fn rooms_joined<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { self.db .userroomid_joined - .keys_prefix(user_id) + .keys_prefix_raw(user_id) .ignore_err() .map(|(_, room_id): (Ignore, &RoomId)| room_id) } @@ -494,10 +494,10 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub fn servers_invite_via<'a>(&'a self, room_id: &RoomId) -> impl Stream + Send + 'a { + pub fn servers_invite_via<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { self.db .roomid_inviteviaservers - .stream_prefix(room_id) + .stream_prefix_raw(room_id) .ignore_err() .map(|(_, servers): (Ignore, Vec<&ServerName>)| &**(servers.last().expect("at least one servername"))) } diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index 9a9f70a2..cb020470 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -23,9 +23,11 @@ impl Data { } pub(super) async fn get_statediff(&self, shortstatehash: u64) -> Result { + const BUFSIZE: usize = size_of::(); + let value = self .shortstatehash_statediff - .qry(&shortstatehash) + .aqry::(&shortstatehash) .await .map_err(|e| err!(Database("Failed to find StateDiff from short 
{shortstatehash:?}: {e}")))?; diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 1f9dad1d..cb85cf19 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -79,7 +79,7 @@ impl Data { /// Returns the `count` of this pdu's id. pub(super) async fn get_pdu_count(&self, event_id: &EventId) -> Result { self.eventid_pduid - .qry(event_id) + .get(event_id) .await .map(|pdu_id| pdu_count(&pdu_id)) } @@ -90,27 +90,27 @@ impl Data { return Ok(pdu); } - self.eventid_outlierpdu.qry(event_id).await.deserialized() + self.eventid_outlierpdu.get(event_id).await.deserialized() } /// Returns the json of a pdu. pub(super) async fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result { let pduid = self.get_pdu_id(event_id).await?; - self.pduid_pdu.qry(&pduid).await.deserialized() + self.pduid_pdu.get(&pduid).await.deserialized() } /// Returns the pdu's id. #[inline] pub(super) async fn get_pdu_id(&self, event_id: &EventId) -> Result> { - self.eventid_pduid.qry(event_id).await + self.eventid_pduid.get(event_id).await } /// Returns the pdu directly from `eventid_pduid` only. 
pub(super) async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { let pduid = self.get_pdu_id(event_id).await?; - self.pduid_pdu.qry(&pduid).await.deserialized() + self.pduid_pdu.get(&pduid).await.deserialized() } /// Like get_non_outlier_pdu(), but without the expense of fetching and @@ -118,7 +118,7 @@ impl Data { pub(super) async fn non_outlier_pdu_exists(&self, event_id: &EventId) -> Result<()> { let pduid = self.get_pdu_id(event_id).await?; - self.pduid_pdu.qry(&pduid).await?; + self.pduid_pdu.get(&pduid).await?; Ok(()) } @@ -132,7 +132,7 @@ impl Data { } self.eventid_outlierpdu - .qry(event_id) + .get(event_id) .await .deserialized() .map(Arc::new) @@ -141,7 +141,7 @@ impl Data { /// Like get_non_outlier_pdu(), but without the expense of fetching and /// parsing the PduEvent pub(super) async fn outlier_pdu_exists(&self, event_id: &EventId) -> Result<()> { - self.eventid_outlierpdu.qry(event_id).await?; + self.eventid_outlierpdu.get(event_id).await?; Ok(()) } @@ -159,12 +159,12 @@ impl Data { /// /// This does __NOT__ check the outliers `Tree`. pub(super) async fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result { - self.pduid_pdu.qry(pdu_id).await.deserialized() + self.pduid_pdu.get(pdu_id).await.deserialized() } /// Returns the pdu as a `BTreeMap`. 
pub(super) async fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result { - self.pduid_pdu.qry(pdu_id).await.deserialized() + self.pduid_pdu.get(pdu_id).await.deserialized() } pub(super) async fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) { @@ -196,7 +196,7 @@ impl Data { pub(super) async fn replace_pdu( &self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, _pdu: &PduEvent, ) -> Result<()> { - if self.pduid_pdu.qry(pdu_id).await.is_not_found() { + if self.pduid_pdu.get(pdu_id).await.is_not_found() { return Err!(Request(NotFound("PDU does not exist."))); } diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index b96f9a03..6f4b5b97 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -98,7 +98,7 @@ impl Data { } #[inline] - pub fn active_requests_for<'a>(&'a self, destination: &Destination) -> impl Stream + Send + 'a { + pub fn active_requests_for(&self, destination: &Destination) -> impl Stream + Send + '_ { let prefix = destination.get_prefix(); self.servercurrentevent_data .stream_raw_prefix(&prefix) @@ -133,7 +133,7 @@ impl Data { keys } - pub fn queued_requests<'a>(&'a self, destination: &Destination) -> impl Stream + Send + 'a { + pub fn queued_requests(&self, destination: &Destination) -> impl Stream + Send + '_ { let prefix = destination.get_prefix(); self.servernameevent_data .stream_raw_prefix(&prefix) @@ -152,7 +152,7 @@ impl Data { pub async fn get_latest_educount(&self, server_name: &ServerName) -> u64 { self.servername_educount - .qry(server_name) + .get(server_name) .await .deserialized() .unwrap_or(0) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index fa8c41b6..eb77ef35 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -120,13 +120,13 @@ impl Service { /// Check if a user has an account on this homeserver. 
#[inline] - pub async fn exists(&self, user_id: &UserId) -> bool { self.db.userid_password.qry(user_id).await.is_ok() } + pub async fn exists(&self, user_id: &UserId) -> bool { self.db.userid_password.get(user_id).await.is_ok() } /// Check if account is deactivated pub async fn is_deactivated(&self, user_id: &UserId) -> Result { self.db .userid_password - .qry(user_id) + .get(user_id) .map_ok(|val| val.is_empty()) .map_err(|_| err!(Request(NotFound("User does not exist.")))) .await @@ -146,7 +146,7 @@ impl Service { /// Find out which user an access token belongs to. pub async fn find_from_token(&self, token: &str) -> Result<(OwnedUserId, OwnedDeviceId)> { - self.db.token_userdeviceid.qry(token).await.deserialized() + self.db.token_userdeviceid.get(token).await.deserialized() } /// Returns an iterator over all users on this homeserver (offered for @@ -171,7 +171,7 @@ impl Service { /// Returns the password hash for the given user. pub async fn password_hash(&self, user_id: &UserId) -> Result { - self.db.userid_password.qry(user_id).await.deserialized() + self.db.userid_password.get(user_id).await.deserialized() } /// Hash and set the user's password to the Argon2 hash @@ -196,7 +196,7 @@ impl Service { /// Returns the displayname of a user on this homeserver. pub async fn displayname(&self, user_id: &UserId) -> Result { - self.db.userid_displayname.qry(user_id).await.deserialized() + self.db.userid_displayname.get(user_id).await.deserialized() } /// Sets a new displayname or removes it if displayname is None. You still @@ -213,7 +213,7 @@ impl Service { /// Get the `avatar_url` of a user. pub async fn avatar_url(&self, user_id: &UserId) -> Result { - self.db.userid_avatarurl.qry(user_id).await.deserialized() + self.db.userid_avatarurl.get(user_id).await.deserialized() } /// Sets a new avatar_url or removes it if avatar_url is None. @@ -229,7 +229,7 @@ impl Service { /// Get the blurhash of a user. 
pub async fn blurhash(&self, user_id: &UserId) -> Result { - self.db.userid_blurhash.qry(user_id).await.deserialized() + self.db.userid_blurhash.get(user_id).await.deserialized() } /// Sets a new avatar_url or removes it if avatar_url is None. @@ -284,7 +284,7 @@ impl Service { userdeviceid.extend_from_slice(device_id.as_bytes()); // Remove tokens - if let Ok(old_token) = self.db.userdeviceid_token.qry(&userdeviceid).await { + if let Ok(old_token) = self.db.userdeviceid_token.get(&userdeviceid).await { self.db.userdeviceid_token.remove(&userdeviceid); self.db.token_userdeviceid.remove(&old_token); } @@ -390,7 +390,7 @@ impl Service { pub async fn last_one_time_keys_update(&self, user_id: &UserId) -> u64 { self.db .userid_lastonetimekeyupdate - .qry(user_id) + .get(user_id) .await .deserialized() .unwrap_or(0) @@ -664,7 +664,7 @@ impl Service { let key = self .db .keyid_key - .qry(key_id) + .get(key_id) .await .deserialized::()?; @@ -679,7 +679,7 @@ impl Service { where F: Fn(&UserId) -> bool + Send + Sync, { - let key_id = self.db.userid_masterkeyid.qry(user_id).await?; + let key_id = self.db.userid_masterkeyid.get(user_id).await?; self.get_key(&key_id, sender_user, user_id, allowed_signatures) .await @@ -691,16 +691,16 @@ impl Service { where F: Fn(&UserId) -> bool + Send + Sync, { - let key_id = self.db.userid_selfsigningkeyid.qry(user_id).await?; + let key_id = self.db.userid_selfsigningkeyid.get(user_id).await?; self.get_key(&key_id, sender_user, user_id, allowed_signatures) .await } pub async fn get_user_signing_key(&self, user_id: &UserId) -> Result> { - let key_id = self.db.userid_usersigningkeyid.qry(user_id).await?; + let key_id = self.db.userid_usersigningkeyid.get(user_id).await?; - self.db.keyid_key.qry(&*key_id).await.deserialized() + self.db.keyid_key.get(&*key_id).await.deserialized() } pub async fn add_to_device_event( @@ -797,7 +797,7 @@ impl Service { pub async fn get_devicelist_version(&self, user_id: &UserId) -> Result { self.db 
.userid_devicelistversion - .qry(user_id) + .get(user_id) .await .deserialized() } @@ -853,7 +853,7 @@ impl Service { /// Find out which user an OpenID access token belongs to. pub async fn find_from_openid_token(&self, token: &str) -> Result { - let Ok(value) = self.db.openidtoken_expiresatuserid.qry(token).await else { + let Ok(value) = self.db.openidtoken_expiresatuserid.get(token).await else { return Err!(Request(Unauthorized("OpenID token is unrecognised"))); }; From a8d5cf96517d706cff5fe73650405edaff4e0779 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 23 Sep 2024 21:38:56 +0000 Subject: [PATCH 0019/1248] Add rocksdb logging integration with tracing. Signed-off-by: Jason Volk --- src/database/engine.rs | 17 ++++++++++++++++- src/database/opts.rs | 2 ++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index 067232e6..edf077fc 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -10,7 +10,7 @@ use conduit::{debug, error, info, utils::time::rfc2822_from_seconds, warn, Err, use rocksdb::{ backup::{BackupEngine, BackupEngineOptions}, perf::get_memory_usage_stats, - AsColumnFamilyRef, BoundColumnFamily, Cache, ColumnFamilyDescriptor, DBCommon, DBWithThreadMode, Env, + AsColumnFamilyRef, BoundColumnFamily, Cache, ColumnFamilyDescriptor, DBCommon, DBWithThreadMode, Env, LogLevel, MultiThreaded, Options, }; @@ -279,6 +279,21 @@ pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result<()> { Ok(()) } +#[tracing::instrument(skip_all, name = "rocksdb")] +pub(crate) fn handle_log(level: LogLevel, msg: &str) { + let msg = msg.trim(); + if msg.starts_with("Options") { + return; + } + + match level { + LogLevel::Header | LogLevel::Debug => debug!("{msg}"), + LogLevel::Error | LogLevel::Fatal => error!("{msg}"), + LogLevel::Info => debug!("{msg}"), + LogLevel::Warn => warn!("{msg}"), + }; +} + impl Drop for Engine { #[cold] fn drop(&mut self) { diff --git a/src/database/opts.rs 
b/src/database/opts.rs index d2ad4b95..46fb4c54 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -191,6 +191,8 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) { if config.rocksdb_log_stderr { opts.set_stderr_logger(rocksdb_log_level, "rocksdb"); + } else { + opts.set_callback_logger(rocksdb_log_level, &super::engine::handle_log); } } From 6b80361c31fc8b2eeeafbcfbf14a463c3423ee7c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Sep 2024 06:46:54 +0000 Subject: [PATCH 0020/1248] additional stream tools Signed-off-by: Jason Volk --- src/core/result.rs | 5 +- src/core/result/into_is_ok.rs | 10 +++ src/core/utils/bool.rs | 16 +++++ src/core/utils/future/mod.rs | 3 + src/core/utils/future/try_ext_ext.rs | 48 +++++++++++++ src/core/utils/mod.rs | 6 +- src/core/utils/stream/mod.rs | 2 + src/core/utils/stream/ready.rs | 102 ++++++++++++++++++--------- src/core/utils/stream/tools.rs | 80 +++++++++++++++++++++ src/service/rooms/state_cache/mod.rs | 16 ++--- 10 files changed, 242 insertions(+), 46 deletions(-) create mode 100644 src/core/result/into_is_ok.rs create mode 100644 src/core/utils/bool.rs create mode 100644 src/core/utils/future/mod.rs create mode 100644 src/core/utils/future/try_ext_ext.rs create mode 100644 src/core/utils/stream/tools.rs diff --git a/src/core/result.rs b/src/core/result.rs index 96a34b8a..82d67a9c 100644 --- a/src/core/result.rs +++ b/src/core/result.rs @@ -1,4 +1,5 @@ mod debug_inspect; +mod into_is_ok; mod log_debug_err; mod log_err; mod map_expect; @@ -6,8 +7,8 @@ mod not_found; mod unwrap_infallible; pub use self::{ - debug_inspect::DebugInspect, log_debug_err::LogDebugErr, log_err::LogErr, map_expect::MapExpect, - not_found::NotFound, unwrap_infallible::UnwrapInfallible, + debug_inspect::DebugInspect, into_is_ok::IntoIsOk, log_debug_err::LogDebugErr, log_err::LogErr, + map_expect::MapExpect, not_found::NotFound, unwrap_infallible::UnwrapInfallible, }; pub type Result = std::result::Result; diff --git 
a/src/core/result/into_is_ok.rs b/src/core/result/into_is_ok.rs new file mode 100644 index 00000000..220ce010 --- /dev/null +++ b/src/core/result/into_is_ok.rs @@ -0,0 +1,10 @@ +use super::Result; + +pub trait IntoIsOk { + fn into_is_ok(self) -> bool; +} + +impl IntoIsOk for Result { + #[inline] + fn into_is_ok(self) -> bool { self.is_ok() } +} diff --git a/src/core/utils/bool.rs b/src/core/utils/bool.rs new file mode 100644 index 00000000..d7ce78fe --- /dev/null +++ b/src/core/utils/bool.rs @@ -0,0 +1,16 @@ +//! Trait BoolExt + +/// Boolean extensions and chain.starters +pub trait BoolExt { + fn or T>(self, f: F) -> Option; + + fn or_some(self, t: T) -> Option; +} + +impl BoolExt for bool { + #[inline] + fn or T>(self, f: F) -> Option { (!self).then(f) } + + #[inline] + fn or_some(self, t: T) -> Option { (!self).then_some(t) } +} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs new file mode 100644 index 00000000..6d45b656 --- /dev/null +++ b/src/core/utils/future/mod.rs @@ -0,0 +1,3 @@ +mod try_ext_ext; + +pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs new file mode 100644 index 00000000..e444ad94 --- /dev/null +++ b/src/core/utils/future/try_ext_ext.rs @@ -0,0 +1,48 @@ +//! Extended external extensions to futures::TryFutureExt + +use futures::{future::MapOkOrElse, TryFuture, TryFutureExt}; + +/// This interface is not necessarily complete; feel free to add as-needed. 
+pub trait TryExtExt +where + Self: TryFuture + Send, +{ + fn map_ok_or( + self, default: U, f: F, + ) -> MapOkOrElse U, impl FnOnce(Self::Error) -> U> + where + F: FnOnce(Self::Ok) -> U, + Self: Send + Sized; + + fn ok( + self, + ) -> MapOkOrElse Option, impl FnOnce(Self::Error) -> Option> + where + Self: Sized; +} + +impl TryExtExt for Fut +where + Fut: TryFuture + Send, +{ + #[inline] + fn map_ok_or( + self, default: U, f: F, + ) -> MapOkOrElse U, impl FnOnce(Self::Error) -> U> + where + F: FnOnce(Self::Ok) -> U, + Self: Send + Sized, + { + self.map_ok_or_else(|_| default, f) + } + + #[inline] + fn ok( + self, + ) -> MapOkOrElse Option, impl FnOnce(Self::Error) -> Option> + where + Self: Sized, + { + self.map_ok_or(None, Some) + } +} diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index fef83395..c34691d2 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -1,7 +1,9 @@ +pub mod bool; pub mod bytes; pub mod content_disposition; pub mod debug; pub mod defer; +pub mod future; pub mod hash; pub mod html; pub mod json; @@ -19,15 +21,17 @@ pub use ::conduit_macros::implement; pub use ::ctor::{ctor, dtor}; pub use self::{ + bool::BoolExt, bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, debug::slice_truncated as debug_slice_truncated, + future::TryExtExt as TryFutureExtExt, hash::calculate_hash, html::Escape as HtmlEscape, json::{deserialize_from_str, to_canonical_object}, math::clamp, mutex_map::{Guard as MutexMapGuard, MutexMap}, rand::string as random_string, - stream::{IterStream, ReadyExt, TryReadyExt}, + stream::{IterStream, ReadyExt, Tools as StreamTools, TryReadyExt}, string::{str_from_bytes, string_from_bytes}, sys::available_parallelism, time::now_millis as millis_since_unix_epoch, diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 781bd522..1111915b 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -3,6 +3,7 @@ mod expect; mod ignore; mod iter_stream; mod ready; 
+mod tools; mod try_ready; pub use cloned::Cloned; @@ -10,4 +11,5 @@ pub use expect::TryExpect; pub use ignore::TryIgnore; pub use iter_stream::IterStream; pub use ready::ReadyExt; +pub use tools::Tools; pub use try_ready::TryReadyExt; diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index 13f730a7..da5aec5a 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -2,7 +2,7 @@ use futures::{ future::{ready, Ready}, - stream::{Any, Filter, FilterMap, Fold, ForEach, SkipWhile, Stream, StreamExt, TakeWhile}, + stream::{Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile}, }; /// Synchronous combinators to augment futures::StreamExt. Most Stream @@ -11,98 +11,130 @@ use futures::{ /// convenience to reduce boilerplate by de-cluttering non-async predicates. /// /// This interface is not necessarily complete; feel free to add as-needed. -pub trait ReadyExt +pub trait ReadyExt where - S: Stream + Send + ?Sized, - Self: Stream + Send + Sized, + Self: Stream + Send + Sized, { - fn ready_any(self, f: F) -> Any, impl FnMut(S::Item) -> Ready> + fn ready_any(self, f: F) -> Any, impl FnMut(Item) -> Ready> where - F: Fn(S::Item) -> bool; + F: Fn(Item) -> bool; - fn ready_filter<'a, F>(self, f: F) -> Filter, impl FnMut(&S::Item) -> Ready + 'a> + fn ready_filter<'a, F>(self, f: F) -> Filter, impl FnMut(&Item) -> Ready + 'a> where - F: Fn(&S::Item) -> bool + 'a; + F: Fn(&Item) -> bool + 'a; - fn ready_filter_map(self, f: F) -> FilterMap>, impl FnMut(S::Item) -> Ready>> + fn ready_filter_map(self, f: F) -> FilterMap>, impl FnMut(Item) -> Ready>> where - F: Fn(S::Item) -> Option; + F: Fn(Item) -> Option; - fn ready_fold(self, init: T, f: F) -> Fold, T, impl FnMut(T, S::Item) -> Ready> + fn ready_fold(self, init: T, f: F) -> Fold, T, impl FnMut(T, Item) -> Ready> where - F: Fn(T, S::Item) -> T; + F: Fn(T, Item) -> T; - fn ready_for_each(self, f: F) -> ForEach, impl FnMut(S::Item) -> Ready<()>> + fn 
ready_for_each(self, f: F) -> ForEach, impl FnMut(Item) -> Ready<()>> where - F: FnMut(S::Item); + F: FnMut(Item); - fn ready_take_while<'a, F>(self, f: F) -> TakeWhile, impl FnMut(&S::Item) -> Ready + 'a> + fn ready_take_while<'a, F>(self, f: F) -> TakeWhile, impl FnMut(&Item) -> Ready + 'a> where - F: Fn(&S::Item) -> bool + 'a; + F: Fn(&Item) -> bool + 'a; - fn ready_skip_while<'a, F>(self, f: F) -> SkipWhile, impl FnMut(&S::Item) -> Ready + 'a> + fn ready_scan( + self, init: T, f: F, + ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> where - F: Fn(&S::Item) -> bool + 'a; + F: Fn(&mut T, Item) -> Option; + + fn ready_scan_each( + self, init: T, f: F, + ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> + where + F: Fn(&mut T, &Item); + + fn ready_skip_while<'a, F>(self, f: F) -> SkipWhile, impl FnMut(&Item) -> Ready + 'a> + where + F: Fn(&Item) -> bool + 'a; } -impl ReadyExt for S +impl ReadyExt for S where - S: Stream + Send + ?Sized, - Self: Stream + Send + Sized, + S: Stream + Send + Sized, { #[inline] - fn ready_any(self, f: F) -> Any, impl FnMut(S::Item) -> Ready> + fn ready_any(self, f: F) -> Any, impl FnMut(Item) -> Ready> where - F: Fn(S::Item) -> bool, + F: Fn(Item) -> bool, { self.any(move |t| ready(f(t))) } #[inline] - fn ready_filter<'a, F>(self, f: F) -> Filter, impl FnMut(&S::Item) -> Ready + 'a> + fn ready_filter<'a, F>(self, f: F) -> Filter, impl FnMut(&Item) -> Ready + 'a> where - F: Fn(&S::Item) -> bool + 'a, + F: Fn(&Item) -> bool + 'a, { self.filter(move |t| ready(f(t))) } #[inline] - fn ready_filter_map(self, f: F) -> FilterMap>, impl FnMut(S::Item) -> Ready>> + fn ready_filter_map(self, f: F) -> FilterMap>, impl FnMut(Item) -> Ready>> where - F: Fn(S::Item) -> Option, + F: Fn(Item) -> Option, { self.filter_map(move |t| ready(f(t))) } #[inline] - fn ready_fold(self, init: T, f: F) -> Fold, T, impl FnMut(T, S::Item) -> Ready> + fn ready_fold(self, init: T, f: F) -> Fold, T, impl FnMut(T, Item) -> Ready> where - F: Fn(T, S::Item) -> T, + F: Fn(T, 
Item) -> T, { self.fold(init, move |a, t| ready(f(a, t))) } #[inline] #[allow(clippy::unit_arg)] - fn ready_for_each(self, mut f: F) -> ForEach, impl FnMut(S::Item) -> Ready<()>> + fn ready_for_each(self, mut f: F) -> ForEach, impl FnMut(Item) -> Ready<()>> where - F: FnMut(S::Item), + F: FnMut(Item), { self.for_each(move |t| ready(f(t))) } #[inline] - fn ready_take_while<'a, F>(self, f: F) -> TakeWhile, impl FnMut(&S::Item) -> Ready + 'a> + fn ready_take_while<'a, F>(self, f: F) -> TakeWhile, impl FnMut(&Item) -> Ready + 'a> where - F: Fn(&S::Item) -> bool + 'a, + F: Fn(&Item) -> bool + 'a, { self.take_while(move |t| ready(f(t))) } #[inline] - fn ready_skip_while<'a, F>(self, f: F) -> SkipWhile, impl FnMut(&S::Item) -> Ready + 'a> + fn ready_scan( + self, init: T, f: F, + ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> where - F: Fn(&S::Item) -> bool + 'a, + F: Fn(&mut T, Item) -> Option, + { + self.scan(init, move |s, t| ready(f(s, t))) + } + + fn ready_scan_each( + self, init: T, f: F, + ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> + where + F: Fn(&mut T, &Item), + { + self.ready_scan(init, move |s, t| { + f(s, &t); + Some(t) + }) + } + + #[inline] + fn ready_skip_while<'a, F>(self, f: F) -> SkipWhile, impl FnMut(&Item) -> Ready + 'a> + where + F: Fn(&Item) -> bool + 'a, { self.skip_while(move |t| ready(f(t))) } diff --git a/src/core/utils/stream/tools.rs b/src/core/utils/stream/tools.rs new file mode 100644 index 00000000..cc6b7ca9 --- /dev/null +++ b/src/core/utils/stream/tools.rs @@ -0,0 +1,80 @@ +//! StreamTools for futures::Stream + +use std::{collections::HashMap, hash::Hash}; + +use futures::{Future, Stream, StreamExt}; + +use super::ReadyExt; +use crate::expected; + +/// StreamTools +/// +/// This interface is not necessarily complete; feel free to add as-needed. 
+pub trait Tools +where + Self: Stream + Send + Sized, + ::Item: Send, +{ + fn counts(self) -> impl Future> + Send + where + ::Item: Eq + Hash; + + fn counts_by(self, f: F) -> impl Future> + Send + where + F: Fn(Item) -> K + Send, + K: Eq + Hash + Send; + + fn counts_by_with_cap(self, f: F) -> impl Future> + Send + where + F: Fn(Item) -> K + Send, + K: Eq + Hash + Send; + + fn counts_with_cap(self) -> impl Future> + Send + where + ::Item: Eq + Hash; +} + +impl Tools for S +where + S: Stream + Send + Sized, + ::Item: Send, +{ + #[inline] + fn counts(self) -> impl Future> + Send + where + ::Item: Eq + Hash, + { + self.counts_with_cap::<0>() + } + + #[inline] + fn counts_by(self, f: F) -> impl Future> + Send + where + F: Fn(Item) -> K + Send, + K: Eq + Hash + Send, + { + self.counts_by_with_cap::<0, K, F>(f) + } + + #[inline] + fn counts_by_with_cap(self, f: F) -> impl Future> + Send + where + F: Fn(Item) -> K + Send, + K: Eq + Hash + Send, + { + self.map(f).counts_with_cap::() + } + + #[inline] + fn counts_with_cap(self) -> impl Future> + Send + where + ::Item: Eq + Hash, + { + self.ready_fold(HashMap::with_capacity(CAP), |mut counts, item| { + let entry = counts.entry(item).or_default(); + let value = *entry; + *entry = expected!(value + 1); + counts + }) + } +} diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index eedff861..25388084 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -4,7 +4,7 @@ use std::{collections::HashSet, sync::Arc}; use conduit::{ err, - utils::{stream::TryIgnore, ReadyExt}, + utils::{stream::TryIgnore, ReadyExt, StreamTools}, warn, Result, }; use data::Data; @@ -495,11 +495,13 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub fn servers_invite_via<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Ignore, Vec<&'a ServerName>); + self.db .roomid_inviteviaservers .stream_prefix_raw(room_id) 
.ignore_err() - .map(|(_, servers): (Ignore, Vec<&ServerName>)| &**(servers.last().expect("at least one servername"))) + .map(|(_, servers): KeyVal<'_>| *servers.last().expect("at least one server")) } /// Gets up to three servers that are likely to be in the room in the @@ -525,16 +527,14 @@ impl Service { let mut servers: Vec = self .room_members(room_id) - .collect::>() - .await - .iter() .counts_by(|user| user.server_name().to_owned()) - .iter() + .await + .into_iter() .sorted_by_key(|(_, users)| *users) - .map(|(server, _)| server.to_owned()) + .map(|(server, _)| server) .rev() .take(3) - .collect_vec(); + .collect(); if let Some(server) = most_powerful_user_server { servers.insert(0, server); From 96fcf7f94d65e93fdcb23acd3f52945813dbc18e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Oct 2024 04:20:31 +0000 Subject: [PATCH 0021/1248] add rocksdb secondary; fix read_only mode. Signed-off-by: Jason Volk --- src/core/config/mod.rs | 3 +++ src/database/database.rs | 8 ++++++++ src/database/engine.rs | 11 +++++++++-- src/service/emergency/mod.rs | 4 ++++ src/service/globals/mod.rs | 3 +++ 5 files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index d2d583a8..d8e1c7d9 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -236,6 +236,8 @@ pub struct Config { #[serde(default)] pub rocksdb_read_only: bool, #[serde(default)] + pub rocksdb_secondary: bool, + #[serde(default)] pub rocksdb_compaction_prio_idle: bool, #[serde(default = "true_fn")] pub rocksdb_compaction_ioprio_idle: bool, @@ -752,6 +754,7 @@ impl fmt::Display for Config { line("RocksDB Recovery Mode", &self.rocksdb_recovery_mode.to_string()); line("RocksDB Repair Mode", &self.rocksdb_repair.to_string()); line("RocksDB Read-only Mode", &self.rocksdb_read_only.to_string()); + line("RocksDB Secondary Mode", &self.rocksdb_secondary.to_string()); line( "RocksDB Compaction Idle Priority", &self.rocksdb_compaction_prio_idle.to_string(), 
diff --git a/src/database/database.rs b/src/database/database.rs index ac6f62e9..4c29c840 100644 --- a/src/database/database.rs +++ b/src/database/database.rs @@ -38,6 +38,14 @@ impl Database { #[inline] pub fn iter_maps(&self) -> impl Iterator + Send + '_ { self.map.iter() } + + #[inline] + #[must_use] + pub fn is_read_only(&self) -> bool { self.db.secondary || self.db.read_only } + + #[inline] + #[must_use] + pub fn is_secondary(&self) -> bool { self.db.secondary } } impl Index<&str> for Database { diff --git a/src/database/engine.rs b/src/database/engine.rs index edf077fc..99d971ed 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -28,6 +28,8 @@ pub struct Engine { cfs: Mutex>, pub(crate) db: Db, corks: AtomicU32, + pub(super) read_only: bool, + pub(super) secondary: bool, } pub(crate) type Db = DBWithThreadMode; @@ -80,10 +82,13 @@ impl Engine { .collect::>(); debug!("Opening database..."); + let path = &config.database_path; let res = if config.rocksdb_read_only { - Db::open_cf_for_read_only(&db_opts, &config.database_path, cfs.clone(), false) + Db::open_cf_descriptors_read_only(&db_opts, path, cfds, false) + } else if config.rocksdb_secondary { + Db::open_cf_descriptors_as_secondary(&db_opts, path, path, cfds) } else { - Db::open_cf_descriptors(&db_opts, &config.database_path, cfds) + Db::open_cf_descriptors(&db_opts, path, cfds) }; let db = res.or_else(or_else)?; @@ -103,6 +108,8 @@ impl Engine { cfs: Mutex::new(cfs), db, corks: AtomicU32::new(0), + read_only: config.rocksdb_read_only, + secondary: config.rocksdb_secondary, })) } diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 98020bc2..c99a0891 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -32,6 +32,10 @@ impl crate::Service for Service { } async fn worker(self: Arc) -> Result<()> { + if self.services.globals.is_read_only() { + return Ok(()); + } + self.set_emergency_access() .await .inspect_err(|e| error!("Could not set the 
configured emergency password for the conduit user: {e}"))?; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f777901f..f24e8a27 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -329,4 +329,7 @@ impl Service { #[inline] pub fn server_is_ours(&self, server_name: &ServerName) -> bool { server_name == self.config.server_name } + + #[inline] + pub fn is_read_only(&self) -> bool { self.db.db.is_read_only() } } From 26dcab272d04eff968997a94f90636df389ffda6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Oct 2024 02:47:39 +0000 Subject: [PATCH 0022/1248] various cleanup tweaks/fixes Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/admin/query/room_alias.rs | 10 ++++- src/admin/room/alias.rs | 4 +- src/admin/room/directory.rs | 8 ++-- src/admin/room/info.rs | 10 ++--- src/admin/room/moderation.rs | 4 +- src/api/client/keys.rs | 2 +- src/api/client/membership.rs | 15 ++++--- src/api/client/search.rs | 4 +- src/api/client/sync.rs | 67 +++++++++++++--------------- src/api/client/user_directory.rs | 11 ++--- src/core/Cargo.toml | 1 + src/core/error/mod.rs | 2 + src/service/appservice/data.rs | 4 +- src/service/globals/migrations.rs | 11 ++--- src/service/rooms/state_cache/mod.rs | 28 +++--------- src/service/rooms/timeline/mod.rs | 1 - src/service/users/mod.rs | 2 + 18 files changed, 86 insertions(+), 99 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 043d9704..065aa1e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -709,6 +709,7 @@ dependencies = [ "serde", "serde_json", "serde_regex", + "serde_yaml", "thiserror", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs index 05fac42c..382e4a78 100644 --- a/src/admin/query/room_alias.rs +++ b/src/admin/query/room_alias.rs @@ -43,8 +43,13 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) room_id, } => { let timer = tokio::time::Instant::now(); - let results = 
services.rooms.alias.local_aliases_for_room(&room_id); - let aliases: Vec<_> = results.collect().await; + let aliases: Vec<_> = services + .rooms + .alias + .local_aliases_for_room(&room_id) + .map(ToOwned::to_owned) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -57,6 +62,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) .rooms .alias .all_local_aliases() + .map(|(room_id, alias)| (room_id.to_owned(), alias.to_owned())) .collect::>() .await; let query_time = timer.elapsed(); diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 34b6c42e..1ccde47d 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -119,12 +119,12 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> room_id, } => { if let Some(room_id) = room_id { - let aliases = services + let aliases: Vec = services .rooms .alias .local_aliases_for_room(&room_id) .map(Into::into) - .collect::>() + .collect() .await; let plain_list = aliases.iter().fold(String::new(), |mut output, alias| { diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 7ccdea6f..1080356a 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -47,22 +47,22 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_> } => { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); - let mut rooms = services + let mut rooms: Vec<_> = services .rooms .directory .public_rooms() .then(|room_id| get_room_info(services, room_id)) - .collect::>() + .collect() .await; rooms.sort_by_key(|r| r.1); rooms.reverse(); - let rooms = rooms + let rooms: Vec<_> = rooms .into_iter() .skip(page.saturating_sub(1).saturating_mul(PAGE_SIZE)) .take(PAGE_SIZE) - .collect::>(); + .collect(); if rooms.is_empty() { return Ok(RoomMessageEventContent::text_plain("No more rooms.")); diff 
--git a/src/admin/room/info.rs b/src/admin/room/info.rs index fc0619e3..13a74a9d 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -42,14 +42,12 @@ async fn list_joined_members(&self, room_id: Box, local_only: bool) -> R .state_cache .room_members(&room_id) .ready_filter(|user_id| { - if local_only { - self.services.globals.user_is_local(user_id) - } else { - true - } + local_only + .then(|| self.services.globals.user_is_local(user_id)) + .unwrap_or(true) }) + .map(ToOwned::to_owned) .filter_map(|user_id| async move { - let user_id = user_id.to_owned(); Some(( self.services .users diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index 9a772da4..cfc048bd 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -555,13 +555,13 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> #[admin_command] async fn list_banned_rooms(&self, no_details: bool) -> Result { - let room_ids = self + let room_ids: Vec = self .services .rooms .metadata .list_banned_rooms() .map(Into::into) - .collect::>() + .collect() .await; if room_ids.is_empty() { diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index abf2a22f..254d92cc 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -244,7 +244,7 @@ pub(crate) async fn get_key_changes_route( device_list_updates.extend( services .users - .keys_changed(room_id.as_ref(), from, Some(to)) + .keys_changed(room_id.as_str(), from, Some(to)) .map(ToOwned::to_owned) .collect::>() .await, diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 5a5d436f..6e3bc894 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -167,12 +167,12 @@ pub(crate) async fn join_room_by_id_route( .await?; // There is no body.server_name for /roomId/join - let mut servers = services + let mut servers: Vec<_> = services .rooms .state_cache .servers_invite_via(&body.room_id) .map(ToOwned::to_owned) - .collect::>() + 
.collect() .await; servers.extend( @@ -641,12 +641,13 @@ pub(crate) async fn joined_members_route( .rooms .state_cache .room_members(&body.room_id) + .map(ToOwned::to_owned) .then(|user| async move { ( - user.to_owned(), + user.clone(), RoomMember { - display_name: services.users.displayname(user).await.ok(), - avatar_url: services.users.avatar_url(user).await.ok(), + display_name: services.users.displayname(&user).await.ok(), + avatar_url: services.users.avatar_url(&user).await.ok(), }, ) }) @@ -1575,7 +1576,7 @@ pub(crate) async fn invite_helper( // Make a user leave all their joined rooms, forgets all rooms, and ignores // errors pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { - let all_rooms = services + let all_rooms: Vec<_> = services .rooms .state_cache .rooms_joined(user_id) @@ -1587,7 +1588,7 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { .rooms_invited(user_id) .map(|(r, _)| r), ) - .collect::>() + .collect() .await; for room_id in all_rooms { diff --git a/src/api/client/search.rs b/src/api/client/search.rs index 7a061d49..b073640e 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -77,14 +77,14 @@ pub(crate) async fn search_events_route( .user_can_see_state_events(sender_user, room_id) .await { - let room_state = services + let room_state: Vec<_> = services .rooms .state_accessor .room_state_full(room_id) .await? 
.values() .map(|pdu| pdu.to_state_event()) - .collect::>(); + .collect(); debug!("Room state: {:?}", room_state); diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index 53d4f3c3..adb4d8da 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -7,13 +7,14 @@ use std::{ use axum::extract::State; use conduit::{ debug, err, error, is_equal_to, + result::IntoIsOk, utils::{ math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, - IterStream, ReadyExt, + BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, warn, PduCount, }; -use futures::{pin_mut, StreamExt}; +use futures::{pin_mut, FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::client::{ error::ErrorKind, @@ -172,12 +173,12 @@ pub(crate) async fn sync_events_route( process_presence_updates(&services, &mut presence_updates, since, &sender_user).await?; } - let all_joined_rooms = services + let all_joined_rooms: Vec<_> = services .rooms .state_cache .rooms_joined(&sender_user) .map(ToOwned::to_owned) - .collect::>() + .collect() .await; // Coalesce database writes for the remainder of this scope. 
@@ -869,15 +870,13 @@ async fn load_joined_room( .rooms .state_cache .room_members(room_id) - .ready_filter(|user_id| { - // Don't send key updates from the sender to the sender - sender_user != *user_id - }) - .filter_map(|user_id| async move { - // Only send keys if the sender doesn't share an encrypted room with the target - // already - (!share_encrypted_room(services, sender_user, user_id, Some(room_id)).await) - .then_some(user_id.to_owned()) + // Don't send key updates from the sender to the sender + .ready_filter(|user_id| sender_user != *user_id) + // Only send keys if the sender doesn't share an encrypted room with the target + // already + .filter_map(|user_id| { + share_encrypted_room(services, sender_user, user_id, Some(room_id)) + .map(|res| res.or_some(user_id.to_owned())) }) .collect::>() .await, @@ -1117,13 +1116,12 @@ async fn share_encrypted_room( .user .get_shared_rooms(sender_user, user_id) .ready_filter(|&room_id| Some(room_id) != ignore_room) - .any(|other_room_id| async move { + .any(|other_room_id| { services .rooms .state_accessor .room_state_get(other_room_id, &StateEventType::RoomEncryption, "") - .await - .is_ok() + .map(Result::into_is_ok) }) .await } @@ -1178,20 +1176,20 @@ pub(crate) async fn sync_events_v4_route( .sync .update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body); - let all_joined_rooms = services + let all_joined_rooms: Vec<_> = services .rooms .state_cache .rooms_joined(&sender_user) .map(ToOwned::to_owned) - .collect::>() + .collect() .await; - let all_invited_rooms = services + let all_invited_rooms: Vec<_> = services .rooms .state_cache .rooms_invited(&sender_user) .map(|r| r.0) - .collect::>() + .collect() .await; let all_rooms = all_joined_rooms @@ -1364,15 +1362,13 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_cache .room_members(room_id) - .ready_filter(|user_id| { - // Don't send key updates from the sender to the sender - sender_user != user_id - }) - 
.filter_map(|user_id| async move { - // Only send keys if the sender doesn't share an encrypted room with the target - // already - (!share_encrypted_room(&services, sender_user, user_id, Some(room_id)).await) - .then_some(user_id.to_owned()) + // Don't send key updates from the sender to the sender + .ready_filter(|user_id| sender_user != user_id) + // Only send keys if the sender doesn't share an encrypted room with the target + // already + .filter_map(|user_id| { + share_encrypted_room(&services, sender_user, user_id, Some(room_id)) + .map(|res| res.or_some(user_id.to_owned())) }) .collect::>() .await, @@ -1650,26 +1646,25 @@ pub(crate) async fn sync_events_v4_route( .await; // Heroes - let heroes = services + let heroes: Vec<_> = services .rooms .state_cache .room_members(room_id) .ready_filter(|member| member != &sender_user) - .filter_map(|member| async move { + .filter_map(|user_id| { services .rooms .state_accessor - .get_member(room_id, member) - .await - .map(|memberevent| SlidingSyncRoomHero { - user_id: member.to_owned(), + .get_member(room_id, user_id) + .map_ok(|memberevent| SlidingSyncRoomHero { + user_id: user_id.into(), name: memberevent.displayname, avatar: memberevent.avatar_url, }) .ok() }) .take(5) - .collect::>() + .collect() .await; let name = match heroes.len().cmp(&(1_usize)) { diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 8ea7f1b8..868811a3 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,4 +1,5 @@ use axum::extract::State; +use conduit::utils::TryFutureExtExt; use futures::{pin_mut, StreamExt}; use ruma::{ api::client::user_directory::search_users, @@ -56,16 +57,12 @@ pub(crate) async fn search_users_route( .rooms .state_cache .rooms_joined(&user.user_id) - .any(|room| async move { + .any(|room| { services .rooms .state_accessor - .room_state_get(room, &StateEventType::RoomJoinRules, "") - .await - .map_or(false, |event| { - 
serde_json::from_str(event.content.get()) - .map_or(false, |r: RoomJoinRulesEventContent| r.join_rule == JoinRule::Public) - }) + .room_state_get_content::(room, &StateEventType::RoomJoinRules, "") + .map_ok_or(false, |content| content.join_rule == JoinRule::Public) }) .await; diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index cb957bc9..4fe413e9 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -83,6 +83,7 @@ ruma.workspace = true sanitize-filename.workspace = true serde_json.workspace = true serde_regex.workspace = true +serde_yaml.workspace = true serde.workspace = true thiserror.workspace = true tikv-jemallocator.optional = true diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 79e3d5b4..ad7f9f3c 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -75,6 +75,8 @@ pub enum Error { TracingFilter(#[from] tracing_subscriber::filter::ParseError), #[error("Tracing reload error: {0}")] TracingReload(#[from] tracing_subscriber::reload::Error), + #[error(transparent)] + Yaml(#[from] serde_yaml::Error), // ruma/conduwuit #[error("Arithmetic operation failed: {0}")] diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index 4eb9d09e..8fb7d958 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use conduit::{err, utils::stream::TryIgnore, Result}; -use database::{Database, Deserialized, Map}; +use database::{Database, Map}; use futures::Stream; use ruma::api::appservice::Registration; @@ -40,7 +40,7 @@ impl Data { self.id_appserviceregistrations .get(id) .await - .deserialized() + .and_then(|ref bytes| serde_yaml::from_slice(bytes).map_err(Into::into)) .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) } diff --git a/src/service/globals/migrations.rs b/src/service/globals/migrations.rs index 469159fc..fc6e477b 100644 --- a/src/service/globals/migrations.rs +++ b/src/service/globals/migrations.rs @@ -9,7 +9,7 @@ 
use itertools::Itertools; use ruma::{ events::{push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType}, push::Ruleset, - UserId, + OwnedUserId, UserId, }; use crate::{media, Services}; @@ -385,11 +385,12 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) for room_id in &room_ids { debug_info!("Fixing room {room_id}"); - let users_in_room = services + let users_in_room: Vec = services .rooms .state_cache .room_members(room_id) - .collect::>() + .map(ToOwned::to_owned) + .collect() .await; let joined_members = users_in_room @@ -418,12 +419,12 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) .collect::>() .await; - for user_id in joined_members { + for user_id in &joined_members { debug_info!("User is joined, marking as joined"); services.rooms.state_cache.mark_as_joined(user_id, room_id); } - for user_id in non_joined_members { + for user_id in &non_joined_members { debug_info!("User is left or banned, marking as left"); services.rooms.state_cache.mark_as_left(user_id, room_id); } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 25388084..dbe38561 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -648,35 +648,19 @@ impl Service { self.db.userroomid_leftstate.remove(&userroom_id); self.db.roomuserid_leftcount.remove(&roomuser_id); - if let Some(servers) = invite_via { - let mut prev_servers = self - .servers_invite_via(room_id) - .map(ToOwned::to_owned) - .collect::>() - .await; - #[allow(clippy::redundant_clone)] // this is a necessary clone? 
- prev_servers.append(servers.clone().as_mut()); - let servers = prev_servers.iter().rev().unique().rev().collect_vec(); - - let servers = servers - .iter() - .map(|server| server.as_bytes()) - .collect_vec() - .join(&[0xFF][..]); - - self.db - .roomid_inviteviaservers - .insert(room_id.as_bytes(), &servers); + if let Some(servers) = invite_via.as_deref() { + self.add_servers_invite_via(room_id, servers).await; } } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self, servers), level = "debug")] pub async fn add_servers_invite_via(&self, room_id: &RoomId, servers: &[OwnedServerName]) { - let mut prev_servers = self + let mut prev_servers: Vec<_> = self .servers_invite_via(room_id) .map(ToOwned::to_owned) - .collect::>() + .collect() .await; + prev_servers.extend(servers.to_owned()); prev_servers.sort_unstable(); prev_servers.dedup(); diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 5360d2c9..6a26a1d5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -408,7 +408,6 @@ impl Service { .get(None, user, GlobalAccountDataEventType::PushRules.to_string().into()) .await .and_then(|event| serde_json::from_str::(event.get()).map_err(Into::into)) - .map_err(|e| err!(Database(warn!(?user, ?e, "Invalid push rules event in db for user")))) .map_or_else(|_| Ruleset::server_default(user), |ev: PushRulesEvent| ev.content.global); let mut highlight = false; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index eb77ef35..438c220b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -623,7 +623,9 @@ impl Service { pub async fn mark_device_key_update(&self, user_id: &UserId) { let count = self.services.globals.next_count().unwrap().to_be_bytes(); + let rooms_joined = self.services.state_cache.rooms_joined(user_id); + pin_mut!(rooms_joined); while let Some(room_id) = rooms_joined.next().await { // Don't send key updates to unencrypted 
rooms From ab06701ed08862bef04bf06800dbf021bd317497 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Oct 2024 22:37:01 +0000 Subject: [PATCH 0023/1248] refactor multi-get to handle result type Signed-off-by: Jason Volk --- src/database/keyval.rs | 8 ------ src/database/map/get.rs | 45 ++++++++++++++++------------------ src/service/rooms/short/mod.rs | 39 +++++++++-------------------- 3 files changed, 33 insertions(+), 59 deletions(-) diff --git a/src/database/keyval.rs b/src/database/keyval.rs index c9d25977..a288f184 100644 --- a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -3,10 +3,6 @@ use serde::Deserialize; use crate::de; -pub(crate) type OwnedKeyVal = (Vec, Vec); -pub(crate) type OwnedKey = Vec; -pub(crate) type OwnedVal = Vec; - pub type KeyVal<'a, K = &'a Slice, V = &'a Slice> = (Key<'a, K>, Val<'a, V>); pub type Key<'a, T = &'a Slice> = T; pub type Val<'a, T = &'a Slice> = T; @@ -72,10 +68,6 @@ where de::from_slice::(val) } -#[inline] -#[must_use] -pub fn to_owned(kv: KeyVal<'_>) -> OwnedKeyVal { (kv.0.to_owned(), kv.1.to_owned()) } - #[inline] pub fn key(kv: KeyVal<'_, K, V>) -> Key<'_, K> { kv.0 } diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 71489402..72382e36 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -3,14 +3,12 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write}; use arrayvec::ArrayVec; use conduit::{err, implement, Result}; use futures::future::ready; +use rocksdb::DBPinnableSlice; use serde::Serialize; -use crate::{ - keyval::{OwnedKey, OwnedVal}, - ser, - util::{map_err, or_else}, - Handle, -}; +use crate::{ser, util, Handle}; + +type RocksdbResult<'a> = Result>, rocksdb::Error>; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. 
The key is serialized into an allocated buffer to perform @@ -68,17 +66,17 @@ pub fn get_blocking(&self, key: &K) -> Result> where K: AsRef<[u8]> + ?Sized + Debug, { - self.db + let res = self .db - .get_pinned_cf_opt(&self.cf(), key, &self.read_options) - .map_err(map_err)? - .map(Handle::from) - .ok_or(err!(Request(NotFound("Not found in database")))) + .db + .get_pinned_cf_opt(&self.cf(), key, &self.read_options); + + into_result_handle(res) } #[implement(super::Map)] #[tracing::instrument(skip(self, keys), fields(%self), level = "trace")] -pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> Vec> +pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> Vec>> where I: Iterator + ExactSizeIterator + Send + Debug, K: AsRef<[u8]> + Sized + Debug + 'a, @@ -87,19 +85,18 @@ where // comparator**. const SORTED: bool = false; - let mut ret: Vec> = Vec::with_capacity(keys.len()); let read_options = &self.read_options; - for res in self - .db + self.db .db .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) - { - match res { - Ok(Some(res)) => ret.push(Some((*res).to_vec())), - Ok(None) => ret.push(None), - Err(e) => or_else(e).expect("database multiget error"), - } - } - - ret + .into_iter() + .map(into_result_handle) + .collect() +} + +fn into_result_handle(result: RocksdbResult<'_>) -> Result> { + result + .map_err(util::map_err)? 
+ .map(Handle::from) + .ok_or(err!(Request(NotFound("Not found in database")))) } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 66da3948..825ee109 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{err, implement, utils, Error, Result}; +use conduit::{err, implement, utils, Result}; use database::{Deserialized, Map}; use ruma::{events::StateEventType, EventId, RoomId}; @@ -69,41 +69,26 @@ pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { #[implement(Service)] pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { - let mut ret: Vec = Vec::with_capacity(event_ids.len()); - let keys = event_ids - .iter() - .map(|id| id.as_bytes()) - .collect::>(); - - for (i, short) in self - .db + self.db .eventid_shorteventid - .get_batch_blocking(keys.iter()) - .iter() + .get_batch_blocking(event_ids.iter()) + .into_iter() .enumerate() - { - match short { - Some(short) => ret.push( - utils::u64_from_bytes(short) - .map_err(|_| Error::bad_database("Invalid shorteventid in db.")) - .unwrap(), - ), - None => { + .map(|(i, result)| match result { + Ok(ref short) => utils::u64_from_u8(short), + Err(_) => { let short = self.services.globals.next_count().unwrap(); self.db .eventid_shorteventid - .insert(keys[i], &short.to_be_bytes()); + .insert(event_ids[i], &short.to_be_bytes()); self.db .shorteventid_eventid - .insert(&short.to_be_bytes(), keys[i]); + .insert(&short.to_be_bytes(), event_ids[i]); - debug_assert!(ret.len() == i, "position of result must match input"); - ret.push(short); + short }, - } - } - - ret + }) + .collect() } #[implement(Service)] From 36677bb9828038294d06f2292eef755139216c40 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Oct 2024 23:19:47 +0000 Subject: [PATCH 0024/1248] optimize auth_chain short_id to event_id translation step Signed-off-by: Jason Volk --- 
src/service/rooms/auth_chain/mod.rs | 30 ++++++++++++++++++-------- src/service/rooms/event_handler/mod.rs | 16 +++++++------- src/service/rooms/short/mod.rs | 17 +++++++++++++++ 3 files changed, 46 insertions(+), 17 deletions(-) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index eae13b74..f3861ca3 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use conduit::{debug, debug_error, trace, utils::IterStream, validated, warn, Err, Result}; -use futures::{FutureExt, Stream, StreamExt}; +use futures::Stream; use ruma::{EventId, RoomId}; use self::data::Data; @@ -40,15 +40,27 @@ impl Service { pub async fn event_ids_iter( &self, room_id: &RoomId, starting_events: &[&EventId], ) -> Result> + Send + '_> { - let chain = self.get_auth_chain(room_id, starting_events).await?; - let iter = chain.into_iter().stream().filter_map(|sid| { - self.services - .short - .get_eventid_from_short(sid) - .map(Result::ok) - }); + let stream = self + .get_event_ids(room_id, starting_events) + .await? 
+ .into_iter() + .stream(); - Ok(iter) + Ok(stream) + } + + pub async fn get_event_ids(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result>> { + let chain = self.get_auth_chain(room_id, starting_events).await?; + let event_ids = self + .services + .short + .multi_get_eventid_from_short(&chain) + .await + .into_iter() + .filter_map(Result::ok) + .collect(); + + Ok(event_ids) } #[tracing::instrument(skip_all, name = "auth_chain")] diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 57b87706..4708a86c 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -797,13 +797,13 @@ impl Service { for state in &fork_states { let starting_events: Vec<&EventId> = state.values().map(Borrow::borrow).collect(); - let auth_chain = self + let auth_chain: HashSet> = self .services .auth_chain - .event_ids_iter(room_id, &starting_events) + .get_event_ids(room_id, &starting_events) .await? - .collect::>>() - .await; + .into_iter() + .collect(); auth_chain_sets.push(auth_chain); } @@ -983,13 +983,13 @@ impl Service { starting_events.push(id.borrow()); } - let auth_chain = self + let auth_chain: HashSet> = self .services .auth_chain - .event_ids_iter(room_id, &starting_events) + .get_event_ids(room_id, &starting_events) .await? 
- .collect() - .await; + .into_iter() + .collect(); auth_chain_sets.push(auth_chain); fork_states.push(state); diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 825ee109..20082da2 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -141,6 +141,23 @@ pub async fn get_eventid_from_short(&self, shorteventid: u64) -> Result Vec>> { + const BUFSIZE: usize = size_of::(); + + let keys: Vec<[u8; BUFSIZE]> = shorteventid + .iter() + .map(|short| short.to_be_bytes()) + .collect(); + + self.db + .shorteventid_eventid + .get_batch_blocking(keys.iter()) + .into_iter() + .map(Deserialized::deserialized) + .collect() +} + #[implement(Service)] pub async fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { const BUFSIZE: usize = size_of::(); From 83119526291f25a78d67a15f638eeaedc0b10b2d Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 28 Sep 2024 18:30:40 -0400 Subject: [PATCH 0025/1248] bump ruma, cargo.lock, and deps Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- src/api/client/capabilities.rs | 8 +++++++- 3 files changed, 21 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 065aa1e4..4d40c458 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2975,7 +2975,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "assign", "js_int", @@ -2997,7 +2997,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "js_int", "ruma-common", @@ -3009,7 +3009,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "as_variant", "assign", @@ -3032,7 +3032,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "as_variant", "base64 0.22.1", @@ -3062,7 +3062,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3086,7 +3086,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "bytes", "http", @@ -3104,7 +3104,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "js_int", "thiserror", @@ -3113,7 +3113,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "js_int", "ruma-common", @@ -3123,7 +3123,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "cfg-if", "once_cell", @@ -3139,7 +3139,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "js_int", "ruma-common", @@ -3151,7 +3151,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "headers", "http", @@ -3164,7 +3164,7 
@@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3180,7 +3180,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7db44989d68406393270d3a91815597385d3acb#e7db44989d68406393270d3a91815597385d3acb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" dependencies = [ "futures-util", "itertools 0.12.1", diff --git a/Cargo.toml b/Cargo.toml index 3bfb3bc8..28e280cf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -315,7 +315,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "e7db44989d68406393270d3a91815597385d3acb" +rev = "ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" features = [ "compat", "rand", diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 83e1dc7e..89157e47 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -3,7 +3,8 @@ use std::collections::BTreeMap; use axum::extract::State; use ruma::{ api::client::discovery::get_capabilities::{ - self, Capabilities, RoomVersionStability, RoomVersionsCapability, ThirdPartyIdChangesCapability, + self, Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability, + ThirdPartyIdChangesCapability, }, RoomVersionId, }; @@ -43,6 +44,11 @@ pub(crate) async fn get_capabilities_route( enabled: false, }; + // we dont support generating tokens yet + capabilities.get_login_token = GetLoginTokenCapability { + enabled: false, + }; + // MSC4133 
capability capabilities .set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true})) From fafe32089980eefc5bda1cb8991a0be762c30e6b Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 28 Sep 2024 19:38:35 -0400 Subject: [PATCH 0026/1248] send EDUs to appservices if in events to_device is not supported yet Signed-off-by: strawberry --- src/service/sending/sender.rs | 39 ++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 4db9922a..3a401995 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -13,9 +13,14 @@ use conduit::{ }; use futures::{future::BoxFuture, pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; use ruma::{ - api::federation::transactions::{ - edu::{DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap}, - send_transaction_message, + api::{ + appservice::event::push_events::v1::Edu as RumaEdu, + federation::transactions::{ + edu::{ + DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap, + }, + send_transaction_message, + }, }, device_id, events::{push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, @@ -441,7 +446,18 @@ impl Service { return Err((dest.clone(), err!(Database(warn!(?id, "Missing appservice registration"))))); }; - let mut pdu_jsons = Vec::new(); + let mut pdu_jsons = Vec::with_capacity( + events + .iter() + .filter(|event| matches!(event, SendingEvent::Pdu(_))) + .count(), + ); + let mut edu_jsons: Vec = Vec::with_capacity( + events + .iter() + .filter(|event| matches!(event, SendingEvent::Edu(_))) + .count(), + ); for event in &events { match event { SendingEvent::Pdu(pdu_id) => { @@ -449,10 +465,12 @@ impl Service { pdu_jsons.push(pdu.to_room_event()); } }, - SendingEvent::Edu(_) | SendingEvent::Flush => { - // Appservices don't need 
EDUs (?) and flush only; - // no new content + SendingEvent::Edu(edu) => { + if let Ok(edu) = serde_json::from_slice(edu) { + edu_jsons.push(edu); + } }, + SendingEvent::Flush => {}, // flush only; no new content } } @@ -466,7 +484,8 @@ impl Service { .collect::>(), )); - //debug_assert!(!pdu_jsons.is_empty(), "sending empty transaction"); + //debug_assert!(pdu_jsons.len() + edu_jsons.len() > 0, "sending empty + // transaction"); let client = &self.services.client.appservice; match appservice::send_request( client, @@ -474,8 +493,8 @@ impl Service { ruma::api::appservice::event::push_events::v1::Request { events: pdu_jsons, txn_id: txn_id.into(), - ephemeral: Vec::new(), - to_device: Vec::new(), + ephemeral: edu_jsons, + to_device: Vec::new(), // TODO }, ) .await From 890ee84f713c3f4905247934d5bfc277f61959cd Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 28 Sep 2024 21:44:38 -0400 Subject: [PATCH 0027/1248] dont send read receipts and typing indicators from ignored users Signed-off-by: strawberry --- src/api/client/sync.rs | 25 ++++++++++++++-- src/service/rooms/state_cache/mod.rs | 26 +--------------- src/service/rooms/typing/mod.rs | 45 +++++++++++++++++++++------- src/service/users/mod.rs | 32 ++++++++++++++++++-- 4 files changed, 88 insertions(+), 40 deletions(-) diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index adb4d8da..1383f902 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -1011,15 +1011,27 @@ async fn load_joined_room( .rooms .read_receipt .readreceipts_since(room_id, since) - .map(|(_, _, v)| v) + .filter_map(|(read_user, _, v)| async move { + (!services + .users + .user_is_ignored(&read_user, sender_user) + .await) + .then_some(v) + }) .collect() .await; if services.rooms.typing.last_typing_update(room_id).await? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&services.rooms.typing.typings_all(room_id).await?) 
- .expect("event is valid, we just created it"), + &serde_json::to_string( + &services + .rooms + .typing + .typings_all(room_id, sender_user) + .await?, + ) + .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), ); @@ -1583,6 +1595,13 @@ pub(crate) async fn sync_events_v4_route( .rooms .read_receipt .readreceipts_since(room_id, *roomsince) + .filter_map(|(read_user, ts, v)| async move { + (!services + .users + .user_is_ignored(&read_user, sender_user) + .await) + .then_some((read_user, ts, v)) + }) .collect() .await; diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index dbe38561..b1a71caf 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -14,7 +14,6 @@ use itertools::Itertools; use ruma::{ events::{ direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, @@ -197,30 +196,7 @@ impl Service { }, MembershipState::Invite => { // We want to know if the sender is ignored by the receiver - let is_ignored = self - .services - .account_data - .get( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - ) - .await - .and_then(|event| { - serde_json::from_str::(event.get()) - .map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}")))) - }) - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|(user, _details)| user == sender) - }); - - if is_ignored { + if self.services.users.user_is_ignored(sender, user_id).await { return Ok(()); } diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index bcfce616..8ee34f44 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -1,6 +1,11 @@ use std::{collections::BTreeMap, sync::Arc}; -use conduit::{debug_info, trace, 
utils, Result, Server}; +use conduit::{ + debug_info, trace, + utils::{self, IterStream}, + Result, Server, +}; +use futures::StreamExt; use ruma::{ api::federation::transactions::edu::{Edu, TypingContent}, events::SyncEphemeralRoomEvent, @@ -8,7 +13,7 @@ use ruma::{ }; use tokio::sync::{broadcast, RwLock}; -use crate::{globals, sending, Dep}; +use crate::{globals, sending, users, Dep}; pub struct Service { server: Arc, @@ -23,6 +28,7 @@ pub struct Service { struct Services { globals: Dep, sending: Dep, + users: Dep, } impl crate::Service for Service { @@ -32,6 +38,7 @@ impl crate::Service for Service { services: Services { globals: args.depend::("globals"), sending: args.depend::("sending"), + users: args.depend::("users"), }, typing: RwLock::new(BTreeMap::new()), last_typing_update: RwLock::new(BTreeMap::new()), @@ -170,17 +177,35 @@ impl Service { /// Returns a new typing EDU. pub async fn typings_all( - &self, room_id: &RoomId, + &self, room_id: &RoomId, sender_user: &UserId, ) -> Result> { + let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); + + let Some(typing_indicators) = room_typing_indicators else { + return Ok(SyncEphemeralRoomEvent { + content: ruma::events::typing::TypingEventContent { + user_ids: Vec::new(), + }, + }); + }; + + let user_ids: Vec<_> = typing_indicators + .into_keys() + .stream() + .filter_map(|typing_user_id| async move { + (!self + .services + .users + .user_is_ignored(&typing_user_id, sender_user) + .await) + .then_some(typing_user_id) + }) + .collect() + .await; + Ok(SyncEphemeralRoomEvent { content: ruma::events::typing::TypingEventContent { - user_ids: self - .typing - .read() - .await - .get(room_id) - .map(|m| m.keys().cloned().collect()) - .unwrap_or_default(), + user_ids, }, }) } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 438c220b..1c079085 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -10,13 +10,13 @@ use futures::{pin_mut, FutureExt, Stream, 
StreamExt, TryFutureExt}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, StateEventType}, + events::{ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType, StateEventType}, serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, OwnedUserId, UInt, UserId, }; -use crate::{admin, globals, rooms, Dep}; +use crate::{account_data, admin, globals, rooms, Dep}; pub struct Service { services: Services, @@ -25,6 +25,7 @@ pub struct Service { struct Services { server: Arc, + account_data: Dep, admin: Dep, globals: Dep, state_accessor: Dep, @@ -58,6 +59,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { server: args.server.clone(), + account_data: args.depend::("account_data"), admin: args.depend::("admin"), globals: args.depend::("globals"), state_accessor: args.depend::("rooms::state_accessor"), @@ -91,6 +93,32 @@ impl crate::Service for Service { } impl Service { + /// Returns true/false based on whether the recipient/receiving user has + /// blocked the sender + pub async fn user_is_ignored(&self, sender_user: &UserId, recipient_user: &UserId) -> bool { + self.services + .account_data + .get( + None, + recipient_user, + GlobalAccountDataEventType::IgnoredUserList + .to_string() + .into(), + ) + .await + .and_then(|event| { + serde_json::from_str::(event.get()) + .map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}")))) + }) + .map_or(false, |ignored| { + ignored + .content + .ignored_users + .keys() + .any(|blocked_user| blocked_user == sender_user) + }) + } + /// Check if a user is an admin #[inline] pub async fn is_admin(&self, user_id: &UserId) -> bool { self.services.admin.user_is_admin(user_id).await } From 2083c38c764d5d144ff6355ce0688e8fe98d7d49 Mon Sep 17 00:00:00 2001 From: strawberry Date: 
Sat, 28 Sep 2024 22:12:17 -0400 Subject: [PATCH 0028/1248] dont send non-state events from ignored users over sync Signed-off-by: strawberry --- src/api/client/sync.rs | 91 +++++++++++++++++++++++++++++++----------- 1 file changed, 68 insertions(+), 23 deletions(-) diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index 1383f902..51df88a3 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -35,6 +35,7 @@ use ruma::{ presence::PresenceEvent, room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, StateEventType, TimelineEventType, + TimelineEventType::*, }, serde::Raw, state_res::Event, @@ -1004,8 +1005,31 @@ async fn load_joined_room( let room_events: Vec<_> = timeline_pdus .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); + .stream() + .filter_map(|(_, pdu)| async move { + // list of safe and common non-state events to ignore + if matches!( + &pdu.kind, + RoomMessage + | Sticker | CallInvite + | CallNotify | RoomEncrypted + | Image | File | Audio + | Voice | Video | UnstablePollStart + | PollStart | KeyVerificationStart + | Reaction | Emote + | Location + ) && services + .users + .user_is_ignored(&pdu.sender, sender_user) + .await + { + return None; + } + + Some(pdu.to_sync_room_event()) + }) + .collect() + .await; let mut edus: Vec<_> = services .rooms @@ -1144,11 +1168,11 @@ async fn share_encrypted_room( pub(crate) async fn sync_events_v4_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.expect("user is authenticated"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let mut body = body.body; // Setup watchers, so if there's no response, we can wait for them - let watcher = services.globals.watch(&sender_user, &sender_device); + let watcher = services.globals.watch(sender_user, &sender_device); let next_batch = services.globals.next_count()?; @@ 
-1191,7 +1215,7 @@ pub(crate) async fn sync_events_v4_route( let all_joined_rooms: Vec<_> = services .rooms .state_cache - .rooms_joined(&sender_user) + .rooms_joined(sender_user) .map(ToOwned::to_owned) .collect() .await; @@ -1199,7 +1223,7 @@ pub(crate) async fn sync_events_v4_route( let all_invited_rooms: Vec<_> = services .rooms .state_cache - .rooms_invited(&sender_user) + .rooms_invited(sender_user) .map(|r| r.0) .collect() .await; @@ -1213,7 +1237,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.to_device.enabled.unwrap_or(false) { services .users - .remove_to_device_events(&sender_user, &sender_device, globalsince) + .remove_to_device_events(sender_user, &sender_device, globalsince) .await; } @@ -1232,7 +1256,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.account_data.enabled.unwrap_or(false) { account_data.global = services .account_data - .changes_since(None, &sender_user, globalsince) + .changes_since(None, sender_user, globalsince) .await? .into_iter() .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) @@ -1244,7 +1268,7 @@ pub(crate) async fn sync_events_v4_route( room.clone(), services .account_data - .changes_since(Some(&room), &sender_user, globalsince) + .changes_since(Some(&room), sender_user, globalsince) .await? 
.into_iter() .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) @@ -1338,7 +1362,7 @@ pub(crate) async fn sync_events_v4_route( let user_id = UserId::parse(state_key.clone()) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - if user_id == sender_user { + if user_id == *sender_user { continue; } @@ -1350,7 +1374,7 @@ pub(crate) async fn sync_events_v4_route( match new_membership { MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(&services, &sender_user, &user_id, Some(room_id)) + if !share_encrypted_room(&services, sender_user, &user_id, Some(room_id)) .await { device_list_changes.insert(user_id); @@ -1367,7 +1391,6 @@ pub(crate) async fn sync_events_v4_route( } } if joined_since_last_sync || new_encrypted_room { - let sender_user = &sender_user; // If the user is in a new encrypted room, give them all joined users device_list_changes.extend( services @@ -1400,7 +1423,7 @@ pub(crate) async fn sync_events_v4_route( } for user_id in left_encrypted_users { - let dont_share_encrypted_room = !share_encrypted_room(&services, &sender_user, &user_id, None).await; + let dont_share_encrypted_room = !share_encrypted_room(&services, sender_user, &user_id, None).await; // If the user doesn't share an encrypted room with the target anymore, we need // to tell them @@ -1564,14 +1587,14 @@ pub(crate) async fn sync_events_v4_route( invite_state = services .rooms .state_cache - .invite_state(&sender_user, room_id) + .invite_state(sender_user, room_id) .await .ok(); (timeline_pdus, limited) = (Vec::new(), true); } else { (timeline_pdus, limited) = - match load_timeline(&services, &sender_user, room_id, roomsincecount, *timeline_limit).await { + match load_timeline(&services, sender_user, room_id, roomsincecount, *timeline_limit).await { Ok(value) => value, Err(err) => { warn!("Encountered missing timeline in {}, error {}", room_id, err); @@ -1584,7 +1607,7 @@ pub(crate) async fn sync_events_v4_route( 
room_id.clone(), services .account_data - .changes_since(Some(room_id), &sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince) .await? .into_iter() .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) @@ -1639,8 +1662,30 @@ pub(crate) async fn sync_events_v4_route( let room_events: Vec<_> = timeline_pdus .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); + .stream() + .filter_map(|(_, pdu)| async move { + // list of safe and common non-state events to ignore + if matches!( + &pdu.kind, + RoomMessage + | Sticker | CallInvite + | CallNotify | RoomEncrypted + | Image | File | Audio + | Voice | Video | UnstablePollStart + | PollStart | KeyVerificationStart + | Reaction | Emote | Location + ) && services + .users + .user_is_ignored(&pdu.sender, sender_user) + .await + { + return None; + } + + Some(pdu.to_sync_room_event()) + }) + .collect() + .await; for (_, pdu) in timeline_pdus { let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts); @@ -1669,7 +1714,7 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_cache .room_members(room_id) - .ready_filter(|member| member != &sender_user) + .ready_filter(|member| member != sender_user) .filter_map(|user_id| { services .rooms @@ -1743,7 +1788,7 @@ pub(crate) async fn sync_events_v4_route( services .rooms .user - .highlight_count(&sender_user, room_id) + .highlight_count(sender_user, room_id) .await .try_into() .expect("notification count can't go that high"), @@ -1752,7 +1797,7 @@ pub(crate) async fn sync_events_v4_route( services .rooms .user - .notification_count(&sender_user, room_id) + .notification_count(sender_user, room_id) .await .try_into() .expect("notification count can't go that high"), @@ -1811,7 +1856,7 @@ pub(crate) async fn sync_events_v4_route( Some(sync_events::v4::ToDevice { events: services .users - .get_to_device_events(&sender_user, &sender_device) + .get_to_device_events(sender_user, &sender_device) .collect() .await, next_batch: 
next_batch.to_string(), @@ -1826,7 +1871,7 @@ pub(crate) async fn sync_events_v4_route( }, device_one_time_keys_count: services .users - .count_one_time_keys(&sender_user, &sender_device) + .count_one_time_keys(sender_user, &sender_device) .await, // Fallback keys are not yet supported device_unused_fallback_key_types: None, From 4413793f7e302c9e5b0880ba3eb3f20f8558e6b3 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 28 Sep 2024 23:15:43 -0400 Subject: [PATCH 0029/1248] dont allow sending/receiving room invites with ignored users Signed-off-by: strawberry --- src/api/client/membership.rs | 8 ++++++++ src/api/client/room.rs | 20 ++++++++++++++++++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 6e3bc894..f89903b4 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -364,6 +364,14 @@ pub(crate) async fn invite_user_route( user_id, } = &body.recipient { + if services.users.user_is_ignored(sender_user, user_id).await { + return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms."))); + } else if services.users.user_is_ignored(user_id, sender_user).await { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + return Ok(invite_user::v3::Response {}); + } + invite_helper(&services, sender_user, user_id, &body.room_id, body.reason.clone(), false).await?; Ok(invite_user::v3::Response {}) } else { diff --git a/src/api/client/room.rs b/src/api/client/room.rs index 1edf85d8..0d8e12a2 100644 --- a/src/api/client/room.rs +++ b/src/api/client/room.rs @@ -267,8 +267,16 @@ pub(crate) async fn create_room_route( let mut users = BTreeMap::from_iter([(sender_user.clone(), int!(100))]); if preset == RoomPreset::TrustedPrivateChat { - for invite_ in &body.invite { - users.insert(invite_.clone(), int!(100)); + for invite in &body.invite { + if services.users.user_is_ignored(sender_user, 
invite).await { + return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms."))); + } else if services.users.user_is_ignored(invite, sender_user).await { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + continue; + } + + users.insert(invite.clone(), int!(100)); } } @@ -476,6 +484,14 @@ pub(crate) async fn create_room_route( // 8. Events implied by invite (and TODO: invite_3pid) drop(state_lock); for user_id in &body.invite { + if services.users.user_is_ignored(sender_user, user_id).await { + return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms."))); + } else if services.users.user_is_ignored(user_id, sender_user).await { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + continue; + } + if let Err(e) = invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct) .boxed() .await From b64a23516520fe764e5d463a28d6b341942642c3 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 29 Sep 2024 00:28:05 -0400 Subject: [PATCH 0030/1248] use ok_or_else for a rare error Signed-off-by: strawberry --- src/service/rooms/event_handler/parse_incoming_pdu.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 2de3e28e..9081fcbc 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -14,7 +14,7 @@ impl super::Service { let room_id: OwnedRoomId = value .get("room_id") .and_then(|id| RoomId::parse(id.as_str()?).ok()) - .ok_or(err!(Request(InvalidParam("Invalid room id in pdu"))))?; + .ok_or_else(|| err!(Request(InvalidParam("Invalid room id in pdu"))))?; let Ok(room_version_id) = self.services.state.get_room_version(&room_id).await else { return Err!("Server is not in room {room_id}"); From 
ee1580e4800f254cee39707b8e0c3d0a9339bb23 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 29 Sep 2024 00:50:12 -0400 Subject: [PATCH 0031/1248] fix list_rooms admin command filters Signed-off-by: strawberry --- src/admin/room/commands.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index 1c90a998..35e40c8b 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -6,7 +6,7 @@ use crate::{admin_command, get_room_info, PAGE_SIZE}; #[admin_command] pub(super) async fn list_rooms( - &self, page: Option, _exclude_disabled: bool, _exclude_banned: bool, no_details: bool, + &self, page: Option, exclude_disabled: bool, exclude_banned: bool, no_details: bool, ) -> Result { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); @@ -15,8 +15,12 @@ pub(super) async fn list_rooms( .rooms .metadata .iter_ids() - //.filter(|room_id| async { !exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await }) - //.filter(|room_id| async { !exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await }) + .filter_map(|room_id| async move { + (!exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await).then_some(room_id) + }) + .filter_map(|room_id| async move { + (!exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await).then_some(room_id) + }) .then(|room_id| get_room_info(self.services, room_id)) .collect::>() .await; From 7a59add8f1bc8d4697580823e8651f4b72e4b9d5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 29 Sep 2024 01:54:07 -0400 Subject: [PATCH 0032/1248] add support for reading a registration token from a file Signed-off-by: strawberry --- conduwuit-example.toml | 18 +++++++++++++---- docs/deploying/docker-compose.for-traefik.yml | 2 +- docs/deploying/docker-compose.with-caddy.yml | 2 +- .../deploying/docker-compose.with-traefik.yml | 6 +++--- 
docs/deploying/docker-compose.yml | 2 +- src/api/client/account.rs | 6 +++--- src/core/config/check.rs | 20 ++++++++++++++++++- src/core/config/mod.rs | 15 +++++++++++--- src/service/globals/mod.rs | 16 +++++++++++++++ src/service/uiaa/mod.rs | 12 +++++++---- 10 files changed, 78 insertions(+), 21 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index b532d381..11735616 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -195,11 +195,14 @@ allow_guests_auto_join_rooms = false # Enables registration. If set to false, no users can register on this # server. +# # If set to true without a token configured, users can register with no form of 2nd- # step only if you set # `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to -# true in your config. If you would like -# registration only via token reg, please configure the `registration_token` key. +# true in your config. +# +# If you would like registration only via token reg, please configure +# `registration_token` or `registration_token_file`. allow_registration = false # Please note that an open registration homeserver with no second-step verification # is highly prone to abuse and potential defederation by homeservers, including @@ -208,7 +211,14 @@ allow_registration = false # A static registration token that new users will have to provide when creating # an account. If unset and `allow_registration` is true, registration is open # without any condition. YOU NEED TO EDIT THIS. 
-registration_token = "change this token for something specific to your server" +registration_token = "change this token/string here or set registration_token_file" + +# Path to a file on the system that gets read for the registration token +# +# conduwuit must be able to access the file, and it must not be empty +# +# no default +#registration_token_file = "/etc/conduwuit/.reg_token" # controls whether federation is allowed or not # defaults to true @@ -344,7 +354,7 @@ allow_profile_lookup_federation_requests = true # Controls the max log level for admin command log captures (logs generated from running admin commands) # # Defaults to "info" on release builds, else "debug" on debug builds -#admin_log_capture = info +#admin_log_capture = "info" # Allows admins to enter commands in rooms other than #admins by prefixing with \!admin. The reply # will be publicly visible to the room, originating from the sender. diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 1c615673..ae93d52f 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -16,7 +16,7 @@ services: CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit CONDUWUIT_DATABASE_BACKEND: rocksdb CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label - CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 899f4d67..36924212 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -32,7 +32,7 @@ services: CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit CONDUWUIT_DATABASE_BACKEND: rocksdb CONDUWUIT_PORT: 6167 - CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 
MB + CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index f05006a5..89118c74 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -15,7 +15,8 @@ services: CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' CONDUWUIT_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this - CONDUWUIT_REGISTRATION_TOKEN: # This is a token you can use to register on the server + CONDUWUIT_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server + #CONDUWUIT_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read CONDUWUIT_ADDRESS: 0.0.0.0 CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit @@ -23,7 +24,6 @@ services: ### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example example config too # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUWUIT_LOG: info # default is: "warn,state_res=warn" - # CONDUWUIT_ALLOW_JAEGER: 'false' # CONDUWUIT_ALLOW_ENCRYPTION: 'true' # CONDUWUIT_ALLOW_FEDERATION: 'true' # CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' @@ -31,7 +31,7 @@ services: # CONDUWUIT_ALLOW_OUTGOING_PRESENCE: true # CONDUWUIT_ALLOW_LOCAL_PRESENCE: true # CONDUWUIT_WORKERS: 10 - # CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + # CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB # CONDUWUIT_NEW_USER_DISPLAYNAME_SUFFIX = "🏳<200d>⚧" # We need some way to serve the client and 
server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index bc9f2477..26145c5a 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -16,7 +16,7 @@ services: CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit CONDUWUIT_DATABASE_BACKEND: rocksdb CONDUWUIT_PORT: 6167 - CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 63d02f8f..1ededa36 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -111,7 +111,7 @@ pub(crate) async fn register_route( if is_guest && (!services.globals.allow_guest_registration() - || (services.globals.allow_registration() && services.globals.config.registration_token.is_some())) + || (services.globals.allow_registration() && services.globals.registration_token.is_some())) { info!( "Guest registration disabled / registration enabled with token configured, rejecting guest registration \ @@ -183,7 +183,7 @@ pub(crate) async fn register_route( // UIAA let mut uiaainfo; - let skip_auth = if services.globals.config.registration_token.is_some() { + let skip_auth = if services.globals.registration_token.is_some() { // Registration token required uiaainfo = UiaaInfo { flows: vec![AuthFlow { @@ -685,7 +685,7 @@ pub(crate) async fn request_3pid_management_token_via_msisdn_route( pub(crate) async fn check_registration_token_validity( State(services): State, body: Ruma, ) -> Result { - let Some(reg_token) = services.globals.config.registration_token.clone() else { + let Some(reg_token) = services.globals.registration_token.clone() else { return Err(Error::BadRequest( ErrorKind::forbidden(), "Server does not allow token registration.", diff --git 
a/src/core/config/check.rs b/src/core/config/check.rs index 8dea55d8..c0d05533 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -94,6 +94,22 @@ pub fn check(config: &Config) -> Result<()> { )); } + // check if we can read the token file path, and check if the file is empty + if config.registration_token_file.as_ref().is_some_and(|path| { + let Ok(token) = std::fs::read_to_string(path).inspect_err(|e| { + error!("Failed to read the registration token file: {e}"); + }) else { + return true; + }; + + token == String::new() + }) { + return Err!(Config( + "registration_token_file", + "Registration token file was specified but is empty or failed to be read" + )); + } + if config.max_request_size < 5_120_000 { return Err!(Config( "max_request_size", @@ -111,12 +127,13 @@ pub fn check(config: &Config) -> Result<()> { if config.allow_registration && !config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse && config.registration_token.is_none() + && config.registration_token_file.is_none() { return Err!(Config( "registration_token", "!! You have `allow_registration` enabled without a token configured in your config which means you are \ allowing ANYONE to register on your conduwuit instance without any 2nd-step (e.g. registration token).\n -If this is not the intended behaviour, please set a registration token with the `registration_token` config option.\n +If this is not the intended behaviour, please set a registration token.\n For security and safety reasons, conduwuit will shut down. If you are extra sure this is the desired behaviour you \ want, please set the following config option to true: `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`" @@ -126,6 +143,7 @@ For security and safety reasons, conduwuit will shut down. 
If you are extra sure if config.allow_registration && config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse && config.registration_token.is_none() + && config.registration_token_file.is_none() { warn!( "Open registration is enabled via setting \ diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index d8e1c7d9..126b3123 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -139,6 +139,7 @@ pub struct Config { #[serde(default)] pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool, pub registration_token: Option, + pub registration_token_file: Option, #[serde(default = "true_fn")] pub allow_encryption: bool, #[serde(default = "true_fn")] @@ -572,12 +573,20 @@ impl fmt::Display for Config { line("Allow registration", &self.allow_registration.to_string()); line( "Registration token", - if self.registration_token.is_some() { - "set" + if self.registration_token.is_none() && self.registration_token_file.is_none() && self.allow_registration { + "not set (⚠️ open registration!)" + } else if self.registration_token.is_none() && self.registration_token_file.is_none() { + "not set" } else { - "not set (open registration!)" + "set" }, ); + line( + "Registration token file path", + self.registration_token_file + .as_ref() + .map_or("", |path| path.to_str().unwrap_or_default()), + ); line( "Allow guest registration (inherently false if allow registration is false)", &self.allow_guest_registration.to_string(), diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f24e8a27..fb970f07 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -41,6 +41,7 @@ pub struct Service { pub server_user: OwnedUserId, pub admin_alias: OwnedRoomAliasId, pub turn_secret: String, + pub registration_token: Option, } type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries @@ -96,6 +97,20 @@ impl crate::Service for Service { }) }); + let 
registration_token = + config + .registration_token_file + .as_ref() + .map_or(config.registration_token.clone(), |path| { + let Ok(token) = std::fs::read_to_string(path).inspect_err(|e| { + error!("Failed to read the registration token file: {e}"); + }) else { + return config.registration_token.clone(); + }; + + Some(token) + }); + let mut s = Self { db, config: config.clone(), @@ -112,6 +127,7 @@ impl crate::Service for Service { server_user: UserId::parse_with_server_name(String::from("conduit"), &config.server_name) .expect("@conduit:server_name is valid"), turn_secret, + registration_token, }; if !s diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 0415bfc2..f75f1bcd 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -6,7 +6,7 @@ use std::{ use conduit::{ err, error, implement, utils, utils::{hash, string::EMPTY}, - Error, Result, Server, + Error, Result, }; use database::{Deserialized, Map}; use ruma::{ @@ -26,7 +26,6 @@ pub struct Service { } struct Services { - server: Arc, globals: Dep, users: Dep, } @@ -48,7 +47,6 @@ impl crate::Service for Service { userdevicesessionid_uiaainfo: args.db["userdevicesessionid_uiaainfo"].clone(), }, services: Services { - server: args.server.clone(), globals: args.depend::("globals"), users: args.depend::("users"), }, @@ -135,7 +133,13 @@ pub async fn try_auth( uiaainfo.completed.push(AuthType::Password); }, AuthData::RegistrationToken(t) => { - if Some(t.token.trim()) == self.services.server.config.registration_token.as_deref() { + if self + .services + .globals + .registration_token + .as_ref() + .is_some_and(|reg_token| t.token.trim() == reg_token) + { uiaainfo.completed.push(AuthType::RegistrationToken); } else { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { From 6a81bf23dec75e97be30c44f248ef9bd1493835e Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 29 Sep 2024 20:13:37 -0400 Subject: [PATCH 0033/1248] dont send events from ignored users over 
/messages Signed-off-by: strawberry --- src/api/client/message.rs | 80 +++++++++++++++++++++++++++++++-------- 1 file changed, 65 insertions(+), 15 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index bab5fa54..d577e3c8 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,7 +1,11 @@ use std::collections::{BTreeMap, HashSet}; use axum::extract::State; -use conduit::{err, utils::ReadyExt, Err, PduCount}; +use conduit::{ + err, + utils::{IterStream, ReadyExt}, + Err, PduCount, +}; use futures::{FutureExt, StreamExt}; use ruma::{ api::client::{ @@ -9,7 +13,7 @@ use ruma::{ filter::{RoomEventFilter, UrlFilter}, message::{get_message_events, send_message_event}, }, - events::{MessageLikeEventType, StateEventType}, + events::{MessageLikeEventType, StateEventType, TimelineEventType::*}, UserId, }; use serde_json::{from_str, Value}; @@ -182,8 +186,30 @@ pub(crate) async fn get_message_events_route( let events_after: Vec<_> = events_after .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); + .stream() + .filter_map(|(_, pdu)| async move { + // list of safe and common non-state events to ignore + if matches!( + &pdu.kind, + RoomMessage + | Sticker | CallInvite + | CallNotify | RoomEncrypted + | Image | File | Audio + | Voice | Video | UnstablePollStart + | PollStart | KeyVerificationStart + | Reaction | Emote | Location + ) && services + .users + .user_is_ignored(&pdu.sender, sender_user) + .await + { + return None; + } + + Some(pdu.to_room_event()) + }) + .collect() + .await; resp.start = from.stringify(); resp.end = next_token.map(|count| count.stringify()); @@ -203,6 +229,27 @@ pub(crate) async fn get_message_events_route( .pdus_until(sender_user, room_id, from) .await? 
.ready_filter_map(|item| contains_url_filter(item, filter)) + .filter_map(|(count, pdu)| async move { + // list of safe and common non-state events to ignore + if matches!( + &pdu.kind, + RoomMessage + | Sticker | CallInvite + | CallNotify | RoomEncrypted + | Image | File | Audio + | Voice | Video | UnstablePollStart + | PollStart | KeyVerificationStart + | Reaction | Emote | Location + ) && services + .users + .user_is_ignored(&pdu.sender, sender_user) + .await + { + return None; + } + + Some((count, pdu)) + }) .filter_map(|item| visibility_filter(&services, item, sender_user)) .ready_take_while(|(count, _)| Some(*count) != to) // Stop at `to` .take(limit) @@ -243,17 +290,20 @@ pub(crate) async fn get_message_events_route( }, } - resp.state = Vec::new(); - for ll_id in &lazy_loaded { - if let Ok(member_event) = services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, ll_id.as_str()) - .await - { - resp.state.push(member_event.to_state_event()); - } - } + resp.state = lazy_loaded + .iter() + .stream() + .filter_map(|ll_user_id| async move { + services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, ll_user_id.as_str()) + .await + .map(|member_event| member_event.to_state_event()) + .ok() + }) + .collect() + .await; // remove the feature check when we are sure clients like element can handle it if !cfg!(feature = "element_hacks") { From a9e3e8f77ad38549f7bb21c9447e5e7549ac31fe Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 29 Sep 2024 20:40:54 -0400 Subject: [PATCH 0034/1248] dont send non-state events from ignored users over /context/{eventId} Signed-off-by: strawberry --- src/api/client/context.rs | 64 ++++++++++++++++++++++++++++++--------- 1 file changed, 49 insertions(+), 15 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index cc49b763..9a5c4e82 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -5,12 +5,12 @@ use conduit::{err, 
error, Err}; use futures::StreamExt; use ruma::{ api::client::{context::get_context, filter::LazyLoadOptions}, - events::StateEventType, + events::{StateEventType, TimelineEventType::*}, }; use crate::{Result, Ruma}; -/// # `GET /_matrix/client/r0/rooms/{roomId}/context` +/// # `GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}` /// /// Allows loading room history around an event. /// @@ -31,7 +31,7 @@ pub(crate) async fn get_context_route( LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")), }; - let mut lazy_loaded = HashSet::new(); + let mut lazy_loaded = HashSet::with_capacity(100); let base_token = services .rooms @@ -79,6 +79,25 @@ pub(crate) async fn get_context_route( .await? .take(limit / 2) .filter_map(|(count, pdu)| async move { + // list of safe and common non-state events to ignore + if matches!( + &pdu.kind, + RoomMessage + | Sticker | CallInvite + | CallNotify | RoomEncrypted + | Image | File | Audio + | Voice | Video | UnstablePollStart + | PollStart | KeyVerificationStart + | Reaction | Emote + | Location + ) && services + .users + .user_is_ignored(&pdu.sender, sender_user) + .await + { + return None; + } + services .rooms .state_accessor @@ -104,11 +123,6 @@ pub(crate) async fn get_context_route( .last() .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()); - let events_before: Vec<_> = events_before - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); - let events_after: Vec<_> = services .rooms .timeline @@ -116,6 +130,25 @@ pub(crate) async fn get_context_route( .await? 
.take(limit / 2) .filter_map(|(count, pdu)| async move { + // list of safe and common non-state events to ignore + if matches!( + &pdu.kind, + RoomMessage + | Sticker | CallInvite + | CallNotify | RoomEncrypted + | Image | File | Audio + | Voice | Video | UnstablePollStart + | PollStart | KeyVerificationStart + | Reaction | Emote + | Location + ) && services + .users + .user_is_ignored(&pdu.sender, sender_user) + .await + { + return None; + } + services .rooms .state_accessor @@ -167,11 +200,6 @@ pub(crate) async fn get_context_route( .last() .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()); - let events_after: Vec<_> = events_after - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); - let mut state = Vec::with_capacity(state_ids.len()); for (shortstatekey, id) in state_ids { @@ -201,9 +229,15 @@ pub(crate) async fn get_context_route( Ok(get_context::v3::Response { start: Some(start_token), end: Some(end_token), - events_before, + events_before: events_before + .iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(), event: Some(base_event), - events_after, + events_after: events_after + .iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(), state, }) } From 115ea03edfc1cf785ad280abcc850bf14a2b76cc Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 29 Sep 2024 20:57:33 -0400 Subject: [PATCH 0035/1248] remove unnecessary full type annos Signed-off-by: strawberry --- src/api/client/sync.rs | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index 51df88a3..c4ff1eeb 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -34,8 +34,8 @@ use ruma::{ events::{ presence::PresenceEvent, room::member::{MembershipState, RoomMemberEventContent}, - AnyRawAccountDataEvent, StateEventType, TimelineEventType, - TimelineEventType::*, + AnyRawAccountDataEvent, StateEventType, + TimelineEventType::{self, *}, }, serde::Raw, 
state_res::Event, @@ -50,14 +50,8 @@ use crate::{ }; const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; -const DEFAULT_BUMP_TYPES: &[TimelineEventType] = &[ - TimelineEventType::RoomMessage, - TimelineEventType::RoomEncrypted, - TimelineEventType::Sticker, - TimelineEventType::CallInvite, - TimelineEventType::PollStart, - TimelineEventType::Beacon, -]; +const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = + &[RoomMessage, RoomEncrypted, Sticker, CallInvite, PollStart, Beacon]; macro_rules! extract_variant { ($e:expr, $variant:path) => { @@ -376,7 +370,7 @@ async fn handle_left_room( origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("Timestamp is valid js_int value"), - kind: TimelineEventType::RoomMember, + kind: RoomMember, content: serde_json::from_str(r#"{"membership":"leave"}"#).expect("this is valid JSON"), state_key: Some(sender_user.to_string()), unsigned: None, @@ -639,7 +633,7 @@ async fn load_joined_room( .timeline .all_pdus(sender_user, room_id) .await? 
- .ready_filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) + .ready_filter(|(_, pdu)| pdu.kind == RoomMember) .filter_map(|(_, pdu)| async move { let Ok(content) = serde_json::from_str::(pdu.content.get()) else { return None; @@ -827,11 +821,11 @@ async fn load_joined_room( let send_member_count = delta_state_events .iter() - .any(|event| event.kind == TimelineEventType::RoomMember); + .any(|event| event.kind == RoomMember); if encrypted_room { for state_event in &delta_state_events { - if state_event.kind != TimelineEventType::RoomMember { + if state_event.kind != RoomMember { continue; } @@ -895,7 +889,7 @@ async fn load_joined_room( // Mark all member events we're returning as lazy-loaded for pdu in &state_events { - if pdu.kind == TimelineEventType::RoomMember { + if pdu.kind == RoomMember { match UserId::parse( pdu.state_key .as_ref() @@ -1357,7 +1351,7 @@ pub(crate) async fn sync_events_v4_route( error!("Pdu in state not found: {id}"); continue; }; - if pdu.kind == TimelineEventType::RoomMember { + if pdu.kind == RoomMember { if let Some(state_key) = &pdu.state_key { let user_id = UserId::parse(state_key.clone()) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; From 4eb7ad79d18c1d89784d2756a8652a1c0ce2d347 Mon Sep 17 00:00:00 2001 From: strawberry Date: Tue, 1 Oct 2024 01:59:24 -0400 Subject: [PATCH 0036/1248] update last_seen_ip and last_seen_ts on updating device metadata Signed-off-by: strawberry --- src/api/client/device.rs | 20 +++++++++++++++----- src/service/users/mod.rs | 11 ----------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 93eaa393..7e56f27e 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,10 +1,14 @@ use axum::extract::State; +use axum_client_ip::InsecureClientIp; use conduit::{err, Err}; use futures::StreamExt; -use ruma::api::client::{ - device::{self, delete_device, delete_devices, get_device, get_devices, 
update_device}, - error::ErrorKind, - uiaa::{AuthFlow, AuthType, UiaaInfo}, +use ruma::{ + api::client::{ + device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, + error::ErrorKind, + uiaa::{AuthFlow, AuthType, UiaaInfo}, + }, + MilliSecondsSinceUnixEpoch, }; use super::SESSION_ID_LENGTH; @@ -51,8 +55,10 @@ pub(crate) async fn get_device_route( /// # `PUT /_matrix/client/r0/devices/{deviceId}` /// /// Updates the metadata on a given device of the sender user. +#[tracing::instrument(skip_all, fields(%client), name = "update_device")] pub(crate) async fn update_device_route( - State(services): State, body: Ruma, + State(services): State, InsecureClientIp(client): InsecureClientIp, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -63,6 +69,10 @@ pub(crate) async fn update_device_route( .map_err(|_| err!(Request(NotFound("Device not found."))))?; device.display_name.clone_from(&body.display_name); + device.last_seen_ip.clone_from(&Some(client.to_string())); + device + .last_seen_ts + .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); services .users diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 1c079085..44d169dd 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -791,17 +791,6 @@ impl Service { } pub async fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: &Device) -> Result<()> { - // Only existing devices should be able to call this, but we shouldn't assert - // either... 
- let key = (user_id, device_id); - if self.db.userdeviceid_metadata.qry(&key).await.is_err() { - return Err!(Database(error!( - ?user_id, - ?device_id, - "Called update_device_metadata for a non-existent user and/or device" - ))); - } - increment(&self.db.userid_devicelistversion, user_id.as_bytes()); let mut userdeviceid = user_id.as_bytes().to_vec(); From 98363852b18c4f2cc1525c671b770a6fbf4a7f3a Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 2 Oct 2024 00:56:09 -0400 Subject: [PATCH 0037/1248] fix: dont add remote users for push targets, use hashset instead of vec Signed-off-by: strawberry --- src/service/rooms/timeline/mod.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 6a26a1d5..f8f770bc 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -43,7 +43,7 @@ use self::data::Data; pub use self::data::PdusIterItem; use crate::{ account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, - rooms::state_compressor::CompressedStateEvent, sending, server_keys, Dep, + rooms::state_compressor::CompressedStateEvent, sending, server_keys, users, Dep, }; // Update Relationships @@ -90,6 +90,7 @@ struct Services { sending: Dep, server_keys: Dep, user: Dep, + users: Dep, pusher: Dep, threads: Dep, search: Dep, @@ -119,6 +120,7 @@ impl crate::Service for Service { sending: args.depend::("sending"), server_keys: args.depend::("server_keys"), user: args.depend::("rooms::user"), + users: args.depend::("users"), pusher: args.depend::("pusher"), threads: args.depend::("rooms::threads"), search: args.depend::("rooms::search"), @@ -378,20 +380,20 @@ impl Service { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - let mut push_target = self + let mut push_target: HashSet<_> = self .services .state_cache .active_local_users_in_room(&pdu.room_id) .map(ToOwned::to_owned) - .collect::>() + 
.collect() .await; if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { - let target_user_id = UserId::parse(state_key.clone()).expect("This state_key was previously validated"); + let target_user_id = UserId::parse(state_key.clone())?; - if !push_target.contains(&target_user_id) { - push_target.push(target_user_id); + if self.services.users.is_active_local(&target_user_id).await { + push_target.insert(target_user_id); } } } From 54a107c3c473049f5049e96f807d4c505f5a13db Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 2 Oct 2024 01:47:19 -0400 Subject: [PATCH 0038/1248] drop unnecessary error to debug_warn Signed-off-by: strawberry --- src/api/client/state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index f9a4a763..d89c23e8 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -130,7 +130,7 @@ pub(crate) async fn get_state_events_for_key_route( .room_state_get(&body.room_id, &body.event_type, &body.state_key) .await .map_err(|_| { - err!(Request(NotFound(error!( + err!(Request(NotFound(debug_warn!( room_id = ?body.room_id, event_type = ?body.event_type, "State event not found in room.", From ab9a65db5d8501c60c523bc69d704032459db482 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 2 Oct 2024 01:47:53 -0400 Subject: [PATCH 0039/1248] add MSC4151 room reporting support Signed-off-by: strawberry --- Cargo.lock | 28 +++++----- Cargo.toml | 2 +- src/api/client/report.rs | 107 +++++++++++++++++++++++++++------------ src/api/router.rs | 1 + 4 files changed, 90 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d40c458..e72c7e80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2975,7 +2975,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "assign", "js_int", @@ -2997,7 +2997,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "js_int", "ruma-common", @@ -3009,7 +3009,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "as_variant", "assign", @@ -3032,7 +3032,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "as_variant", "base64 0.22.1", @@ -3062,7 +3062,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3086,7 +3086,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "bytes", "http", @@ -3104,7 +3104,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "js_int", "thiserror", @@ -3113,7 +3113,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "js_int", "ruma-common", @@ -3123,7 +3123,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "cfg-if", "once_cell", @@ -3139,7 +3139,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "js_int", "ruma-common", @@ -3151,7 
+3151,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "headers", "http", @@ -3164,7 +3164,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3180,10 +3180,10 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e#ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" dependencies = [ "futures-util", - "itertools 0.12.1", + "itertools 0.13.0", "js_int", "ruma-common", "ruma-events", diff --git a/Cargo.toml b/Cargo.toml index 28e280cf..18f33375 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -315,7 +315,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "ade2f1daf0b1d9e8f7de81a24dca8925406e4d8e" +rev = "e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" features = [ "compat", "rand", diff --git a/src/api/client/report.rs b/src/api/client/report.rs index a40c35a2..cf789246 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -1,10 +1,14 @@ use std::time::Duration; use axum::extract::State; +use axum_client_ip::InsecureClientIp; use conduit::{utils::ReadyExt, 
Err}; use rand::Rng; use ruma::{ - api::client::{error::ErrorKind, room::report_content}, + api::client::{ + error::ErrorKind, + room::{report_content, report_room}, + }, events::room::message, int, EventId, RoomId, UserId, }; @@ -14,22 +18,75 @@ use tracing::info; use crate::{ debug_info, service::{pdu::PduEvent, Services}, - utils::HtmlEscape, Error, Result, Ruma, }; +/// # `POST /_matrix/client/v3/rooms/{roomId}/report` +/// +/// Reports an abusive room to homeserver admins +#[tracing::instrument(skip_all, fields(%client), name = "report_room")] +pub(crate) async fn report_room_route( + State(services): State, InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + // user authentication + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + info!( + "Received room report by user {sender_user} for room {} with reason: {:?}", + body.room_id, body.reason + ); + + delay_response().await; + + if !services + .rooms + .state_cache + .server_in_room(&services.globals.config.server_name, &body.room_id) + .await + { + return Err!(Request(NotFound( + "Room does not exist to us, no local users have joined at all" + ))); + } + + if body.reason.as_ref().is_some_and(|s| s.len() > 750) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Reason too long, should be 750 characters or fewer", + )); + }; + + // send admin room message that we received the report with an @room ping for + // urgency + services + .admin + .send_message(message::RoomMessageEventContent::text_markdown(format!( + "@room Room report received from {} -\n\nRoom ID: {}\n\nReport Reason: {}", + sender_user.to_owned(), + body.room_id, + body.reason.as_deref().unwrap_or("") + ))) + .await + .ok(); + + Ok(report_room::v3::Response {}) +} + /// # `POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}` /// /// Reports an inappropriate event to homeserver admins +#[tracing::instrument(skip_all, fields(%client), name = "report_event")] pub(crate) async fn 
report_event_route( - State(services): State, body: Ruma, + State(services): State, InsecureClientIp(client): InsecureClientIp, + body: Ruma, ) -> Result { // user authentication let sender_user = body.sender_user.as_ref().expect("user is authenticated"); info!( - "Received /report request by user {sender_user} for room {} and event ID {}", - body.room_id, body.event_id + "Received event report by user {sender_user} for room {} and event ID {}, with reason: {:?}", + body.room_id, body.event_id, body.reason ); delay_response().await; @@ -39,7 +96,7 @@ pub(crate) async fn report_event_route( return Err!(Request(NotFound("Event ID is not known to us or Event ID is invalid"))); }; - is_report_valid( + is_event_report_valid( &services, &pdu.event_id, &body.room_id, @@ -54,32 +111,16 @@ pub(crate) async fn report_event_route( // urgency services .admin - .send_message(message::RoomMessageEventContent::text_html( - format!( - "@room Report received from: {}\n\nEvent ID: {}\nRoom ID: {}\nSent By: {}\n\nReport Score: {}\nReport \ - Reason: {}", - sender_user.to_owned(), - pdu.event_id, - pdu.room_id, - pdu.sender.clone(), - body.score.unwrap_or_else(|| ruma::Int::from(0)), - body.reason.as_deref().unwrap_or("") - ), - format!( - "

    @room Report received from: {0}\ -
    • Event Info
      • Event ID: {1}\ - 🔗
      • Room ID: {2}\ -
      • Sent By: {3}
    • \ - Report Info
      • Report Score: {4}
      • Report Reason: {5}
    • \ -
    ", - sender_user.to_owned(), - pdu.event_id.clone(), - pdu.room_id.clone(), - pdu.sender.clone(), - body.score.unwrap_or_else(|| ruma::Int::from(0)), - HtmlEscape(body.reason.as_deref().unwrap_or("")) - ), - )) + .send_message(message::RoomMessageEventContent::text_markdown(format!( + "@room Event report received from {} -\n\nEvent ID: {}\nRoom ID: {}\nSent By: {}\n\nReport Score: \ + {}\nReport Reason: {}", + sender_user.to_owned(), + pdu.event_id, + pdu.room_id, + pdu.sender, + body.score.unwrap_or_else(|| ruma::Int::from(0)), + body.reason.as_deref().unwrap_or("") + ))) .await .ok(); @@ -92,7 +133,7 @@ pub(crate) async fn report_event_route( /// check if score is in valid range /// check if report reasoning is less than or equal to 750 characters /// check if reporting user is in the reporting room -async fn is_report_valid( +async fn is_event_report_valid( services: &Services, event_id: &EventId, room_id: &RoomId, sender_user: &UserId, reason: &Option, score: Option, pdu: &std::sync::Arc, ) -> Result<()> { diff --git a/src/api/router.rs b/src/api/router.rs index c4275f05..ddd91d11 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -91,6 +91,7 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&client::create_room_route) .ruma_route(&client::redact_event_route) .ruma_route(&client::report_event_route) + .ruma_route(&client::report_room_route) .ruma_route(&client::create_alias_route) .ruma_route(&client::delete_alias_route) .ruma_route(&client::get_alias_route) From bd56d8304561bae45f8578a61e89e020a8387888 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 2 Oct 2024 09:26:28 -0400 Subject: [PATCH 0040/1248] fix room directory regression Signed-off-by: strawberry --- src/service/rooms/directory/mod.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 5666a91a..2112ecef 100644 --- a/src/service/rooms/directory/mod.rs +++ 
b/src/service/rooms/directory/mod.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use conduit::{implement, utils::stream::TryIgnore, Result}; -use database::{Ignore, Map}; -use futures::{Stream, StreamExt}; +use database::Map; +use futures::Stream; use ruma::RoomId; pub struct Service { @@ -35,10 +35,4 @@ pub fn set_not_public(&self, room_id: &RoomId) { self.db.publicroomids.remove(ro pub async fn is_public_room(&self, room_id: &RoomId) -> bool { self.db.publicroomids.get(room_id).await.is_ok() } #[implement(Service)] -pub fn public_rooms(&self) -> impl Stream + Send { - self.db - .publicroomids - .keys() - .ignore_err() - .map(|(room_id, _): (&RoomId, Ignore)| room_id) -} +pub fn public_rooms(&self) -> impl Stream + Send { self.db.publicroomids.keys().ignore_err() } From fa7c1200b55a1d90df57dc31c3d54c92fb89fff0 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 2 Oct 2024 21:38:52 -0400 Subject: [PATCH 0041/1248] miniscule spaces code optimisations still terrible though Signed-off-by: strawberry --- src/service/rooms/spaces/mod.rs | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 17fbf0ef..920424a4 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -62,11 +62,11 @@ impl FromStr for PaginationToken { let mut values = value.split('_'); let mut pag_tok = || { - let mut rooms = vec![]; - - for room in values.next()?.split(',') { - rooms.push(u64::from_str(room).ok()?); - } + let rooms = values + .next()? 
+ .split(',') + .filter_map(|room_s| u64::from_str(room_s).ok()) + .collect(); Some(Self { short_room_ids: rooms, @@ -469,7 +469,7 @@ impl Service { }, )]]; - let mut results = Vec::new(); + let mut results = Vec::with_capacity(limit); while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { if results.len() >= limit { @@ -548,11 +548,12 @@ impl Service { parents.pop_front(); parents.push_back(room); - let mut short_room_ids = vec![]; - - for room in parents { - short_room_ids.push(self.services.short.get_or_create_shortroomid(&room).await); - } + let short_room_ids: Vec<_> = parents + .iter() + .stream() + .filter_map(|room_id| async move { self.services.short.get_shortroomid(room_id).await.ok() }) + .collect() + .await; Some( PaginationToken { @@ -585,7 +586,7 @@ impl Service { .await .map_err(|e| err!(Database("State in space not found: {e}")))?; - let mut children_pdus = Vec::new(); + let mut children_pdus = Vec::with_capacity(state.len()); for (key, id) in state { let (event_type, state_key) = self.services.short.get_statekey_from_short(key).await?; From c6b7c24e99891a8374a7444048c493240f7dbca5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 21:42:25 +0000 Subject: [PATCH 0042/1248] consume all bytes for top-level Ignore; add comments/tweaks Signed-off-by: Jason Volk --- src/database/de.rs | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index fc36560d..9ee52267 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -12,6 +12,7 @@ where let mut deserializer = Deserializer { buf, pos: 0, + seq: false, }; T::deserialize(&mut deserializer).debug_inspect(|_| { @@ -24,6 +25,7 @@ where pub(crate) struct Deserializer<'de> { buf: &'de [u8], pos: usize, + seq: bool, } /// Directive to ignore a record. 
This type can be used to skip deserialization @@ -32,8 +34,11 @@ pub(crate) struct Deserializer<'de> { pub struct Ignore; impl<'de> Deserializer<'de> { + /// Record separator; an intentionally invalid-utf8 byte. const SEP: u8 = b'\xFF'; + /// Determine if the input was fully consumed and error if bytes remaining. + /// This is intended for debug assertions; not optimized for parsing logic. fn finished(&self) -> Result<()> { let pos = self.pos; let len = self.buf.len(); @@ -48,6 +53,20 @@ impl<'de> Deserializer<'de> { ))) } + /// Consume the current record to ignore it. Inside a sequence the next + /// record is skipped but at the top-level all records are skipped such that + /// deserialization completes with self.finished() == Ok. + #[inline] + fn record_ignore(&mut self) { + if self.seq { + self.record_next(); + } else { + self.record_trail(); + } + } + + /// Consume the current record. The position pointer is moved to the start + /// of the next record. Slice of the current record is returned. #[inline] fn record_next(&mut self) -> &'de [u8] { self.buf[self.pos..] @@ -57,8 +76,10 @@ impl<'de> Deserializer<'de> { .expect("remainder of buf even if SEP was not found") } + /// Peek at the first byte of the current record. If all records were + /// consumed None is returned instead. #[inline] - fn record_next_peek_byte(&self) -> Option { + fn record_peek_byte(&self) -> Option { let started = self.pos != 0; let buf = &self.buf[self.pos..]; debug_assert!( @@ -69,6 +90,8 @@ impl<'de> Deserializer<'de> { buf.get::(started.into()).copied() } + /// Consume the record separator such that the position cleanly points to + /// the start of the next record. (Case for some sequences) #[inline] fn record_start(&mut self) { let started = self.pos != 0; @@ -78,8 +101,11 @@ impl<'de> Deserializer<'de> { ); self.inc_pos(started.into()); + self.seq = true; } + /// Consume all remaining bytes, which may include record separators, + /// returning a raw slice. 
#[inline] fn record_trail(&mut self) -> &'de [u8] { let record = &self.buf[self.pos..]; @@ -87,6 +113,7 @@ impl<'de> Deserializer<'de> { record } + /// Increment the position pointer. #[inline] fn inc_pos(&mut self, n: usize) { self.pos = self.pos.saturating_add(n); @@ -142,7 +169,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { V: Visitor<'de>, { match name { - "Ignore" => self.record_next(), + "Ignore" => self.record_ignore(), _ => unimplemented!("Unrecognized deserialization Directive {name:?}"), }; @@ -190,7 +217,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { fn deserialize_i64>(self, visitor: V) -> Result { let bytes: [u8; size_of::()] = self.buf[self.pos..].try_into()?; - self.pos = self.pos.saturating_add(size_of::()); + self.inc_pos(size_of::()); visitor.visit_i64(i64::from_be_bytes(bytes)) } @@ -208,7 +235,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { fn deserialize_u64>(self, visitor: V) -> Result { let bytes: [u8; size_of::()] = self.buf[self.pos..].try_into()?; - self.pos = self.pos.saturating_add(size_of::()); + self.inc_pos(size_of::()); visitor.visit_u64(u64::from_be_bytes(bytes)) } @@ -267,7 +294,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { "deserialize_any: type not expected" ); - match self.record_next_peek_byte() { + match self.record_peek_byte() { Some(b'{') => self.deserialize_map(visitor), _ => self.deserialize_str(visitor), } From 2d049dacc37a7f3c7265006a8ebc39516ce7ee55 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Oct 2024 02:23:28 +0000 Subject: [PATCH 0043/1248] fix get_all_media_keys deserialization Signed-off-by: Jason Volk --- src/service/media/data.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 248e9e1d..b2271883 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -122,8 +122,9 @@ impl Data { let keys: 
Vec> = self .mediaid_file - .keys_prefix_raw(&prefix) + .raw_keys_prefix(&prefix) .ignore_err() + .map(<[u8]>::to_vec) .collect() .await; From bd9a9cc5f84066d7131ded8263a46b1ab57667b8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 22:03:39 +0000 Subject: [PATCH 0044/1248] fix trait-solver issue requiring recursion_limit increase Signed-off-by: Jason Volk --- src/api/mod.rs | 2 -- src/main/main.rs | 2 -- src/router/mod.rs | 2 -- src/service/mod.rs | 1 - src/service/service.rs | 45 ++++++++++++++++++++++++++++++++--------- src/service/services.rs | 2 ++ 6 files changed, 37 insertions(+), 17 deletions(-) diff --git a/src/api/mod.rs b/src/api/mod.rs index 82b857db..96837470 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,5 +1,3 @@ -#![recursion_limit = "192"] - pub mod client; pub mod router; pub mod server; diff --git a/src/main/main.rs b/src/main/main.rs index 8703eef2..8e644a15 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -1,5 +1,3 @@ -#![recursion_limit = "192"] - pub(crate) mod clap; mod mods; mod restart; diff --git a/src/router/mod.rs b/src/router/mod.rs index 67ebc0e3..e123442c 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -1,5 +1,3 @@ -#![recursion_limit = "160"] - mod layers; mod request; mod router; diff --git a/src/service/mod.rs b/src/service/mod.rs index cb8bfcd9..604e3404 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,4 +1,3 @@ -#![recursion_limit = "192"] #![allow(refining_impl_trait)] mod manager; diff --git a/src/service/service.rs b/src/service/service.rs index 03165050..7ec2ea0f 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -51,7 +51,7 @@ pub(crate) struct Args<'a> { /// Dep is a reference to a service used within another service. /// Circular-dependencies between services require this indirection. 
-pub(crate) struct Dep { +pub(crate) struct Dep { dep: OnceLock>, service: Weak, name: &'static str, @@ -62,24 +62,47 @@ pub(crate) type MapType = BTreeMap; pub(crate) type MapVal = (Weak, Weak); pub(crate) type MapKey = String; -impl Deref for Dep { +/// SAFETY: Workaround for a compiler limitation (or bug) where it is Hard to +/// prove the Sync'ness of Dep because services contain circular references +/// to other services through Dep's. The Sync'ness of Dep can still be +/// proved without unsafety by declaring the crate-attribute #![recursion_limit +/// = "192"] but this may take a while. Re-evaluate this when a new trait-solver +/// (such as Chalk) becomes available. +unsafe impl Sync for Dep {} + +/// SAFETY: Ancillary to unsafe impl Sync; while this is not needed to prevent +/// violating the recursion_limit, the trait-solver still spends an inordinate +/// amount of time to prove this. +unsafe impl Send for Dep {} + +impl Deref for Dep { type Target = Arc; /// Dereference a dependency. The dependency must be ready or panics. + #[inline] fn deref(&self) -> &Self::Target { - self.dep.get_or_init(|| { - let service = self - .service - .upgrade() - .expect("services map exists for dependency initialization."); + self.dep.get_or_init( + #[inline(never)] + || self.init(), + ) + } +} - require::(&service, self.name) - }) +impl Dep { + #[inline] + fn init(&self) -> Arc { + let service = self + .service + .upgrade() + .expect("services map exists for dependency initialization."); + + require::(&service, self.name) } } impl<'a> Args<'a> { /// Create a lazy-reference to a service when constructing another Service. + #[inline] pub(crate) fn depend(&'a self, name: &'static str) -> Dep { Dep:: { dep: OnceLock::new(), @@ -90,12 +113,14 @@ impl<'a> Args<'a> { /// Create a reference immediately to a service when constructing another /// Service. The other service must be constructed. 
+ #[inline] pub(crate) fn require(&'a self, name: &str) -> Arc { require::(self.service, name) } } /// Reference a Service by name. Panics if the Service does not exist or was /// incorrectly cast. -pub(crate) fn require(map: &Map, name: &str) -> Arc { +#[inline] +fn require(map: &Map, name: &str) -> Arc { try_get::(map, name) .inspect_err(inspect_log) .expect("Failure to reference service required by another service.") diff --git a/src/service/services.rs b/src/service/services.rs index da22fb2d..0b63a5ca 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -195,6 +195,7 @@ impl Services { } } + #[inline] pub fn try_get(&self, name: &str) -> Result> where T: Any + Send + Sync + Sized, @@ -202,6 +203,7 @@ impl Services { service::try_get::(&self.service, name) } + #[inline] pub fn get(&self, name: &str) -> Option> where T: Any + Send + Sync + Sized, From ba683cf5340ff4321b8e8789b101d923b07bd9d4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Oct 2024 17:17:10 +0000 Subject: [PATCH 0045/1248] fix aliasid_alias key deserialization Signed-off-by: Jason Volk --- src/service/rooms/alias/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 1d44cd2d..f50cc46c 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -101,9 +101,9 @@ impl Service { let prefix = (&room_id, Interfix); self.db .aliasid_alias - .keys_prefix(&prefix) + .keys_raw_prefix(&prefix) .ignore_err() - .ready_for_each(|key: &[u8]| self.db.aliasid_alias.remove(&key)) + .ready_for_each(|key| self.db.aliasid_alias.remove(key)) .await; self.db.alias_roomid.remove(alias.as_bytes()); @@ -161,7 +161,7 @@ impl Service { .aliasid_alias .stream_prefix(&prefix) .ignore_err() - .map(|((Ignore, Ignore), alias): ((Ignore, Ignore), &RoomAliasId)| alias) + .map(|(_, alias): (Ignore, &RoomAliasId)| alias) } #[tracing::instrument(skip(self), level = "debug")] From 
9eace1fbbb8eaaf819d12649e0a777dd5c7f4cf6 Mon Sep 17 00:00:00 2001 From: morguldir Date: Sat, 5 Oct 2024 12:30:05 -0400 Subject: [PATCH 0046/1248] fix sliding sync room type filter regression Signed-off-by: strawberry --- src/api/client/sync.rs | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index c4ff1eeb..5940d7cf 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -6,7 +6,9 @@ use std::{ use axum::extract::State; use conduit::{ - debug, err, error, is_equal_to, + debug, err, error, + error::is_not_found, + is_equal_to, result::IntoIsOk, utils::{ math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, @@ -1887,18 +1889,21 @@ async fn filter_rooms( .iter() .stream() .filter_map(|r| async move { - match services.rooms.state_accessor.get_room_type(r).await { - Err(_) => false, - Ok(result) => { - let result = RoomTypeFilter::from(Some(result)); - if negate { - !filter.contains(&result) - } else { - filter.is_empty() || filter.contains(&result) - } - }, + let room_type = services.rooms.state_accessor.get_room_type(r).await; + + if room_type.as_ref().is_err_and(|e| !is_not_found(e)) { + return None; } - .then_some(r.to_owned()) + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(r.to_owned()) }) .collect() .await From 8eec78e9e0e5076289e98b82b2bb9b4a139be70d Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 5 Oct 2024 12:51:15 -0400 Subject: [PATCH 0047/1248] mark the server user bot as online/offline on shutdown/startup Signed-off-by: strawberry --- src/service/services.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/service/services.rs b/src/service/services.rs index 0b63a5ca..ea81f434 100644 --- a/src/service/services.rs +++ 
b/src/service/services.rs @@ -123,6 +123,14 @@ impl Services { .start() .await?; + // set the server user as online + if self.server.config.allow_local_presence { + _ = self + .presence + .ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Online) + .await; + } + debug_info!("Services startup complete."); Ok(Arc::clone(self)) } @@ -130,6 +138,14 @@ impl Services { pub async fn stop(&self) { info!("Shutting down services..."); + // set the server user as offline + if self.server.config.allow_local_presence { + _ = self + .presence + .ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Offline) + .await; + } + self.interrupt(); if let Some(manager) = self.manager.lock().await.as_ref() { manager.stop().await; From 814b9e28b68dbe6af91d3a397f6a631f1f4e9113 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 9 Oct 2024 03:37:13 +0000 Subject: [PATCH 0048/1248] fix unnecessary re-serializations Signed-off-by: Jason Volk --- src/database/map/rev_stream_from.rs | 3 +-- src/database/map/stream_from.rs | 3 +-- src/service/rooms/metadata/mod.rs | 4 ++-- src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/short/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 4 ++-- src/service/updates/mod.rs | 2 +- 7 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 650cf038..c48f406b 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -18,8 +18,7 @@ where K: Deserialize<'a> + Send, V: Deserialize<'a> + Send, { - let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); - self.rev_stream_raw_from(&key) + self.rev_stream_raw_from(from) .map(keyval::result_deserialize::) } diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 153d5bb6..db828125 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -18,8 +18,7 @@ where K: 
Deserialize<'a> + Send, V: Deserialize<'a> + Send, { - let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); - self.stream_raw_from(&key) + self.stream_raw_from(from) .map(keyval::result_deserialize::) } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 5d4a47c7..d8be6aab 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -85,8 +85,8 @@ pub fn list_banned_rooms(&self) -> impl Stream + Send + '_ { sel #[implement(Service)] #[inline] -pub async fn is_disabled(&self, room_id: &RoomId) -> bool { self.db.disabledroomids.qry(room_id).await.is_ok() } +pub async fn is_disabled(&self, room_id: &RoomId) -> bool { self.db.disabledroomids.get(room_id).await.is_ok() } #[implement(Service)] #[inline] -pub async fn is_banned(&self, room_id: &RoomId) -> bool { self.db.bannedroomids.qry(room_id).await.is_ok() } +pub async fn is_banned(&self, room_id: &RoomId) -> bool { self.db.bannedroomids.get(room_id).await.is_ok() } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index f2323475..8e045658 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -94,6 +94,6 @@ impl Data { } pub(super) async fn is_event_soft_failed(&self, event_id: &EventId) -> bool { - self.softfailedeventids.qry(event_id).await.is_ok() + self.softfailedeventids.get(event_id).await.is_ok() } } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 20082da2..bd8fdcc9 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -197,7 +197,7 @@ pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, boo #[implement(Service)] pub async fn get_shortroomid(&self, room_id: &RoomId) -> Result { - self.db.roomid_shortroomid.qry(room_id).await.deserialized() + self.db.roomid_shortroomid.get(room_id).await.deserialized() } #[implement(Service)] diff --git 
a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index b1a71caf..a6c468f5 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -342,7 +342,7 @@ impl Service { /// Returns the number of users which are currently in a room #[tracing::instrument(skip(self), level = "debug")] pub async fn room_joined_count(&self, room_id: &RoomId) -> Result { - self.db.roomid_joinedcount.qry(room_id).await.deserialized() + self.db.roomid_joinedcount.get(room_id).await.deserialized() } #[tracing::instrument(skip(self), level = "debug")] @@ -366,7 +366,7 @@ impl Service { pub async fn room_invited_count(&self, room_id: &RoomId) -> Result { self.db .roomid_invitedcount - .qry(room_id) + .get(room_id) .await .deserialized() } diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index 4e16e22b..fca63725 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -128,7 +128,7 @@ impl Service { pub async fn last_check_for_updates_id(&self) -> u64 { self.db - .qry(LAST_CHECK_FOR_UPDATES_COUNT) + .get(LAST_CHECK_FOR_UPDATES_COUNT) .await .deserialized() .unwrap_or(0_u64) From 56dd0f51392cd3f21f62cc054838d6f01160f6de Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Oct 2024 22:08:55 +0000 Subject: [PATCH 0049/1248] use loop condition to account for loole channel close Signed-off-by: Jason Volk --- src/service/presence/mod.rs | 7 ++++--- src/service/sending/sender.rs | 3 +-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index 3b5c4caf..82a99bd5 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -55,14 +55,13 @@ impl crate::Service for Service { async fn worker(self: Arc) -> Result<()> { let mut presence_timers = FuturesUnordered::new(); let receiver = self.timer_receiver.lock().await; - loop { - debug_assert!(!receiver.is_closed(), "channel error"); + while !receiver.is_closed() { 
tokio::select! { Some(user_id) = presence_timers.next() => { self.process_presence_timer(&user_id).await.log_err().ok(); }, event = receiver.recv_async() => match event { - Err(_e) => return Ok(()), + Err(_) => break, Ok((user_id, timeout)) => { debug!("Adding timer {}: {user_id} timeout:{timeout:?}", presence_timers.len()); presence_timers.push(presence_timer(user_id, timeout)); @@ -70,6 +69,8 @@ impl crate::Service for Service { }, } } + + Ok(()) } fn interrupt(&self) { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 3a401995..19205a65 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -57,8 +57,7 @@ impl Service { let receiver = self.receiver.lock().await; self.initial_requests(&mut futures, &mut statuses).await; - loop { - debug_assert!(!receiver.is_closed(), "channel error"); + while !receiver.is_closed() { tokio::select! { request = receiver.recv_async() => match request { Ok(request) => self.handle_request(request, &mut futures, &mut statuses).await, From 89a3c807002ea7f6278e30541df7f3249b8fc681 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Oct 2024 21:18:32 +0000 Subject: [PATCH 0050/1248] split admin-room branch from build_and_append_pdu (fixes large stack warning) Signed-off-by: Jason Volk --- src/service/rooms/timeline/mod.rs | 146 ++++++++++++++---------------- 1 file changed, 68 insertions(+), 78 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index f8f770bc..b49e9fad 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -8,13 +8,13 @@ use std::{ }; use conduit::{ - debug, err, error, info, + debug, err, error, implement, info, pdu::{EventHash, PduBuilder, PduCount, PduEvent}, utils, utils::{stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, validated, warn, Err, Error, Result, Server, }; -use futures::{future, future::ready, Future, Stream, StreamExt, TryStreamExt}; +use 
futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryStreamExt}; use ruma::{ api::{client::error::ErrorKind, federation}, canonical_json::to_canonical_value, @@ -858,82 +858,7 @@ impl Service { .await?; if self.services.admin.is_admin_room(&pdu.room_id).await { - match pdu.event_type() { - TimelineEventType::RoomEncryption => { - warn!("Encryption is not allowed in the admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Encryption is not allowed in the admins room", - )); - }, - TimelineEventType::RoomMember => { - let target = pdu - .state_key() - .filter(|v| v.starts_with('@')) - .unwrap_or(sender.as_str()); - let server_user = &self.services.globals.server_user.to_string(); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu"))?; - - if content.membership == MembershipState::Leave { - if target == server_user { - warn!("Server user cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server user cannot leave from admins room.", - )); - } - - let count = self - .services - .state_cache - .room_members(&pdu.room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .ready_filter(|user| *user != target) - .boxed() - .count() - .await; - - if count < 2 { - warn!("Last admin cannot leave from admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Last admin cannot leave from admins room.", - )); - } - } - - if content.membership == MembershipState::Ban && pdu.state_key().is_some() { - if target == server_user { - warn!("Server user cannot be banned in admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server user cannot be banned in admins room.", - )); - } - - let count = self - .services - .state_cache - .room_members(&pdu.room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .ready_filter(|user| *user != target) - .boxed() - 
.count() - .await; - - if count < 2 { - warn!("Last admin cannot be banned in admins room"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Last admin cannot be banned in admins room.", - )); - } - } - }, - _ => {}, - } + self.check_pdu_for_admin_room(&pdu, sender).boxed().await?; } // If redaction event is not authorized, do not append it to the timeline @@ -1298,6 +1223,71 @@ impl Service { } } +#[implement(Service)] +#[tracing::instrument(skip_all, level = "debug")] +async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Result<()> { + match pdu.event_type() { + TimelineEventType::RoomEncryption => { + return Err!(Request(Forbidden(error!("Encryption not supported in admins room.")))); + }, + TimelineEventType::RoomMember => { + let target = pdu + .state_key() + .filter(|v| v.starts_with('@')) + .unwrap_or(sender.as_str()); + + let server_user = &self.services.globals.server_user.to_string(); + + let content: RoomMemberEventContent = pdu.get_content()?; + match content.membership { + MembershipState::Leave => { + if target == server_user { + return Err!(Request(Forbidden(error!("Server user cannot leave the admins room.")))); + } + + let count = self + .services + .state_cache + .room_members(&pdu.room_id) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .ready_filter(|user| *user != target) + .boxed() + .count() + .await; + + if count < 2 { + return Err!(Request(Forbidden(error!("Last admin cannot leave the admins room.")))); + } + }, + + MembershipState::Ban if pdu.state_key().is_some() => { + if target == server_user { + return Err!(Request(Forbidden(error!("Server cannot be banned from admins room.")))); + } + + let count = self + .services + .state_cache + .room_members(&pdu.room_id) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .ready_filter(|user| *user != target) + .boxed() + .count() + .await; + + if count < 2 { + return Err!(Request(Forbidden(error!("Last admin cannot be 
banned from admins room.")))); + } + }, + _ => {}, + }; + }, + _ => {}, + }; + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; From 08a2fecc0ed2e0404446d16e40bc136dfff7b7c7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 26 Sep 2024 04:59:16 +0000 Subject: [PATCH 0051/1248] catch panics at base functions to integrate with other fatal errors. Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/router/Cargo.toml | 13 +++++++------ src/router/mod.rs | 23 ++++++++++++++++++----- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e72c7e80..b9f366e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -763,6 +763,7 @@ dependencies = [ "conduit_core", "conduit_service", "const-str", + "futures", "http", "http-body-util", "hyper", diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 62690194..e1535868 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -54,20 +54,18 @@ axum-server-dual-protocol.workspace = true axum-server-dual-protocol.optional = true axum-server.workspace = true axum.workspace = true +bytes.workspace = true conduit-admin.workspace = true conduit-api.workspace = true conduit-core.workspace = true conduit-service.workspace = true const-str.workspace = true -log.workspace = true -tokio.workspace = true -tower.workspace = true -tracing.workspace = true -bytes.workspace = true -http-body-util.workspace = true +futures.workspace = true http.workspace = true +http-body-util.workspace = true hyper.workspace = true hyper-util.workspace = true +log.workspace = true ruma.workspace = true rustls.workspace = true rustls.optional = true @@ -78,7 +76,10 @@ sentry-tracing.optional = true sentry-tracing.workspace = true sentry.workspace = true serde_json.workspace = true +tokio.workspace = true +tower.workspace = true tower-http.workspace = true +tracing.workspace = true [target.'cfg(unix)'.dependencies] sd-notify.workspace = true diff --git a/src/router/mod.rs b/src/router/mod.rs index 
e123442c..1580f605 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -6,10 +6,11 @@ mod serve; extern crate conduit_core as conduit; -use std::{future::Future, pin::Pin, sync::Arc}; +use std::{panic::AssertUnwindSafe, pin::Pin, sync::Arc}; -use conduit::{Result, Server}; +use conduit::{Error, Result, Server}; use conduit_service::Services; +use futures::{Future, FutureExt, TryFutureExt}; conduit::mod_ctor! {} conduit::mod_dtor! {} @@ -17,15 +18,27 @@ conduit::rustc_flags_capture! {} #[no_mangle] pub extern "Rust" fn start(server: &Arc) -> Pin>> + Send>> { - Box::pin(run::start(server.clone())) + AssertUnwindSafe(run::start(server.clone())) + .catch_unwind() + .map_err(Error::from_panic) + .unwrap_or_else(Err) + .boxed() } #[no_mangle] pub extern "Rust" fn stop(services: Arc) -> Pin> + Send>> { - Box::pin(run::stop(services)) + AssertUnwindSafe(run::stop(services)) + .catch_unwind() + .map_err(Error::from_panic) + .unwrap_or_else(Err) + .boxed() } #[no_mangle] pub extern "Rust" fn run(services: &Arc) -> Pin> + Send>> { - Box::pin(run::run(services.clone())) + AssertUnwindSafe(run::run(services.clone())) + .catch_unwind() + .map_err(Error::from_panic) + .unwrap_or_else(Err) + .boxed() } From a2e5c3d5d3bc9253fb634bf8b1b30ec7087e886f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 00:30:16 +0000 Subject: [PATCH 0052/1248] add FlatOk trait to Result/Option suite Signed-off-by: Jason Volk --- src/core/result.rs | 3 ++- src/core/result/flat_ok.rs | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 src/core/result/flat_ok.rs diff --git a/src/core/result.rs b/src/core/result.rs index 82d67a9c..9a60d19e 100644 --- a/src/core/result.rs +++ b/src/core/result.rs @@ -1,4 +1,5 @@ mod debug_inspect; +mod flat_ok; mod into_is_ok; mod log_debug_err; mod log_err; @@ -7,7 +8,7 @@ mod not_found; mod unwrap_infallible; pub use self::{ - debug_inspect::DebugInspect, into_is_ok::IntoIsOk, 
log_debug_err::LogDebugErr, log_err::LogErr, + debug_inspect::DebugInspect, flat_ok::FlatOk, into_is_ok::IntoIsOk, log_debug_err::LogDebugErr, log_err::LogErr, map_expect::MapExpect, not_found::NotFound, unwrap_infallible::UnwrapInfallible, }; diff --git a/src/core/result/flat_ok.rs b/src/core/result/flat_ok.rs new file mode 100644 index 00000000..e378e5d0 --- /dev/null +++ b/src/core/result/flat_ok.rs @@ -0,0 +1,34 @@ +use super::Result; + +pub trait FlatOk { + /// Equivalent to .transpose().ok().flatten() + fn flat_ok(self) -> Option; + + /// Equivalent to .transpose().ok().flatten().ok_or(...) + fn flat_ok_or(self, err: E) -> Result; + + /// Equivalent to .transpose().ok().flatten().ok_or_else(...) + fn flat_ok_or_else E>(self, err: F) -> Result; +} + +impl FlatOk for Option> { + #[inline] + fn flat_ok(self) -> Option { self.transpose().ok().flatten() } + + #[inline] + fn flat_ok_or(self, err: Ep) -> Result { self.flat_ok().ok_or(err) } + + #[inline] + fn flat_ok_or_else Ep>(self, err: F) -> Result { self.flat_ok().ok_or_else(err) } +} + +impl FlatOk for Result, E> { + #[inline] + fn flat_ok(self) -> Option { self.ok().flatten() } + + #[inline] + fn flat_ok_or(self, err: Ep) -> Result { self.flat_ok().ok_or(err) } + + #[inline] + fn flat_ok_or_else Ep>(self, err: F) -> Result { self.flat_ok().ok_or_else(err) } +} From 4485f36e34d9da010b37d0db832ac6e38c794e7e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 01:00:00 +0000 Subject: [PATCH 0053/1248] add mactors for true/false Signed-off-by: Jason Volk --- src/core/mod.rs | 56 ++++++++++++++++++++++++++++++++++++++++++ src/core/utils/math.rs | 32 ------------------------ 2 files changed, 56 insertions(+), 32 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index e4553186..491d8b4c 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -38,3 +38,59 @@ pub mod mods { () => {}; } } + +/// Functor for falsy +#[macro_export] +macro_rules! 
is_false { + () => { + |x| !x + }; +} + +/// Functor for truthy +#[macro_export] +macro_rules! is_true { + () => { + |x| !!x + }; +} + +/// Functor for equality to zero +#[macro_export] +macro_rules! is_zero { + () => { + $crate::is_matching!(0) + }; +} + +/// Functor for equality i.e. .is_some_and(is_equal!(2)) +#[macro_export] +macro_rules! is_equal_to { + ($val:expr) => { + |x| x == $val + }; +} + +/// Functor for less i.e. .is_some_and(is_less_than!(2)) +#[macro_export] +macro_rules! is_less_than { + ($val:expr) => { + |x| x < $val + }; +} + +/// Functor for matches! i.e. .is_some_and(is_matching!('A'..='Z')) +#[macro_export] +macro_rules! is_matching { + ($val:expr) => { + |x| matches!(x, $val) + }; +} + +/// Functor for !is_empty() +#[macro_export] +macro_rules! is_not_empty { + () => { + |x| !x.is_empty() + }; +} diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index 215de339..ccff6400 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -53,38 +53,6 @@ macro_rules! validated { ($($input:tt)+) => { $crate::expected!($($input)+) } } -/// Functor for equality to zero -#[macro_export] -macro_rules! is_zero { - () => { - $crate::is_matching!(0) - }; -} - -/// Functor for equality i.e. .is_some_and(is_equal!(2)) -#[macro_export] -macro_rules! is_equal_to { - ($val:expr) => { - |x| (x == $val) - }; -} - -/// Functor for less i.e. .is_some_and(is_less_than!(2)) -#[macro_export] -macro_rules! is_less_than { - ($val:expr) => { - |x| (x < $val) - }; -} - -/// Functor for matches! i.e. .is_some_and(is_matching!('A'..='Z')) -#[macro_export] -macro_rules! 
is_matching { - ($val:expr) => { - |x| matches!(x, $val) - }; -} - /// Returns false if the exponential backoff has expired based on the inputs #[inline] #[must_use] From dd9f53080acb354905a610dc235c8720c72d742c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 08:04:16 +0000 Subject: [PATCH 0054/1248] add unwrap_or to TryFutureExtExt Signed-off-by: Jason Volk --- src/core/utils/future/try_ext_ext.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index e444ad94..d30d2cac 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -1,6 +1,9 @@ //! Extended external extensions to futures::TryFutureExt -use futures::{future::MapOkOrElse, TryFuture, TryFutureExt}; +use futures::{ + future::{MapOkOrElse, UnwrapOrElse}, + TryFuture, TryFutureExt, +}; /// This interface is not necessarily complete; feel free to add as-needed. pub trait TryExtExt @@ -19,6 +22,10 @@ where ) -> MapOkOrElse Option, impl FnOnce(Self::Error) -> Option> where Self: Sized; + + fn unwrap_or(self, default: Self::Ok) -> UnwrapOrElse Self::Ok> + where + Self: Sized; } impl TryExtExt for Fut @@ -45,4 +52,12 @@ where { self.map_ok_or(None, Some) } + + #[inline] + fn unwrap_or(self, default: Self::Ok) -> UnwrapOrElse Self::Ok> + where + Self: Sized, + { + self.unwrap_or_else(move |_| default) + } } From 685eadb1713d0d09a48025b74f05407cd6f65742 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Oct 2024 17:07:31 +0000 Subject: [PATCH 0055/1248] add is_not_found as Error member function; tweak interface; add doc comments Signed-off-by: Jason Volk --- src/api/client/sync.rs | 8 +++----- src/api/server/send.rs | 4 ++-- src/core/error/mod.rs | 21 +++++++++++++++++---- src/core/result/not_found.rs | 4 ++-- 4 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index 5940d7cf..f0b26e80 
100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -6,10 +6,8 @@ use std::{ use axum::extract::State; use conduit::{ - debug, err, error, - error::is_not_found, - is_equal_to, - result::IntoIsOk, + debug, err, error, is_equal_to, + result::{FlatOk, IntoIsOk}, utils::{ math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, @@ -1891,7 +1889,7 @@ async fn filter_rooms( .filter_map(|r| async move { let room_type = services.rooms.state_accessor.get_room_type(r).await; - if room_type.as_ref().is_err_and(|e| !is_not_found(e)) { + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { return None; } diff --git a/src/api/server/send.rs b/src/api/server/send.rs index bb424988..50a79e00 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{debug, debug_warn, err, result::LogErr, trace, utils::ReadyExt, warn, Err, Error, Result}; +use conduit::{debug, debug_warn, err, error, result::LogErr, trace, utils::ReadyExt, warn, Err, Error, Result}; use futures::StreamExt; use ruma::{ api::{ @@ -85,7 +85,7 @@ pub(crate) async fn send_transaction_message_route( Ok(send_transaction_message::v1::Response { pdus: resolved_map .into_iter() - .map(|(e, r)| (e, r.map_err(|e| e.sanitized_string()))) + .map(|(e, r)| (e, r.map_err(error::sanitized_message))) .collect(), }) } diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index ad7f9f3c..39fa4340 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -120,17 +120,19 @@ pub enum Error { } impl Error { + //#[deprecated] pub fn bad_database(message: &'static str) -> Self { crate::err!(Database(error!("{message}"))) } /// Sanitizes public-facing errors that can leak sensitive information. 
- pub fn sanitized_string(&self) -> String { + pub fn sanitized_message(&self) -> String { match self { Self::Database(..) => String::from("Database error occurred."), Self::Io(..) => String::from("I/O error occurred."), - _ => self.to_string(), + _ => self.message(), } } + /// Generate the error message string. pub fn message(&self) -> String { match self { Self::Federation(ref origin, ref error) => format!("Answer from {origin}: {error}"), @@ -151,6 +153,8 @@ impl Error { } } + /// Returns the HTTP error code or closest approximation based on error + /// variant. pub fn status_code(&self) -> http::StatusCode { use http::StatusCode; @@ -163,10 +167,17 @@ impl Error { _ => StatusCode::INTERNAL_SERVER_ERROR, } } + + /// Returns true for "not found" errors. This means anything that qualifies + /// as a "not found" from any variant's contained error type. This call is + /// often used as a special case to eliminate a contained Option with a + /// Result where Ok(None) is instead Err(e) if e.is_not_found(). + #[inline] + pub fn is_not_found(&self) -> bool { self.status_code() == http::StatusCode::NOT_FOUND } } impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{self}") } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.message()) } } #[allow(clippy::fallible_impl_from)] @@ -184,6 +195,8 @@ pub fn infallible(_e: &Infallible) { panic!("infallible error should never exist"); } +/// Convenience functor for fundamental Error::sanitized_message(); see member. 
#[inline] #[must_use] -pub fn is_not_found(e: &Error) -> bool { e.status_code() == http::StatusCode::NOT_FOUND } +#[allow(clippy::needless_pass_by_value)] +pub fn sanitized_message(e: Error) -> String { e.sanitized_message() } diff --git a/src/core/result/not_found.rs b/src/core/result/not_found.rs index 69ce821b..d61825af 100644 --- a/src/core/result/not_found.rs +++ b/src/core/result/not_found.rs @@ -1,5 +1,5 @@ use super::Result; -use crate::{error, Error}; +use crate::Error; pub trait NotFound { #[must_use] @@ -8,5 +8,5 @@ pub trait NotFound { impl NotFound for Result { #[inline] - fn is_not_found(&self) -> bool { self.as_ref().is_err_and(error::is_not_found) } + fn is_not_found(&self) -> bool { self.as_ref().is_err_and(Error::is_not_found) } } From 2b2055fe8a47ba9dd1981237ee130923766938f4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Oct 2024 03:40:00 +0000 Subject: [PATCH 0056/1248] parallelize calculate_invite_state Signed-off-by: Jason Volk --- src/api/client/membership.rs | 2 +- src/service/rooms/state/mod.rs | 80 ++++++++++--------------------- src/service/rooms/timeline/mod.rs | 5 +- 3 files changed, 27 insertions(+), 60 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index f89903b4..ae56094c 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1452,7 +1452,7 @@ pub(crate) async fn invite_helper( ) .await?; - let invite_room_state = services.rooms.state.calculate_invite_state(&pdu).await?; + let invite_room_state = services.rooms.state.summary_stripped(&pdu).await; drop(state_lock); diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index c7f6605c..177b7e9b 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -3,6 +3,7 @@ mod data; use std::{ collections::{HashMap, HashSet}, fmt::Write, + iter::once, sync::Arc, }; @@ -13,7 +14,7 @@ use conduit::{ }; use data::Data; use database::{Ignore, Interfix}; -use futures::{pin_mut, 
FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{future::join_all, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ events::{ room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, @@ -288,61 +289,30 @@ impl Service { } } - #[tracing::instrument(skip(self, invite_event), level = "debug")] - pub async fn calculate_invite_state(&self, invite_event: &PduEvent) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Ok(e) = self - .services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "") - .await - { - state.push(e.to_stripped_state_event()); - } - if let Ok(e) = self - .services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "") - .await - { - state.push(e.to_stripped_state_event()); - } - if let Ok(e) = self - .services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomCanonicalAlias, "") - .await - { - state.push(e.to_stripped_state_event()); - } - if let Ok(e) = self - .services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "") - .await - { - state.push(e.to_stripped_state_event()); - } - if let Ok(e) = self - .services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomName, "") - .await - { - state.push(e.to_stripped_state_event()); - } - if let Ok(e) = self - .services - .state_accessor - .room_state_get(&invite_event.room_id, &StateEventType::RoomMember, invite_event.sender.as_str()) - .await - { - state.push(e.to_stripped_state_event()); - } + #[tracing::instrument(skip_all, level = "debug")] + pub async fn summary_stripped(&self, invite: &PduEvent) -> Vec> { + let cells = [ + (&StateEventType::RoomCreate, ""), + (&StateEventType::RoomJoinRules, ""), + (&StateEventType::RoomCanonicalAlias, ""), + (&StateEventType::RoomName, ""), + (&StateEventType::RoomAvatar, ""), + 
(&StateEventType::RoomMember, invite.sender.as_str()), // Add recommended events + ]; - state.push(invite_event.to_stripped_state_event()); - Ok(state) + let fetches = cells.iter().map(|(event_type, state_key)| { + self.services + .state_accessor + .room_state_get(&invite.room_id, event_type, state_key) + }); + + join_all(fetches) + .await + .into_iter() + .filter_map(Result::ok) + .map(|e| e.to_stripped_state_event()) + .chain(once(invite.to_stripped_state_event())) + .collect() } /// Set the state hash to a new version, but does not update state_cache. diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index b49e9fad..84f29c86 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -513,10 +513,7 @@ impl Service { })?; let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.services.state.calculate_invite_state(pdu).await?; - Some(state) - }, + MembershipState::Invite => self.services.state.summary_stripped(pdu).await.into(), _ => None, }; From 48a767d52c3d24c6a460a6defdddaa9c7c707387 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 01:01:25 +0000 Subject: [PATCH 0057/1248] abstract common patterns as core pdu memberfns Signed-off-by: Jason Volk --- src/core/pdu/mod.rs | 67 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 60 insertions(+), 7 deletions(-) diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index cf9ffe64..a94e2bdc 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -18,11 +18,11 @@ use ruma::{ use serde::{Deserialize, Serialize}; use serde_json::{ json, - value::{to_raw_value, RawValue as RawJsonValue}, + value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}, }; pub use self::{builder::PduBuilder, count::PduCount}; -use crate::{err, warn, Error, Result}; +use crate::{err, is_true, warn, Error, Result}; #[derive(Deserialize)] struct ExtractRedactedBecause { @@ -58,8 +58,8 @@ pub struct 
PduEvent { pub unsigned: Option>, pub hashes: EventHash, #[serde(default, skip_serializing_if = "Option::is_none")] - pub signatures: Option>, /* BTreeMap, BTreeMap> */ + // BTreeMap, BTreeMap> + pub signatures: Option>, } impl PduEvent { @@ -170,6 +170,54 @@ impl PduEvent { (self.redacts.clone(), self.content.clone()) } + #[must_use] + pub fn get_content_as_value(&self) -> JsonValue { + self.get_content() + .expect("pdu content must be a valid JSON value") + } + + pub fn get_content(&self) -> Result + where + T: for<'de> Deserialize<'de>, + { + serde_json::from_str(self.content.get()) + .map_err(|e| err!(Database("Failed to deserialize pdu content into type: {e}"))) + } + + pub fn contains_unsigned_property(&self, property: &str, is_type: F) -> bool + where + F: FnOnce(&JsonValue) -> bool, + { + self.get_unsigned_as_value() + .get(property) + .map(is_type) + .is_some_and(is_true!()) + } + + pub fn get_unsigned_property(&self, property: &str) -> Result + where + T: for<'de> Deserialize<'de>, + { + self.get_unsigned_as_value() + .get_mut(property) + .map(JsonValue::take) + .map(serde_json::from_value) + .ok_or(err!(Request(NotFound("property not found in unsigned object"))))? + .map_err(|e| err!(Database("Failed to deserialize unsigned.{property} into type: {e}"))) + } + + #[must_use] + pub fn get_unsigned_as_value(&self) -> JsonValue { self.get_unsigned::().unwrap_or_default() } + + pub fn get_unsigned(&self) -> Result { + self.unsigned + .as_ref() + .map(|raw| raw.get()) + .map(serde_json::from_str) + .ok_or(err!(Request(NotFound("\"unsigned\" property not found in pdu"))))? 
+ .map_err(|e| err!(Database("Failed to deserialize \"unsigned\" into value: {e}"))) + } + #[tracing::instrument(skip(self), level = "debug")] pub fn to_sync_room_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); @@ -270,8 +318,8 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_state_event(&self) -> Raw { + #[must_use] + pub fn to_state_event_value(&self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -286,7 +334,12 @@ impl PduEvent { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json + } + + #[tracing::instrument(skip(self), level = "debug")] + pub fn to_state_event(&self) -> Raw { + serde_json::from_value(self.to_state_event_value()).expect("Raw::from_value always works") } #[tracing::instrument(skip(self), level = "debug")] From da34b43302d8e0d66dc218a1612e5a6eb18cb710 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Oct 2024 07:57:18 +0000 Subject: [PATCH 0058/1248] abstract account-data deserializations for serde_json::from_elim Signed-off-by: Jason Volk --- src/admin/query/account_data.rs | 9 +-- src/admin/user/commands.rs | 42 ++++------ src/api/client/config.rs | 20 ++--- src/api/client/push.rs | 117 ++++++++++----------------- src/api/client/tag.rs | 68 ++++++---------- src/service/account_data/mod.rs | 39 ++++++--- src/service/admin/grant.rs | 3 +- src/service/globals/migrations.rs | 11 +-- src/service/rooms/state_cache/mod.rs | 14 +--- src/service/rooms/timeline/mod.rs | 3 +- src/service/sending/sender.rs | 3 +- src/service/users/mod.rs | 14 +--- 12 files changed, 133 insertions(+), 210 deletions(-) diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index 896bf95c..ea45eb16 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,9 +1,6 @@ use clap::Subcommand; use 
conduit::Result; -use ruma::{ - events::{room::message::RoomMessageEventContent, RoomAccountDataEventType}, - RoomId, UserId, -}; +use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; use crate::Command; @@ -25,7 +22,7 @@ pub(crate) enum AccountDataCommand { /// Full user ID user_id: Box, /// Account data event type - kind: RoomAccountDataEventType, + kind: String, /// Optional room ID of the account data room_id: Option>, }, @@ -60,7 +57,7 @@ pub(super) async fn process(subcommand: AccountDataCommand, context: &Command<'_ let timer = tokio::time::Instant::now(); let results = services .account_data - .get(room_id.as_deref(), &user_id, kind) + .get_raw(room_id.as_deref(), &user_id, &kind) .await; let query_time = timer.elapsed(); diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 1b086856..562bb9c7 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -501,20 +501,16 @@ pub(super) async fn put_room_tag( ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; - let event = self + let mut tags_event = self .services .account_data - .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag) - .await; - - let mut tags_event = event.map_or_else( - |_| TagEvent { + .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) + .await + .unwrap_or(TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, - }, - |e| serde_json::from_str(e.get()).expect("Bad account data in database for user {user_id}"), - ); + }); tags_event .content @@ -542,20 +538,16 @@ pub(super) async fn delete_room_tag( ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; - let event = self + let mut tags_event = self .services .account_data - .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag) - .await; - - let mut tags_event = event.map_or_else( - |_| TagEvent { + .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) + .await + 
.unwrap_or(TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, - }, - |e| serde_json::from_str(e.get()).expect("Bad account data in database for user {user_id}"), - ); + }); tags_event.content.tags.remove(&tag.clone().into()); @@ -578,20 +570,16 @@ pub(super) async fn delete_room_tag( pub(super) async fn get_room_tags(&self, user_id: String, room_id: Box) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; - let event = self + let tags_event = self .services .account_data - .get(Some(&room_id), &user_id, RoomAccountDataEventType::Tag) - .await; - - let tags_event = event.map_or_else( - |_| TagEvent { + .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) + .await + .unwrap_or(TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, - }, - |e| serde_json::from_str(e.get()).expect("Bad account data in database for user {user_id}"), - ); + }); Ok(RoomMessageEventContent::notice_markdown(format!( "```\n{:#?}\n```", diff --git a/src/api/client/config.rs b/src/api/client/config.rs index 33b85136..d06cc072 100644 --- a/src/api/client/config.rs +++ b/src/api/client/config.rs @@ -58,18 +58,14 @@ pub(crate) async fn get_global_account_data_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = services + let account_data: ExtractGlobalEventContent = services .account_data - .get(None, sender_user, body.event_type.to_string().into()) + .get_global(sender_user, body.event_type.clone()) .await .map_err(|_| err!(Request(NotFound("Data not found."))))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
- .content; - Ok(get_global_account_data::v3::Response { - account_data, + account_data: account_data.content, }) } @@ -81,18 +77,14 @@ pub(crate) async fn get_room_account_data_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = services + let account_data: ExtractRoomEventContent = services .account_data - .get(Some(&body.room_id), sender_user, body.event_type.clone()) + .get_room(&body.room_id, sender_user, body.event_type.clone()) .await .map_err(|_| err!(Request(NotFound("Data not found."))))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? - .content; - Ok(get_room_account_data::v3::Response { - account_data, + account_data: account_data.content, }) } diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 39095199..103c0c5e 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -13,7 +13,7 @@ use ruma::{ GlobalAccountDataEventType, }, push::{InsertPushRuleError, RemovePushRuleError, Ruleset}, - CanonicalJsonObject, + CanonicalJsonObject, CanonicalJsonValue, }; use service::Services; @@ -27,38 +27,23 @@ pub(crate) async fn get_pushrules_all_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let global_ruleset: Ruleset; - - let event = services + let Some(content_value) = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) - .await; - - let Ok(event) = event else { + .get_global::(sender_user, GlobalAccountDataEventType::PushRules) + .await + .ok() + .and_then(|event| event.get("content").cloned()) + .filter(CanonicalJsonValue::is_object) + else { // user somehow has non-existent push rule event. 
recreate it and return server // default silently return recreate_push_rules_and_return(&services, sender_user).await; }; - let value = serde_json::from_str::(event.get()) + let account_data_content = serde_json::from_value::(content_value.into()) .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; - let Some(content_value) = value.get("content") else { - // user somehow has a push rule event with no content key, recreate it and - // return server default silently - return recreate_push_rules_and_return(&services, sender_user).await; - }; - - if content_value.to_string().is_empty() { - // user somehow has a push rule event with empty content, recreate it and return - // server default silently - return recreate_push_rules_and_return(&services, sender_user).await; - } - - let account_data_content = serde_json::from_value::(content_value.clone().into()) - .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; - - global_ruleset = account_data_content.global; + let global_ruleset: Ruleset = account_data_content.global; Ok(get_pushrules_all::v3::Response { global: global_ruleset, @@ -73,17 +58,14 @@ pub(crate) async fn get_pushrule_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services + let event: PushRulesEvent = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(sender_user, GlobalAccountDataEventType::PushRules) .await - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
- .content; - - let rule = account_data + let rule = event + .content .global .get(body.kind.clone(), &body.rule_id) .map(Into::into); @@ -113,14 +95,11 @@ pub(crate) async fn set_pushrule_route( )); } - let event = services + let mut account_data: PushRulesEvent = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(sender_user, GlobalAccountDataEventType::PushRules) .await - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; - - let mut account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; if let Err(error) = account_data @@ -181,21 +160,18 @@ pub(crate) async fn get_pushrule_actions_route( )); } - let event = services + let event: PushRulesEvent = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(sender_user, GlobalAccountDataEventType::PushRules) .await - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
- .content; - - let global = account_data.global; - let actions = global + let actions = event + .content + .global .get(body.kind.clone(), &body.rule_id) .map(|rule| rule.actions().to_owned()) - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."))?; + .ok_or(err!(Request(NotFound("Push rule not found."))))?; Ok(get_pushrule_actions::v3::Response { actions, @@ -217,14 +193,11 @@ pub(crate) async fn set_pushrule_actions_route( )); } - let event = services + let mut account_data: PushRulesEvent = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(sender_user, GlobalAccountDataEventType::PushRules) .await - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; - - let mut account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; if account_data .content @@ -263,20 +236,18 @@ pub(crate) async fn get_pushrule_enabled_route( )); } - let event = services + let event: PushRulesEvent = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(sender_user, GlobalAccountDataEventType::PushRules) .await - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; - - let global = account_data.content.global; - let enabled = global + let enabled = event + .content + .global .get(body.kind.clone(), &body.rule_id) .map(ruma::push::AnyPushRuleRef::enabled) - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."))?; + .ok_or(err!(Request(NotFound("Push rule not found."))))?; Ok(get_pushrule_enabled::v3::Response { enabled, @@ 
-298,14 +269,11 @@ pub(crate) async fn set_pushrule_enabled_route( )); } - let event = services + let mut account_data: PushRulesEvent = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(sender_user, GlobalAccountDataEventType::PushRules) .await - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; - - let mut account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; if account_data .content @@ -344,14 +312,11 @@ pub(crate) async fn delete_pushrule_route( )); } - let event = services + let mut account_data: PushRulesEvent = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(sender_user, GlobalAccountDataEventType::PushRules) .await - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; - - let mut account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; if let Err(error) = account_data .content diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index bcd0f817..b5fa19e3 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -9,7 +9,7 @@ use ruma::{ }, }; -use crate::{Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` /// @@ -21,21 +21,15 @@ pub(crate) async fn update_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services + let mut tags_event = services .account_data - .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag) - .await; - - let mut tags_event = event.map_or_else( - |_| { - Ok(TagEvent { - content: 
TagEventContent { - tags: BTreeMap::new(), - }, - }) - }, - |e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")), - )?; + .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) + .await + .unwrap_or(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }); tags_event .content @@ -65,21 +59,15 @@ pub(crate) async fn delete_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services + let mut tags_event = services .account_data - .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag) - .await; - - let mut tags_event = event.map_or_else( - |_| { - Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }) - }, - |e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")), - )?; + .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) + .await + .unwrap_or(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }); tags_event.content.tags.remove(&body.tag.clone().into()); @@ -106,21 +94,15 @@ pub(crate) async fn get_tags_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services + let tags_event = services .account_data - .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag) - .await; - - let tags_event = event.map_or_else( - |_| { - Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }) - }, - |e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")), - )?; + .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) + .await + .unwrap_or(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }); Ok(get_tags::v3::Response { tags: tags_event.content.tags, diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 
482229e7..8065ac55 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -5,14 +5,17 @@ use conduit::{ utils::{stream::TryIgnore, ReadyExt}, Err, Error, Result, }; -use database::{Deserialized, Map}; +use database::{Deserialized, Handle, Map}; use futures::{StreamExt, TryFutureExt}; use ruma::{ - events::{AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, RoomAccountDataEventType}, + events::{ + AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, + }, serde::Raw, RoomId, UserId, }; -use serde_json::value::RawValue; +use serde::Deserialize; use crate::{globals, Dep}; @@ -97,18 +100,36 @@ pub async fn update( Ok(()) } -/// Searches the account data for a specific kind. +/// Searches the global account data for a specific kind. #[implement(Service)] -pub async fn get( - &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, -) -> Result> { - let key = (room_id, user_id, kind.to_string()); +pub async fn get_global(&self, user_id: &UserId, kind: GlobalAccountDataEventType) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.get_raw(None, user_id, &kind.to_string()) + .await + .deserialized() +} + +/// Searches the room account data for a specific kind. 
+#[implement(Service)] +pub async fn get_room(&self, room_id: &RoomId, user_id: &UserId, kind: RoomAccountDataEventType) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.get_raw(Some(room_id), user_id, &kind.to_string()) + .await + .deserialized() +} + +#[implement(Service)] +pub async fn get_raw(&self, room_id: Option<&RoomId>, user_id: &UserId, kind: &str) -> Result> { + let key = (room_id, user_id, kind.to_owned()); self.db .roomusertype_roomuserdataid .qry(&key) .and_then(|roomuserdataid| self.db.roomuserdataid_accountdata.get(&roomuserdataid)) .await - .deserialized() } /// Returns all changes to the account data that happened after `since`. diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 4b3ebb88..6e266ca9 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -143,9 +143,8 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R let mut event = self .services .account_data - .get(Some(room_id), user_id, RoomAccountDataEventType::Tag) + .get_room(room_id, user_id, RoomAccountDataEventType::Tag) .await - .and_then(|event| serde_json::from_str(event.get()).map_err(Into::into)) .unwrap_or_else(|_| TagEvent { content: TagEventContent { tags: BTreeMap::new(), diff --git a/src/service/globals/migrations.rs b/src/service/globals/migrations.rs index fc6e477b..334e71c6 100644 --- a/src/service/globals/migrations.rs +++ b/src/service/globals/migrations.rs @@ -215,13 +215,12 @@ async fn db_lt_12(services: &Services) -> Result<()> { }, }; - let raw_rules_list = services + let mut account_data: PushRulesEvent = services .account_data - .get(None, &user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(&user, GlobalAccountDataEventType::PushRules) .await .expect("Username is invalid"); - let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); let rules_list = &mut account_data.content.global; //content rule @@ -294,14 +293,12 @@ async fn 
db_lt_13(services: &Services) -> Result<()> { }, }; - let raw_rules_list = services + let mut account_data: PushRulesEvent = services .account_data - .get(None, &user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(&user, GlobalAccountDataEventType::PushRules) .await .expect("Username is invalid"); - let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); - let user_default_rules = Ruleset::server_default(&user); account_data .content diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index a6c468f5..8539c940 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -146,12 +146,9 @@ impl Service { if let Ok(tag_event) = self .services .account_data - .get(Some(&predecessor.room_id), user_id, RoomAccountDataEventType::Tag) + .get_room(&predecessor.room_id, user_id, RoomAccountDataEventType::Tag) .await - .and_then(|event| { - serde_json::from_str(event.get()) - .map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}")))) - }) { + { self.services .account_data .update(Some(room_id), user_id, RoomAccountDataEventType::Tag, &tag_event) @@ -163,12 +160,9 @@ impl Service { if let Ok(mut direct_event) = self .services .account_data - .get(None, user_id, GlobalAccountDataEventType::Direct.to_string().into()) + .get_global::(user_id, GlobalAccountDataEventType::Direct) .await - .and_then(|event| { - serde_json::from_str::(event.get()) - .map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}")))) - }) { + { let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { if room_ids.iter().any(|r| r == &predecessor.room_id) { diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 84f29c86..7cf06522 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -407,9 +407,8 @@ impl Service { let rules_for_user = self .services 
.account_data - .get(None, user, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(user, GlobalAccountDataEventType::PushRules) .await - .and_then(|event| serde_json::from_str::(event.get()).map_err(Into::into)) .map_or_else(|_| Ruleset::server_default(user), |ev: PushRulesEvent| ev.content.global); let mut highlight = false; diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 19205a65..90977abe 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -539,9 +539,8 @@ impl Service { let rules_for_user = self .services .account_data - .get(None, userid, GlobalAccountDataEventType::PushRules.to_string().into()) + .get_global(userid, GlobalAccountDataEventType::PushRules) .await - .and_then(|event| serde_json::from_str::(event.get()).map_err(Into::into)) .map_or_else( |_| push::Ruleset::server_default(userid), |ev: PushRulesEvent| ev.content.global, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 44d169dd..3ab6b3c3 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -98,19 +98,9 @@ impl Service { pub async fn user_is_ignored(&self, sender_user: &UserId, recipient_user: &UserId) -> bool { self.services .account_data - .get( - None, - recipient_user, - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - ) + .get_global(recipient_user, GlobalAccountDataEventType::IgnoredUserList) .await - .and_then(|event| { - serde_json::from_str::(event.get()) - .map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}")))) - }) - .map_or(false, |ignored| { + .map_or(false, |ignored: IgnoredUserListEvent| { ignored .content .ignored_users From 68315ac1128196c46802216054b31ec517dbfcb2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 09:38:10 +0000 Subject: [PATCH 0059/1248] Add state_get_content(shortid) for serde_json::from elim Signed-off-by: Jason Volk --- src/api/client/sync.rs | 39 ++++++------- 
src/service/rooms/state_accessor/mod.rs | 78 ++++++++++--------------- 2 files changed, 48 insertions(+), 69 deletions(-) diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index f0b26e80..65d62a78 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -14,7 +14,7 @@ use conduit::{ }, warn, PduCount, }; -use futures::{pin_mut, FutureExt, StreamExt, TryFutureExt}; +use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::client::{ error::ErrorKind, @@ -681,20 +681,22 @@ async fn load_joined_room( )) }; - let since_sender_member: Option = if let Some(short) = since_shortstatehash { + let get_sender_member_content = |short| { services .rooms .state_accessor - .state_get(short, &StateEventType::RoomMember, sender_user.as_str()) - .await - .and_then(|pdu| serde_json::from_str(pdu.content.get()).map_err(Into::into)) + .state_get_content(short, &StateEventType::RoomMember, sender_user.as_str()) .ok() - } else { - None }; - let joined_since_last_sync = - since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + let since_sender_member: OptionFuture<_> = since_shortstatehash.map(get_sender_member_content).into(); + + let joined_since_last_sync = since_sender_member + .await + .flatten() + .map_or(true, |content: RoomMemberEventContent| { + content.membership != MembershipState::Join + }); if since_shortstatehash.is_none() || joined_since_last_sync { // Probably since = 0, we will do an initial sync @@ -1296,18 +1298,6 @@ pub(crate) async fn sync_events_v4_route( .await .ok(); - let since_sender_member: Option = if let Some(short) = since_shortstatehash { - services - .rooms - .state_accessor - .state_get(short, &StateEventType::RoomMember, sender_user.as_str()) - .await - .and_then(|pdu| serde_json::from_str(pdu.content.get()).map_err(Into::into)) - .ok() - } else { - None - }; - let encrypted_room = services .rooms .state_accessor @@ -1327,6 +1317,13 @@ pub(crate) async fn 
sync_events_v4_route( .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") .await; + let since_sender_member: Option = services + .rooms + .state_accessor + .state_get_content(since_shortstatehash, &StateEventType::RoomMember, sender_user.as_str()) + .ok() + .await; + let joined_since_last_sync = since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 4c28483c..ece8679d 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -33,8 +33,8 @@ use ruma::{ }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, EventId, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, - UserId, + EventEncryptionAlgorithm, EventId, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, + ServerName, UserId, }; use serde::Deserialize; use serde_json::value::to_raw_value; @@ -125,16 +125,23 @@ impl Service { .await } + /// Returns the deserialized content of the state event with key (`event_type`,`state_key`) at `shortstatehash`. 
+ pub async fn state_get_content( + &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + ) -> Result + where + T: for<'de> Deserialize<'de> + Send, + { + self.state_get(shortstatehash, event_type, state_key) + .await + .and_then(|event| event.get_content()) + } + /// Get membership for given user in state async fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> MembershipState { - self.state_get(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) .await - .map_or(MembershipState::Leave, |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomMemberEventContent| c.membership) - .map_err(|_| Error::bad_database("Invalid room membership event in database.")) - .unwrap() - }) + .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) } /// The user was a joined member at this state (potentially in the past) @@ -171,19 +178,10 @@ impl Service { } let history_visibility = self - .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") .await - .map_or(HistoryVisibility::Shared, |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|e| { - error!( - "Invalid history visibility event in database for room {room_id}, assuming is \"shared\": \ - {e}" - ); - Error::bad_database("Invalid history visibility event in database.") - }) - .unwrap() + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility }); let current_server_members = self @@ -240,19 +238,10 @@ impl Service { let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; let history_visibility = self - .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .state_get_content(shortstatehash, 
&StateEventType::RoomHistoryVisibility, "") .await - .map_or(HistoryVisibility::Shared, |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|e| { - error!( - "Invalid history visibility event in database for room {room_id}, assuming is \"shared\": \ - {e}" - ); - Error::bad_database("Invalid history visibility event in database.") - }) - .unwrap() + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility }); let visibility = match history_visibility { @@ -284,25 +273,18 @@ impl Service { /// the room's history_visibility at that event's state. #[tracing::instrument(skip(self, user_id, room_id))] pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; + if self.services.state_cache.is_joined(user_id, room_id).await { + return true; + } let history_visibility = self - .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "") + .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") .await - .map_or(Ok(HistoryVisibility::Shared), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|e| { - error!( - "Invalid history visibility event in database for room {room_id}, assuming is \"shared\": \ - {e}" - ); - Error::bad_database("Invalid history visibility event in database.") - }) - }) - .unwrap_or(HistoryVisibility::Shared); + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); - currently_member || history_visibility == HistoryVisibility::WorldReadable + history_visibility == HistoryVisibility::WorldReadable } /// Returns the state hash for this pdu. 
From f7af6966b7fbfac3de141f7101bfe8b5e3904c85 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 09:44:43 +0000 Subject: [PATCH 0060/1248] refactor to room_state_get_content() for serde_json::from_ elim Signed-off-by: Jason Volk --- src/api/client/membership.rs | 78 ++++++++++--------------- src/api/client/profile.rs | 57 ++++++++---------- src/api/client/room.rs | 32 ++++------ src/service/rooms/alias/mod.rs | 27 +++++---- src/service/rooms/spaces/mod.rs | 9 +-- src/service/rooms/state_accessor/mod.rs | 23 +++----- src/service/rooms/timeline/mod.rs | 7 +-- 7 files changed, 91 insertions(+), 142 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index ae56094c..a260b8c5 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -389,17 +389,12 @@ pub(crate) async fn kick_user_route( let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - let mut event: RoomMemberEventContent = serde_json::from_str( - services - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) - .await - .map_err(|_| err!(Request(BadState("Cannot kick member that's not in the room."))))? 
- .content - .get(), - ) - .map_err(|_| err!(Database("Invalid member event in database.")))?; + let mut event: RoomMemberEventContent = services + .rooms + .state_accessor + .room_state_get_content(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .await + .map_err(|_| err!(Request(BadState("Cannot kick member that's not in the room."))))?; event.membership = MembershipState::Leave; event.reason.clone_from(&body.reason); @@ -442,10 +437,10 @@ pub(crate) async fn ban_user_route( let event = services .rooms .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .room_state_get_content(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) .await - .map_or( - Ok(RoomMemberEventContent { + .map_or_else( + |_| RoomMemberEventContent { membership: MembershipState::Ban, displayname: None, avatar_url: None, @@ -454,21 +449,17 @@ pub(crate) async fn ban_user_route( blurhash: blurhash.clone(), reason: body.reason.clone(), join_authorized_via_users_server: None, - }), - |event| { - serde_json::from_str(event.content.get()) - .map(|event: RoomMemberEventContent| RoomMemberEventContent { - membership: MembershipState::Ban, - displayname: None, - avatar_url: None, - blurhash: blurhash.clone(), - reason: body.reason.clone(), - join_authorized_via_users_server: None, - ..event - }) - .map_err(|e| err!(Database("Invalid member event in database: {e:?}"))) }, - )?; + |event| RoomMemberEventContent { + membership: MembershipState::Ban, + displayname: None, + avatar_url: None, + blurhash: blurhash.clone(), + reason: body.reason.clone(), + join_authorized_via_users_server: None, + ..event + }, + ); services .rooms @@ -503,17 +494,12 @@ pub(crate) async fn unban_user_route( let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - let mut event: RoomMemberEventContent = serde_json::from_str( - services - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, 
body.user_id.as_ref()) - .await - .map_err(|_| err!(Request(BadState("Cannot unban a user who is not banned."))))? - .content - .get(), - ) - .map_err(|e| err!(Database("Invalid member event in database: {e:?}")))?; + let mut event: RoomMemberEventContent = services + .rooms + .state_accessor + .room_state_get_content(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .await + .map_err(|_| err!(Request(BadState("Cannot unban a user who is not banned."))))?; event.membership = MembershipState::Leave; event.reason.clone_from(&body.reason); @@ -1650,14 +1636,13 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, } else { let state_lock = services.rooms.state.mutex.lock(room_id).await; - let member_event = services + let Ok(mut event) = services .rooms .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) - .await; - - // Fix for broken rooms - let Ok(member_event) = member_event else { + .room_state_get_content::(room_id, &StateEventType::RoomMember, user_id.as_str()) + .await + else { + // Fix for broken rooms error!("Trying to leave a room you are not a member of."); services @@ -1677,9 +1662,6 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, return Ok(()); }; - let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get()) - .map_err(|e| err!(Database(error!("Invalid room member event in database: {e}"))))?; - event.membership = MembershipState::Leave; event.reason = reason; diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 495bc8ec..cdc047f0 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -301,10 +301,10 @@ pub async fn update_displayname( // Send a new join membership event into all joined rooms let mut joined_rooms = Vec::new(); for room_id in all_joined_rooms { - let Ok(event) = services + let Ok(content) = services .rooms .state_accessor - .room_state_get(room_id, 
&StateEventType::RoomMember, user_id.as_str()) + .room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str()) .await else { continue; @@ -315,7 +315,7 @@ pub async fn update_displayname( content: to_raw_value(&RoomMemberEventContent { displayname: displayname.clone(), join_authorized_via_users_server: None, - ..serde_json::from_str(event.content.get()).expect("Database contains invalid PDU.") + ..content }) .expect("event is valid, we just created it"), unsigned: None, @@ -354,35 +354,28 @@ pub async fn update_avatar_url( .iter() .try_stream() .and_then(|room_id: &OwnedRoomId| async move { - Ok(( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - avatar_url: avatar_url.clone(), - blurhash: blurhash.clone(), - join_authorized_via_users_server: None, - ..serde_json::from_str( - services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_err(|_| { - Error::bad_database("Tried to send avatar URL update for user not in the room.") - })? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
- }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }, - room_id, - )) + let content = services + .rooms + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str()) + .await?; + + let pdu = PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + avatar_url: avatar_url.clone(), + blurhash: blurhash.clone(), + join_authorized_via_users_server: None, + ..content + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + timestamp: None, + }; + + Ok((pdu, room_id)) }) .ignore_err() .collect() diff --git a/src/api/client/room.rs b/src/api/client/room.rs index 0d8e12a2..e22ad796 100644 --- a/src/api/client/room.rs +++ b/src/api/client/room.rs @@ -664,16 +664,12 @@ pub(crate) async fn upgrade_room_route( let state_lock = services.rooms.state.mutex.lock(&replacement_room).await; // Get the old room creation event - let mut create_event_content = serde_json::from_str::( - services - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomCreate, "") - .await - .map_err(|_| err!(Database("Found room without m.room.create event.")))? 
- .content - .get(), - )?; + let mut create_event_content: CanonicalJsonObject = services + .rooms + .state_accessor + .room_state_get_content(&body.room_id, &StateEventType::RoomCreate, "") + .await + .map_err(|_| err!(Database("Found room without m.room.create event.")))?; // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( @@ -825,16 +821,12 @@ pub(crate) async fn upgrade_room_route( } // Get the old room power levels - let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( - services - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "") - .await - .map_err(|_| err!(Database("Found room without m.room.create event.")))? - .content - .get(), - )?; + let mut power_levels_event_content: RoomPowerLevelsEventContent = services + .rooms + .state_accessor + .room_state_get_content(&body.room_id, &StateEventType::RoomPowerLevels, "") + .await + .map_err(|_| err!(Database("Found room without m.room.power_levels event.")))?; // Setting events_default and invite to the greater of 50 and users_default + 1 let new_level = max( diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index f50cc46c..7fac6be6 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -190,32 +190,31 @@ impl Service { // Always allow the server service account to remove the alias, since there may not be an admin room || server_user == user_id { - Ok(true) - // Checking whether the user is able to change canonical aliases of the - // room - } else if let Ok(event) = self + return Ok(true); + } + + // Checking whether the user is able to change canonical aliases of the room + if let Ok(content) = self .services .state_accessor - .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "") + .room_state_get_content::(&room_id, &StateEventType::RoomPowerLevels, "") .await { - 
serde_json::from_str(event.content.get()) - .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) - .map(|content: RoomPowerLevelsEventContent| { - RoomPowerLevels::from(content).user_can_send_state(user_id, StateEventType::RoomCanonicalAlias) - }) + return Ok(RoomPowerLevels::from(content).user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)); + } + // If there is no power levels event, only the room creator can change // canonical aliases - } else if let Ok(event) = self + if let Ok(event) = self .services .state_accessor .room_state_get(&room_id, &StateEventType::RoomCreate, "") .await { - Ok(event.sender == user_id) - } else { - Err(Error::bad_database("Room has no m.room.create event")) + return Ok(event.sender == user_id); } + + Err!(Database("Room has no m.room.create event")) } async fn who_created_alias(&self, alias: &RoomAliasId) -> Result { diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 920424a4..a30c2cfc 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -380,14 +380,9 @@ impl Service { let join_rule = self .services .state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "") + .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map_or(JoinRule::Invite, |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomJoinRulesEventContent| c.join_rule) - .map_err(|e| err!(Database(error!("Invalid room join rule event in database: {e}")))) - .unwrap() - }); + .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); let allowed_room_ids = self .services diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index ece8679d..3b2c2931 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -338,14 +338,13 @@ impl Service { .map(|c: RoomNameEventContent| c.name) } - pub async fn get_avatar(&self, room_id: 
&RoomId) -> ruma::JsOption { - self.room_state_get(room_id, &StateEventType::RoomAvatar, "") + pub async fn get_avatar(&self, room_id: &RoomId) -> JsOption { + let content = self + .room_state_get_content(room_id, &StateEventType::RoomAvatar, "") .await - .map_or(ruma::JsOption::Undefined, |s| { - serde_json::from_str(s.content.get()) - .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) - .unwrap() - }) + .ok(); + + JsOption::from_option(content) } pub async fn get_member(&self, room_id: &RoomId, user_id: &UserId) -> Result { @@ -416,16 +415,10 @@ impl Service { &self, redacts: &EventId, sender: &UserId, room_id: &RoomId, federation: bool, ) -> Result { if let Ok(event) = self - .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") + .room_state_get_content::(room_id, &StateEventType::RoomPowerLevels, "") .await { - let Ok(event) = serde_json::from_str(event.content.get()) - .map(|content: RoomPowerLevelsEventContent| content.into()) - .map(|event: RoomPowerLevels| event) - else { - return Ok(false); - }; - + let event: RoomPowerLevels = event.into(); Ok(event.user_can_redact_event_of_other(sender) || event.user_can_redact_own_event(sender) && if let Ok(pdu) = self.services.timeline.get_pdu(redacts).await { diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 7cf06522..cc5940e6 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1061,13 +1061,8 @@ impl Service { let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor - .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") + .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") .await - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - .unwrap() - }) .unwrap_or_default(); let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| { From 
55c85f685177eb22f126fdd7382e99959e32e3d8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 09:49:28 +0000 Subject: [PATCH 0061/1248] refactor to pdu.get_content() for serde_json::from_ elim Signed-off-by: Jason Volk --- src/api/client/sync.rs | 46 +++++++------------------ src/service/rooms/event_handler/mod.rs | 10 +++--- src/service/rooms/pdu_metadata/mod.rs | 2 +- src/service/rooms/spaces/mod.rs | 10 +++--- src/service/rooms/state/mod.rs | 17 ++------- src/service/rooms/state_accessor/mod.rs | 4 +-- src/service/rooms/timeline/mod.rs | 36 +++++-------------- 7 files changed, 35 insertions(+), 90 deletions(-) diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index 65d62a78..8c4c6a44 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -635,17 +635,8 @@ async fn load_joined_room( .await? .ready_filter(|(_, pdu)| pdu.kind == RoomMember) .filter_map(|(_, pdu)| async move { - let Ok(content) = serde_json::from_str::(pdu.content.get()) else { - return None; - }; - - let Some(state_key) = &pdu.state_key else { - return None; - }; - - let Ok(user_id) = UserId::parse(state_key) else { - return None; - }; + let content: RoomMemberEventContent = pdu.get_content().ok()?; + let user_id: &UserId = pdu.state_key.as_deref().map(TryInto::try_into).flat_ok()?; if user_id == sender_user { return None; @@ -656,22 +647,17 @@ async fn load_joined_room( return None; } - if !services - .rooms - .state_cache - .is_joined(&user_id, room_id) - .await && services - .rooms - .state_cache - .is_invited(&user_id, room_id) - .await - { + let is_invited = services.rooms.state_cache.is_invited(user_id, room_id); + + let is_joined = services.rooms.state_cache.is_joined(user_id, room_id); + + if !is_joined.await && is_invited.await { return None; } - Some(user_id) + Some(user_id.to_owned()) }) - .collect::>() + .collect::>() .await; Ok::<_, Error>(( @@ -839,11 +825,9 @@ async fn load_joined_room( continue; } - let new_membership = 
serde_json::from_str::(state_event.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; + let content: RoomMemberEventContent = state_event.get_content()?; - match new_membership { + match content.membership { MembershipState::Join => { // A new user joined an encrypted room if !share_encrypted_room(services, sender_user, &user_id, Some(room_id)).await { @@ -1357,12 +1341,8 @@ pub(crate) async fn sync_events_v4_route( continue; } - let new_membership = - serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { + let content: RoomMemberEventContent = pdu.get_content()?; + match content.membership { MembershipState::Join => { // A new user joined an encrypted room if !share_encrypted_room(&services, sender_user, &user_id, Some(room_id)) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 4708a86c..05f9a27a 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -614,9 +614,7 @@ impl Service { } }, _ => { - let content = serde_json::from_str::(incoming_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?; - + let content: RoomRedactionEventContent = incoming_pdu.get_content()?; if let Some(redact_id) = &content.redacts { !self .services @@ -1432,10 +1430,10 @@ impl Service { } fn get_room_version_id(create_event: &PduEvent) -> Result { - let create_event_content: RoomCreateEventContent = serde_json::from_str(create_event.content.get()) - .map_err(|e| err!(Database("Invalid create event: {e}")))?; + let content: RoomCreateEventContent = create_event.get_content()?; + let room_version = content.room_version; - Ok(create_event_content.room_version) + Ok(room_version) } #[inline] diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index dbaebfbf..fb85d031 100644 --- 
a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -102,7 +102,7 @@ impl Service { return false; } - let Ok(content) = serde_json::from_str::(pdu.content.get()) else { + let Ok(content) = pdu.get_content::() else { return false; }; diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index a30c2cfc..5aea5f6a 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -596,12 +596,10 @@ impl Service { .await .map_err(|e| err!(Database("Event {id:?} in space state not found: {e:?}")))?; - if serde_json::from_str::(pdu.content.get()) - .ok() - .map(|c| c.via) - .map_or(true, |v| v.is_empty()) - { - continue; + if let Ok(content) = pdu.get_content::() { + if content.via.is_empty() { + continue; + } } if OwnedRoomId::try_from(state_key).is_ok() { diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 177b7e9b..81760b36 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -93,28 +93,17 @@ impl Service { pin_mut!(event_ids); while let Some(event_id) = event_ids.next().await { - let Ok(pdu) = self.services.timeline.get_pdu_json(&event_id).await else { + let Ok(pdu) = self.services.timeline.get_pdu(&event_id).await else { continue; }; - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - match pdu.kind { TimelineEventType::RoomMember => { - let Ok(membership_event) = serde_json::from_str::(pdu.content.get()) else { + let Some(user_id) = pdu.state_key.as_ref().map(UserId::parse).flat_ok() else { continue; }; - let Some(state_key) = pdu.state_key else { - continue; - }; - - let Ok(user_id) = UserId::parse(state_key) else { + let Ok(membership_event) = pdu.get_content::() else { continue; }; diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 
3b2c2931..3855d92a 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -325,11 +325,9 @@ impl Service { where T: for<'de> Deserialize<'de> + Send, { - use serde_json::from_str; - self.room_state_get(room_id, event_type, state_key) .await - .and_then(|event| from_str::(event.content.get()).map_err(Into::into)) + .and_then(|event| event.get_content()) } pub async fn get_name(&self, room_id: &RoomId) -> Result { diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index cc5940e6..487262e6 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -471,12 +471,7 @@ impl Service { } }, _ => { - let content = - serde_json::from_str::(pdu.content.get()).map_err(|e| { - warn!("Invalid content in redaction pdu: {e}"); - Error::bad_database("Invalid content in redaction pdu") - })?; - + let content: RoomRedactionEventContent = pdu.get_content()?; if let Some(redact_id) = &content.redacts { if self .services @@ -506,11 +501,7 @@ impl Service { let target_user_id = UserId::parse(state_key.clone()).expect("This state_key was previously validated"); - let content = serde_json::from_str::(pdu.content.get()).map_err(|e| { - error!("Invalid room member event content in pdu: {e}"); - Error::bad_database("Invalid room member event content in pdu.") - })?; - + let content: RoomMemberEventContent = pdu.get_content()?; let invite_state = match content.membership { MembershipState::Invite => self.services.state.summary_stripped(pdu).await.into(), _ => None, @@ -533,9 +524,7 @@ impl Service { } }, TimelineEventType::RoomMessage => { - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - + let content: ExtractBody = pdu.get_content()?; if let Some(body) = content.body { self.services.search.index_pdu(shortroomid, &pdu_id, &body); @@ -549,7 +538,7 @@ impl Service { _ => {}, } - if let Ok(content) = 
serde_json::from_str::(pdu.content.get()) { + if let Ok(content) = pdu.get_content::() { if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await { self.services .pdu_metadata @@ -557,7 +546,7 @@ impl Service { } } - if let Ok(content) = serde_json::from_str::(pdu.content.get()) { + if let Ok(content) = pdu.get_content::() { match content.relates_to { Relation::Reply { in_reply_to, @@ -712,10 +701,7 @@ impl Service { .room_state_get(room_id, &event_type.to_string().into(), state_key) .await { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); + unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value()); unsigned.insert( "prev_sender".to_owned(), serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), @@ -874,9 +860,7 @@ impl Service { }; }, _ => { - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|e| err!(Database("Invalid content in redaction pdu: {e:?}")))?; - + let content: RoomRedactionEventContent = pdu.get_content()?; if let Some(redact_id) = &content.redacts { if !self .services @@ -1026,7 +1010,7 @@ impl Service { .await .map_err(|e| err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))))?; - if let Ok(content) = serde_json::from_str::(pdu.content.get()) { + if let Ok(content) = pdu.get_content::() { if let Some(body) = content.body { self.services .search @@ -1200,9 +1184,7 @@ impl Service { drop(insert_lock); if pdu.kind == TimelineEventType::RoomMessage { - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|e| err!(Database("Invalid content in pdu: {e:?}")))?; - + let content: ExtractBody = pdu.get_content()?; if let Some(body) = content.body { self.services.search.index_pdu(shortroomid, &pdu_id, &body); } From d526db681f045f28519a3757f761090599d2a14e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 09:57:43 +0000 Subject: [PATCH 
0062/1248] refactor various patterns for serde_json::from_ elim bump ruma Signed-off-by: Jason Volk --- Cargo.lock | 26 ++++++++++----------- Cargo.toml | 2 +- src/api/client/membership.rs | 24 +++++++------------- src/api/client/state.rs | 28 +++++++---------------- src/api/server/get_missing_events.rs | 24 +++++++++++--------- src/api/server/invite.rs | 31 ++++++++++---------------- src/service/rooms/event_handler/mod.rs | 16 +++++-------- src/service/rooms/state/mod.rs | 1 + src/service/sending/sender.rs | 8 ++----- 9 files changed, 64 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9f366e7..cae6994c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2976,7 +2976,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "assign", "js_int", @@ -2998,7 +2998,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "js_int", "ruma-common", @@ -3010,7 +3010,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "as_variant", "assign", @@ -3033,7 +3033,7 @@ dependencies = [ [[package]] name = "ruma-common" version = 
"0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "as_variant", "base64 0.22.1", @@ -3063,7 +3063,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3087,7 +3087,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "bytes", "http", @@ -3105,7 +3105,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "js_int", "thiserror", @@ -3114,7 +3114,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ 
"js_int", "ruma-common", @@ -3124,7 +3124,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "cfg-if", "once_cell", @@ -3140,7 +3140,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "js_int", "ruma-common", @@ -3152,7 +3152,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "headers", "http", @@ -3165,7 +3165,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3181,7 +3181,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37#e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" dependencies = [ "futures-util", "itertools 0.13.0", diff --git a/Cargo.toml b/Cargo.toml index 18f33375..25d1001d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -315,7 +315,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "e81ed2741b4ebe98fe41cabdfee2ac28a52a8e37" +rev = "f485a0265c67a59df75fc6686787538172fa4cac" features = [ "compat", "rand", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index a260b8c5..fde6099a 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -183,10 +183,8 @@ pub(crate) async fn join_room_by_id_route( .await .unwrap_or_default() .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) - .filter_map(|sender| UserId::parse(sender).ok()) + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()), ); @@ -248,10 +246,8 @@ pub(crate) async fn join_room_by_id_or_alias_route( .await .unwrap_or_default() .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) - .filter_map(|sender| UserId::parse(sender).ok()) + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()), ); @@ -294,10 +290,8 @@ pub(crate) async fn join_room_by_id_or_alias_route( .await .unwrap_or_default() .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: 
serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) - .filter_map(|sender| UserId::parse(sender).ok()) + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()), ); @@ -1708,10 +1702,8 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room servers.extend( invite_state .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) - .filter_map(|sender| UserId::parse(sender).ok()) + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()), ); diff --git a/src/api/client/state.rs b/src/api/client/state.rs index d89c23e8..2a13ba1f 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use axum::extract::State; -use conduit::{err, error, pdu::PduBuilder, Err, Error, Result}; +use conduit::{err, pdu::PduBuilder, utils::BoolExt, Err, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, @@ -137,27 +137,15 @@ pub(crate) async fn get_state_events_for_key_route( )))) })?; - if body + let event_format = body .format .as_ref() - .is_some_and(|f| f.to_lowercase().eq("event")) - { - Ok(get_state_events_for_key::v3::Response { - content: None, - event: serde_json::from_str(event.to_state_event().json().get()).map_err(|e| { - error!("Invalid room state event in database: {}", e); - Error::bad_database("Invalid room state event in database") - })?, - }) - } else { - Ok(get_state_events_for_key::v3::Response { - content: Some(serde_json::from_str(event.content.get()).map_err(|e| { - error!("Invalid room state event content in database: {}", e); - Error::bad_database("Invalid room state 
event content in database") - })?), - event: None, - }) - } + .is_some_and(|f| f.to_lowercase().eq("event")); + + Ok(get_state_events_for_key::v3::Response { + content: event_format.or(|| event.get_content_as_value()), + event: event_format.then(|| event.to_state_event_value()), + }) } /// # `GET /_matrix/client/v3/rooms/{roomid}/state/{eventType}` diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index 7ae0ff60..e267898f 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -2,7 +2,7 @@ use axum::extract::State; use conduit::{Error, Result}; use ruma::{ api::{client::error::ErrorKind, federation::event::get_missing_events}, - OwnedEventId, RoomId, + CanonicalJsonValue, EventId, RoomId, }; use crate::Ruma; @@ -78,17 +78,19 @@ pub(crate) async fn get_missing_events_route( continue; } - queued_events.extend_from_slice( - &serde_json::from_value::>( - serde_json::to_value( - pdu.get("prev_events") - .cloned() - .ok_or_else(|| Error::bad_database("Event in db has no prev_events property."))?, - ) - .expect("canonical json is valid json value"), - ) - .map_err(|_| Error::bad_database("Invalid prev_events in event in database."))?, + let prev_events = pdu + .get("prev_events") + .and_then(CanonicalJsonValue::as_array) + .unwrap_or_default(); + + queued_events.extend( + prev_events + .iter() + .map(<&EventId>::try_from) + .filter_map(Result::ok) + .map(ToOwned::to_owned), ); + events.push( services .sending diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 9968bdf7..dd2374b6 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,11 +1,11 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{utils, warn, Error, PduEvent, Result}; +use conduit::{err, utils, warn, Err, Error, PduEvent, Result}; use ruma::{ api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, 
RoomMemberEventContent}, serde::JsonObject, - CanonicalJsonValue, EventId, OwnedUserId, + CanonicalJsonValue, EventId, OwnedUserId, UserId, }; use crate::Ruma; @@ -79,14 +79,11 @@ pub(crate) async fn create_invite_route( let mut signed_event = utils::to_canonical_object(&body.event) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; - let invited_user: OwnedUserId = serde_json::from_value( - signed_event - .get("state_key") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event has no state_key property."))? - .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user ID."))?; + let invited_user: OwnedUserId = signed_event + .get("state_key") + .try_into() + .map(UserId::to_owned) + .map_err(|e| err!(Request(InvalidParam("Invalid state_key property: {e}"))))?; if !services.globals.server_is_ours(invited_user.server_name()) { return Err(Error::BadRequest( @@ -121,14 +118,10 @@ pub(crate) async fn create_invite_route( // Add event_id back signed_event.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.to_string())); - let sender: OwnedUserId = serde_json::from_value( - signed_event - .get("sender") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event had no sender property."))? 
- .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user ID."))?; + let sender: &UserId = signed_event + .get("sender") + .try_into() + .map_err(|e| err!(Request(InvalidParam("Invalid sender property: {e}"))))?; if services.rooms.metadata.is_banned(&body.room_id).await && !services.users.is_admin(&invited_user).await { return Err(Error::BadRequest( @@ -171,7 +164,7 @@ pub(crate) async fn create_invite_route( &body.room_id, &invited_user, RoomMemberEventContent::new(MembershipState::Invite), - &sender, + sender, Some(invite_state), body.via.clone(), true, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 05f9a27a..f8042b67 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -30,8 +30,8 @@ use ruma::{ int, serde::Base64, state_res::{self, EventTypeExt, RoomVersion, StateMap}, - uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, - RoomVersionId, ServerName, + uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, + ServerName, UserId, }; use tokio::sync::RwLock; @@ -157,14 +157,10 @@ impl Service { self.acl_check(origin, room_id).await?; // 1.3.2 Check room ACL on sender's server name - let sender: OwnedUserId = serde_json::from_value( - value - .get("sender") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "PDU does not have a sender key"))? 
- .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "User ID in sender is invalid"))?; + let sender: &UserId = value + .get("sender") + .try_into() + .map_err(|e| err!(Request(InvalidParam("PDU does not have a valid sender key: {e}"))))?; self.acl_check(sender.server_name(), room_id).await?; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 81760b36..cfcb2da6 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -9,6 +9,7 @@ use std::{ use conduit::{ err, + result::FlatOk, utils::{calculate_hash, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard}, warn, PduEvent, Result, }; diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 90977abe..5c0a324b 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -528,12 +528,8 @@ impl Service { for pdu in pdus { // Redacted events are not notification targets (we don't send push for them) - if let Some(unsigned) = &pdu.unsigned { - if let Ok(unsigned) = serde_json::from_str::(unsigned.get()) { - if unsigned.get("redacted_because").is_some() { - continue; - } - } + if pdu.contains_unsigned_property("redacted_because", serde_json::Value::is_string) { + continue; } let rules_for_user = self From 57e0a5f65dce2be514d0bc45dbfb26b5c5b0cd00 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 10:02:24 +0000 Subject: [PATCH 0063/1248] additional database stream deserializations for serde_json::from_ elim Signed-off-by: Jason Volk --- src/service/key_backups/mod.rs | 64 ++++++++------------------- src/service/pusher/mod.rs | 2 +- src/service/rooms/state_cache/data.rs | 62 +++++++++++++------------- src/service/users/mod.rs | 11 ++--- 4 files changed, 57 insertions(+), 82 deletions(-) diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index decf32f7..55263eeb 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,9 +1,9 @@ 
use std::{collections::BTreeMap, sync::Arc}; use conduit::{ - err, implement, utils, + err, implement, utils::stream::{ReadyExt, TryIgnore}, - Err, Error, Result, + Err, Result, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::StreamExt; @@ -110,57 +110,35 @@ pub async fn update_backup( #[implement(Service)] pub async fn get_latest_backup_version(&self, user_id: &UserId) -> Result { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + type Key<'a> = (&'a UserId, &'a str); + let last_possible_key = (user_id, u64::MAX); self.db .backupid_algorithm - .rev_raw_keys_from(&last_possible_key) + .rev_keys_from(&last_possible_key) .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) + .ready_take_while(|(user_id_, _): &Key<'_>| *user_id_ == user_id) + .map(|(_, version): Key<'_>| version.to_owned()) .next() .await .ok_or_else(|| err!(Request(NotFound("No backup versions found")))) - .and_then(|key| { - utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) - }) } #[implement(Service)] pub async fn get_latest_backup(&self, user_id: &UserId) -> Result<(String, Raw)> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + type Key<'a> = (&'a UserId, &'a str); + type KeyVal<'a> = (Key<'a>, Raw); + let last_possible_key = (user_id, u64::MAX); self.db .backupid_algorithm - .rev_raw_stream_from(&last_possible_key) + .rev_stream_from(&last_possible_key) .ignore_err() - .ready_take_while(move |(key, _)| key.starts_with(&prefix)) + .ready_take_while(|((user_id_, _), _): &KeyVal<'_>| *user_id_ == user_id) + .map(|((_, version), algorithm): KeyVal<'_>| 
(version.to_owned(), algorithm)) .next() .await .ok_or_else(|| err!(Request(NotFound("No backup found")))) - .and_then(|(key, val)| { - let version = utils::string_from_bytes( - key.rsplit(|&b| b == 0xFF) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; - - let algorithm = serde_json::from_slice(val) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid."))?; - - Ok((version, algorithm)) - }) } #[implement(Service)] @@ -223,7 +201,8 @@ pub async fn get_etag(&self, user_id: &UserId, version: &str) -> String { #[implement(Service)] pub async fn get_all(&self, user_id: &UserId, version: &str) -> BTreeMap { - type KeyVal<'a> = ((Ignore, Ignore, &'a RoomId, &'a str), &'a [u8]); + type Key<'a> = (Ignore, Ignore, &'a RoomId, &'a str); + type KeyVal<'a> = (Key<'a>, Raw); let mut rooms = BTreeMap::::new(); let default = || RoomKeyBackup { @@ -235,13 +214,12 @@ pub async fn get_all(&self, user_id: &UserId, version: &str) -> BTreeMap| { - let key_data = serde_json::from_slice(value).expect("Invalid KeyBackupData JSON"); + .ready_for_each(|((_, _, room_id, session_id), key_backup_data): KeyVal<'_>| { rooms .entry(room_id.into()) .or_insert_with(default) .sessions - .insert(session_id.into(), key_data); + .insert(session_id.into(), key_backup_data); }) .await; @@ -252,18 +230,14 @@ pub async fn get_all(&self, user_id: &UserId, version: &str) -> BTreeMap BTreeMap> { - type KeyVal<'a> = ((Ignore, Ignore, Ignore, &'a str), &'a [u8]); + type KeyVal<'a> = ((Ignore, Ignore, Ignore, &'a str), Raw); let prefix = (user_id, version, room_id, Interfix); self.db .backupkeyid_backup .stream_prefix(&prefix) .ignore_err() - .map(|((.., session_id), value): KeyVal<'_>| { - let session_id = session_id.to_owned(); - let key_backup_data = serde_json::from_slice(value).expect("Invalid KeyBackupData JSON"); - (session_id, key_backup_data) - }) + .map(|((.., session_id), key_backup_data): 
KeyVal<'_>| (session_id.to_owned(), key_backup_data)) .collect() .await } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 8d8b553f..e7b1824a 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -99,7 +99,7 @@ impl Service { .senderkey_pusher .stream_prefix(&prefix) .ignore_err() - .map(|(_, val): (Ignore, &[u8])| serde_json::from_slice(val).expect("Invalid Pusher in db.")) + .map(|(_, pusher): (Ignore, Pusher)| pusher) .collect() .await } diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index f3ccaf10..6e01e49d 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, RwLock}, }; -use conduit::{utils, utils::stream::TryIgnore, Error, Result}; +use conduit::{utils::stream::TryIgnore, Result}; use database::{Deserialized, Interfix, Map}; use futures::{Stream, StreamExt}; use ruma::{ @@ -135,20 +135,31 @@ impl Data { pub(super) fn rooms_invited<'a>( &'a self, user_id: &'a UserId, ) -> impl Stream + Send + 'a { + type Key<'a> = (&'a UserId, &'a RoomId); + type KeyVal<'a> = (Key<'a>, Raw>); + let prefix = (user_id, Interfix); self.userroomid_invitestate - .stream_raw_prefix(&prefix) + .stream_prefix(&prefix) .ignore_err() - .map(|(key, val)| { - let room_id = key.rsplit(|&b| b == 0xFF).next().unwrap(); - let room_id = utils::string_from_bytes(room_id).unwrap(); - let room_id = RoomId::parse(room_id).unwrap(); - let state = serde_json::from_slice(val) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate.")) - .unwrap(); + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() + } - (room_id, state) - }) + /// Returns an iterator over all rooms a user left. 
+ #[inline] + pub(super) fn rooms_left<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + type Key<'a> = (&'a UserId, &'a RoomId); + type KeyVal<'a> = (Key<'a>, Raw>>); + + let prefix = (user_id, Interfix); + self.userroomid_leftstate + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() } #[tracing::instrument(skip(self), level = "debug")] @@ -156,7 +167,11 @@ impl Data { &self, user_id: &UserId, room_id: &RoomId, ) -> Result>> { let key = (user_id, room_id); - self.userroomid_invitestate.qry(&key).await.deserialized() + self.userroomid_invitestate + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) } #[tracing::instrument(skip(self), level = "debug")] @@ -164,25 +179,10 @@ impl Data { &self, user_id: &UserId, room_id: &RoomId, ) -> Result>> { let key = (user_id, room_id); - self.userroomid_leftstate.qry(&key).await.deserialized() - } - - /// Returns an iterator over all rooms a user left. 
- #[inline] - pub(super) fn rooms_left<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { - let prefix = (user_id, Interfix); self.userroomid_leftstate - .stream_raw_prefix(&prefix) - .ignore_err() - .map(|(key, val)| { - let room_id = key.rsplit(|&b| b == 0xFF).next().unwrap(); - let room_id = utils::string_from_bytes(room_id).unwrap(); - let room_id = RoomId::parse(room_id).unwrap(); - let state = serde_json::from_slice(val) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate.")) - .unwrap(); - - (room_id, state) - }) + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) } } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 3ab6b3c3..71a93666 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; use conduit::{ debug_warn, err, utils, - utils::{stream::TryIgnore, string::Unquoted, ReadyExt, TryReadyExt}, + utils::{stream::TryIgnore, string::Unquoted, ReadyExt}, warn, Err, Error, Result, Server, }; use database::{Deserialized, Ignore, Interfix, Map}; @@ -749,9 +749,9 @@ impl Service { let prefix = (user_id, device_id, Interfix); self.db .todeviceid_events - .stream_raw_prefix(&prefix) - .ready_and_then(|(_, val)| serde_json::from_slice(val).map_err(Into::into)) + .stream_prefix(&prefix) .ignore_err() + .map(|(_, val): (Ignore, Raw)| val) } pub async fn remove_to_device_events(&self, user_id: &UserId, device_id: &DeviceId, until: u64) { @@ -812,11 +812,12 @@ impl Service { } pub fn all_devices_metadata<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + let key = (user_id, Interfix); self.db .userdeviceid_metadata - .stream_raw_prefix(&(user_id, Interfix)) - .ready_and_then(|(_, val)| serde_json::from_slice::(val).map_err(Into::into)) + .stream_prefix(&key) .ignore_err() + .map(|(_, val): (Ignore, Device)| val) } /// Creates a new sync filter. 
Returns the filter id. From f503ed918c90720c28f978c2851d252e21920a29 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Oct 2024 10:03:31 +0000 Subject: [PATCH 0064/1248] misc cleanup Signed-off-by: Jason Volk --- src/api/client/membership.rs | 33 ++++++++----------- src/api/client/state.rs | 2 +- src/api/server/invite.rs | 28 ++++------------ src/api/server/send.rs | 22 ++++++------- src/service/admin/mod.rs | 2 +- .../rooms/event_handler/parse_incoming_pdu.rs | 30 ++++++++--------- src/service/rooms/timeline/mod.rs | 3 +- 7 files changed, 49 insertions(+), 71 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index fde6099a..f21f3d7d 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1333,10 +1333,8 @@ pub async fn validate_and_add_event_id( services: &Services, pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock>>, ) -> Result<(OwnedEventId, CanonicalJsonObject)> { - let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - debug_error!("Invalid PDU in server response: {pdu:#?}"); - err!(BadServerResponse("Invalid PDU in server response: {e:?}")) - })?; + let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()) + .map_err(|e| err!(BadServerResponse(debug_error!("Invalid PDU in server response: {e:?}"))))?; let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&value, room_version).expect("ruma can calculate reference hashes") @@ -1478,10 +1476,8 @@ pub(crate) async fn invite_helper( if *pdu.event_id != *event_id { warn!( - "Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", + "Server {} changed invite event, that's not allowed in the spec: ours: {pdu_json:?}, theirs: {value:?}", user_id.server_name(), - pdu_json, - value ); } @@ -1564,20 +1560,19 @@ pub(crate) async fn invite_helper( // Make a user leave all their joined rooms, forgets all rooms, and ignores // errors pub 
async fn leave_all_rooms(services: &Services, user_id: &UserId) { - let all_rooms: Vec<_> = services + let rooms_joined = services .rooms .state_cache .rooms_joined(user_id) - .map(ToOwned::to_owned) - .chain( - services - .rooms - .state_cache - .rooms_invited(user_id) - .map(|(r, _)| r), - ) - .collect() - .await; + .map(ToOwned::to_owned); + + let rooms_invited = services + .rooms + .state_cache + .rooms_invited(user_id) + .map(|(r, _)| r); + + let all_rooms: Vec<_> = rooms_joined.chain(rooms_invited).collect().await; for room_id in all_rooms { // ignore errors @@ -1601,7 +1596,7 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, .await { if let Err(e) = remote_leave_room(services, user_id, room_id).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); + warn!("Failed to leave room {user_id} remotely: {e}"); // Don't tell the client about this error } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 2a13ba1f..1396ae77 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -176,7 +176,7 @@ async fn send_state_event_for_key_helper( .build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), - content: serde_json::from_str(json.json().get()).expect("content is valid json"), + content: serde_json::from_str(json.json().get())?, unsigned: None, state_key: Some(state_key), redacts: None, diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index dd2374b6..447e54be 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -47,10 +47,7 @@ pub(crate) async fn create_invite_route( .forbidden_remote_server_names .contains(&server.to_owned()) { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } @@ -64,15 +61,13 @@ pub(crate) async fn create_invite_route( "Received federated/remote invite from banned server {origin} 
for room ID {}. Rejecting.", body.room_id ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + + return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(via) = &body.via { if via.is_empty() { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "via field must not be empty.")); + return Err!(Request(InvalidParam("via field must not be empty."))); } } @@ -86,10 +81,7 @@ pub(crate) async fn create_invite_route( .map_err(|e| err!(Request(InvalidParam("Invalid state_key property: {e}"))))?; if !services.globals.server_is_ours(invited_user.server_name()) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "User does not belong to this homeserver.", - )); + return Err!(Request(InvalidParam("User does not belong to this homeserver."))); } // Make sure we're not ACL'ed from their room. @@ -124,17 +116,11 @@ pub(crate) async fn create_invite_route( .map_err(|e| err!(Request(InvalidParam("Invalid sender property: {e}"))))?; if services.rooms.metadata.is_banned(&body.room_id).await && !services.users.is_admin(&invited_user).await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "This room is banned on this homeserver.", - )); + return Err!(Request(Forbidden("This room is banned on this homeserver."))); } if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "This server does not allow room invites.", - )); + return Err!(Request(Forbidden("This server does not allow room invites."))); } let mut invite_state = body.invite_room_state.clone(); diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 50a79e00..f6916ccf 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -16,9 +16,11 @@ use ruma::{ }, }, events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, + serde::Raw, to_device::DeviceIdOrAllDevices, OwnedEventId, ServerName, 
}; +use serde_json::value::RawValue as RawJsonValue; use tokio::sync::RwLock; use crate::{ @@ -70,8 +72,8 @@ pub(crate) async fn send_transaction_message_route( "Starting txn", ); - let resolved_map = handle_pdus(&services, &client, &body, origin, &txn_start_time).await; - handle_edus(&services, &client, &body, origin).await; + let resolved_map = handle_pdus(&services, &client, &body.pdus, origin, &txn_start_time).await; + handle_edus(&services, &client, &body.edus, origin).await; debug!( pdus = ?body.pdus.len(), @@ -91,11 +93,10 @@ pub(crate) async fn send_transaction_message_route( } async fn handle_pdus( - services: &Services, _client: &IpAddr, body: &Ruma, origin: &ServerName, - txn_start_time: &Instant, + services: &Services, _client: &IpAddr, pdus: &[Box], origin: &ServerName, txn_start_time: &Instant, ) -> ResolvedMap { - let mut parsed_pdus = Vec::with_capacity(body.pdus.len()); - for pdu in &body.pdus { + let mut parsed_pdus = Vec::with_capacity(pdus.len()); + for pdu in pdus { parsed_pdus.push(match services.rooms.event_handler.parse_incoming_pdu(pdu).await { Ok(t) => t, Err(e) => { @@ -162,11 +163,8 @@ async fn handle_pdus( resolved_map } -async fn handle_edus( - services: &Services, client: &IpAddr, body: &Ruma, origin: &ServerName, -) { - for edu in body - .edus +async fn handle_edus(services: &Services, client: &IpAddr, edus: &[Raw], origin: &ServerName) { + for edu in edus .iter() .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) { @@ -178,7 +176,7 @@ async fn handle_edus( Edu::DirectToDevice(content) => handle_edu_direct_to_device(services, client, origin, content).await, Edu::SigningKeyUpdate(content) => handle_edu_signing_key_update(services, client, origin, content).await, Edu::_Custom(ref _custom) => { - debug_warn!(?body.edus, "received custom/unknown EDU"); + debug_warn!(?edus, "received custom/unknown EDU"); }, } } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 12eacc8f..da7f3cf4 100644 --- 
a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -198,7 +198,6 @@ impl Service { Ok(None) => debug!("Command successful with no response"), Ok(Some(output)) | Err(output) => self .handle_response(output) - .boxed() .await .unwrap_or_else(default_log), } @@ -277,6 +276,7 @@ impl Service { }; self.respond_to_room(content, &pdu.room_id, response_sender) + .boxed() .await } diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 9081fcbc..39920219 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,29 +1,29 @@ -use conduit::{debug_warn, err, pdu::gen_event_id_canonical_json, Err, Result}; -use ruma::{CanonicalJsonObject, OwnedEventId, OwnedRoomId, RoomId}; +use conduit::{err, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; +use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, RoomId}; use serde_json::value::RawValue as RawJsonValue; impl super::Service { pub async fn parse_incoming_pdu( &self, pdu: &RawJsonValue, ) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - debug_warn!("Error parsing incoming event {pdu:#?}"); - err!(BadServerResponse("Error parsing incoming event {e:?}")) - })?; + let value = serde_json::from_str::(pdu.get()) + .map_err(|e| err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))))?; let room_id: OwnedRoomId = value .get("room_id") - .and_then(|id| RoomId::parse(id.as_str()?).ok()) - .ok_or_else(|| err!(Request(InvalidParam("Invalid room id in pdu"))))?; + .and_then(CanonicalJsonValue::as_str) + .map(RoomId::parse) + .flat_ok_or(err!(Request(InvalidParam("Invalid room_id in pdu"))))?; - let Ok(room_version_id) = self.services.state.get_room_version(&room_id).await else { - return Err!("Server is not in room {room_id}"); - }; + let 
room_version_id = self + .services + .state + .get_room_version(&room_id) + .await + .map_err(|_| err!("Server is not in room {room_id}"))?; - let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { - // Event could not be converted to canonical json - return Err!(Request(InvalidParam("Could not convert event to canonical json."))); - }; + let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id) + .map_err(|e| err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))))?; Ok((event_id, value, room_id)) } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 487262e6..21e5395d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -661,8 +661,7 @@ impl Service { .await .or_else(|_| { if event_type == TimelineEventType::RoomCreate { - let content = serde_json::from_str::(content.get()) - .expect("Invalid content in RoomCreate pdu."); + let content: RoomCreateEventContent = serde_json::from_str(content.get())?; Ok(content.room_version) } else { Err(Error::InconsistentRoomState( From e482c0646f58ae0fe58abc12dff4be7cb1fd8e8f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Oct 2024 20:25:32 +0000 Subject: [PATCH 0065/1248] Add constructions and Default for PduBuilder simplify various RoomMemberEventContent constructions Signed-off-by: Jason Volk --- src/admin/user/commands.rs | 21 +-- src/api/client/account.rs | 12 +- src/api/client/membership.rs | 157 +++++++--------------- src/api/client/message.rs | 17 +-- src/api/client/profile.rs | 33 ++--- src/api/client/redact.rs | 18 +-- src/api/client/room.rs | 159 +++++++--------------- src/api/client/state.rs | 3 +- src/api/server/make_join.rs | 29 ++-- src/api/server/make_leave.rs | 25 +--- src/core/pdu/builder.rs | 59 +++++++- src/core/pdu/mod.rs | 5 +- src/service/admin/create.rs | 171 +++++++----------------- src/service/admin/grant.rs | 83 +++--------- src/service/admin/mod.rs | 31 
+---- src/service/rooms/state_accessor/mod.rs | 20 +-- 16 files changed, 279 insertions(+), 564 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 562bb9c7..df393833 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -12,11 +12,10 @@ use ruma::{ redaction::RoomRedactionEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, StateEventType, TimelineEventType, + RoomAccountDataEventType, StateEventType, }, EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, }; -use serde_json::value::to_raw_value; use crate::{ admin_command, get_room_info, @@ -461,14 +460,7 @@ pub(super) async fn force_demote( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(String::new(), &power_levels_content), &user_id, &room_id, &state_lock, @@ -623,16 +615,11 @@ pub(super) async fn redact_event(&self, event_id: Box) -> Result(room_id, &StateEventType::RoomMember, user_id.as_str()) @@ -1651,21 +1601,18 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, return Ok(()); }; - event.membership = MembershipState::Leave; - event.reason = reason; - services .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }, + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent { + membership: MembershipState::Leave, + reason, + ..event + }, + ), user_id, room_id, &state_lock, diff --git a/src/api/client/message.rs b/src/api/client/message.rs index d577e3c8..578b675b 100644 --- 
a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -9,7 +9,6 @@ use conduit::{ use futures::{FutureExt, StreamExt}; use ruma::{ api::client::{ - error::ErrorKind, filter::{RoomEventFilter, UrlFilter}, message::{get_message_events, send_message_event}, }, @@ -21,7 +20,7 @@ use service::rooms::timeline::PdusIterItem; use crate::{ service::{pdu::PduBuilder, Services}, - utils, Error, Result, Ruma, + utils, Result, Ruma, }; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` @@ -77,27 +76,25 @@ pub(crate) async fn send_message_event_route( let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - let content = from_str(body.body.body.json().get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?; + let content = + from_str(body.body.body.json().get()).map_err(|e| err!(Request(BadJson("Invalid JSON body: {e}"))))?; let event_id = services .rooms .timeline .build_and_append_pdu( PduBuilder { - event_type: body.event_type.to_string().into(), + event_type: body.event_type.clone().into(), content, unsigned: Some(unsigned), - state_key: None, - redacts: None, timestamp: appservice_info.and(body.timestamp), + ..Default::default() }, sender_user, &body.room_id, &state_lock, ) - .await - .map(|event_id| (*event_id).to_owned())?; + .await?; services .transaction_ids @@ -106,7 +103,7 @@ pub(crate) async fn send_message_event_route( drop(state_lock); Ok(send_message_event::v3::Response { - event_id, + event_id: event_id.into(), }) } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index cdc047f0..32f7a723 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -13,11 +13,10 @@ use ruma::{ }, federation, }, - events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType}, + events::{room::member::RoomMemberEventContent, StateEventType}, presence::PresenceState, OwnedMxcUri, OwnedRoomId, UserId, }; -use 
serde_json::value::to_raw_value; use service::Services; use crate::Ruma; @@ -310,19 +309,14 @@ pub async fn update_displayname( continue; }; - let pdu = PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { + let pdu = PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent { displayname: displayname.clone(), join_authorized_via_users_server: None, ..content - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }; + }, + ); joined_rooms.push((pdu, room_id)); } @@ -360,20 +354,15 @@ pub async fn update_avatar_url( .room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str()) .await?; - let pdu = PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { + let pdu = PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent { avatar_url: avatar_url.clone(), blurhash: blurhash.clone(), join_authorized_via_users_server: None, ..content - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }; + }, + ); Ok((pdu, room_id)) }) diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index 2102f6cd..a986dc18 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -1,9 +1,5 @@ use axum::extract::State; -use ruma::{ - api::client::redact::redact_event, - events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, -}; -use serde_json::value::to_raw_value; +use ruma::{api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent}; use crate::{service::pdu::PduBuilder, Result, Ruma}; @@ -25,16 +21,11 @@ pub(crate) async fn redact_event_route( .timeline .build_and_append_pdu( PduBuilder { - event_type: TimelineEventType::RoomRedaction, - content: to_raw_value(&RoomRedactionEventContent { + 
redacts: Some(body.event_id.clone().into()), + ..PduBuilder::timeline(&RoomRedactionEventContent { redacts: Some(body.event_id.clone()), reason: body.reason.clone(), }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: Some(body.event_id.into()), - timestamp: None, }, sender_user, &body.room_id, @@ -44,8 +35,7 @@ pub(crate) async fn redact_event_route( drop(state_lock); - let event_id = (*event_id).to_owned(); Ok(redact_event::v3::Response { - event_id, + event_id: event_id.into(), }) } diff --git a/src/api/client/room.rs b/src/api/client/room.rs index e22ad796..daadb724 100644 --- a/src/api/client/room.rs +++ b/src/api/client/room.rs @@ -150,8 +150,7 @@ pub(crate) async fn create_room_route( None => services.globals.default_room_version(), }; - #[allow(clippy::single_match_else)] - let content = match &body.creation_content { + let create_content = match &body.creation_content { Some(content) => { use RoomVersionId::*; @@ -213,11 +212,9 @@ pub(crate) async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, + content: to_raw_value(&create_content).expect("create event content serialization"), state_key: Some(String::new()), - redacts: None, - timestamp: None, + ..Default::default() }, sender_user, &room_id, @@ -231,24 +228,16 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, + PduBuilder::state( + sender_user.to_string(), + &RoomMemberEventContent { displayname: services.users.displayname(sender_user).await.ok(), avatar_url: services.users.avatar_url(sender_user).await.ok(), - is_direct: Some(body.is_direct), - third_party_invite: None, blurhash: services.users.blurhash(sender_user).await.ok(), - 
reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - timestamp: None, - }, + is_direct: Some(body.is_direct), + ..RoomMemberEventContent::new(MembershipState::Join) + }, + ), sender_user, &room_id, &state_lock, @@ -289,11 +278,9 @@ pub(crate) async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_content).expect("to_raw_value always works on serde_json::Value"), - unsigned: None, + content: to_raw_value(&power_levels_content).expect("serialized power_levels event content"), state_key: Some(String::new()), - redacts: None, - timestamp: None, + ..Default::default() }, sender_user, &room_id, @@ -308,18 +295,13 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { + PduBuilder::state( + String::new(), + &RoomCanonicalAliasEventContent { alias: Some(room_alias_id.to_owned()), alt_aliases: vec![], - }) - .expect("We checked that alias earlier, it must be fine"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }, + ), sender_user, &room_id, &state_lock, @@ -335,19 +317,14 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { + PduBuilder::state( + String::new(), + &RoomJoinRulesEventContent::new(match preset { RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default _ => JoinRule::Invite, - })) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }), + ), 
sender_user, &room_id, &state_lock, @@ -360,15 +337,10 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state( + String::new(), + &RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared), + ), sender_user, &room_id, &state_lock, @@ -381,18 +353,13 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { + PduBuilder::state( + String::new(), + &RoomGuestAccessEventContent::new(match preset { RoomPreset::PublicChat => GuestAccess::Forbidden, _ => GuestAccess::CanJoin, - })) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }), + ), sender_user, &room_id, &state_lock, @@ -440,15 +407,7 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(name.clone())) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())), sender_user, &room_id, &state_lock, @@ -462,17 +421,12 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { + PduBuilder::state( + String::new(), + &RoomTopicEventContent { topic: topic.clone(), - }) - .expect("event is valid, 
we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }, + ), sender_user, &room_id, &state_lock, @@ -641,18 +595,13 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomTombstone, - content: to_raw_value(&RoomTombstoneEventContent { + PduBuilder::state( + String::new(), + &RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }, + ), sender_user, &body.room_id, &state_lock, @@ -788,10 +737,8 @@ pub(crate) async fn upgrade_room_route( PduBuilder { event_type: event_type.to_string().into(), content: event_content, - unsigned: None, state_key: Some(String::new()), - redacts: None, - timestamp: None, + ..Default::default() }, sender_user, &replacement_room, @@ -821,7 +768,7 @@ pub(crate) async fn upgrade_room_route( } // Get the old room power levels - let mut power_levels_event_content: RoomPowerLevelsEventContent = services + let power_levels_event_content: RoomPowerLevelsEventContent = services .rooms .state_accessor .room_state_get_content(&body.room_id, &StateEventType::RoomPowerLevels, "") @@ -836,8 +783,6 @@ pub(crate) async fn upgrade_room_route( .checked_add(int!(1)) .ok_or_else(|| err!(Request(BadJson("users_default power levels event content is not valid"))))?, ); - power_levels_event_content.events_default = new_level; - power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and // inviting new users @@ -845,14 +790,14 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_event_content).expect("event is valid, 
we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state( + String::new(), + &RoomPowerLevelsEventContent { + events_default: new_level, + invite: new_level, + ..power_levels_event_content + }, + ), sender_user, &body.room_id, &state_lock, diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 1396ae77..5090d557 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -177,10 +177,9 @@ async fn send_state_event_for_key_helper( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get())?, - unsigned: None, state_key: Some(state_key), - redacts: None, timestamp, + ..Default::default() }, sender, room_id, diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index ba081aad..85668038 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -8,7 +8,7 @@ use ruma::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, TimelineEventType, + StateEventType, }, CanonicalJsonObject, RoomId, RoomVersionId, UserId, }; @@ -125,30 +125,17 @@ pub(crate) async fn create_join_event_template_route( )); } - let content = to_raw_value(&RoomMemberEventContent { - avatar_url: None, - blurhash: None, - displayname: None, - is_direct: None, - membership: MembershipState::Join, - third_party_invite: None, - reason: None, - join_authorized_via_users_server, - }) - .expect("member event is valid value"); - let (_pdu, mut pdu_json) = services .rooms .timeline .create_hash_and_sign_event( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - timestamp: None, - }, + PduBuilder::state( + body.user_id.to_string(), + &RoomMemberEventContent { + join_authorized_via_users_server, + ..RoomMemberEventContent::new(MembershipState::Join) + }, 
+ ), &body.user_id, &body.room_id, &state_lock, diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 41ea1c80..81a32c86 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -2,10 +2,7 @@ use axum::extract::State; use conduit::{Error, Result}; use ruma::{ api::{client::error::ErrorKind, federation::membership::prepare_leave_event}, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - TimelineEventType, - }, + events::room::member::{MembershipState, RoomMemberEventContent}, }; use serde_json::value::to_raw_value; @@ -39,30 +36,12 @@ pub(crate) async fn create_leave_event_template_route( let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - let content = to_raw_value(&RoomMemberEventContent { - avatar_url: None, - blurhash: None, - displayname: None, - is_direct: None, - membership: MembershipState::Leave, - third_party_invite: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("member event is valid value"); let (_pdu, mut pdu_json) = services .rooms .timeline .create_hash_and_sign_event( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Leave)), &body.user_id, &body.room_id, &state_lock, diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index ba4c19e5..80ff0713 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -1,20 +1,67 @@ use std::{collections::BTreeMap, sync::Arc}; -use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch}; +use ruma::{ + events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, + EventId, MilliSecondsSinceUnixEpoch, +}; use serde::Deserialize; -use 
serde_json::value::RawValue as RawJsonValue; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; /// Build the start of a PDU in order to add it to the Database. #[derive(Debug, Deserialize)] -pub struct PduBuilder { +pub struct Builder { #[serde(rename = "type")] pub event_type: TimelineEventType, + pub content: Box, - pub unsigned: Option>, + + pub unsigned: Option, + pub state_key: Option, + pub redacts: Option>, - /// For timestamped messaging, should only be used for appservices - /// + + /// For timestamped messaging, should only be used for appservices. /// Will be set to current time if None pub timestamp: Option, } + +type Unsigned = BTreeMap; + +impl Builder { + pub fn state(state_key: String, content: &T) -> Self + where + T: EventContent, + { + Self { + event_type: content.event_type().into(), + content: to_raw_value(content).expect("Builder failed to serialize state event content to RawValue"), + state_key: Some(state_key), + ..Self::default() + } + } + + pub fn timeline(content: &T) -> Self + where + T: EventContent, + { + Self { + event_type: content.event_type().into(), + content: to_raw_value(content).expect("Builder failed to serialize timeline event content to RawValue"), + ..Self::default() + } + } +} + +impl Default for Builder { + fn default() -> Self { + Self { + event_type: "m.room.message".into(), + content: Box::::default(), + unsigned: None, + state_key: None, + redacts: None, + timestamp: None, + } + } +} diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index a94e2bdc..5f50fe5b 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -21,7 +21,10 @@ use serde_json::{ value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}, }; -pub use self::{builder::PduBuilder, count::PduCount}; +pub use self::{ + builder::{Builder, Builder as PduBuilder}, + count::PduCount, +}; use crate::{err, is_true, warn, Error, Result}; #[derive(Deserialize)] diff --git a/src/service/admin/create.rs 
b/src/service/admin/create.rs index 3dd5aea3..1631f1cb 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -2,24 +2,20 @@ use std::collections::BTreeMap; use conduit::{pdu::PduBuilder, Result}; use ruma::{ - events::{ - room::{ - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - name::RoomNameEventContent, - power_levels::RoomPowerLevelsEventContent, - preview_url::RoomPreviewUrlsEventContent, - topic::RoomTopicEventContent, - }, - TimelineEventType, + events::room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + preview_url::RoomPreviewUrlsEventContent, + topic::RoomTopicEventContent, }, RoomId, RoomVersionId, }; -use serde_json::value::to_raw_value; use crate::Services; @@ -44,7 +40,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { let room_version = services.globals.default_room_version(); - let mut content = { + let create_content = { use RoomVersionId::*; match room_version { V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => RoomCreateEventContent::new_v1(server_user.clone()), @@ -52,23 +48,20 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { } }; - content.federate = true; - content.predecessor = None; - content.room_version = room_version; - // 1. 
The room create event services .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state( + String::new(), + &RoomCreateEventContent { + federate: true, + predecessor: None, + room_version, + ..create_content + }, + ), server_user, &room_id, &state_lock, @@ -80,24 +73,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(server_user.to_string()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(server_user.to_string(), &RoomMemberEventContent::new(MembershipState::Join)), server_user, &room_id, &state_lock, @@ -111,18 +87,13 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { + PduBuilder::state( + String::new(), + &RoomPowerLevelsEventContent { users, ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }, + ), server_user, &room_id, &state_lock, @@ -134,15 +105,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomJoinRules, - content: 
to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(String::new(), &RoomJoinRulesEventContent::new(JoinRule::Invite)), server_user, &room_id, &state_lock, @@ -154,15 +117,10 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state( + String::new(), + &RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared), + ), server_user, &room_id, &state_lock, @@ -174,15 +132,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(String::new(), &RoomGuestAccessEventContent::new(GuestAccess::Forbidden)), server_user, &room_id, &state_lock, @@ -195,15 +145,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(room_name)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(String::new(), &RoomNameEventContent::new(room_name)), server_user, &room_id, &state_lock, @@ -214,17 +156,12 @@ pub async 
fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { + PduBuilder::state( + String::new(), + &RoomTopicEventContent { topic: format!("Manage {}", services.globals.server_name()), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }, + ), server_user, &room_id, &state_lock, @@ -238,18 +175,13 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { + PduBuilder::state( + String::new(), + &RoomCanonicalAliasEventContent { alias: Some(alias.clone()), alt_aliases: Vec::new(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }, + ), server_user, &room_id, &state_lock, @@ -266,17 +198,12 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPreviewUrls, - content: to_raw_value(&RoomPreviewUrlsEventContent { + PduBuilder::state( + String::new(), + &RoomPreviewUrlsEventContent { disabled: true, - }) - .expect("event is valid we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, + }, + ), server_user, &room_id, &state_lock, diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 6e266ca9..405da982 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -9,11 +9,10 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, TimelineEventType, + RoomAccountDataEventType, }, 
RoomId, UserId, }; -use serde_json::value::to_raw_value; use crate::pdu::PduBuilder; @@ -35,24 +34,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { self.services .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Invite)), server_user, &room_id, &state_lock, @@ -61,24 +43,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { self.services .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - timestamp: None, - }, + PduBuilder::state(user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Join)), user_id, &room_id, &state_lock, @@ -91,18 +56,13 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { self.services .timeline .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { + PduBuilder::state( + String::new(), + &RoomPowerLevelsEventContent { users, ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: 
Some(String::new()), - redacts: None, - timestamp: None, - }, + }, + ), server_user, &room_id, &state_lock, @@ -117,23 +77,18 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { } } + let welcome_message = String::from("## Thank you for trying out conduwuit!\n\nconduwuit is a fork of upstream Conduit which is in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Git and Documentation: https://github.com/girlbossceo/conduwuit\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command):\n\nconduwuit room (Ask questions and get notified on updates):\n`/join #conduwuit:puppygock.gay`"); + // Send welcome message - self.services.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMessage, - content: to_raw_value(&RoomMessageEventContent::text_markdown( - String::from("## Thank you for trying out conduwuit!\n\nconduwuit is a fork of upstream Conduit which is in Beta. 
This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Git and Documentation: https://github.com/girlbossceo/conduwuit\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command):\n\nconduwuit room (Ask questions and get notified on updates):\n`/join #conduwuit:puppygock.gay`"), - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - timestamp: None, - }, - server_user, - &room_id, - &state_lock, - ).await?; + self.services + .timeline + .build_and_append_pdu( + PduBuilder::timeline(&RoomMessageEventContent::text_markdown(welcome_message)), + server_user, + &room_id, + &state_lock, + ) + .await?; Ok(()) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index da7f3cf4..58cc012c 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -15,13 +15,9 @@ pub use create::create_admin_room; use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ - events::{ - room::message::{Relation, RoomMessageEventContent}, - TimelineEventType, - }, + events::room::message::{Relation, RoomMessageEventContent}, OwnedEventId, OwnedRoomId, RoomId, UserId, }; -use serde_json::value::to_raw_value; use tokio::sync::{Mutex, RwLock}; use crate::{account_data, globals, rooms, rooms::state::RoomMutexGuard, Dep}; @@ -285,20 +281,12 @@ impl Service { ) -> Result<()> { assert!(self.user_is_admin(user_id).await, "sender is not admin"); - let response_pdu = PduBuilder { - event_type: TimelineEventType::RoomMessage, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - timestamp: None, - }; - let state_lock = 
self.services.state.mutex.lock(room_id).await; + if let Err(e) = self .services .timeline - .build_and_append_pdu(response_pdu, user_id, room_id, &state_lock) + .build_and_append_pdu(PduBuilder::timeline(&content), user_id, room_id, &state_lock) .await { self.handle_response_error(e, room_id, user_id, &state_lock) @@ -313,23 +301,14 @@ impl Service { &self, e: Error, room_id: &RoomId, user_id: &UserId, state_lock: &RoomMutexGuard, ) -> Result<()> { error!("Failed to build and append admin room response PDU: \"{e}\""); - let error_room_message = RoomMessageEventContent::text_plain(format!( + let content = RoomMessageEventContent::text_plain(format!( "Failed to build and append admin room PDU: \"{e}\"\n\nThe original admin command may have finished \ successfully, but we could not return the output." )); - let response_pdu = PduBuilder { - event_type: TimelineEventType::RoomMessage, - content: to_raw_value(&error_room_message).expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - timestamp: None, - }; - self.services .timeline - .build_and_append_pdu(response_pdu, user_id, room_id, state_lock) + .build_and_append_pdu(PduBuilder::timeline(&content), user_id, room_id, state_lock) .await?; Ok(()) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 3855d92a..19f1f141 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -37,7 +37,6 @@ use ruma::{ ServerName, UserId, }; use serde::Deserialize; -use serde_json::value::to_raw_value; use self::data::Data; use crate::{rooms, rooms::state::RoomMutexGuard, Dep}; @@ -353,21 +352,14 @@ impl Service { pub async fn user_can_invite( &self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &RoomMutexGuard, ) -> bool { - let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite)) - .expect("Event content always serializes"); - - let new_event = 
PduBuilder { - event_type: ruma::events::TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(target_user.into()), - redacts: None, - timestamp: None, - }; - self.services .timeline - .create_hash_and_sign_event(new_event, sender, room_id, state_lock) + .create_hash_and_sign_event( + PduBuilder::state(target_user.into(), &RoomMemberEventContent::new(MembershipState::Invite)), + sender, + room_id, + state_lock, + ) .await .is_ok() } From 8ea2dccc9ad72df70555c8dc04ee85d6ed49f1a7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 9 Jul 2024 17:23:00 +0000 Subject: [PATCH 0066/1248] sort rustfmt Signed-off-by: Jason Volk --- rustfmt.toml | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 114677d4..fd912a19 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,28 +1,27 @@ -edition = "2021" - +array_width = 80 +chain_width = 60 +comment_width = 80 condense_wildcard_suffixes = true +edition = "2021" +fn_call_width = 80 +fn_params_layout = "Compressed" +fn_single_line = true format_code_in_doc_comments = true format_macro_bodies = true format_macro_matchers = true format_strings = true -hex_literal_case = "Upper" -max_width = 120 -tab_spaces = 4 -array_width = 80 -comment_width = 80 -wrap_comments = true -fn_params_layout = "Compressed" -fn_call_width = 80 -fn_single_line = true +group_imports = "StdExternalCrate" hard_tabs = true -match_block_trailing_comma = true +hex_literal_case = "Upper" imports_granularity = "Crate" +match_block_trailing_comma = true +max_width = 120 +newline_style = "Unix" normalize_comments = false reorder_impl_items = true reorder_imports = true -group_imports = "StdExternalCrate" -newline_style = "Unix" +tab_spaces = 4 use_field_init_shorthand = true use_small_heuristics = "Off" use_try_shorthand = true -chain_width = 60 +wrap_comments = true From c9c405facfcfd30c76e3b830929a4e4c90b930c2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 29 
Sep 2024 09:20:17 +0000 Subject: [PATCH 0067/1248] relax Sized bound for debug::type_name Signed-off-by: Jason Volk --- src/core/debug.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/debug.rs b/src/core/debug.rs index 1e36ca8e..85574a2f 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -86,11 +86,11 @@ pub fn panic_str(p: &Box) -> &'static str { p.downcast_ref::<&st #[inline(always)] #[must_use] -pub fn rttype_name(_: &T) -> &'static str { type_name::() } +pub fn rttype_name(_: &T) -> &'static str { type_name::() } #[inline(always)] #[must_use] -pub fn type_name() -> &'static str { std::any::type_name::() } +pub fn type_name() -> &'static str { std::any::type_name::() } #[must_use] #[inline] From 16f82b02a07110ae3f4133758d3a7e20ca2401ea Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 29 Sep 2024 04:18:47 +0000 Subject: [PATCH 0068/1248] add util to restore state on scope exit Signed-off-by: Jason Volk --- src/core/utils/defer.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/core/utils/defer.rs b/src/core/utils/defer.rs index 08477b6f..29199700 100644 --- a/src/core/utils/defer.rs +++ b/src/core/utils/defer.rs @@ -15,8 +15,14 @@ macro_rules! defer { }; ($body:expr) => { - $crate::defer! {{ - $body - }} + $crate::defer! {{ $body }} + }; +} + +#[macro_export] +macro_rules! scope_restore { + ($val:ident, $ours:expr) => { + let theirs = $crate::utils::exchange($val, $ours); + $crate::defer! 
{{ *$val = theirs; }}; }; } From a5e85727b5d1447a67e6ef970f5cc9d54f866f87 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 29 Sep 2024 09:01:57 +0000 Subject: [PATCH 0069/1248] add tuple access functor-macro Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index c34691d2..4dbecf91 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -39,3 +39,10 @@ pub use self::{ #[inline] pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, source) } + +#[macro_export] +macro_rules! at { + ($idx:tt) => { + |t| t.$idx + }; +} From 43b0bb6a5e62a9262abcad63431bf9ac0c2d60cc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 7 Oct 2024 19:19:53 +0000 Subject: [PATCH 0070/1248] add non-allocating fixed-size random string generator Signed-off-by: Jason Volk --- src/core/utils/rand.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs index b80671eb..d717c4bd 100644 --- a/src/core/utils/rand.rs +++ b/src/core/utils/rand.rs @@ -3,6 +3,7 @@ use std::{ time::{Duration, SystemTime}, }; +use arrayvec::ArrayString; use rand::{thread_rng, Rng}; pub fn string(length: usize) -> String { @@ -13,6 +14,18 @@ pub fn string(length: usize) -> String { .collect() } +#[inline] +pub fn string_array() -> ArrayString { + let mut ret = ArrayString::::new(); + thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(LENGTH) + .map(char::from) + .for_each(|c| ret.push(c)); + + ret +} + #[inline] #[must_use] pub fn timepoint_secs(range: Range) -> SystemTime { From c40d20cb95283c1e03c72fec437c48b8debee678 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 25 Sep 2024 05:04:25 +0000 Subject: [PATCH 0071/1248] add macro util to determine if cargo build or check/clippy. 
Signed-off-by: Jason Volk --- src/macros/utils.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/macros/utils.rs b/src/macros/utils.rs index 197dd90e..e4ffc622 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -2,6 +2,16 @@ use syn::{parse_str, Expr, Generics, Lit, Meta}; use crate::Result; +pub(crate) fn is_cargo_build() -> bool { + std::env::args() + .find(|flag| flag.starts_with("--emit")) + .as_ref() + .and_then(|flag| flag.split_once('=')) + .map(|val| val.1.split(',')) + .and_then(|mut vals| vals.find(|elem| *elem == "link")) + .is_some() +} + pub(crate) fn get_named_generics(args: &[Meta], name: &str) -> Result { const DEFAULT: &str = "<>"; From 2a59a56eaa6d63c7db6634b1c1662d7f34dd7598 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Sep 2024 22:17:02 +0000 Subject: [PATCH 0072/1248] initial example-config generator Signed-off-by: Jason Volk --- src/core/config/mod.rs | 2 + src/macros/config.rs | 98 ++++++++++++++++++++++++++++++++++++++++++ src/macros/mod.rs | 8 +++- 3 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 src/macros/config.rs diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 126b3123..64e1c9ba 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -5,6 +5,7 @@ use std::{ path::PathBuf, }; +use conduit_macros::config_example_generator; use either::{ Either, Either::{Left, Right}, @@ -27,6 +28,7 @@ pub mod check; pub mod proxy; /// all the config options for conduwuit +#[config_example_generator] #[derive(Clone, Debug, Deserialize)] #[allow(clippy::struct_excessive_bools)] pub struct Config { diff --git a/src/macros/config.rs b/src/macros/config.rs new file mode 100644 index 00000000..6d29c21f --- /dev/null +++ b/src/macros/config.rs @@ -0,0 +1,98 @@ +use std::fmt::Write; + +use proc_macro::TokenStream; +use quote::ToTokens; +use syn::{Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaNameValue, Type, TypePath}; + +use 
crate::{utils::is_cargo_build, Result}; + +#[allow(clippy::needless_pass_by_value)] +pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { + if is_cargo_build() { + generate_example(&input, args)?; + } + + Ok(input.to_token_stream().into()) +} + +#[allow(clippy::needless_pass_by_value)] +#[allow(unused_variables)] +fn generate_example(input: &ItemStruct, _args: &[Meta]) -> Result<()> { + if let Fields::Named(FieldsNamed { + named, + .. + }) = &input.fields + { + for field in named { + let Some(ident) = &field.ident else { + continue; + }; + + let Some(doc) = get_doc_comment(field) else { + continue; + }; + + let Some(type_name) = get_type_name(field) else { + continue; + }; + + //println!("{:?} {type_name:?}\n{doc}", ident.to_string()); + } + } + + Ok(()) +} + +fn get_doc_comment(field: &Field) -> Option { + let mut out = String::new(); + for attr in &field.attrs { + let Meta::NameValue(MetaNameValue { + path, + value, + .. + }) = &attr.meta + else { + continue; + }; + + if !path + .segments + .iter() + .next() + .is_some_and(|s| s.ident == "doc") + { + continue; + } + + let Expr::Lit(ExprLit { + lit, + .. + }) = &value + else { + continue; + }; + + let Lit::Str(token) = &lit else { + continue; + }; + + writeln!(&mut out, "# {}", token.value()).expect("wrote to output string buffer"); + } + + (!out.is_empty()).then_some(out) +} + +fn get_type_name(field: &Field) -> Option { + let Type::Path(TypePath { + path, + .. 
+ }) = &field.ty + else { + return None; + }; + + path.segments + .iter() + .next() + .map(|segment| segment.ident.to_string()) +} diff --git a/src/macros/mod.rs b/src/macros/mod.rs index d32cda71..1aa1e24f 100644 --- a/src/macros/mod.rs +++ b/src/macros/mod.rs @@ -1,5 +1,6 @@ mod admin; mod cargo; +mod config; mod debug; mod implement; mod refutable; @@ -9,7 +10,7 @@ mod utils; use proc_macro::TokenStream; use syn::{ parse::{Parse, Parser}, - parse_macro_input, Error, Item, ItemConst, ItemEnum, ItemFn, Meta, + parse_macro_input, Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, }; pub(crate) type Result = std::result::Result; @@ -47,6 +48,11 @@ pub fn implement(args: TokenStream, input: TokenStream) -> TokenStream { attribute_macro::(args, input, implement::implement) } +#[proc_macro_attribute] +pub fn config_example_generator(args: TokenStream, input: TokenStream) -> TokenStream { + attribute_macro::(args, input, config::example_generator) +} + fn attribute_macro(args: TokenStream, input: TokenStream, func: F) -> TokenStream where F: Fn(I, &[Meta]) -> Result, From f67cfcd5353bf112760f89a9451aafc2ba2d9fde Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Oct 2024 19:10:20 +0000 Subject: [PATCH 0073/1248] cleanup Config::load() Signed-off-by: Jason Volk --- src/core/config/mod.rs | 42 +++++++++++++++++------------------------- src/main/server.rs | 2 +- 2 files changed, 18 insertions(+), 26 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 64e1c9ba..40c900e5 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1,3 +1,6 @@ +pub mod check; +pub mod proxy; + use std::{ collections::{BTreeMap, BTreeSet}, fmt, @@ -22,10 +25,7 @@ use url::Url; pub use self::check::check; use self::proxy::ProxyConfig; -use crate::{error::Error, utils::sys, Err, Result}; - -pub mod check; -pub mod proxy; +use crate::{err, error::Error, utils::sys, Result}; /// all the config options for conduwuit #[config_example_generator] @@ 
-441,34 +441,26 @@ const DEPRECATED_KEYS: &[&str; 9] = &[ impl Config { /// Pre-initialize config - pub fn load(paths: &Option>) -> Result { - let raw_config = if let Some(config_file_env) = Env::var("CONDUIT_CONFIG") { - Figment::new().merge(Toml::file(config_file_env).nested()) - } else if let Some(config_file_arg) = Env::var("CONDUWUIT_CONFIG") { - Figment::new().merge(Toml::file(config_file_arg).nested()) - } else if let Some(config_file_args) = paths { - let mut figment = Figment::new(); + pub fn load(paths: Option<&[PathBuf]>) -> Result { + let paths_files = paths.into_iter().flatten().map(Toml::file); - for config in config_file_args { - figment = figment.merge(Toml::file(config).nested()); - } + let envs = [Env::var("CONDUIT_CONFIG"), Env::var("CONDUWUIT_CONFIG")]; + let envs_files = envs.into_iter().flatten().map(Toml::file); - figment - } else { - Figment::new() - }; - - Ok(raw_config + let config = envs_files + .chain(paths_files) + .fold(Figment::new(), |config, file| config.merge(file.nested())) .merge(Env::prefixed("CONDUIT_").global().split("__")) - .merge(Env::prefixed("CONDUWUIT_").global().split("__"))) + .merge(Env::prefixed("CONDUWUIT_").global().split("__")); + + Ok(config) } /// Finalize config pub fn new(raw_config: &Figment) -> Result { - let config = match raw_config.extract::() { - Err(e) => return Err!("There was a problem with your configuration file: {e}"), - Ok(config) => config, - }; + let config = raw_config + .extract::() + .map_err(|e| err!("There was a problem with your configuration file: {e}"))?; // don't start if we're listening on both UNIX sockets and TCP at same time check::is_dual_listening(raw_config)?; diff --git a/src/main/server.rs b/src/main/server.rs index e435b2f4..4813d586 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -24,7 +24,7 @@ pub(crate) struct Server { impl Server { pub(crate) fn build(args: &Args, runtime: Option<&runtime::Handle>) -> Result, Error> { - let raw_config = 
Config::load(&args.config)?; + let raw_config = Config::load(args.config.as_deref())?; let raw_config = crate::clap::update(raw_config, args)?; let config = Config::new(&raw_config)?; From fc4d109f35d2cfb54ae3a463cb66e318e5947510 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 5 Oct 2024 19:39:27 +0000 Subject: [PATCH 0074/1248] add document comments to config items Signed-off-by: Jason Volk --- src/core/config/mod.rs | 845 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 842 insertions(+), 3 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 40c900e5..b5e07da2 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -31,221 +31,768 @@ use crate::{err, error::Error, utils::sys, Result}; #[config_example_generator] #[derive(Clone, Debug, Deserialize)] #[allow(clippy::struct_excessive_bools)] +#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] pub struct Config { - /// [`IpAddr`] conduwuit will listen on (can be IPv4 or IPv6) + /// The server_name is the pretty name of this server. It is used as a + /// suffix for user and room ids. Examples: matrix.org, conduit.rs + /// + /// The Conduit server needs all /_matrix/ requests to be reachable at + /// https://your.server.name/ on port 443 (client-server) and 8448 (federation). + /// + /// If that's not possible for you, you can create /.well-known files to + /// redirect requests (delegation). See + /// https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient + /// and + /// https://spec.matrix.org/v1.9/server-server-api/#getwell-knownmatrixserver + /// for more information. + /// + /// YOU NEED TO EDIT THIS + pub server_name: OwnedServerName, + + /// Database backend: Only rocksdb is supported. + /// default address (IPv4 or IPv6) conduwuit will listen on. Generally you + /// want this to be localhost (127.0.0.1 / ::1). If you are using Docker or + /// a container NAT networking setup, you likely need this to be 0.0.0.0. 
+ /// To listen multiple addresses, specify a vector e.g. ["127.0.0.1", "::1"] + /// + /// default if unspecified is both IPv4 and IPv6 localhost: ["127.0.0.1", + /// "::1"] #[serde(default = "default_address")] address: ListeningAddr, - /// default TCP port(s) conduwuit will listen on + + /// The port(s) conduwuit will be running on. You need to set up a reverse + /// proxy such as Caddy or Nginx so all requests to /_matrix on port 443 + /// and 8448 will be forwarded to the conduwuit instance running on this + /// port Docker users: Don't change this, you'll need to map an external + /// port to this. To listen on multiple ports, specify a vector e.g. [8080, + /// 8448] + /// + /// default if unspecified is 8008 #[serde(default = "default_port")] port: ListeningPort, + pub tls: Option, + + /// Uncomment unix_socket_path to listen on a UNIX socket at the specified + /// path. If listening on a UNIX socket, you must remove/comment the + /// 'address' key if defined and add your reverse proxy to the 'conduwuit' + /// group, unless world RW permissions are specified with unix_socket_perms + /// (666 minimum). pub unix_socket_path: Option, + #[serde(default = "default_unix_socket_perms")] pub unix_socket_perms: u32, - pub server_name: OwnedServerName, + #[serde(default = "default_database_backend")] pub database_backend: String, + + /// This is the only directory where conduwuit will save its data, including + /// media. Note: this was previously "/var/lib/matrix-conduit" pub database_path: PathBuf, + pub database_backup_path: Option, + #[serde(default = "default_database_backups_to_keep")] pub database_backups_to_keep: i16, + + /// Set this to any float value in megabytes for conduwuit to tell the + /// database engine that this much memory is available for database-related + /// caches. May be useful if you have significant memory to spare to + /// increase performance. 
Defaults to 256.0 #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, + + /// Option to control adding arbitrary text to the end of the user's + /// displayname upon registration with a space before the text. This was the + /// lightning bolt emoji option, just replaced with support for adding your + /// own custom text or emojis. To disable, set this to "" (an empty string) + /// Defaults to "🏳️⚧️" (trans pride flag) #[serde(default = "default_new_user_displayname_suffix")] pub new_user_displayname_suffix: String, + + /// If enabled, conduwuit will send a simple GET request periodically to + /// `https://pupbrain.dev/check-for-updates/stable` for any new + /// announcements made. Despite the name, this is not an update check + /// endpoint, it is simply an announcement check endpoint. Defaults to + /// false. #[serde(default)] pub allow_check_for_updates: bool, #[serde(default = "default_pdu_cache_capacity")] pub pdu_cache_capacity: u32, + + /// Set this to any float value to multiply conduwuit's in-memory LRU caches + /// with. May be useful if you have significant memory to spare to increase + /// performance. + /// + /// This was previously called `conduit_cache_capacity_modifier` + /// + /// Defaults to 1.0. 
#[serde(default = "default_cache_capacity_modifier", alias = "conduit_cache_capacity_modifier")] pub cache_capacity_modifier: f64, + #[serde(default = "default_auth_chain_cache_capacity")] pub auth_chain_cache_capacity: u32, + #[serde(default = "default_shorteventid_cache_capacity")] pub shorteventid_cache_capacity: u32, + #[serde(default = "default_eventidshort_cache_capacity")] pub eventidshort_cache_capacity: u32, + #[serde(default = "default_shortstatekey_cache_capacity")] pub shortstatekey_cache_capacity: u32, + #[serde(default = "default_statekeyshort_cache_capacity")] pub statekeyshort_cache_capacity: u32, + #[serde(default = "default_server_visibility_cache_capacity")] pub server_visibility_cache_capacity: u32, + #[serde(default = "default_user_visibility_cache_capacity")] pub user_visibility_cache_capacity: u32, + #[serde(default = "default_stateinfo_cache_capacity")] pub stateinfo_cache_capacity: u32, + #[serde(default = "default_roomid_spacehierarchy_cache_capacity")] pub roomid_spacehierarchy_cache_capacity: u32, + /// Maximum entries stored in DNS memory-cache. The size of an entry may + /// vary so please take care if raising this value excessively. Only + /// decrease this when using an external DNS cache. Please note + /// that systemd does *not* count as an external cache, even when configured + /// to do so. #[serde(default = "default_dns_cache_entries")] pub dns_cache_entries: u32, + + /// Minimum time-to-live in seconds for entries in the DNS cache. The + /// default may appear high to most administrators; this is by design. Only + /// decrease this if you are using an external DNS cache. #[serde(default = "default_dns_min_ttl")] pub dns_min_ttl: u64, + + /// Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. + /// This value is critical for the server to federate efficiently. + /// NXDOMAIN's are assumed to not be returning to the federation + /// and aggressively cached rather than constantly rechecked. 
+ /// + /// Defaults to 3 days as these are *very rarely* false negatives. #[serde(default = "default_dns_min_ttl_nxdomain")] pub dns_min_ttl_nxdomain: u64, + + /// Number of retries after a timeout. #[serde(default = "default_dns_attempts")] pub dns_attempts: u16, + + /// The number of seconds to wait for a reply to a DNS query. Please note + /// that recursive queries can take up to several seconds for some domains, + /// so this value should not be too low. #[serde(default = "default_dns_timeout")] pub dns_timeout: u64, + + /// Fallback to TCP on DNS errors. Set this to false if unsupported by + /// nameserver. #[serde(default = "true_fn")] pub dns_tcp_fallback: bool, + + /// Enable to query all nameservers until the domain is found. Referred to + /// as "trust_negative_responses" in hickory_resolver. This can avoid useless + /// DNS queries if the first nameserver responds with NXDOMAIN or an empty + /// NOERROR response. + /// + /// The default is to query one nameserver and stop (false). #[serde(default = "true_fn")] pub query_all_nameservers: bool, + + /// Enables using *only* TCP for querying your specified nameservers instead + /// of UDP. + /// + /// You very likely do *not* want this. hickory-resolver already falls back + /// to TCP on UDP errors.
Defaults to false #[serde(default)] pub query_over_tcp_only: bool, + + /// DNS A/AAAA record lookup strategy + /// + /// Takes a number of one of the following options: + /// 1 - Ipv4Only (Only query for A records, no AAAA/IPv6) + /// 2 - Ipv6Only (Only query for AAAA records, no A/IPv4) + /// 3 - Ipv4AndIpv6 (Query for A and AAAA records in parallel, uses whatever + /// returns a successful response first) 4 - Ipv6thenIpv4 (Query for AAAA + /// record, if that fails then query the A record) 5 - Ipv4thenIpv6 (Query + /// for A record, if that fails then query the AAAA record) + /// + /// If you don't have IPv6 networking, then for better performance it may be + /// suitable to set this to Ipv4Only (1) as you will never ever use the + /// AAAA record contents even if the AAAA record is successful instead of + /// the A record. + /// + /// Defaults to 5 - Ipv4ThenIpv6 as this is the most compatible and IPv4 + /// networking is currently the most prevalent. #[serde(default = "default_ip_lookup_strategy")] pub ip_lookup_strategy: u8, + /// Max request size for file uploads #[serde(default = "default_max_request_size")] pub max_request_size: usize, + #[serde(default = "default_max_fetch_prev_events")] pub max_fetch_prev_events: u16, + /// Default/base connection timeout. + /// This is used only by URL previews and update/news endpoint checks + /// + /// Defaults to 10 seconds #[serde(default = "default_request_conn_timeout")] pub request_conn_timeout: u64, + + /// Default/base request timeout. The time waiting to receive more data from + /// another server. This is used only by URL previews, update/news, and + /// misc endpoint checks + /// + /// Defaults to 35 seconds #[serde(default = "default_request_timeout")] pub request_timeout: u64, + + /// Default/base request total timeout. The time limit for a whole request. + /// This is set very high to not cancel healthy requests while serving as a + /// backstop. 
This is used only by URL previews and update/news endpoint + /// checks + /// + /// Defaults to 320 seconds #[serde(default = "default_request_total_timeout")] pub request_total_timeout: u64, + + /// Default/base idle connection pool timeout + /// This is used only by URL previews and update/news endpoint checks + /// + /// Defaults to 5 seconds #[serde(default = "default_request_idle_timeout")] pub request_idle_timeout: u64, + + /// Default/base max idle connections per host + /// This is used only by URL previews and update/news endpoint checks + /// + /// Defaults to 1 as generally the same open connection can be re-used #[serde(default = "default_request_idle_per_host")] pub request_idle_per_host: u16, + + /// Federation well-known resolution connection timeout + /// + /// Defaults to 6 seconds #[serde(default = "default_well_known_conn_timeout")] pub well_known_conn_timeout: u64, + + /// Federation HTTP well-known resolution request timeout + /// + /// Defaults to 10 seconds #[serde(default = "default_well_known_timeout")] pub well_known_timeout: u64, + + /// Federation client request timeout + /// You most definitely want this to be high to account for extremely large + /// room joins, slow homeservers, your own resources etc. + /// + /// Defaults to 300 seconds #[serde(default = "default_federation_timeout")] pub federation_timeout: u64, + + /// Federation client idle connection pool timeout + /// + /// Defaults to 25 seconds #[serde(default = "default_federation_idle_timeout")] pub federation_idle_timeout: u64, + + /// Federation client max idle connections per host + /// + /// Defaults to 1 as generally the same open connection can be re-used #[serde(default = "default_federation_idle_per_host")] pub federation_idle_per_host: u16, + + /// Federation sender request timeout + /// The time it takes for the remote server to process sent transactions can + /// take a while. 
+ /// + /// Defaults to 180 seconds #[serde(default = "default_sender_timeout")] pub sender_timeout: u64, + + /// Federation sender idle connection pool timeout + /// + /// Defaults to 180 seconds #[serde(default = "default_sender_idle_timeout")] pub sender_idle_timeout: u64, + + /// Federation sender transaction retry backoff limit + /// + /// Defaults to 86400 seconds #[serde(default = "default_sender_retry_backoff_limit")] pub sender_retry_backoff_limit: u64, + + /// Appservice URL request connection timeout + /// + /// Defaults to 35 seconds as generally appservices are hosted within the + /// same network #[serde(default = "default_appservice_timeout")] pub appservice_timeout: u64, + + /// Appservice URL idle connection pool timeout + /// + /// Defaults to 300 seconds #[serde(default = "default_appservice_idle_timeout")] pub appservice_idle_timeout: u64, + + /// Notification gateway pusher idle connection pool timeout + /// + /// Defaults to 15 seconds #[serde(default = "default_pusher_idle_timeout")] pub pusher_idle_timeout: u64, + /// Enables registration. If set to false, no users can register on this + /// server. + /// + /// If set to true without a token configured, users can register with no + /// form of 2nd- step only if you set + /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to + /// true in your config. + /// + /// If you would like registration only via token reg, please configure + /// `registration_token` or `registration_token_file`. #[serde(default)] pub allow_registration: bool, + #[serde(default)] pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool, + + /// A static registration token that new users will have to provide when + /// creating an account. If unset and `allow_registration` is true, + /// registration is open without any condition. YOU NEED TO EDIT THIS. 
pub registration_token: Option, + + /// Path to a file on the system that gets read for the registration token + /// + /// conduwuit must be able to access the file, and it must not be empty + /// + /// no default pub registration_token_file: Option, + + /// controls whether encrypted rooms and events are allowed (default true) #[serde(default = "true_fn")] pub allow_encryption: bool, + + /// controls whether federation is allowed or not + /// defaults to true #[serde(default = "true_fn")] pub allow_federation: bool, + #[serde(default)] pub federation_loopback: bool, + + /// Set this to true to allow your server's public room directory to be + /// federated. Set this to false to protect against /publicRooms spiders, + /// but will forbid external users from viewing your server's public room + /// directory. If federation is disabled entirely (`allow_federation`), + /// this is inherently false. #[serde(default)] pub allow_public_room_directory_over_federation: bool, + + /// Set this to true to allow your server's public room directory to be + /// queried without client authentication (access token) through the Client + /// APIs. Set this to false to protect against /publicRooms spiders. #[serde(default)] pub allow_public_room_directory_without_auth: bool, + + /// allow guests/unauthenticated users to access TURN credentials + /// + /// this is the equivalent of Synapse's `turn_allow_guests` config option. + /// this allows any unauthenticated user to call + /// `/_matrix/client/v3/voip/turnServer`. + /// + /// defaults to false #[serde(default)] pub turn_allow_guests: bool, + + /// Set this to true to lock down your server's public room directory and + /// only allow admins to publish rooms to the room directory. Unpublishing + /// is still allowed by all users with this enabled. 
+ /// + /// Defaults to false #[serde(default)] pub lockdown_public_room_directory: bool, + + /// Set this to true to allow federating device display names / allow + /// external users to see your device display name. If federation is + /// disabled entirely (`allow_federation`), this is inherently false. For + /// privacy, this is best disabled. #[serde(default)] pub allow_device_name_federation: bool, + + /// Config option to allow or disallow incoming federation requests that + /// obtain the profiles of our local users from + /// `/_matrix/federation/v1/query/profile` + /// + /// This is inherently false if `allow_federation` is disabled + /// + /// Defaults to true #[serde(default = "true_fn")] pub allow_profile_lookup_federation_requests: bool, + + /// controls whether users are allowed to create rooms. + /// appservices and admins are always allowed to create rooms + /// defaults to true #[serde(default = "true_fn")] pub allow_room_creation: bool, + + /// Set to false to disable users from joining or creating room versions + /// that aren't 100% officially supported by conduwuit. + /// conduwuit officially supports room versions 6 - 10. conduwuit has + /// experimental/unstable support for 3 - 5, and 11. Defaults to true. #[serde(default = "true_fn")] pub allow_unstable_room_versions: bool, + #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, + #[serde(default)] pub well_known: WellKnownConfig, + #[serde(default)] pub allow_jaeger: bool, + #[serde(default = "default_jaeger_filter")] pub jaeger_filter: String, + + /// If the 'perf_measurements' feature is enabled, enables collecting folded + /// stack trace profile of tracing spans using tracing_flame. The resulting + /// profile can be visualized with inferno[1], speedscope[2], or a number of + /// other tools. 
[1]: https://github.com/jonhoo/inferno + /// [2]: www.speedscope.app #[serde(default)] pub tracing_flame: bool, + #[serde(default = "default_tracing_flame_filter")] pub tracing_flame_filter: String, + #[serde(default = "default_tracing_flame_output_path")] pub tracing_flame_output_path: String, + #[serde(default)] pub proxy: ProxyConfig, + pub jwt_secret: Option, + + /// Servers listed here will be used to gather public keys of other servers + /// (notary trusted key servers). + /// + /// (Currently, conduwuit doesn't support batched key requests, so this list + /// should only contain other Synapse servers) Defaults to `matrix.org` #[serde(default = "default_trusted_servers")] pub trusted_servers: Vec, + + /// Option to control whether conduwuit will query your list of trusted + /// notary key servers (`trusted_servers`) for remote homeserver signing + /// keys it doesn't know *first*, or query the individual servers first + /// before falling back to the trusted key servers. + /// + /// The former/default behaviour makes federated/remote rooms joins + /// generally faster because we're querying a single (or list of) server + /// that we know works, is reasonably fast, and is reliable for just about + /// all the homeserver signing keys in the room. Querying individual + /// servers may take longer depending on the general infrastructure of + /// everyone in there, how many dead servers there are, etc. + /// + /// However, this does create an increased reliance on one single or + /// multiple large entities as `trusted_servers` should generally + /// contain long-term and large servers who know a very large number of + /// homeservers. + /// + /// If you don't know what any of this means, leave this and + /// `trusted_servers` alone to their defaults. + /// + /// Defaults to true as this is the fastest option for federation. #[serde(default = "true_fn")] pub query_trusted_key_servers_first: bool, + + /// max log level for conduwuit. 
allows debug, info, warn, or error + /// see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives + /// **Caveat**: + /// For release builds, the tracing crate is configured to only implement + /// levels higher than error to avoid unnecessary overhead in the compiled + /// binary from trace macros. For debug builds, this restriction is not + /// applied. + /// + /// Defaults to "info" #[serde(default = "default_log")] pub log: String, + + /// controls whether logs will be outputted with ANSI colours + /// + /// defaults to true #[serde(default = "true_fn", alias = "log_colours")] pub log_colors: bool, + + /// OpenID token expiration/TTL in seconds + /// + /// These are the OpenID tokens that are primarily used for Matrix account + /// integrations, *not* OIDC/OpenID Connect/etc + /// + /// Defaults to 3600 (1 hour) #[serde(default = "default_openid_token_ttl")] pub openid_token_ttl: u64, + + /// TURN username to provide the client + /// + /// no default #[serde(default)] pub turn_username: String, + + /// TURN password to provide the client + /// + /// no default #[serde(default)] pub turn_password: String, + + /// vector list of TURN URIs/servers to use + /// + /// replace "example.turn.uri" with your TURN domain, such as the coturn + /// "realm". if using TURN over TLS, replace "turn:" with "turns:" + /// + /// No default #[serde(default = "Vec::new")] pub turn_uris: Vec, + + /// TURN secret to use for generating the HMAC-SHA1 hash apart of username + /// and password generation + /// + /// this is more secure, but if needed you can use traditional + /// username/password below. + /// + /// no default #[serde(default)] pub turn_secret: String, + + /// TURN secret to use that's read from the file path specified + /// + /// this takes priority over "turn_secret" first, and falls back to + /// "turn_secret" if invalid or failed to open. 
+ /// + /// no default pub turn_secret_file: Option, + + /// TURN TTL + /// + /// Default is 86400 seconds #[serde(default = "default_turn_ttl")] pub turn_ttl: u64, + /// List/vector of room **IDs** that conduwuit will make newly registered + /// users join. The room IDs specified must be rooms that you have joined + /// at least once on the server, and must be public. + /// + /// No default. #[serde(default = "Vec::new")] pub auto_join_rooms: Vec, + + /// Config option to automatically deactivate the account of any user who + /// attempts to join a: + /// - banned room + /// - forbidden room alias + /// - room alias or ID with a forbidden server name + /// + /// This may be useful if all your banned lists consist of toxic rooms or + /// servers that no good faith user would ever attempt to join, and + /// to automatically remediate the problem without any admin user + /// intervention. + /// + /// This will also make the user leave all rooms. Federation (e.g. remote + /// room invites) are ignored here. + /// + /// Defaults to false as rooms can be banned for non-moderation-related + /// reasons #[serde(default)] pub auto_deactivate_banned_room_attempts: bool, + /// RocksDB log level. This is not the same as conduwuit's log level. This + /// is the log level for the RocksDB engine/library which show up in your + /// database folder/path as `LOG` files. Defaults to error. conduwuit will + /// typically log RocksDB errors as normal. #[serde(default = "default_rocksdb_log_level")] pub rocksdb_log_level: String, + #[serde(default)] pub rocksdb_log_stderr: bool, + + /// Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB. #[serde(default = "default_rocksdb_max_log_file_size")] pub rocksdb_max_log_file_size: usize, + + /// Time in seconds before RocksDB will forcibly rotate logs. Defaults to 0. 
 #[serde(default = "default_rocksdb_log_time_to_roll")] pub rocksdb_log_time_to_roll: usize, + + /// Set this to true to use RocksDB config options that are tailored to HDDs + /// (slower device storage) + /// + /// It is worth noting that by default, conduwuit will use RocksDB with + /// Direct IO enabled. *Generally* speaking this improves performance as it + /// bypasses buffered I/O (system page cache). However there is a potential + /// chance that Direct IO may cause issues with database operations if your + /// setup is uncommon. This has been observed with FUSE filesystems, and + /// possibly ZFS filesystem. RocksDB generally deals/corrects these issues + /// but it cannot account for all setups. If you experience any weird + /// RocksDB issues, try enabling this option as it turns off Direct IO and + /// feel free to report in the conduwuit Matrix room if this option fixes + /// your DB issues. See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information. + /// + /// Defaults to false #[serde(default)] pub rocksdb_optimize_for_spinning_disks: bool, + + /// Enables direct-io to increase database performance. This is enabled by + /// default. Set this option to false if the database resides on a + /// filesystem which does not support direct-io. #[serde(default = "true_fn")] pub rocksdb_direct_io: bool, + + /// Amount of threads that RocksDB will use for parallelism on database + /// operations such as cleanup, sync, flush, compaction, etc. Set to 0 to use + /// all your logical threads. + /// + /// Defaults to your CPU logical thread count. #[serde(default = "default_rocksdb_parallelism_threads")] pub rocksdb_parallelism_threads: usize, + + /// Maximum number of LOG files RocksDB will keep. This must *not* be set to + /// 0. It must be at least 1. Defaults to 3 as these are not very useful. #[serde(default = "default_rocksdb_max_log_files")] pub rocksdb_max_log_files: usize, + + /// Type of RocksDB database compression to use. 
+ /// Available options are "zstd", "zlib", "bz2", "lz4", or "none" + /// It is best to use ZSTD as an overall good balance between + /// speed/performance, storage, IO amplification, and CPU usage. + /// For more performance but less compression (more storage used) and less + /// CPU usage, use LZ4. See https://github.com/facebook/rocksdb/wiki/Compression for more details. + /// + /// "none" will disable compression. + /// + /// Defaults to "zstd" #[serde(default = "default_rocksdb_compression_algo")] pub rocksdb_compression_algo: String, + + /// Level of compression the specified compression algorithm for RocksDB to + /// use. Default is 32767, which is internally read by RocksDB as the + /// default magic number and translated to the library's default + /// compression level as they all differ. + /// See their `kDefaultCompressionLevel`. #[serde(default = "default_rocksdb_compression_level")] pub rocksdb_compression_level: i32, + + /// Level of compression the specified compression algorithm for the + /// bottommost level/data for RocksDB to use. Default is 32767, which is + /// internally read by RocksDB as the default magic number and translated + /// to the library's default compression level as they all differ. + /// See their `kDefaultCompressionLevel`. + /// + /// Since this is the bottommost level (generally old and least used data), + /// it may be desirable to have a very high compression level here as it's + /// less likely for this data to be used. Research your chosen compression + /// algorithm. #[serde(default = "default_rocksdb_bottommost_compression_level")] pub rocksdb_bottommost_compression_level: i32, + + /// Whether to enable RocksDB "bottommost_compression". + /// At the expense of more CPU usage, this will further compress the + /// database to reduce more storage. It is recommended to use ZSTD + /// compression with this for best compression results. See https://github.com/facebook/rocksdb/wiki/Compression for more details. 
+ /// + /// Defaults to false as this uses more CPU when compressing. #[serde(default)] pub rocksdb_bottommost_compression: bool, + + /// Database recovery mode (for RocksDB WAL corruption) + /// + /// Use this option when the server reports corruption and refuses to start. + /// Set mode 2 (PointInTime) to cleanly recover from this corruption. The + /// server will continue from the last good state, several seconds or + /// minutes prior to the crash. Clients may have to run "clear-cache & + /// reload" to account for the rollback. Upon success, you may reset the + /// mode back to default and restart again. Please note in some cases the + /// corruption error may not be cleared for at least 30 minutes of + /// operation in PointInTime mode. + /// + /// As a very last ditch effort, if PointInTime does not fix or resolve + /// anything, you can try mode 3 (SkipAnyCorruptedRecord) but this will + /// leave the server in a potentially inconsistent state. + /// + /// The default mode 1 (TolerateCorruptedTailRecords) will automatically + /// drop the last entry in the database if corrupted during shutdown, but + /// nothing more. It is extraordinarily unlikely this will desynchronize + /// clients. To disable any form of silent rollback set mode 0 + /// (AbsoluteConsistency). + /// + /// The options are: + /// 0 = AbsoluteConsistency + /// 1 = TolerateCorruptedTailRecords (default) + /// 2 = PointInTime (use me if trying to recover) + /// 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) + /// + /// See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information + /// + /// Defaults to 1 (TolerateCorruptedTailRecords) #[serde(default = "default_rocksdb_recovery_mode")] pub rocksdb_recovery_mode: u8, + + /// Database repair mode (for RocksDB SST corruption) + /// + /// Use this option when the server reports corruption while running or + /// panics. If the server refuses to start use the recovery mode options + /// first. 
Corruption errors containing the acronym 'SST' which occur after + /// startup will likely require this option. + /// + /// - Backing up your database directory is recommended prior to running the + /// repair. + /// - Disabling repair mode and restarting the server is recommended after + /// running the repair. + /// + /// Defaults to false #[serde(default)] pub rocksdb_repair: bool, + #[serde(default)] pub rocksdb_read_only: bool, + #[serde(default)] pub rocksdb_secondary: bool, + + /// Enables idle CPU priority for compaction thread. This is not enabled by + /// default to prevent compaction from falling too far behind on busy + /// systems. #[serde(default)] pub rocksdb_compaction_prio_idle: bool, + + /// Enables idle IO priority for compaction thread. This prevents any + /// unexpected lag in the server's operation and is usually a good idea. + /// Enabled by default. #[serde(default = "true_fn")] pub rocksdb_compaction_ioprio_idle: bool, + #[serde(default = "true_fn")] pub rocksdb_compaction: bool, + + /// Level of statistics collection. Some admin commands to display database + /// statistics may require this option to be set. Database performance may + /// be impacted by higher settings. + /// + /// Option is a number ranging from 0 to 6: + /// 0 = No statistics. + /// 1 = No statistics in release mode (default). + /// 2 to 3 = Statistics with no performance impact. + /// 3 to 5 = Statistics with possible performance impact. + /// 6 = All statistics. + /// + /// Defaults to 1 (No statistics, except in debug-mode) #[serde(default = "default_rocksdb_stats_level")] pub rocksdb_stats_level: u8, @@ -254,128 +801,420 @@ pub struct Config { #[serde(default = "default_notification_push_path")] pub notification_push_path: String, + /// Config option to control local (your server only) presence + /// updates/requests. Defaults to true. Note that presence on conduwuit is + /// very fast unlike Synapse's. If using outgoing presence, this MUST be + /// enabled. 
#[serde(default = "true_fn")] pub allow_local_presence: bool, + + /// Config option to control incoming federated presence updates/requests. + /// Defaults to true. This option receives presence updates from other + /// servers, but does not send any unless `allow_outgoing_presence` is true. + /// Note that presence on conduwuit is very fast unlike Synapse's. #[serde(default = "true_fn")] pub allow_incoming_presence: bool, + + /// Config option to control outgoing presence updates/requests. Defaults to + /// true. This option sends presence updates to other servers, but does not + /// receive any unless `allow_incoming_presence` is true. + /// Note that presence on conduwuit is very fast unlike Synapse's. + /// If using outgoing presence, you MUST enable `allow_local_presence` as + /// well. #[serde(default = "true_fn")] pub allow_outgoing_presence: bool, + + /// Config option to control how many seconds before presence updates that + /// you are idle. Defaults to 5 minutes. #[serde(default = "default_presence_idle_timeout_s")] pub presence_idle_timeout_s: u64, + + /// Config option to control how many seconds before presence updates that + /// you are offline. Defaults to 30 minutes. #[serde(default = "default_presence_offline_timeout_s")] pub presence_offline_timeout_s: u64, + + /// Config option to enable the presence idle timer for remote users. + /// Disabling is offered as an optimization for servers participating in + /// many large rooms or when resources are limited. Disabling it may cause + /// incorrect presence states (i.e. stuck online) to be seen for some + /// remote users. Defaults to true. #[serde(default = "true_fn")] pub presence_timeout_remote_users: bool, + /// Config option to control whether we should receive remote incoming read + /// receipts. Defaults to true. #[serde(default = "true_fn")] pub allow_incoming_read_receipts: bool, + + /// Config option to control whether we should send read receipts to remote + /// servers. Defaults to true. 
#[serde(default = "true_fn")] pub allow_outgoing_read_receipts: bool, + /// Config option to control outgoing typing updates to federation. Defaults + /// to true. #[serde(default = "true_fn")] pub allow_outgoing_typing: bool, + + /// Config option to control incoming typing updates from federation. + /// Defaults to true. #[serde(default = "true_fn")] pub allow_incoming_typing: bool, + + /// Config option to control maximum time federation user can indicate + /// typing. #[serde(default = "default_typing_federation_timeout_s")] pub typing_federation_timeout_s: u64, + + /// Config option to control minimum time local client can indicate typing. + /// This does not override a client's request to stop typing. It only + /// enforces a minimum value in case of no stop request. #[serde(default = "default_typing_client_timeout_min_s")] pub typing_client_timeout_min_s: u64, + + /// Config option to control maximum time local client can indicate typing. #[serde(default = "default_typing_client_timeout_max_s")] pub typing_client_timeout_max_s: u64, + /// Set this to true for conduwuit to compress HTTP response bodies using + /// zstd. This option does nothing if conduwuit was not built with + /// `zstd_compression` feature. Please be aware that enabling HTTP + /// compression may weaken TLS. Most users should not need to enable this. + /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH + /// before deciding to enable this. #[serde(default)] pub zstd_compression: bool, + + /// Set this to true for conduwuit to compress HTTP response bodies using + /// gzip. This option does nothing if conduwuit was not built with + /// `gzip_compression` feature. Please be aware that enabling HTTP + /// compression may weaken TLS. Most users should not need to enable this. + /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before + /// deciding to enable this. 
 #[serde(default)] pub gzip_compression: bool, + + /// Set this to true for conduwuit to compress HTTP response bodies using + /// brotli. This option does nothing if conduwuit was not built with + /// `brotli_compression` feature. Please be aware that enabling HTTP + /// compression may weaken TLS. Most users should not need to enable this. + /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before + /// deciding to enable this. #[serde(default)] pub brotli_compression: bool, + /// Set to true to allow user type "guest" registrations. Element attempts + /// to register guest users automatically. Defaults to false #[serde(default)] pub allow_guest_registration: bool, + + /// Set to true to log guest registrations in the admin room. + /// Defaults to false as it may be noisy or unnecessary. #[serde(default)] pub log_guest_registrations: bool, + + /// Set to true to allow guest registrations/users to auto join any rooms + /// specified in `auto_join_rooms` Defaults to false #[serde(default)] pub allow_guests_auto_join_rooms: bool, + /// Config option to control whether the legacy unauthenticated Matrix media + /// repository endpoints will be enabled. These endpoints consist of: + /// - /_matrix/media/*/config + /// - /_matrix/media/*/upload + /// - /_matrix/media/*/preview_url + /// - /_matrix/media/*/download/* + /// - /_matrix/media/*/thumbnail/* + /// + /// The authenticated equivalent endpoints are always enabled. + /// + /// Defaults to true for now, but this is highly subject to change, likely + /// in the next release. #[serde(default = "true_fn")] pub allow_legacy_media: bool, + #[serde(default = "true_fn")] pub freeze_legacy_media: bool, + + /// Checks consistency of the media directory at startup: + /// 1. When `media_compat_file_link` is enabled, this check will upgrade + /// media when switching back and forth between Conduit and Conduwuit. + /// Both options must be enabled to handle this. + /// 2. 
When media is deleted from the directory, this check will also delete + /// its database entry. + /// + /// If none of these checks apply to your use cases, and your media + /// directory is significantly large setting this to false may reduce + /// startup time. + /// + /// Enabled by default. #[serde(default = "true_fn")] pub media_startup_check: bool, + + /// Enable backward-compatibility with Conduit's media directory by creating + /// symlinks of media. This option is only necessary if you plan on using + /// Conduit again. Otherwise setting this to false reduces filesystem + /// clutter and overhead for managing these symlinks in the directory. This + /// is now disabled by default. You may still return to upstream Conduit + /// but you have to run Conduwuit at least once with this set to true and + /// allow the media_startup_check to take place before shutting + /// down to return to Conduit. + /// + /// Disabled by default. #[serde(default)] pub media_compat_file_link: bool, + + /// Prunes missing media from the database as part of the media startup + /// checks. This means if you delete files from the media directory the + /// corresponding entries will be removed from the database. This is + /// disabled by default because if the media directory is accidentally moved + /// or inaccessible the metadata entries in the database will be lost with + /// sadness. + /// + /// Disabled by default. #[serde(default)] pub prune_missing_media: bool, + + /// Vector list of servers that conduwuit will refuse to download remote + /// media from. No default. #[serde(default = "Vec::new")] pub prevent_media_downloads_from: Vec, + /// List of forbidden server names that we will block incoming AND outgoing + /// federation with, and block client room joins / remote user invites. + /// + /// This check is applied on the room ID, room alias, sender server name, + /// sender user's server name, inbound federation X-Matrix origin, and + /// outbound federation handler. 
+ /// + /// Basically "global" ACLs. No default. #[serde(default = "Vec::new")] pub forbidden_remote_server_names: Vec, + + /// List of forbidden server names that we will block all outgoing federated + /// room directory requests for. Useful for preventing our users from + /// wandering into bad servers or spaces. No default. #[serde(default = "Vec::new")] pub forbidden_remote_room_directory_server_names: Vec, + /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you + /// do not want conduwuit to send outbound requests to. Defaults to + /// RFC1918, unroutable, loopback, multicast, and testnet addresses for + /// security. + /// + /// To disable, set this to be an empty vector (`[]`). + /// Please be aware that this is *not* a guarantee. You should be using a + /// firewall with zones as doing this on the application layer may have + /// bypasses. + /// + /// Currently this does not account for proxies in use like Synapse does. #[serde(default = "default_ip_range_denylist")] pub ip_range_denylist: Vec, + /// Vector list of domains allowed to send requests to for URL previews. + /// Defaults to none. Note: this is a *contains* match, not an explicit + /// match. Putting "google.com" will match "https://google.com" and + /// "http://mymaliciousdomainexamplegoogle.com" Setting this to "*" will + /// allow all URL previews. Please note that this opens up significant + /// attack surface to your server, you are expected to be aware of the + /// risks by doing so. #[serde(default = "Vec::new")] pub url_preview_domain_contains_allowlist: Vec, + + /// Vector list of explicit domains allowed to send requests to for URL + /// previews. Defaults to none. Note: This is an *explicit* match, not a + /// contains match. Putting "google.com" will match "https://google.com", + /// "http://google.com", but not + /// "https://mymaliciousdomainexamplegoogle.com". Setting this to "*" will + /// allow all URL previews. 
Please note that this opens up significant + /// attack surface to your server, you are expected to be aware of the + /// risks by doing so. #[serde(default = "Vec::new")] pub url_preview_domain_explicit_allowlist: Vec, + + /// Vector list of explicit domains not allowed to send requests to for URL + /// previews. Defaults to none. Note: This is an *explicit* match, not a + /// contains match. Putting "google.com" will match "https://google.com", + /// "http://google.com", but not + /// "https://mymaliciousdomainexamplegoogle.com". The denylist is checked + /// first before allowlist. Setting this to "*" will not do anything. #[serde(default = "Vec::new")] pub url_preview_domain_explicit_denylist: Vec, + + /// Vector list of URLs allowed to send requests to for URL previews. + /// Defaults to none. Note that this is a *contains* match, not an + /// explicit match. Putting "google.com" will match + /// "https://google.com/", + /// "https://google.com/url?q=https://mymaliciousdomainexample.com", and + /// "https://mymaliciousdomainexample.com/hi/google.com" Setting this to + /// "*" will allow all URL previews. Please note that this opens up + /// significant attack surface to your server, you are expected to be + /// aware of the risks by doing so. #[serde(default = "Vec::new")] pub url_preview_url_contains_allowlist: Vec, + + /// Maximum amount of bytes allowed in a URL preview body size when + /// spidering. Defaults to 384KB (384_000 bytes) #[serde(default = "default_url_preview_max_spider_size")] pub url_preview_max_spider_size: usize, + + /// Option to decide whether you would like to run the domain allowlist + /// checks (contains and explicit) on the root domain or not. Does not apply + /// to URL contains allowlist. Defaults to false. 
Example: If this is + /// enabled and you have "wikipedia.org" allowed in the explicit and/or + /// contains domain allowlist, it will allow all subdomains under + /// "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is + /// checked and matched. Useful if the domain contains allowlist is still + /// too broad for you but you still want to allow all the subdomains under a + /// root domain. #[serde(default)] pub url_preview_check_root_domain: bool, + /// List of forbidden room aliases and room IDs as patterns/strings. Values + /// in this list are matched as *contains*. This is checked upon room alias + /// creation, custom room ID creation if used, and startup as warnings if + /// any room aliases in your database have a forbidden room alias/ID. + /// No default. #[serde(default = "RegexSet::empty")] #[serde(with = "serde_regex")] pub forbidden_alias_names: RegexSet, + /// List of forbidden username patterns/strings. Values in this list are + /// matched as *contains*. This is checked upon username availability + /// check, registration, and startup as warnings if any local users in your + /// database have a forbidden username. + /// No default. #[serde(default = "RegexSet::empty")] #[serde(with = "serde_regex")] pub forbidden_usernames: RegexSet, + /// Retry failed and incomplete messages to remote servers immediately upon + /// startup. This is called bursting. If this is disabled, said messages + /// may not be delivered until more messages are queued for that server. Do + /// not change this option unless server resources are extremely limited or + /// the scale of the server's deployment is huge. Do not disable this + /// unless you know what you are doing. #[serde(default = "true_fn")] pub startup_netburst: bool, + + /// messages are dropped and not reattempted. The `startup_netburst` option + /// must be enabled for this value to have any effect. Do not change this + /// value unless you know what you are doing. 
Set this value to -1 to + /// reattempt every message without trimming the queues; this may consume + /// significant disk. Set this value to 0 to drop all messages without any + /// attempt at redelivery. #[serde(default = "default_startup_netburst_keep")] pub startup_netburst_keep: i64, + /// controls whether non-admin local users are forbidden from sending room + /// invites (local and remote), and if non-admin users can receive remote + /// room invites. admins are always allowed to send and receive all room + /// invites. defaults to false #[serde(default)] pub block_non_admin_invites: bool, + + /// Allows admins to enter commands in rooms other than #admins by prefixing + /// with \!admin. The reply will be publicly visible to the room, + /// originating from the sender. defaults to true #[serde(default = "true_fn")] pub admin_escape_commands: bool, + + /// Controls whether the conduwuit admin room console / CLI will immediately + /// activate on startup. This option can also be enabled with `--console` + /// conduwuit argument + /// + /// Defaults to false #[serde(default)] pub admin_console_automatic: bool, + + /// Controls what admin commands will be executed on startup. This is a + /// vector list of strings of admin commands to run. + /// + /// An example of this can be: `admin_execute = ["debug ping puppygock.gay", + /// "debug echo hi"]` + /// + /// This option can also be configured with the `--execute` conduwuit + /// argument and can take standard shell commands and environment variables + /// + /// Such example could be: `./conduwuit --execute "server admin-notice + /// conduwuit has started up at $(date)"` + /// + /// Defaults to nothing. 
#[serde(default)] pub admin_execute: Vec, + + /// Controls whether conduwuit should error and fail to start if an admin + /// execute command (`--execute` / `admin_execute`) fails + /// + /// Defaults to false #[serde(default)] pub admin_execute_errors_ignore: bool, + + /// Controls the max log level for admin command log captures (logs + /// generated from running admin commands) + /// + /// Defaults to "info" on release builds, else "debug" on debug builds #[serde(default = "default_admin_log_capture")] pub admin_log_capture: String, + #[serde(default = "default_admin_room_tag")] pub admin_room_tag: String, + /// Sentry.io crash/panic reporting, performance monitoring/metrics, etc. + /// This is NOT enabled by default. conduwuit's default Sentry reporting + /// endpoint is o4506996327251968.ingest.us.sentry.io + /// + /// Defaults to *false* #[serde(default)] pub sentry: bool, + + /// Sentry reporting URL if a custom one is desired + /// + /// Defaults to conduwuit's default Sentry endpoint: + /// "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, + + /// Report your Conduwuit server_name in Sentry.io crash reports and metrics + /// + /// Defaults to false #[serde(default)] pub sentry_send_server_name: bool, + + /// Performance monitoring/tracing sample rate for Sentry.io + /// + /// Note that too high values may impact performance, and can be disabled by + /// setting it to 0.0 (0%) This value is read as a percentage to Sentry, + /// represented as a decimal + /// + /// Defaults to 15% of traces (0.15) #[serde(default = "default_sentry_traces_sample_rate")] pub sentry_traces_sample_rate: f32, + + /// Whether to attach a stacktrace to Sentry reports. #[serde(default)] pub sentry_attach_stacktrace: bool, + + /// Send panics to sentry. This is true by default, but sentry has to be + /// enabled. 
#[serde(default = "true_fn")] pub sentry_send_panic: bool, + + /// Send errors to sentry. This is true by default, but sentry has to be + /// enabled. This option is only effective in release-mode; forced to false + /// in debug-mode. #[serde(default = "true_fn")] pub sentry_send_error: bool, + + /// Controls the tracing log level for Sentry to send things like + /// breadcrumbs and transactions Defaults to "info" #[serde(default = "default_sentry_filter")] pub sentry_filter: String, + /// Enable the tokio-console. This option is only relevant to developers. + /// See: docs/development.md#debugging-with-tokio-console for more + /// information. #[serde(default)] pub tokio_console: bool, From 2f24d7117a4f493bb90ea20b2b780486ef40272c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Oct 2024 05:15:24 +0000 Subject: [PATCH 0075/1248] further develop serializer for insertions add JSON delegator to db serializer consolidate writes through memfun; simplifications Signed-off-by: Jason Volk --- src/database/mod.rs | 3 +- src/database/ser.rs | 260 +++++++++++++++++++++++------------------- src/database/tests.rs | 232 +++++++++++++++++++++++++++++++++++++ 3 files changed, 376 insertions(+), 119 deletions(-) create mode 100644 src/database/tests.rs diff --git a/src/database/mod.rs b/src/database/mod.rs index e66abf68..c39b2b2f 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -10,6 +10,7 @@ pub mod maps; mod opts; mod ser; mod stream; +mod tests; mod util; mod watchers; @@ -28,7 +29,7 @@ pub use self::{ handle::Handle, keyval::{KeyVal, Slice}, map::Map, - ser::{Interfix, Separator}, + ser::{serialize, serialize_to_array, serialize_to_vec, Interfix, Json, Separator}, }; conduit::mod_ctor! 
{} diff --git a/src/database/ser.rs b/src/database/ser.rs index bd4bbd9a..742f1e34 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,12 +1,24 @@ use std::io::Write; -use conduit::{err, result::DebugInspect, utils::exchange, Error, Result}; +use arrayvec::ArrayVec; +use conduit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; use serde::{ser, Serialize}; #[inline] -pub(crate) fn serialize_to_vec(val: &T) -> Result> +pub fn serialize_to_array(val: T) -> Result> where - T: Serialize + ?Sized, + T: Serialize, +{ + let mut buf = ArrayVec::::new(); + serialize(&mut buf, val)?; + + Ok(buf) +} + +#[inline] +pub fn serialize_to_vec(val: T) -> Result> +where + T: Serialize, { let mut buf = Vec::with_capacity(64); serialize(&mut buf, val)?; @@ -15,10 +27,10 @@ where } #[inline] -pub(crate) fn serialize<'a, W, T>(out: &'a mut W, val: &'a T) -> Result<&'a [u8]> +pub fn serialize<'a, W, T>(out: &'a mut W, val: T) -> Result<&'a [u8]> where - W: Write + AsRef<[u8]>, - T: Serialize + ?Sized, + W: Write + AsRef<[u8]> + 'a, + T: Serialize, { let mut serializer = Serializer { out, @@ -43,6 +55,10 @@ pub(crate) struct Serializer<'a, W: Write> { fin: bool, } +/// Newtype for JSON serialization. +#[derive(Debug, Serialize)] +pub struct Json(pub T); + /// Directive to force separator serialization specifically for prefix keying /// use. This is a quirk of the database schema and prefix iterations. 
#[derive(Debug, Serialize)] @@ -56,38 +72,43 @@ pub struct Separator; impl Serializer<'_, W> { const SEP: &'static [u8] = b"\xFF"; + fn tuple_start(&mut self) { + debug_assert!(!self.sep, "Tuple start with separator set"); + self.sequence_start(); + } + + fn tuple_end(&mut self) -> Result { + self.sequence_end()?; + Ok(()) + } + fn sequence_start(&mut self) { debug_assert!(!self.is_finalized(), "Sequence start with finalization set"); - debug_assert!(!self.sep, "Sequence start with separator set"); - if cfg!(debug_assertions) { - self.depth = self.depth.saturating_add(1); - } + cfg!(debug_assertions).then(|| self.depth = self.depth.saturating_add(1)); } - fn sequence_end(&mut self) { - self.sep = false; - if cfg!(debug_assertions) { - self.depth = self.depth.saturating_sub(1); - } + fn sequence_end(&mut self) -> Result { + cfg!(debug_assertions).then(|| self.depth = self.depth.saturating_sub(1)); + Ok(()) } - fn record_start(&mut self) -> Result<()> { + fn record_start(&mut self) -> Result { debug_assert!(!self.is_finalized(), "Starting a record after serialization finalized"); exchange(&mut self.sep, true) .then(|| self.separator()) .unwrap_or(Ok(())) } - fn separator(&mut self) -> Result<()> { + fn separator(&mut self) -> Result { debug_assert!(!self.is_finalized(), "Writing a separator after serialization finalized"); self.out.write_all(Self::SEP).map_err(Into::into) } + fn write(&mut self, buf: &[u8]) -> Result { self.out.write_all(buf).map_err(Into::into) } + fn set_finalized(&mut self) { debug_assert!(!self.is_finalized(), "Finalization already set"); - if cfg!(debug_assertions) { - self.fin = true; - } + cfg!(debug_assertions).then(|| self.fin = true); } fn is_finalized(&self) -> bool { self.fin } @@ -104,53 +125,65 @@ impl ser::Serializer for &mut Serializer<'_, W> { type SerializeTupleStruct = Self; type SerializeTupleVariant = Self; - fn serialize_map(self, _len: Option) -> Result { - unimplemented!("serialize Map not implemented") - } - fn 
serialize_seq(self, _len: Option) -> Result { self.sequence_start(); - self.record_start()?; Ok(self) } fn serialize_tuple(self, _len: usize) -> Result { - self.sequence_start(); + self.tuple_start(); Ok(self) } fn serialize_tuple_struct(self, _name: &'static str, _len: usize) -> Result { - self.sequence_start(); + self.tuple_start(); Ok(self) } fn serialize_tuple_variant( self, _name: &'static str, _idx: u32, _var: &'static str, _len: usize, ) -> Result { - self.sequence_start(); - Ok(self) + unimplemented!("serialize Tuple Variant not implemented") + } + + fn serialize_map(self, _len: Option) -> Result { + unimplemented!( + "serialize Map not implemented; did you mean to use database::Json() around your serde_json::Value?" + ) } fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - self.sequence_start(); - Ok(self) + unimplemented!( + "serialize Struct not implemented at this time; did you mean to use database::Json() around your struct?" + ) } fn serialize_struct_variant( self, _name: &'static str, _idx: u32, _var: &'static str, _len: usize, ) -> Result { - self.sequence_start(); - Ok(self) + unimplemented!("serialize Struct Variant not implemented") } - fn serialize_newtype_struct(self, _name: &'static str, _value: &T) -> Result { - unimplemented!("serialize New Type Struct not implemented") + #[allow(clippy::needless_borrows_for_generic_args)] // buggy + fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result + where + T: Serialize + ?Sized, + { + debug_assert!( + name != "Json" || type_name::() != "alloc::boxed::Box", + "serializing a Json(RawValue); you can skip serialization instead" + ); + + match name { + "Json" => serde_json::to_writer(&mut self.out, value).map_err(Into::into), + _ => unimplemented!("Unrecognized serialization Newtype {name:?}"), + } } fn serialize_newtype_variant( self, _name: &'static str, _idx: u32, _var: &'static str, _value: &T, ) -> Result { - unimplemented!("serialize New Type Variant not 
implemented") + unimplemented!("serialize Newtype Variant not implemented") } fn serialize_unit_struct(self, name: &'static str) -> Result { @@ -180,35 +213,94 @@ impl ser::Serializer for &mut Serializer<'_, W> { self.serialize_str(v.encode_utf8(&mut buf)) } - fn serialize_str(self, v: &str) -> Result { self.serialize_bytes(v.as_bytes()) } + fn serialize_str(self, v: &str) -> Result { + debug_assert!( + self.depth > 0, + "serializing string at the top-level; you can skip serialization instead" + ); - fn serialize_bytes(self, v: &[u8]) -> Result { self.out.write_all(v).map_err(Error::Io) } + self.serialize_bytes(v.as_bytes()) + } + + fn serialize_bytes(self, v: &[u8]) -> Result { + debug_assert!( + self.depth > 0, + "serializing byte array at the top-level; you can skip serialization instead" + ); + + self.write(v) + } fn serialize_f64(self, _v: f64) -> Result { unimplemented!("serialize f64 not implemented") } fn serialize_f32(self, _v: f32) -> Result { unimplemented!("serialize f32 not implemented") } - fn serialize_i64(self, v: i64) -> Result { self.out.write_all(&v.to_be_bytes()).map_err(Error::Io) } + fn serialize_i64(self, v: i64) -> Result { self.write(&v.to_be_bytes()) } - fn serialize_i32(self, _v: i32) -> Result { unimplemented!("serialize i32 not implemented") } + fn serialize_i32(self, v: i32) -> Result { self.write(&v.to_be_bytes()) } fn serialize_i16(self, _v: i16) -> Result { unimplemented!("serialize i16 not implemented") } fn serialize_i8(self, _v: i8) -> Result { unimplemented!("serialize i8 not implemented") } - fn serialize_u64(self, v: u64) -> Result { self.out.write_all(&v.to_be_bytes()).map_err(Error::Io) } + fn serialize_u64(self, v: u64) -> Result { self.write(&v.to_be_bytes()) } - fn serialize_u32(self, _v: u32) -> Result { unimplemented!("serialize u32 not implemented") } + fn serialize_u32(self, v: u32) -> Result { self.write(&v.to_be_bytes()) } fn serialize_u16(self, _v: u16) -> Result { unimplemented!("serialize u16 not implemented") } 
- fn serialize_u8(self, v: u8) -> Result { self.out.write_all(&[v]).map_err(Error::Io) } + fn serialize_u8(self, v: u8) -> Result { self.write(&[v]) } fn serialize_bool(self, _v: bool) -> Result { unimplemented!("serialize bool not implemented") } fn serialize_unit(self) -> Result { unimplemented!("serialize unit not implemented") } } +impl ser::SerializeSeq for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_element(&mut self, val: &T) -> Result { val.serialize(&mut **self) } + + fn end(self) -> Result { self.sequence_end() } +} + +impl ser::SerializeTuple for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_element(&mut self, val: &T) -> Result { + self.record_start()?; + val.serialize(&mut **self) + } + + fn end(self) -> Result { self.tuple_end() } +} + +impl ser::SerializeTupleStruct for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_field(&mut self, val: &T) -> Result { + self.record_start()?; + val.serialize(&mut **self) + } + + fn end(self) -> Result { self.tuple_end() } +} + +impl ser::SerializeTupleVariant for &mut Serializer<'_, W> { + type Error = Error; + type Ok = (); + + fn serialize_field(&mut self, val: &T) -> Result { + self.record_start()?; + val.serialize(&mut **self) + } + + fn end(self) -> Result { self.tuple_end() } +} + impl ser::SerializeMap for &mut Serializer<'_, W> { type Error = Error; type Ok = (); @@ -221,95 +313,27 @@ impl ser::SerializeMap for &mut Serializer<'_, W> { unimplemented!("serialize Map Val not implemented") } - fn end(self) -> Result { - self.sequence_end(); - Ok(()) - } -} - -impl ser::SerializeSeq for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_element(&mut self, val: &T) -> Result { val.serialize(&mut **self) } - - fn end(self) -> Result { - self.sequence_end(); - Ok(()) - } + fn end(self) -> Result { unimplemented!("serialize Map End not implemented") } } impl ser::SerializeStruct for 
&mut Serializer<'_, W> { type Error = Error; type Ok = (); - fn serialize_field(&mut self, _key: &'static str, val: &T) -> Result { - self.record_start()?; - val.serialize(&mut **self) + fn serialize_field(&mut self, _key: &'static str, _val: &T) -> Result { + unimplemented!("serialize Struct Field not implemented") } - fn end(self) -> Result { - self.sequence_end(); - Ok(()) - } + fn end(self) -> Result { unimplemented!("serialize Struct End not implemented") } } impl ser::SerializeStructVariant for &mut Serializer<'_, W> { type Error = Error; type Ok = (); - fn serialize_field(&mut self, _key: &'static str, val: &T) -> Result { - self.record_start()?; - val.serialize(&mut **self) + fn serialize_field(&mut self, _key: &'static str, _val: &T) -> Result { + unimplemented!("serialize Struct Variant Field not implemented") } - fn end(self) -> Result { - self.sequence_end(); - Ok(()) - } -} - -impl ser::SerializeTuple for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_element(&mut self, val: &T) -> Result { - self.record_start()?; - val.serialize(&mut **self) - } - - fn end(self) -> Result { - self.sequence_end(); - Ok(()) - } -} - -impl ser::SerializeTupleStruct for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_field(&mut self, val: &T) -> Result { - self.record_start()?; - val.serialize(&mut **self) - } - - fn end(self) -> Result { - self.sequence_end(); - Ok(()) - } -} - -impl ser::SerializeTupleVariant for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_field(&mut self, val: &T) -> Result { - self.record_start()?; - val.serialize(&mut **self) - } - - fn end(self) -> Result { - self.sequence_end(); - Ok(()) - } + fn end(self) -> Result { unimplemented!("serialize Struct Variant End not implemented") } } diff --git a/src/database/tests.rs b/src/database/tests.rs new file mode 100644 index 00000000..47dfb32c --- /dev/null +++ b/src/database/tests.rs @@ -0,0 +1,232 @@ 
+#![cfg(test)] +#![allow(clippy::needless_borrows_for_generic_args)] + +use std::fmt::Debug; + +use arrayvec::ArrayVec; +use conduit::ruma::{serde::Raw, RoomId, UserId}; +use serde::Serialize; + +use crate::{ + de, ser, + ser::{serialize_to_vec, Json}, + Interfix, +}; + +#[test] +#[should_panic(expected = "serializing string at the top-level")] +fn ser_str() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let s = serialize_to_vec(&user_id).expect("failed to serialize user_id"); + assert_eq!(&s, user_id.as_bytes()); +} + +#[test] +fn ser_tuple() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let mut a = user_id.as_bytes().to_vec(); + a.push(0xFF); + a.extend_from_slice(room_id.as_bytes()); + + let b = (user_id, room_id); + let b = serialize_to_vec(&b).expect("failed to serialize tuple"); + + assert_eq!(a, b); +} + +#[test] +#[should_panic(expected = "I/O error: failed to write whole buffer")] +fn ser_overflow() { + const BUFSIZE: usize = 10; + + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + assert!(BUFSIZE < user_id.as_str().len() + room_id.as_str().len()); + let mut buf = ArrayVec::::new(); + + let val = (user_id, room_id); + _ = ser::serialize(&mut buf, val).unwrap(); +} + +#[test] +fn ser_complex() { + use conduit::ruma::Mxc; + + #[derive(Debug, Serialize)] + struct Dim { + width: u32, + height: u32, + } + + let mxc = Mxc { + server_name: "example.com".try_into().unwrap(), + media_id: "AbCdEfGhIjK", + }; + + let dim = Dim { + width: 123, + height: 456, + }; + + let mut a = Vec::new(); + a.extend_from_slice(b"mxc://"); + a.extend_from_slice(mxc.server_name.as_bytes()); + a.extend_from_slice(b"/"); + a.extend_from_slice(mxc.media_id.as_bytes()); + a.push(0xFF); + a.extend_from_slice(&dim.width.to_be_bytes()); + a.extend_from_slice(&dim.height.to_be_bytes()); + 
a.push(0xFF); + + let d: &[u32] = &[dim.width, dim.height]; + let b = (mxc, d, Interfix); + let b = serialize_to_vec(b).expect("failed to serialize complex"); + + assert_eq!(a, b); +} + +#[test] +fn ser_json() { + use conduit::ruma::api::client::filter::FilterDefinition; + + let filter = FilterDefinition { + event_fields: Some(vec!["content.body".to_owned()]), + ..Default::default() + }; + + let serialized = serialize_to_vec(Json(&filter)).expect("failed to serialize value"); + + let s = String::from_utf8_lossy(&serialized); + assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); +} + +#[test] +fn ser_json_value() { + use conduit::ruma::api::client::filter::FilterDefinition; + + let filter = FilterDefinition { + event_fields: Some(vec!["content.body".to_owned()]), + ..Default::default() + }; + + let value = serde_json::to_value(filter).expect("failed to serialize to serde_json::value"); + let serialized = serialize_to_vec(Json(value)).expect("failed to serialize value"); + + let s = String::from_utf8_lossy(&serialized); + assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); +} + +#[test] +fn ser_json_macro() { + use serde_json::json; + + #[derive(Serialize)] + struct Foo { + foo: String, + } + + let content = Foo { + foo: "bar".to_owned(), + }; + let content = serde_json::to_value(content).expect("failed to serialize content"); + let sender: &UserId = "@foo:example.com".try_into().unwrap(); + let serialized = serialize_to_vec(Json(json!({ + "sender": sender, + "content": content, + }))) + .expect("failed to serialize value"); + + let s = String::from_utf8_lossy(&serialized); + assert_eq!(&s, r#"{"content":{"foo":"bar"},"sender":"@foo:example.com"}"#); +} + +#[test] +#[should_panic(expected = "serializing string at the top-level")] +fn ser_json_raw() { + use conduit::ruma::api::client::filter::FilterDefinition; + + let filter = FilterDefinition { + event_fields: Some(vec!["content.body".to_owned()]), + ..Default::default() + }; + + let value = 
serde_json::value::to_raw_value(&filter).expect("failed to serialize to raw value"); + let a = serialize_to_vec(value.get()).expect("failed to serialize raw value"); + let s = String::from_utf8_lossy(&a); + assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); +} + +#[test] +#[should_panic(expected = "you can skip serialization instead")] +fn ser_json_raw_json() { + use conduit::ruma::api::client::filter::FilterDefinition; + + let filter = FilterDefinition { + event_fields: Some(vec!["content.body".to_owned()]), + ..Default::default() + }; + + let value = serde_json::value::to_raw_value(&filter).expect("failed to serialize to raw value"); + let a = serialize_to_vec(Json(value)).expect("failed to serialize json value"); + let s = String::from_utf8_lossy(&a); + assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); +} + +#[test] +fn de_tuple() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let raw: &[u8] = b"@user:example.com\xFF!room:example.com"; + let (a, b): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); + + assert_eq!(a, user_id, "deserialized user_id does not match"); + assert_eq!(b, room_id, "deserialized room_id does not match"); +} + +#[test] +fn de_json_array() { + let a = &["foo", "bar", "baz"]; + let s = serde_json::to_vec(a).expect("failed to serialize to JSON array"); + + let b: Raw>> = de::from_slice(&s).expect("failed to deserialize"); + + let d: Vec = serde_json::from_str(b.json().get()).expect("failed to deserialize JSON"); + + for (i, a) in a.iter().enumerate() { + assert_eq!(*a, d[i]); + } +} + +#[test] +fn de_json_raw_array() { + let a = &["foo", "bar", "baz"]; + let s = serde_json::to_vec(a).expect("failed to serialize to JSON array"); + + let b: Raw>> = de::from_slice(&s).expect("failed to deserialize"); + + let c: Vec> = serde_json::from_str(b.json().get()).expect("failed to deserialize JSON"); + + for (i, a) in 
a.iter().enumerate() { + let c = serde_json::to_value(c[i].json()).expect("failed to deserialize JSON to string"); + assert_eq!(*a, c); + } +} + +#[test] +fn ser_array() { + let a: u64 = 123_456; + let b: u64 = 987_654; + + let arr: &[u64] = &[a, b]; + + let mut v = Vec::new(); + v.extend_from_slice(&a.to_be_bytes()); + v.extend_from_slice(&b.to_be_bytes()); + + let s = serialize_to_vec(arr).expect("failed to serialize"); + assert_eq!(&s, &v, "serialization does not match"); +} From d3d11356ee59858dcf26fa66ad2b6c9c4ac13a61 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Oct 2024 22:15:19 +0000 Subject: [PATCH 0076/1248] add serialized insert interface Signed-off-by: Jason Volk --- src/database/map/insert.rs | 187 +++++++++++++++++++++++++++++++++++-- src/database/map/remove.rs | 12 +-- 2 files changed, 186 insertions(+), 13 deletions(-) diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 953c9c94..39a0c422 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -1,21 +1,194 @@ -use std::{convert::AsRef, fmt::Debug}; +//! Insert a Key+Value into the database. +//! +//! Overloads are provided for the user to choose the most efficient +//! serialization or bypass for pre=serialized (raw) inputs. 
+use std::{convert::AsRef, fmt::Debug, io::Write}; + +use arrayvec::ArrayVec; use conduit::implement; use rocksdb::WriteBatchWithTransaction; +use serde::Serialize; -use crate::util::or_else; +use crate::{ser, util::or_else}; +/// Insert Key/Value +/// +/// - Key is serialized +/// - Val is serialized #[implement(super::Map)] -#[tracing::instrument(skip(self, value), fields(%self), level = "trace")] -pub fn insert(&self, key: &K, value: &V) +pub fn put(&self, key: K, val: V) where - K: AsRef<[u8]> + ?Sized + Debug, - V: AsRef<[u8]> + ?Sized, + K: Serialize + Debug, + V: Serialize, +{ + let mut key_buf = Vec::new(); + let mut val_buf = Vec::new(); + self.bput(key, val, (&mut key_buf, &mut val_buf)); +} + +/// Insert Key/Value +/// +/// - Key is serialized +/// - Val is raw +#[implement(super::Map)] +pub fn put_raw(&self, key: K, val: V) +where + K: Serialize + Debug, + V: AsRef<[u8]>, +{ + let mut key_buf = Vec::new(); + self.bput_raw(key, val, &mut key_buf); +} + +/// Insert Key/Value +/// +/// - Key is raw +/// - Val is serialized +#[implement(super::Map)] +pub fn raw_put(&self, key: K, val: V) +where + K: AsRef<[u8]>, + V: Serialize, +{ + let mut val_buf = Vec::new(); + self.raw_bput(key, val, &mut val_buf); +} + +/// Insert Key/Value +/// +/// - Key is serialized +/// - Val is serialized to stack-buffer +#[implement(super::Map)] +pub fn put_aput(&self, key: K, val: V) +where + K: Serialize + Debug, + V: Serialize, +{ + let mut key_buf = Vec::new(); + let mut val_buf = ArrayVec::::new(); + self.bput(key, val, (&mut key_buf, &mut val_buf)); +} + +/// Insert Key/Value +/// +/// - Key is serialized to stack-buffer +/// - Val is serialized +#[implement(super::Map)] +pub fn aput_put(&self, key: K, val: V) +where + K: Serialize + Debug, + V: Serialize, +{ + let mut key_buf = ArrayVec::::new(); + let mut val_buf = Vec::new(); + self.bput(key, val, (&mut key_buf, &mut val_buf)); +} + +/// Insert Key/Value +/// +/// - Key is serialized to stack-buffer +/// - Val is 
serialized to stack-buffer +#[implement(super::Map)] +pub fn aput(&self, key: K, val: V) +where + K: Serialize + Debug, + V: Serialize, +{ + let mut key_buf = ArrayVec::::new(); + let mut val_buf = ArrayVec::::new(); + self.bput(key, val, (&mut key_buf, &mut val_buf)); +} + +/// Insert Key/Value +/// +/// - Key is serialized to stack-buffer +/// - Val is raw +#[implement(super::Map)] +pub fn aput_raw(&self, key: K, val: V) +where + K: Serialize + Debug, + V: AsRef<[u8]>, +{ + let mut key_buf = ArrayVec::::new(); + self.bput_raw(key, val, &mut key_buf); +} + +/// Insert Key/Value +/// +/// - Key is raw +/// - Val is serialized to stack-buffer +#[implement(super::Map)] +pub fn raw_aput(&self, key: K, val: V) +where + K: AsRef<[u8]>, + V: Serialize, +{ + let mut val_buf = ArrayVec::::new(); + self.raw_bput(key, val, &mut val_buf); +} + +/// Insert Key/Value +/// +/// - Key is serialized to supplied buffer +/// - Val is serialized to supplied buffer +#[implement(super::Map)] +pub fn bput(&self, key: K, val: V, mut buf: (Bk, Bv)) +where + K: Serialize + Debug, + V: Serialize, + Bk: Write + AsRef<[u8]>, + Bv: Write + AsRef<[u8]>, +{ + let val = ser::serialize(&mut buf.1, val).expect("failed to serialize insertion val"); + self.bput_raw(key, val, &mut buf.0); +} + +/// Insert Key/Value +/// +/// - Key is serialized to supplied buffer +/// - Val is raw +#[implement(super::Map)] +pub fn bput_raw(&self, key: K, val: V, mut buf: Bk) +where + K: Serialize + Debug, + V: AsRef<[u8]>, + Bk: Write + AsRef<[u8]>, +{ + let key = ser::serialize(&mut buf, key).expect("failed to serialize insertion key"); + self.insert(&key, val); +} + +/// Insert Key/Value +/// +/// - Key is raw +/// - Val is serialized to supplied buffer +#[implement(super::Map)] +pub fn raw_bput(&self, key: K, val: V, mut buf: Bv) +where + K: AsRef<[u8]>, + V: Serialize, + Bv: Write + AsRef<[u8]>, +{ + let val = ser::serialize(&mut buf, val).expect("failed to serialize insertion val"); + self.insert(&key, val); +} + 
+/// Insert Key/Value +/// +/// - Key is raw +/// - Val is raw +#[implement(super::Map)] +#[tracing::instrument(skip_all, fields(%self), level = "trace")] +pub fn insert(&self, key: &K, val: V) +where + K: AsRef<[u8]> + ?Sized, + V: AsRef<[u8]>, { let write_options = &self.write_options; self.db .db - .put_cf_opt(&self.cf(), key, value, write_options) + .put_cf_opt(&self.cf(), key, val, write_options) .or_else(or_else) .expect("database insert error"); diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index 10bb2ff0..42eaa477 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -7,18 +7,18 @@ use serde::Serialize; use crate::{ser, util::or_else}; #[implement(super::Map)] -pub fn del(&self, key: &K) +pub fn del(&self, key: K) where - K: Serialize + ?Sized + Debug, + K: Serialize + Debug, { let mut buf = Vec::::with_capacity(64); self.bdel(key, &mut buf); } #[implement(super::Map)] -pub fn adel(&self, key: &K) +pub fn adel(&self, key: K) where - K: Serialize + ?Sized + Debug, + K: Serialize + Debug, { let mut buf = ArrayVec::::new(); self.bdel(key, &mut buf); @@ -26,9 +26,9 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] -pub fn bdel(&self, key: &K, buf: &mut B) +pub fn bdel(&self, key: K, buf: &mut B) where - K: Serialize + ?Sized + Debug, + K: Serialize + Debug, B: Write + AsRef<[u8]>, { let key = ser::serialize(buf, key).expect("failed to serialize deletion key"); From 19880ce12bf3bf79bcfa8cb21223de48ab268686 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 9 Oct 2024 19:41:52 +0000 Subject: [PATCH 0077/1248] add IgnoreAll directive to deserializer Signed-off-by: Jason Volk --- src/database/de.rs | 27 +++++++++++++++++++++++++-- src/database/mod.rs | 2 +- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index 9ee52267..e5fdf7cb 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -5,6 +5,7 @@ use 
serde::{ Deserialize, }; +/// Deserialize into T from buffer. pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result where T: Deserialize<'a>, @@ -22,6 +23,7 @@ where }) } +/// Deserialization state. pub(crate) struct Deserializer<'de> { buf: &'de [u8], pos: usize, @@ -33,6 +35,11 @@ pub(crate) struct Deserializer<'de> { #[derive(Debug, Deserialize)] pub struct Ignore; +/// Directive to ignore all remaining records. This can be used in a sequence to +/// ignore the rest of the sequence. +#[derive(Debug, Deserialize)] +pub struct IgnoreAll; + impl<'de> Deserializer<'de> { /// Record separator; an intentionally invalid-utf8 byte. const SEP: u8 = b'\xFF'; @@ -53,6 +60,13 @@ impl<'de> Deserializer<'de> { ))) } + /// Called at the start of arrays and tuples + #[inline] + fn sequence_start(&mut self) { + debug_assert!(!self.seq, "Nested sequences are not handled at this time"); + self.seq = true; + } + /// Consume the current record to ignore it. Inside a sequence the next /// record is skipped but at the top-level all records are skipped such that /// deserialization completes with self.finished() == Ok. @@ -61,10 +75,16 @@ impl<'de> Deserializer<'de> { if self.seq { self.record_next(); } else { - self.record_trail(); + self.record_ignore_all(); } } + /// Consume the current and all remaining records to ignore them. Similar to + /// Ignore at the top-level, but it can be provided in a sequence to Ignore + /// all remaining elements. + #[inline] + fn record_ignore_all(&mut self) { self.record_trail(); } + /// Consume the current record. The position pointer is moved to the start /// of the next record. Slice of the current record is returned. 
#[inline] @@ -101,7 +121,6 @@ impl<'de> Deserializer<'de> { ); self.inc_pos(started.into()); - self.seq = true; } /// Consume all remaining bytes, which may include record separators, @@ -128,6 +147,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { where V: Visitor<'de>, { + self.sequence_start(); visitor.visit_seq(self) } @@ -135,6 +155,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { where V: Visitor<'de>, { + self.sequence_start(); visitor.visit_seq(self) } @@ -142,6 +163,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { where V: Visitor<'de>, { + self.sequence_start(); visitor.visit_seq(self) } @@ -170,6 +192,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { { match name { "Ignore" => self.record_ignore(), + "IgnoreAll" => self.record_ignore_all(), _ => unimplemented!("Unrecognized deserialization Directive {name:?}"), }; diff --git a/src/database/mod.rs b/src/database/mod.rs index c39b2b2f..6d3b2079 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -24,7 +24,7 @@ extern crate rust_rocksdb as rocksdb; pub use self::{ database::Database, - de::Ignore, + de::{Ignore, IgnoreAll}, deserialized::Deserialized, handle::Handle, keyval::{KeyVal, Slice}, From 8258d16a94855dae3df68ae8dcdea1bde0601f4e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 9 Oct 2024 05:08:22 +0000 Subject: [PATCH 0078/1248] re-scheme naming of stream iterator overloads Signed-off-by: Jason Volk --- src/database/map/count.rs | 8 +++----- src/database/map/keys_from.rs | 6 +++--- src/database/map/keys_prefix.rs | 6 +++--- src/database/map/rev_keys_from.rs | 6 +++--- src/database/map/rev_keys_prefix.rs | 6 +++--- src/database/map/rev_stream_from.rs | 6 +++--- src/database/map/rev_stream_prefix.rs | 6 +++--- src/database/map/stream_from.rs | 6 +++--- src/database/map/stream_prefix.rs | 6 +++--- src/service/key_backups/mod.rs | 10 +++++----- src/service/rooms/alias/mod.rs | 2 +- 
src/service/rooms/lazy_loading/mod.rs | 2 +- src/service/rooms/metadata/mod.rs | 2 +- src/service/rooms/read_receipt/data.rs | 2 +- src/service/rooms/state/data.rs | 2 +- src/service/rooms/state_cache/mod.rs | 4 ++-- src/service/sending/data.rs | 4 ++-- src/service/users/mod.rs | 2 +- 18 files changed, 42 insertions(+), 44 deletions(-) diff --git a/src/database/map/count.rs b/src/database/map/count.rs index 4356b71f..dab45b7a 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -4,12 +4,10 @@ use conduit::implement; use futures::stream::StreamExt; use serde::Serialize; -use crate::de::Ignore; - /// Count the total number of entries in the map. #[implement(super::Map)] #[inline] -pub fn count(&self) -> impl Future + Send + '_ { self.keys::().count() } +pub fn count(&self) -> impl Future + Send + '_ { self.raw_keys().count() } /// Count the number of entries in the map starting from a lower-bound. /// @@ -20,7 +18,7 @@ pub fn count_from<'a, P>(&'a self, from: &P) -> impl Future + Se where P: Serialize + ?Sized + Debug + 'a, { - self.keys_from::(from).count() + self.keys_from_raw(from).count() } /// Count the number of entries in the map matching a prefix. @@ -32,5 +30,5 @@ pub fn count_prefix<'a, P>(&'a self, prefix: &P) -> impl Future where P: Serialize + ?Sized + Debug + 'a, { - self.keys_prefix::(prefix).count() + self.keys_prefix_raw(prefix).count() } diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 1993750a..4eb3b12e 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -13,13 +13,13 @@ where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, { - self.keys_raw_from(from) + self.keys_from_raw(from) .map(keyval::result_deserialize_key::) } #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn keys_raw_from

    (&self, from: &P) -> impl Stream>> + Send +pub fn keys_from_raw

    (&self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -29,7 +29,7 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn keys_from_raw<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn keys_raw_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index d6c0927b..0ff755f3 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -17,13 +17,13 @@ where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, { - self.keys_raw_prefix(prefix) + self.keys_prefix_raw(prefix) .map(keyval::result_deserialize_key::) } #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn keys_raw_prefix

    (&self, prefix: &P) -> impl Stream>> + Send +pub fn keys_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -34,7 +34,7 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn keys_prefix_raw<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn keys_raw_prefix<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, K: Deserialize<'a> + Send + 'a, diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index e012e60a..b142718c 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -13,13 +13,13 @@ where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, { - self.rev_keys_raw_from(from) + self.rev_keys_from_raw(from) .map(keyval::result_deserialize_key::) } #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_keys_raw_from

    (&self, from: &P) -> impl Stream>> + Send +pub fn rev_keys_from_raw

    (&self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -29,7 +29,7 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_keys_from_raw<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn rev_keys_raw_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index 162c4f9b..5297cecf 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -17,13 +17,13 @@ where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, { - self.rev_keys_raw_prefix(prefix) + self.rev_keys_prefix_raw(prefix) .map(keyval::result_deserialize_key::) } #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_keys_raw_prefix

    (&self, prefix: &P) -> impl Stream>> + Send +pub fn rev_keys_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -34,7 +34,7 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_keys_prefix_raw<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn rev_keys_raw_prefix<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, K: Deserialize<'a> + Send + 'a, diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index c48f406b..78318a7f 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -18,7 +18,7 @@ where K: Deserialize<'a> + Send, V: Deserialize<'a> + Send, { - self.rev_stream_raw_from(from) + self.rev_stream_from_raw(from) .map(keyval::result_deserialize::) } @@ -28,7 +28,7 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_stream_raw_from

    (&self, from: &P) -> impl Stream>> + Send +pub fn rev_stream_from_raw

    (&self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -42,7 +42,7 @@ where /// - Result is deserialized #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_stream_from_raw<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn rev_stream_raw_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 9ef89e9c..601c3298 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -22,7 +22,7 @@ where K: Deserialize<'a> + Send, V: Deserialize<'a> + Send, { - self.rev_stream_raw_prefix(prefix) + self.rev_stream_prefix_raw(prefix) .map(keyval::result_deserialize::) } @@ -32,7 +32,7 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_stream_raw_prefix

    (&self, prefix: &P) -> impl Stream>> + Send +pub fn rev_stream_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -47,7 +47,7 @@ where /// - Result is deserialized #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_stream_prefix_raw<'a, K, V, P>( +pub fn rev_stream_raw_prefix<'a, K, V, P>( &'a self, prefix: &'a P, ) -> impl Stream>> + Send + 'a where diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index db828125..0d3bb1e1 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -18,7 +18,7 @@ where K: Deserialize<'a> + Send, V: Deserialize<'a> + Send, { - self.stream_raw_from(from) + self.stream_from_raw(from) .map(keyval::result_deserialize::) } @@ -28,7 +28,7 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn stream_raw_from

    (&self, from: &P) -> impl Stream>> + Send +pub fn stream_from_raw

    (&self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -42,7 +42,7 @@ where /// - Result is deserialized #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn stream_from_raw<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn stream_raw_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index 56154a8b..cab3dd09 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -22,7 +22,7 @@ where K: Deserialize<'a> + Send, V: Deserialize<'a> + Send, { - self.stream_raw_prefix(prefix) + self.stream_prefix_raw(prefix) .map(keyval::result_deserialize::) } @@ -32,7 +32,7 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn stream_raw_prefix

    (&self, prefix: &P) -> impl Stream>> + Send +pub fn stream_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -47,7 +47,7 @@ where /// - Result is deserialized #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn stream_prefix_raw<'a, K, V, P>( +pub fn stream_raw_prefix<'a, K, V, P>( &'a self, prefix: &'a P, ) -> impl Stream>> + Send + 'a where diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 55263eeb..4c303757 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -79,7 +79,7 @@ pub async fn delete_backup(&self, user_id: &UserId, version: &str) { let key = (user_id, version, Interfix); self.db .backupkeyid_backup - .keys_raw_prefix(&key) + .keys_prefix_raw(&key) .ignore_err() .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) .await; @@ -181,7 +181,7 @@ pub async fn count_keys(&self, user_id: &UserId, version: &str) -> usize { let prefix = (user_id, version); self.db .backupkeyid_backup - .keys_raw_prefix(&prefix) + .keys_prefix_raw(&prefix) .count() .await } @@ -256,7 +256,7 @@ pub async fn delete_all_keys(&self, user_id: &UserId, version: &str) { let key = (user_id, version, Interfix); self.db .backupkeyid_backup - .keys_raw_prefix(&key) + .keys_prefix_raw(&key) .ignore_err() .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) .await; @@ -267,7 +267,7 @@ pub async fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: & let key = (user_id, version, room_id, Interfix); self.db .backupkeyid_backup - .keys_raw_prefix(&key) + .keys_prefix_raw(&key) .ignore_err() .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) .await; @@ -278,7 +278,7 @@ pub async fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &R let key = (user_id, version, room_id, session_id); self.db .backupkeyid_backup - .keys_raw_prefix(&key) + .keys_prefix_raw(&key) .ignore_err() 
.ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) .await; diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 7fac6be6..3f944729 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -101,7 +101,7 @@ impl Service { let prefix = (&room_id, Interfix); self.db .aliasid_alias - .keys_raw_prefix(&prefix) + .keys_prefix_raw(&prefix) .ignore_err() .ready_for_each(|key| self.db.aliasid_alias.remove(key)) .await; diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index e0816d3f..9493dcc4 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -99,7 +99,7 @@ pub async fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room let prefix = (user_id, device_id, room_id, Interfix); self.db .lazyloadedids - .keys_raw_prefix(&prefix) + .keys_prefix_raw(&prefix) .ignore_err() .ready_for_each(|key| self.db.lazyloadedids.remove(key)) .await; diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index d8be6aab..8367eb72 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -50,7 +50,7 @@ pub async fn exists(&self, room_id: &RoomId) -> bool { // Look for PDUs in that room. 
self.db .pduid_pdu - .keys_raw_prefix(&prefix) + .keys_prefix_raw(&prefix) .ignore_err() .next() .await diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index a2c0fabc..74b649ef 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -84,7 +84,7 @@ impl Data { let prefix2 = prefix.clone(); self.readreceiptid_readreceipt - .stream_raw_from(&first_possible_edu) + .stream_from_raw(&first_possible_edu) .ignore_err() .ready_take_while(move |(k, _)| k.starts_with(&prefix2)) .map(move |(k, v)| { diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 3072e3c6..7265038f 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -52,7 +52,7 @@ impl Data { ) { let prefix = (room_id, Interfix); self.roomid_pduleaves - .keys_raw_prefix(&prefix) + .keys_prefix_raw(&prefix) .ignore_err() .ready_for_each(|key| self.roomid_pduleaves.remove(key)) .await; diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 8539c940..edfae529 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -408,7 +408,7 @@ impl Service { pub fn rooms_joined<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { self.db .userroomid_joined - .keys_prefix_raw(user_id) + .keys_raw_prefix(user_id) .ignore_err() .map(|(_, room_id): (Ignore, &RoomId)| room_id) } @@ -469,7 +469,7 @@ impl Service { self.db .roomid_inviteviaservers - .stream_prefix_raw(room_id) + .stream_raw_prefix(room_id) .ignore_err() .map(|(_, servers): KeyVal<'_>| *servers.last().expect("at least one server")) } diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 6f4b5b97..96d4a6a9 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -101,7 +101,7 @@ impl Data { pub fn active_requests_for(&self, destination: &Destination) -> impl Stream + Send + '_ { let prefix = 
destination.get_prefix(); self.servercurrentevent_data - .stream_raw_prefix(&prefix) + .stream_prefix_raw(&prefix) .ignore_err() .map(|(key, val)| { let (_, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); @@ -136,7 +136,7 @@ impl Data { pub fn queued_requests(&self, destination: &Destination) -> impl Stream + Send + '_ { let prefix = destination.get_prefix(); self.servernameevent_data - .stream_raw_prefix(&prefix) + .stream_prefix_raw(&prefix) .ignore_err() .map(|(key, val)| { let (_, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 71a93666..a99a7df4 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -311,7 +311,7 @@ impl Service { let prefix = (user_id, device_id, Interfix); self.db .todeviceid_events - .keys_raw_prefix(&prefix) + .keys_prefix_raw(&prefix) .ignore_err() .ready_for_each(|key| self.db.todeviceid_events.remove(key)) .await; From 2ed0c267eb698c33befc4daa482811f0ae45707a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 7 Oct 2024 17:54:27 +0000 Subject: [PATCH 0079/1248] Refactor for structured insertions Signed-off-by: Jason Volk --- Cargo.lock | 26 +-- Cargo.toml | 2 +- src/api/client/sync.rs | 5 +- src/api/server/invite.rs | 6 - src/service/account_data/mod.rs | 40 +--- src/service/globals/data.rs | 32 ++- src/service/globals/migrations.rs | 17 +- src/service/key_backups/mod.rs | 62 ++---- src/service/media/data.rs | 103 +++------ src/service/media/migrations.rs | 2 +- src/service/presence/data.rs | 18 +- src/service/presence/presence.rs | 4 - src/service/pusher/mod.rs | 16 +- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/lazy_loading/mod.rs | 12 +- src/service/rooms/metadata/mod.rs | 8 +- src/service/rooms/outlier/mod.rs | 7 +- src/service/rooms/pdu_metadata/data.rs | 16 +- src/service/rooms/read_receipt/data.rs | 40 +--- src/service/rooms/short/mod.rs | 47 ++-- 
src/service/rooms/state/data.rs | 13 +- src/service/rooms/state_accessor/mod.rs | 6 + src/service/rooms/state_cache/data.rs | 67 +++--- src/service/rooms/state_cache/mod.rs | 89 ++++---- src/service/rooms/timeline/data.rs | 18 +- src/service/rooms/user/data.rs | 26 +-- src/service/sending/data.rs | 3 +- src/service/sending/dest.rs | 2 +- src/service/uiaa/mod.rs | 17 +- src/service/updates/mod.rs | 5 +- src/service/users/mod.rs | 272 +++++++++--------------- 31 files changed, 364 insertions(+), 621 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cae6994c..db1394ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2976,7 +2976,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "assign", "js_int", @@ -2998,7 +2998,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "js_int", "ruma-common", @@ -3010,7 +3010,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "as_variant", "assign", @@ -3033,7 +3033,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3063,7 +3063,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3087,7 +3087,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "bytes", "http", @@ -3105,7 +3105,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "js_int", "thiserror", @@ -3114,7 +3114,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "js_int", 
"ruma-common", @@ -3124,7 +3124,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "cfg-if", "once_cell", @@ -3140,7 +3140,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "js_int", "ruma-common", @@ -3152,7 +3152,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "headers", "http", @@ -3165,7 +3165,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3181,7 +3181,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f485a0265c67a59df75fc6686787538172fa4cac#f485a0265c67a59df75fc6686787538172fa4cac" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" dependencies = [ "futures-util", "itertools 0.13.0", diff --git a/Cargo.toml b/Cargo.toml index 25d1001d..0a98befd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -315,7 +315,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "f485a0265c67a59df75fc6686787538172fa4cac" +rev = "3109496a1f91357c89cbb57cf86f179e2cb013e7" features = [ "compat", "rand", diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index 8c4c6a44..65af775d 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -7,7 +7,7 @@ use std::{ use axum::extract::State; use conduit::{ debug, err, error, is_equal_to, - result::{FlatOk, IntoIsOk}, + result::FlatOk, utils::{ math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, @@ -1136,8 +1136,7 @@ async fn share_encrypted_room( services .rooms .state_accessor - .room_state_get(other_room_id, &StateEventType::RoomEncryption, "") - .map(Result::into_is_ok) + .is_encrypted_room(other_room_id) }) .await } diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 447e54be..f02655e6 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -65,12 +65,6 @@ pub(crate) async fn create_invite_route( return Err!(Request(Forbidden("Server is banned on this homeserver."))); } - if let Some(via) = &body.via { - if via.is_empty() { - return Err!(Request(InvalidParam("via field must not be empty."))); - } - } - let mut signed_event = utils::to_canonical_object(&body.event) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 8065ac55..ac3f5f83 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ 
-5,7 +5,7 @@ use conduit::{ utils::{stream::TryIgnore, ReadyExt}, Err, Error, Result, }; -use database::{Deserialized, Handle, Map}; +use database::{Deserialized, Handle, Json, Map}; use futures::{StreamExt, TryFutureExt}; use ruma::{ events::{ @@ -56,41 +56,19 @@ impl crate::Service for Service { pub async fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, data: &serde_json::Value, ) -> Result<()> { - let event_type = event_type.to_string(); - let count = self.services.globals.next_count()?; - - let mut prefix = room_id - .map(ToString::to_string) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xFF); - - let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&count.to_be_bytes()); - roomuserdataid.push(0xFF); - roomuserdataid.extend_from_slice(event_type.as_bytes()); - - let mut key = prefix; - key.extend_from_slice(event_type.as_bytes()); - if data.get("type").is_none() || data.get("content").is_none() { return Err!(Request(InvalidParam("Account data doesn't have all required fields."))); } - self.db.roomuserdataid_accountdata.insert( - &roomuserdataid, - &serde_json::to_vec(&data).expect("to_vec always works on json values"), - ); - - let prev_key = (room_id, user_id, &event_type); - let prev = self.db.roomusertype_roomuserdataid.qry(&prev_key).await; - + let count = self.services.globals.next_count().unwrap(); + let roomuserdataid = (room_id, user_id, count, &event_type); self.db - .roomusertype_roomuserdataid - .insert(&key, &roomuserdataid); + .roomuserdataid_accountdata + .put(roomuserdataid, Json(data)); + + let key = (room_id, user_id, &event_type); + let prev = self.db.roomusertype_roomuserdataid.qry(&key).await; + self.db.roomusertype_roomuserdataid.put(key, roomuserdataid); // Remove old entry if let Ok(prev) = prev { diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 
57a295d9..3638cb56 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -4,7 +4,7 @@ use std::{ }; use conduit::{trace, utils, utils::rand, Error, Result, Server}; -use database::{Database, Deserialized, Map}; +use database::{Database, Deserialized, Json, Map}; use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, @@ -83,7 +83,7 @@ impl Data { .checked_add(1) .expect("counter must not overflow u64"); - self.global.insert(COUNTER, &counter.to_be_bytes()); + self.global.insert(COUNTER, counter.to_be_bytes()); Ok(*counter) } @@ -259,29 +259,21 @@ impl Data { pub async fn add_signing_key( &self, origin: &ServerName, new_keys: ServerSigningKeys, ) -> BTreeMap { - // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin).await; - - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).map_err(Into::into)) + // (timo) Not atomic, but this is not critical + let mut keys: ServerSigningKeys = self + .server_signingkeys + .get(origin) + .await + .deserialized() .unwrap_or_else(|_| { // Just insert "now", it doesn't matter ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) }); - let ServerSigningKeys { - verify_keys, - old_verify_keys, - .. 
- } = new_keys; + keys.verify_keys.extend(new_keys.verify_keys); + keys.old_verify_keys.extend(new_keys.old_verify_keys); - keys.verify_keys.extend(verify_keys); - keys.old_verify_keys.extend(old_verify_keys); - - self.server_signingkeys.insert( - origin.as_bytes(), - &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - ); + self.server_signingkeys.raw_put(origin, Json(&keys)); let mut tree = keys.verify_keys; tree.extend( @@ -324,7 +316,7 @@ impl Data { #[inline] pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.global.insert(b"version", &new_version.to_be_bytes()); + self.global.raw_put(b"version", new_version); Ok(()) } diff --git a/src/service/globals/migrations.rs b/src/service/globals/migrations.rs index 334e71c6..c953e7b1 100644 --- a/src/service/globals/migrations.rs +++ b/src/service/globals/migrations.rs @@ -2,7 +2,7 @@ use conduit::{ debug_info, debug_warn, error, info, result::NotFound, utils::{stream::TryIgnore, IterStream, ReadyExt}, - warn, Err, Error, Result, + warn, Err, Result, }; use futures::{FutureExt, StreamExt}; use itertools::Itertools; @@ -37,10 +37,9 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { // requires recreating the database from scratch. 
if users_count > 0 { let conduit_user = &services.globals.server_user; - if !services.users.exists(conduit_user).await { - error!("The {} server user does not exist, and the database is not new.", conduit_user); - return Err(Error::bad_database( + error!("The {conduit_user} server user does not exist, and the database is not new."); + return Err!(Database( "Cannot reuse an existing database after changing the server name, please delete the old one first.", )); } @@ -62,9 +61,9 @@ async fn fresh(services: &Services) -> Result<()> { .db .bump_database_version(DATABASE_VERSION)?; - db["global"].insert(b"feat_sha256_media", &[]); - db["global"].insert(b"fix_bad_double_separator_in_state_cache", &[]); - db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[]); + db["global"].insert(b"feat_sha256_media", []); + db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); + db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); // Create the admin room and server user on first run crate::admin::create_admin_room(services).await?; @@ -359,7 +358,7 @@ async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result< .await; db.db.cleanup()?; - db["global"].insert(b"fix_bad_double_separator_in_state_cache", &[]); + db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); info!("Finished fixing"); Ok(()) @@ -440,7 +439,7 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) } db.db.cleanup()?; - db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[]); + db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); info!("Finished fixing"); Ok(()) diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 4c303757..bae6f214 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -5,7 +5,7 @@ use conduit::{ utils::stream::{ReadyExt, TryIgnore}, Err, Result, }; -use 
database::{Deserialized, Ignore, Interfix, Map}; +use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::StreamExt; use ruma::{ api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, @@ -50,31 +50,21 @@ impl crate::Service for Service { #[implement(Service)] pub fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw) -> Result { let version = self.services.globals.next_count()?.to_string(); + let count = self.services.globals.next_count()?; - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); + let key = (user_id, &version); + self.db.backupid_algorithm.put(key, Json(backup_metadata)); - self.db.backupid_algorithm.insert( - &key, - &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), - ); - - self.db - .backupid_etag - .insert(&key, &self.services.globals.next_count()?.to_be_bytes()); + self.db.backupid_etag.put(key, count); Ok(version) } #[implement(Service)] pub async fn delete_backup(&self, user_id: &UserId, version: &str) { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - - self.db.backupid_algorithm.remove(&key); - self.db.backupid_etag.remove(&key); + let key = (user_id, version); + self.db.backupid_algorithm.del(key); + self.db.backupid_etag.del(key); let key = (user_id, version, Interfix); self.db @@ -86,26 +76,21 @@ pub async fn delete_backup(&self, user_id: &UserId, version: &str) { } #[implement(Service)] -pub async fn update_backup( - &self, user_id: &UserId, version: &str, backup_metadata: &Raw, -) -> Result { +pub async fn update_backup<'a>( + &self, user_id: &UserId, version: &'a str, backup_metadata: &Raw, +) -> Result<&'a str> { let key = (user_id, version); if self.db.backupid_algorithm.qry(&key).await.is_err() { return Err!(Request(NotFound("Tried to update nonexistent backup."))); } - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - 
key.extend_from_slice(version.as_bytes()); - + let count = self.services.globals.next_count().unwrap(); + self.db.backupid_etag.put(key, count); self.db .backupid_algorithm - .insert(&key, backup_metadata.json().get().as_bytes()); - self.db - .backupid_etag - .insert(&key, &self.services.globals.next_count()?.to_be_bytes()); + .put_raw(key, backup_metadata.json().get()); - Ok(version.to_owned()) + Ok(version) } #[implement(Service)] @@ -156,22 +141,13 @@ pub async fn add_key( return Err!(Request(NotFound("Tried to update nonexistent backup."))); } - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(version.as_bytes()); - - self.db - .backupid_etag - .insert(&key, &self.services.globals.next_count()?.to_be_bytes()); - - key.push(0xFF); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(session_id.as_bytes()); + let count = self.services.globals.next_count().unwrap(); + self.db.backupid_etag.put(key, count); + let key = (user_id, version, room_id, session_id); self.db .backupkeyid_backup - .insert(&key, key_data.json().get().as_bytes()); + .put_raw(key, key_data.json().get()); Ok(()) } diff --git a/src/service/media/data.rs b/src/service/media/data.rs index b2271883..9afbd708 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,13 +1,13 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use conduit::{ - debug, debug_info, trace, + debug, debug_info, err, utils::{str_from_bytes, stream::TryIgnore, string_from_bytes, ReadyExt}, Err, Error, Result, }; -use database::{Database, Map}; +use database::{Database, Interfix, Map}; use futures::StreamExt; -use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; use super::{preview::UrlPreviewData, thumbnail::Dim}; @@ -37,39 +37,13 @@ impl Data { &self, mxc: &Mxc<'_>, user: Option<&UserId>, dim: &Dim, 
content_disposition: Option<&ContentDisposition>, content_type: Option<&str>, ) -> Result> { - let mut key: Vec = Vec::new(); - key.extend_from_slice(b"mxc://"); - key.extend_from_slice(mxc.server_name.as_bytes()); - key.extend_from_slice(b"/"); - key.extend_from_slice(mxc.media_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(&dim.width.to_be_bytes()); - key.extend_from_slice(&dim.height.to_be_bytes()); - key.push(0xFF); - key.extend_from_slice( - content_disposition - .map(ToString::to_string) - .unwrap_or_default() - .as_bytes(), - ); - key.push(0xFF); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); - - self.mediaid_file.insert(&key, &[]); - + let dim: &[u32] = &[dim.width, dim.height]; + let key = (mxc, dim, content_disposition, content_type); + let key = database::serialize_to_vec(key)?; + self.mediaid_file.insert(&key, []); if let Some(user) = user { - let mut key: Vec = Vec::new(); - key.extend_from_slice(b"mxc://"); - key.extend_from_slice(mxc.server_name.as_bytes()); - key.extend_from_slice(b"/"); - key.extend_from_slice(mxc.media_id.as_bytes()); - let user = user.as_bytes().to_vec(); - self.mediaid_user.insert(&key, &user); + let key = (mxc, user); + self.mediaid_user.put_raw(key, user); } Ok(key) @@ -78,33 +52,23 @@ impl Data { pub(super) async fn delete_file_mxc(&self, mxc: &Mxc<'_>) { debug!("MXC URI: {mxc}"); - let mut prefix: Vec = Vec::new(); - prefix.extend_from_slice(b"mxc://"); - prefix.extend_from_slice(mxc.server_name.as_bytes()); - prefix.extend_from_slice(b"/"); - prefix.extend_from_slice(mxc.media_id.as_bytes()); - prefix.push(0xFF); - - trace!("MXC db prefix: {prefix:?}"); + let prefix = (mxc, Interfix); self.mediaid_file - .raw_keys_prefix(&prefix) + .keys_prefix_raw(&prefix) .ignore_err() - .ready_for_each(|key| { - debug!("Deleting key: {:?}", key); - self.mediaid_file.remove(key); - }) + .ready_for_each(|key| self.mediaid_file.remove(key)) .await; self.mediaid_user - 
.raw_stream_prefix(&prefix) + .stream_prefix_raw(&prefix) .ignore_err() .ready_for_each(|(key, val)| { - if key.starts_with(&prefix) { - let user = str_from_bytes(val).unwrap_or_default(); - debug_info!("Deleting key {key:?} which was uploaded by user {user}"); + debug_assert!(key.starts_with(mxc.to_string().as_bytes()), "key should start with the mxc"); - self.mediaid_user.remove(key); - } + let user = str_from_bytes(val).unwrap_or_default(); + debug_info!("Deleting key {key:?} which was uploaded by user {user}"); + + self.mediaid_user.remove(key); }) .await; } @@ -113,16 +77,10 @@ impl Data { pub(super) async fn search_mxc_metadata_prefix(&self, mxc: &Mxc<'_>) -> Result>> { debug!("MXC URI: {mxc}"); - let mut prefix: Vec = Vec::new(); - prefix.extend_from_slice(b"mxc://"); - prefix.extend_from_slice(mxc.server_name.as_bytes()); - prefix.extend_from_slice(b"/"); - prefix.extend_from_slice(mxc.media_id.as_bytes()); - prefix.push(0xFF); - + let prefix = (mxc, Interfix); let keys: Vec> = self .mediaid_file - .raw_keys_prefix(&prefix) + .keys_prefix_raw(&prefix) .ignore_err() .map(<[u8]>::to_vec) .collect() @@ -138,24 +96,17 @@ impl Data { } pub(super) async fn search_file_metadata(&self, mxc: &Mxc<'_>, dim: &Dim) -> Result { - let mut prefix: Vec = Vec::new(); - prefix.extend_from_slice(b"mxc://"); - prefix.extend_from_slice(mxc.server_name.as_bytes()); - prefix.extend_from_slice(b"/"); - prefix.extend_from_slice(mxc.media_id.as_bytes()); - prefix.push(0xFF); - prefix.extend_from_slice(&dim.width.to_be_bytes()); - prefix.extend_from_slice(&dim.height.to_be_bytes()); - prefix.push(0xFF); + let dim: &[u32] = &[dim.width, dim.height]; + let prefix = (mxc, dim, Interfix); let key = self .mediaid_file - .raw_keys_prefix(&prefix) + .keys_prefix_raw(&prefix) .ignore_err() .map(ToOwned::to_owned) .next() .await - .ok_or_else(|| Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; + .ok_or_else(|| err!(Request(NotFound("Media not found"))))?; let mut parts = 
key.rsplit(|&b| b == 0xFF); @@ -215,9 +166,7 @@ impl Data { Ok(()) } - pub(super) fn set_url_preview( - &self, url: &str, data: &UrlPreviewData, timestamp: std::time::Duration, - ) -> Result<()> { + pub(super) fn set_url_preview(&self, url: &str, data: &UrlPreviewData, timestamp: Duration) -> Result<()> { let mut value = Vec::::new(); value.extend_from_slice(×tamp.as_secs().to_be_bytes()); value.push(0xFF); diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 2d1b39f9..0e358d44 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -54,7 +54,7 @@ pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { services.globals.db.bump_database_version(13)?; } - db["global"].insert(b"feat_sha256_media", &[]); + db["global"].insert(b"feat_sha256_media", []); info!("Finished applying sha256_media"); Ok(()) } diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 9c9d0ae3..8522746f 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -5,7 +5,7 @@ use conduit::{ utils::{stream::TryIgnore, ReadyExt}, Result, }; -use database::{Deserialized, Map}; +use database::{Deserialized, Json, Map}; use futures::Stream; use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; @@ -107,14 +107,12 @@ impl Data { last_active_ts, status_msg, ); + let count = self.services.globals.next_count()?; let key = presenceid_key(count, user_id); - self.presenceid_presence - .insert(&key, &presence.to_json_bytes()?); - - self.userid_presenceid - .insert(user_id.as_bytes(), &count.to_be_bytes()); + self.presenceid_presence.raw_put(key, Json(presence)); + self.userid_presenceid.raw_put(user_id, count); if let Ok((last_count, _)) = last_presence { let key = presenceid_key(last_count, user_id); @@ -136,7 +134,7 @@ impl Data { let key = presenceid_key(count, user_id); self.presenceid_presence.remove(&key); - 
self.userid_presenceid.remove(user_id.as_bytes()); + self.userid_presenceid.remove(user_id); } pub fn presence_since(&self, since: u64) -> impl Stream)> + Send + '_ { @@ -152,7 +150,11 @@ impl Data { #[inline] fn presenceid_key(count: u64, user_id: &UserId) -> Vec { - [count.to_be_bytes().to_vec(), user_id.as_bytes().to_vec()].concat() + let cap = size_of::().saturating_add(user_id.as_bytes().len()); + let mut key = Vec::with_capacity(cap); + key.extend_from_slice(&count.to_be_bytes()); + key.extend_from_slice(user_id.as_bytes()); + key } #[inline] diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index 0d5c226b..c4372003 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -35,10 +35,6 @@ impl Presence { serde_json::from_slice(bytes).map_err(|_| Error::bad_database("Invalid presence data in database")) } - pub(super) fn to_json_bytes(&self) -> Result> { - serde_json::to_vec(self).map_err(|_| Error::bad_database("Could not serialize Presence to JSON")) - } - /// Creates a PresenceEvent from available data. 
pub(super) async fn to_presence_event(&self, user_id: &UserId, users: &users::Service) -> PresenceEvent { let now = utils::millis_since_unix_epoch(); diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index e7b1824a..af15e332 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -6,7 +6,7 @@ use conduit::{ utils::{stream::TryIgnore, string_from_bytes}, Err, PduEvent, Result, }; -use database::{Deserialized, Ignore, Interfix, Map}; +use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ @@ -68,18 +68,12 @@ impl Service { pub fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) { match pusher { set_pusher::v3::PusherAction::Post(data) => { - let mut key = sender.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(data.pusher.ids.pushkey.as_bytes()); - self.db - .senderkey_pusher - .insert(&key, &serde_json::to_vec(pusher).expect("Pusher is valid JSON value")); + let key = (sender, &data.pusher.ids.pushkey); + self.db.senderkey_pusher.put(key, Json(pusher)); }, set_pusher::v3::PusherAction::Delete(ids) => { - let mut key = sender.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(ids.pushkey.as_bytes()); - self.db.senderkey_pusher.remove(&key); + let key = (sender, &ids.pushkey); + self.db.senderkey_pusher.del(key); }, } } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 2112ecef..f366ffe2 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -26,10 +26,10 @@ impl crate::Service for Service { } #[implement(Service)] -pub fn set_public(&self, room_id: &RoomId) { self.db.publicroomids.insert(room_id.as_bytes(), &[]); } +pub fn set_public(&self, room_id: &RoomId) { self.db.publicroomids.insert(room_id, []); } #[implement(Service)] -pub fn set_not_public(&self, room_id: &RoomId) { self.db.publicroomids.remove(room_id.as_bytes()); } +pub 
fn set_not_public(&self, room_id: &RoomId) { self.db.publicroomids.remove(room_id); } #[implement(Service)] pub async fn is_public_room(&self, room_id: &RoomId) -> bool { self.db.publicroomids.get(room_id).await.is_ok() } diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 9493dcc4..7a4da2a6 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -79,17 +79,9 @@ pub fn lazy_load_confirm_delivery(&self, user_id: &UserId, device_id: &DeviceId, return; }; - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xFF); - for ll_id in &user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.db.lazyloadedids.insert(&key, &[]); + let key = (user_id, device_id, room_id, ll_id); + self.db.lazyloadedids.put_raw(key, []); } } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 8367eb72..4ee390a5 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -64,9 +64,9 @@ pub fn iter_ids(&self) -> impl Stream + Send + '_ { self.db.room #[inline] pub fn disable_room(&self, room_id: &RoomId, disabled: bool) { if disabled { - self.db.disabledroomids.insert(room_id.as_bytes(), &[]); + self.db.disabledroomids.insert(room_id, []); } else { - self.db.disabledroomids.remove(room_id.as_bytes()); + self.db.disabledroomids.remove(room_id); } } @@ -74,9 +74,9 @@ pub fn disable_room(&self, room_id: &RoomId, disabled: bool) { #[inline] pub fn ban_room(&self, room_id: &RoomId, banned: bool) { if banned { - self.db.bannedroomids.insert(room_id.as_bytes(), &[]); + self.db.bannedroomids.insert(room_id, []); } else { - self.db.bannedroomids.remove(room_id.as_bytes()); + self.db.bannedroomids.remove(room_id); } } diff --git a/src/service/rooms/outlier/mod.rs 
b/src/service/rooms/outlier/mod.rs index b9d04263..03e77838 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use conduit::{implement, Result}; -use database::{Deserialized, Map}; +use database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; use crate::PduEvent; @@ -50,8 +50,5 @@ pub async fn get_pdu_outlier(&self, event_id: &EventId) -> Result { #[implement(Service)] #[tracing::instrument(skip(self, pdu), level = "debug")] pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) { - self.db.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ); + self.db.eventid_outlierpdu.raw_put(event_id, Json(pdu)); } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 8e045658..4d570e6d 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -39,9 +39,10 @@ impl Data { } pub(super) fn add_relation(&self, from: u64, to: u64) { - let mut key = to.to_be_bytes().to_vec(); - key.extend_from_slice(&from.to_be_bytes()); - self.tofrom_relation.insert(&key, &[]); + const BUFSIZE: usize = size_of::() * 2; + + let key: &[u64] = &[to, from]; + self.tofrom_relation.aput_raw::(key, []); } pub(super) fn relations_until<'a>( @@ -78,9 +79,8 @@ impl Data { pub(super) fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) { for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[]); + let key = (room_id, prev); + self.referencedevents.put_raw(key, []); } } @@ -89,9 +89,7 @@ impl Data { self.referencedevents.qry(&key).await.is_ok() } - pub(super) fn mark_event_soft_failed(&self, event_id: &EventId) { - self.softfailedeventids.insert(event_id.as_bytes(), &[]); - } + pub(super) fn mark_event_soft_failed(&self, event_id: 
&EventId) { self.softfailedeventids.insert(event_id, []); } pub(super) async fn is_event_soft_failed(&self, event_id: &EventId) -> bool { self.softfailedeventids.get(event_id).await.is_ok() diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index 74b649ef..80a35e88 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -5,7 +5,7 @@ use conduit::{ utils::{stream::TryIgnore, ReadyExt}, Error, Result, }; -use database::{Deserialized, Map}; +use database::{Deserialized, Json, Map}; use futures::{Stream, StreamExt}; use ruma::{ events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent}, @@ -44,33 +44,19 @@ impl Data { pub(super) async fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) { type KeyVal<'a> = (&'a RoomId, u64, &'a UserId); - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - // Remove old entry + let last_possible_key = (room_id, u64::MAX); self.readreceiptid_readreceipt - .rev_keys_from_raw(&last_possible_key) + .rev_keys_from(&last_possible_key) .ignore_err() .ready_take_while(|(r, ..): &KeyVal<'_>| *r == room_id) .ready_filter_map(|(r, c, u): KeyVal<'_>| (u == user_id).then_some((r, c, u))) - .ready_for_each(|old: KeyVal<'_>| { - // This is the old room_latest - self.readreceiptid_readreceipt.del(&old); - }) + .ready_for_each(|old: KeyVal<'_>| self.readreceiptid_readreceipt.del(old)) .await; - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&self.services.globals.next_count().unwrap().to_be_bytes()); - room_latest_id.push(0xFF); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(event).expect("EduEvent::to_string always works"), - ); + let count = self.services.globals.next_count().unwrap(); + let 
latest_id = (room_id, count, user_id); + self.readreceiptid_readreceipt.put(latest_id, Json(event)); } pub(super) fn readreceipts_since<'a>( @@ -113,15 +99,11 @@ impl Data { } pub(super) fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(user_id.as_bytes()); + let key = (room_id, user_id); + let next_count = self.services.globals.next_count().unwrap(); - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes()); - - self.roomuserid_lastprivatereadupdate - .insert(&key, &self.services.globals.next_count().unwrap().to_be_bytes()); + self.roomuserid_privateread.put(key, count); + self.roomuserid_lastprivatereadupdate.put(key, next_count); } pub(super) async fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result { diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index bd8fdcc9..609c0e07 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{mem::size_of_val, sync::Arc}; use conduit::{err, implement, utils, Result}; use database::{Deserialized, Map}; @@ -46,6 +46,8 @@ impl crate::Service for Service { #[implement(Service)] pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { + const BUFSIZE: usize = size_of::(); + if let Ok(shorteventid) = self .db .eventid_shorteventid @@ -57,12 +59,15 @@ pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { } let shorteventid = self.services.globals.next_count().unwrap(); + debug_assert!(size_of_val(&shorteventid) == BUFSIZE, "buffer requirement changed"); + self.db .eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes()); + .raw_aput::(event_id, shorteventid); + self.db .shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes()); + .aput_raw::(shorteventid, event_id); shorteventid } @@ -77,13 +82,17 @@ pub 
async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> .map(|(i, result)| match result { Ok(ref short) => utils::u64_from_u8(short), Err(_) => { + const BUFSIZE: usize = size_of::(); + let short = self.services.globals.next_count().unwrap(); + debug_assert!(size_of_val(&short) == BUFSIZE, "buffer requirement changed"); + self.db .eventid_shorteventid - .insert(event_ids[i], &short.to_be_bytes()); + .raw_aput::(event_ids[i], short); self.db .shorteventid_eventid - .insert(&short.to_be_bytes(), event_ids[i]); + .aput_raw::(short, event_ids[i]); short }, @@ -103,7 +112,9 @@ pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &s #[implement(Service)] pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> u64 { - let key = (event_type.to_string(), state_key); + const BUFSIZE: usize = size_of::(); + + let key = (event_type, state_key); if let Ok(shortstatekey) = self .db .statekey_shortstatekey @@ -114,17 +125,16 @@ pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, sta return shortstatekey; } - let mut key = event_type.to_string().as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(state_key.as_bytes()); - let shortstatekey = self.services.globals.next_count().unwrap(); + debug_assert!(size_of_val(&shortstatekey) == BUFSIZE, "buffer requirement changed"); + self.db .statekey_shortstatekey - .insert(&key, &shortstatekey.to_be_bytes()); + .put_aput::(key, shortstatekey); + self.db .shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &key); + .aput_put::(shortstatekey, key); shortstatekey } @@ -177,6 +187,8 @@ pub async fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(State /// Returns (shortstatehash, already_existed) #[implement(Service)] pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, bool) { + const BUFSIZE: usize = size_of::(); + if let Ok(shortstatehash) = self .db 
.statehash_shortstatehash @@ -188,9 +200,11 @@ pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, boo } let shortstatehash = self.services.globals.next_count().unwrap(); + debug_assert!(size_of_val(&shortstatehash) == BUFSIZE, "buffer requirement changed"); + self.db .statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes()); + .raw_aput::(state_hash, shortstatehash); (shortstatehash, false) } @@ -208,10 +222,15 @@ pub async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> u64 { .await .deserialized() .unwrap_or_else(|_| { + const BUFSIZE: usize = size_of::(); + let short = self.services.globals.next_count().unwrap(); + debug_assert!(size_of_val(&short) == BUFSIZE, "buffer requirement changed"); + self.db .roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes()); + .raw_aput::(room_id, short); + short }) } diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 7265038f..813f48ae 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -36,12 +36,12 @@ impl Data { _mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex ) { self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes()); + .raw_put(room_id, new_shortstatehash); } pub(super) fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) { self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes()); + .put(shorteventid, shortstatehash); } pub(super) async fn set_forward_extremities( @@ -57,12 +57,9 @@ impl Data { .ready_for_each(|key| self.roomid_pduleaves.remove(key)) .await; - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - for event_id in event_ids { - let mut key = prefix.clone(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes()); + for event_id in &event_ids { + let key = (room_id, 
event_id); + self.roomid_pduleaves.put_raw(key, event_id); } } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 19f1f141..561db18a 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -481,4 +481,10 @@ impl Service { .await .map(|content: RoomEncryptionEventContent| content.algorithm) } + + pub async fn is_encrypted_room(&self, room_id: &RoomId) -> bool { + self.room_state_get(room_id, &StateEventType::RoomEncryption, "") + .await + .is_ok() + } } diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 6e01e49d..c06c8107 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -4,7 +4,7 @@ use std::{ }; use conduit::{utils::stream::TryIgnore, Result}; -use database::{Deserialized, Interfix, Map}; +use database::{serialize_to_vec, Deserialized, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, @@ -63,71 +63,62 @@ impl Data { } pub(super) fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - self.roomuseroncejoinedids.insert(&userroom_id, &[]); + let key = (user_id, room_id); + + self.roomuseroncejoinedids.put_raw(key, []); } pub(super) fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { - let roomid = room_id.as_bytes().to_vec(); + let userroom_id = (user_id, room_id); + let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); - let mut roomuser_id = roomid.clone(); - roomuser_id.push(0xFF); - roomuser_id.extend_from_slice(user_id.as_bytes()); + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); - let mut userroom_id = user_id.as_bytes().to_vec(); 
- userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); + self.userroomid_joined.insert(&userroom_id, []); + self.roomuserid_joined.insert(&roomuser_id, []); - self.userroomid_joined.insert(&userroom_id, &[]); - self.roomuserid_joined.insert(&roomuser_id, &[]); self.userroomid_invitestate.remove(&userroom_id); self.roomuserid_invitecount.remove(&roomuser_id); + self.userroomid_leftstate.remove(&userroom_id); self.roomuserid_leftcount.remove(&roomuser_id); - self.roomid_inviteviaservers.remove(&roomid); + self.roomid_inviteviaservers.remove(room_id); } pub(super) fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { - let roomid = room_id.as_bytes().to_vec(); + let userroom_id = (user_id, room_id); + let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); - let mut roomuser_id = roomid.clone(); - roomuser_id.push(0xFF); - roomuser_id.extend_from_slice(user_id.as_bytes()); + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); + // (timo) TODO + let leftstate = Vec::>::new(); + let count = self.services.globals.next_count().unwrap(); + + self.userroomid_leftstate + .raw_put(&userroom_id, Json(leftstate)); + self.roomuserid_leftcount.raw_put(&roomuser_id, count); - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - ); // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &self.services.globals.next_count().unwrap().to_be_bytes()); self.userroomid_joined.remove(&userroom_id); self.roomuserid_joined.remove(&roomuser_id); + self.userroomid_invitestate.remove(&userroom_id); self.roomuserid_invitecount.remove(&roomuser_id); - self.roomid_inviteviaservers.remove(&roomid); + self.roomid_inviteviaservers.remove(room_id); } /// Makes a user forget a 
room. #[tracing::instrument(skip(self), level = "debug")] pub(super) fn forget(&self, room_id: &RoomId, user_id: &UserId) { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); + let userroom_id = (user_id, room_id); + let roomuser_id = (room_id, user_id); - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xFF); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id); - self.roomuserid_leftcount.remove(&roomuser_id); + self.userroomid_leftstate.del(userroom_id); + self.roomuserid_leftcount.del(roomuser_id); } /// Returns an iterator over all rooms a user was invited to. diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index edfae529..077eee10 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -3,13 +3,13 @@ mod data; use std::{collections::HashSet, sync::Arc}; use conduit::{ - err, + err, is_not_empty, utils::{stream::TryIgnore, ReadyExt, StreamTools}, warn, Result, }; use data::Data; -use database::{Deserialized, Ignore, Interfix}; -use futures::{Stream, StreamExt}; +use database::{serialize_to_vec, Deserialized, Ignore, Interfix, Json}; +use futures::{stream::iter, Stream, StreamExt}; use itertools::Itertools; use ruma::{ events::{ @@ -547,50 +547,37 @@ impl Service { .unwrap_or(0), ); - self.db - .roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes()); - - self.db - .roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes()); + self.db.roomid_joinedcount.raw_put(room_id, joinedcount); + self.db.roomid_invitedcount.raw_put(room_id, invitedcount); self.room_servers(room_id) .ready_for_each(|old_joined_server| { - if !joined_servers.remove(old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xFF); - 
roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xFF); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.db.roomserverids.remove(&roomserver_id); - self.db.serverroomids.remove(&serverroom_id); + if joined_servers.remove(old_joined_server) { + return; } + + // Server not in room anymore + let roomserver_id = (room_id, old_joined_server); + let serverroom_id = (old_joined_server, room_id); + + self.db.roomserverids.del(roomserver_id); + self.db.serverroomids.del(serverroom_id); }) .await; // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xFF); - roomserver_id.extend_from_slice(server.as_bytes()); + for server in &joined_servers { + let roomserver_id = (room_id, server); + let serverroom_id = (server, room_id); - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xFF); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.db.roomserverids.insert(&roomserver_id, &[]); - self.db.serverroomids.insert(&serverroom_id, &[]); + self.db.roomserverids.put_raw(roomserver_id, []); + self.db.serverroomids.put_raw(serverroom_id, []); } self.db .appservice_in_room_cache .write() - .unwrap() + .expect("locked") .remove(room_id); } @@ -598,44 +585,44 @@ impl Service { &self, user_id: &UserId, room_id: &RoomId, last_state: Option>>, invite_via: Option>, ) { - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xFF); - roomuser_id.extend_from_slice(user_id.as_bytes()); + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); + let userroom_id = (user_id, room_id); + let userroom_id = 
serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); + + self.db + .userroomid_invitestate + .raw_put(&userroom_id, Json(last_state.unwrap_or_default())); - self.db.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()).expect("state to bytes always works"), - ); self.db .roomuserid_invitecount - .insert(&roomuser_id, &self.services.globals.next_count().unwrap().to_be_bytes()); + .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); + self.db.userroomid_joined.remove(&userroom_id); self.db.roomuserid_joined.remove(&roomuser_id); + self.db.userroomid_leftstate.remove(&userroom_id); self.db.roomuserid_leftcount.remove(&roomuser_id); - if let Some(servers) = invite_via.as_deref() { + if let Some(servers) = invite_via.filter(is_not_empty!()) { self.add_servers_invite_via(room_id, servers).await; } } #[tracing::instrument(skip(self, servers), level = "debug")] - pub async fn add_servers_invite_via(&self, room_id: &RoomId, servers: &[OwnedServerName]) { - let mut prev_servers: Vec<_> = self + pub async fn add_servers_invite_via(&self, room_id: &RoomId, servers: Vec) { + let mut servers: Vec<_> = self .servers_invite_via(room_id) .map(ToOwned::to_owned) + .chain(iter(servers.into_iter())) .collect() .await; - prev_servers.extend(servers.to_owned()); - prev_servers.sort_unstable(); - prev_servers.dedup(); + servers.sort_unstable(); + servers.dedup(); - let servers = prev_servers + let servers = servers .iter() .map(|server| server.as_bytes()) .collect_vec() diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index cb85cf19..c51b7856 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -11,7 +11,7 @@ use conduit::{ utils::{stream::TryIgnore, u64_from_u8, ReadyExt}, Err, PduCount, PduEvent, Result, }; -use database::{Database, Deserialized, KeyVal, Map}; +use database::{Database, Deserialized, Json, KeyVal, 
Map}; use futures::{FutureExt, Stream, StreamExt}; use ruma::{CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use tokio::sync::Mutex; @@ -168,10 +168,7 @@ impl Data { } pub(super) async fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), - ); + self.pduid_pdu.raw_put(pdu_id, Json(json)); self.lasttimelinecount_cache .lock() @@ -183,13 +180,10 @@ impl Data { } pub(super) fn prepend_backfill_pdu(&self, pdu_id: &[u8], event_id: &EventId, json: &CanonicalJsonObject) { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), - ); + self.pduid_pdu.raw_put(pdu_id, Json(json)); - self.eventid_pduid.insert(event_id.as_bytes(), pdu_id); - self.eventid_outlierpdu.remove(event_id.as_bytes()); + self.eventid_pduid.insert(event_id, pdu_id); + self.eventid_outlierpdu.remove(event_id); } /// Removes a pdu and creates a new one with the same id. 
@@ -328,5 +322,5 @@ pub(super) fn pdu_count(pdu_id: &[u8]) -> PduCount { fn increment(db: &Arc, key: &[u8]) { let old = db.get_blocking(key); let new = utils::increment(old.ok().as_deref()); - db.insert(key, &new); + db.insert(key, new); } diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index d4d9874c..96b009f8 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -38,20 +38,13 @@ impl Data { } pub(super) fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xFF); - roomuser_id.extend_from_slice(user_id.as_bytes()); + let userroom_id = (user_id, room_id); + self.userroomid_highlightcount.put(userroom_id, 0_u64); + self.userroomid_notificationcount.put(userroom_id, 0_u64); - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes()); - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes()); - - self.roomuserid_lastnotificationread - .insert(&roomuser_id, &self.services.globals.next_count().unwrap().to_be_bytes()); + let roomuser_id = (room_id, user_id); + let count = self.services.globals.next_count().unwrap(); + self.roomuserid_lastnotificationread.put(roomuser_id, count); } pub(super) async fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { @@ -89,11 +82,8 @@ impl Data { .await .expect("room exists"); - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()); + let key: &[u64] = &[shortroomid, token]; + self.roomsynctoken_shortstatehash.put(key, shortstatehash); } pub(super) async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { diff --git a/src/service/sending/data.rs 
b/src/service/sending/data.rs index 96d4a6a9..f75a212c 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -146,8 +146,7 @@ impl Data { } pub(super) fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) { - self.servername_educount - .insert(server_name.as_bytes(), &last_count.to_be_bytes()); + self.servername_educount.raw_put(server_name, last_count); } pub async fn get_latest_educount(&self, server_name: &ServerName) -> u64 { diff --git a/src/service/sending/dest.rs b/src/service/sending/dest.rs index 9968acd7..234a0b90 100644 --- a/src/service/sending/dest.rs +++ b/src/service/sending/dest.rs @@ -12,7 +12,7 @@ pub enum Destination { #[implement(Destination)] #[must_use] -pub fn get_prefix(&self) -> Vec { +pub(super) fn get_prefix(&self) -> Vec { match self { Self::Normal(server) => { let len = server.as_bytes().len().saturating_add(1); diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index f75f1bcd..d2865d88 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -8,7 +8,7 @@ use conduit::{ utils::{hash, string::EMPTY}, Error, Result, }; -use database::{Deserialized, Map}; +use database::{Deserialized, Json, Map}; use ruma::{ api::client::{ error::ErrorKind, @@ -217,21 +217,14 @@ pub fn get_uiaa_request( #[implement(Service)] fn update_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: &str, uiaainfo: Option<&UiaaInfo>) { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xFF); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xFF); - userdevicesessionid.extend_from_slice(session.as_bytes()); + let key = (user_id, device_id, session); if let Some(uiaainfo) = uiaainfo { - self.db.userdevicesessionid_uiaainfo.insert( - &userdevicesessionid, - &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), - ); - } else { self.db .userdevicesessionid_uiaainfo - 
.remove(&userdevicesessionid); + .put(key, Json(uiaainfo)); + } else { + self.db.userdevicesessionid_uiaainfo.del(key); } } diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index fca63725..adc85fe6 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -121,10 +121,7 @@ impl Service { } #[inline] - pub fn update_check_for_updates_id(&self, id: u64) { - self.db - .insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes()); - } + pub fn update_check_for_updates_id(&self, id: u64) { self.db.raw_put(LAST_CHECK_FOR_UPDATES_COUNT, id); } pub async fn last_check_for_updates_id(&self) -> u64 { self.db diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index a99a7df4..589aee8a 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -3,18 +3,19 @@ use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; use conduit::{ debug_warn, err, utils, utils::{stream::TryIgnore, string::Unquoted, ReadyExt}, - warn, Err, Error, Result, Server, + Err, Error, Result, Server, }; -use database::{Deserialized, Ignore, Interfix, Map}; -use futures::{pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType, StateEventType}, + events::{ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType}, serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, OwnedUserId, UInt, UserId, }; +use serde_json::json; use crate::{account_data, admin, globals, rooms, Dep}; @@ -194,22 +195,16 @@ impl Service { /// Hash and set the user's password to the Argon2 hash pub fn set_password(&self, 
user_id: &UserId, password: Option<&str>) -> Result<()> { - if let Some(password) = password { - if let Ok(hash) = utils::hash::password(password) { - self.db - .userid_password - .insert(user_id.as_bytes(), hash.as_bytes()); - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Password does not meet the requirements.", - )) - } - } else { - self.db.userid_password.insert(user_id.as_bytes(), b""); - Ok(()) - } + password + .map(utils::hash::password) + .transpose() + .map_err(|e| err!(Request(InvalidParam("Password does not meet the requirements: {e}"))))? + .map_or_else( + || self.db.userid_password.insert(user_id, b""), + |hash| self.db.userid_password.insert(user_id, hash), + ); + + Ok(()) } /// Returns the displayname of a user on this homeserver. @@ -221,11 +216,9 @@ impl Service { /// need to nofify all rooms of this change. pub fn set_displayname(&self, user_id: &UserId, displayname: Option) { if let Some(displayname) = displayname { - self.db - .userid_displayname - .insert(user_id.as_bytes(), displayname.as_bytes()); + self.db.userid_displayname.insert(user_id, displayname); } else { - self.db.userid_displayname.remove(user_id.as_bytes()); + self.db.userid_displayname.remove(user_id); } } @@ -237,11 +230,9 @@ impl Service { /// Sets a new avatar_url or removes it if avatar_url is None. pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) { if let Some(avatar_url) = avatar_url { - self.db - .userid_avatarurl - .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes()); + self.db.userid_avatarurl.insert(user_id, &avatar_url); } else { - self.db.userid_avatarurl.remove(user_id.as_bytes()); + self.db.userid_avatarurl.remove(user_id); } } @@ -253,11 +244,9 @@ impl Service { /// Sets a new avatar_url or removes it if avatar_url is None. 
pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) { if let Some(blurhash) = blurhash { - self.db - .userid_blurhash - .insert(user_id.as_bytes(), blurhash.as_bytes()); + self.db.userid_blurhash.insert(user_id, blurhash); } else { - self.db.userid_blurhash.remove(user_id.as_bytes()); + self.db.userid_blurhash.remove(user_id); } } @@ -269,41 +258,29 @@ impl Service { // This method should never be called for nonexistent users. We shouldn't assert // though... if !self.exists(user_id).await { - warn!("Called create_device for non-existent user {} in database", user_id); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "User does not exist.")); + return Err!(Request(InvalidParam(error!("Called create_device for non-existent {user_id}")))); } - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); + let key = (user_id, device_id); + let val = Device { + device_id: device_id.into(), + display_name: initial_device_display_name, + last_seen_ip: client_ip, + last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), + }; increment(&self.db.userid_devicelistversion, user_id.as_bytes()); - - self.db.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(&Device { - device_id: device_id.into(), - display_name: initial_device_display_name, - last_seen_ip: client_ip, - last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), - }) - .expect("Device::to_string never fails."), - ); - - self.set_token(user_id, device_id, token).await?; - - Ok(()) + self.db.userdeviceid_metadata.put(key, Json(val)); + self.set_token(user_id, device_id, token).await } /// Removes a device from a user. 
pub async fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); + let userdeviceid = (user_id, device_id); // Remove tokens - if let Ok(old_token) = self.db.userdeviceid_token.get(&userdeviceid).await { - self.db.userdeviceid_token.remove(&userdeviceid); + if let Ok(old_token) = self.db.userdeviceid_token.qry(&userdeviceid).await { + self.db.userdeviceid_token.del(userdeviceid); self.db.token_userdeviceid.remove(&old_token); } @@ -320,7 +297,7 @@ impl Service { increment(&self.db.userid_devicelistversion, user_id.as_bytes()); - self.db.userdeviceid_metadata.remove(&userdeviceid); + self.db.userdeviceid_metadata.del(userdeviceid); } /// Returns an iterator over all device ids of this user. @@ -333,6 +310,11 @@ impl Service { .map(|(_, device_id): (Ignore, &DeviceId)| device_id) } + pub async fn get_token(&self, user_id: &UserId, device_id: &DeviceId) -> Result { + let key = (user_id, device_id); + self.db.userdeviceid_token.qry(&key).await.deserialized() + } + /// Replaces the access token of one device. 
pub async fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let key = (user_id, device_id); @@ -352,15 +334,8 @@ impl Service { } // Assign token to user device combination - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - self.db - .userdeviceid_token - .insert(&userdeviceid, token.as_bytes()); - self.db - .token_userdeviceid - .insert(token.as_bytes(), &userdeviceid); + self.db.userdeviceid_token.put_raw(key, token); + self.db.token_userdeviceid.raw_put(token, key); Ok(()) } @@ -393,14 +368,12 @@ impl Service { .as_bytes(), ); - self.db.onetimekeyid_onetimekeys.insert( - &key, - &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), - ); - self.db - .userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &self.services.globals.next_count()?.to_be_bytes()); + .onetimekeyid_onetimekeys + .raw_put(key, Json(one_time_key_value)); + + let count = self.services.globals.next_count().unwrap(); + self.db.userid_lastonetimekeyupdate.raw_put(user_id, count); Ok(()) } @@ -417,9 +390,8 @@ impl Service { pub async fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, ) -> Result<(OwnedDeviceKeyId, Raw)> { - self.db - .userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &self.services.globals.next_count()?.to_be_bytes()); + let count = self.services.globals.next_count()?.to_be_bytes(); + self.db.userid_lastonetimekeyupdate.insert(user_id, count); let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xFF); @@ -488,15 +460,9 @@ impl Service { } pub async fn add_device_keys(&self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw) { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.db.keyid_key.insert( - &userdeviceid, - 
&serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), - ); + let key = (user_id, device_id); + self.db.keyid_key.put(key, Json(device_keys)); self.mark_device_key_update(user_id).await; } @@ -611,13 +577,8 @@ impl Service { .ok_or_else(|| err!(Database("signatures in keyid_key for a user is invalid.")))? .insert(signature.0, signature.1.into()); - let mut key = target_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(key_id.as_bytes()); - self.db.keyid_key.insert( - &key, - &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), - ); + let key = (target_id, key_id); + self.db.keyid_key.put(key, Json(cross_signing_key)); self.mark_device_key_update(target_id).await; @@ -640,34 +601,21 @@ impl Service { } pub async fn mark_device_key_update(&self, user_id: &UserId) { - let count = self.services.globals.next_count().unwrap().to_be_bytes(); + let count = self.services.globals.next_count().unwrap(); - let rooms_joined = self.services.state_cache.rooms_joined(user_id); - - pin_mut!(rooms_joined); - while let Some(room_id) = rooms_joined.next().await { + self.services + .state_cache + .rooms_joined(user_id) // Don't send key updates to unencrypted rooms - if self - .services - .state_accessor - .room_state_get(room_id, &StateEventType::RoomEncryption, "") - .await - .is_err() - { - continue; - } + .filter(|room_id| self.services.state_accessor.is_encrypted_room(room_id)) + .ready_for_each(|room_id| { + let key = (room_id, count); + self.db.keychangeid_userid.put_raw(key, user_id); + }) + .await; - let mut key = room_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(&count); - - self.db.keychangeid_userid.insert(&key, user_id.as_bytes()); - } - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(&count); - self.db.keychangeid_userid.insert(&key, user_id.as_bytes()); + let key = (user_id, count); + self.db.keychangeid_userid.put_raw(key, user_id); } pub 
async fn get_device_keys<'a>(&'a self, user_id: &'a UserId, device_id: &DeviceId) -> Result> { @@ -681,12 +629,7 @@ impl Service { where F: Fn(&UserId) -> bool + Send + Sync, { - let key = self - .db - .keyid_key - .get(key_id) - .await - .deserialized::()?; + let key: serde_json::Value = self.db.keyid_key.get(key_id).await.deserialized()?; let cleaned = clean_signatures(key, sender_user, user_id, allowed_signatures)?; let raw_value = serde_json::value::to_raw_value(&cleaned)?; @@ -718,29 +661,29 @@ impl Service { } pub async fn get_user_signing_key(&self, user_id: &UserId) -> Result> { - let key_id = self.db.userid_usersigningkeyid.get(user_id).await?; - - self.db.keyid_key.get(&*key_id).await.deserialized() + self.db + .userid_usersigningkeyid + .get(user_id) + .and_then(|key_id| self.db.keyid_key.get(&*key_id)) + .await + .deserialized() } pub async fn add_to_device_event( &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, ) { - let mut key = target_user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(target_device_id.as_bytes()); - key.push(0xFF); - key.extend_from_slice(&self.services.globals.next_count().unwrap().to_be_bytes()); + let count = self.services.globals.next_count().unwrap(); - let mut json = serde_json::Map::new(); - json.insert("type".to_owned(), event_type.to_owned().into()); - json.insert("sender".to_owned(), sender.to_string().into()); - json.insert("content".to_owned(), content); - - let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); - - self.db.todeviceid_events.insert(&key, &value); + let key = (target_user_id, target_device_id, count); + self.db.todeviceid_events.put( + key, + Json(json!({ + "type": event_type, + "sender": sender, + "content": content, + })), + ); } pub fn get_to_device_events<'a>( @@ -783,13 +726,8 @@ impl Service { pub async fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: 
&Device) -> Result<()> { increment(&self.db.userid_devicelistversion, user_id.as_bytes()); - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xFF); - userdeviceid.extend_from_slice(device_id.as_bytes()); - self.db.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(device).expect("Device::to_string always works"), - ); + let key = (user_id, device_id); + self.db.userdeviceid_metadata.put(key, Json(device)); Ok(()) } @@ -824,23 +762,15 @@ impl Service { pub fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> String { let filter_id = utils::random_string(4); - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(filter_id.as_bytes()); - - self.db - .userfilterid_filter - .insert(&key, &serde_json::to_vec(&filter).expect("filter is valid json")); + let key = (user_id, &filter_id); + self.db.userfilterid_filter.put(key, Json(filter)); filter_id } pub async fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result { - self.db - .userfilterid_filter - .qry(&(user_id, filter_id)) - .await - .deserialized() + let key = (user_id, filter_id); + self.db.userfilterid_filter.qry(&key).await.deserialized() } /// Creates an OpenID token, which can be used to prove that a user has @@ -913,17 +843,13 @@ impl Service { /// Sets a new profile key value, removes the key if value is None pub fn set_profile_key(&self, user_id: &UserId, profile_key: &str, profile_key_value: Option) { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(profile_key.as_bytes()); - // TODO: insert to the stable MSC4175 key when it's stable - if let Some(value) = profile_key_value { - let value = serde_json::to_vec(&value).unwrap(); + let key = (user_id, profile_key); - self.db.useridprofilekey_value.insert(&key, &value); + if let Some(value) = profile_key_value { + self.db.useridprofilekey_value.put(key, value); } else { - self.db.useridprofilekey_value.remove(&key); + 
self.db.useridprofilekey_value.del(key); } } @@ -945,17 +871,13 @@ impl Service { /// Sets a new timezone or removes it if timezone is None. pub fn set_timezone(&self, user_id: &UserId, timezone: Option) { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(b"us.cloke.msc4175.tz"); - // TODO: insert to the stable MSC4175 key when it's stable + let key = (user_id, "us.cloke.msc4175.tz"); + if let Some(timezone) = timezone { - self.db - .useridprofilekey_value - .insert(&key, timezone.as_bytes()); + self.db.useridprofilekey_value.put_raw(key, &timezone); } else { - self.db.useridprofilekey_value.remove(&key); + self.db.useridprofilekey_value.del(key); } } } @@ -1012,5 +934,5 @@ where fn increment(db: &Arc, key: &[u8]) { let old = db.get_blocking(key); let new = utils::increment(old.ok().as_deref()); - db.insert(key, &new); + db.insert(key, new); } From 89b5c4ee1c6cfd662a60d704b1ffec736d7a3600 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 11 Oct 2024 20:32:32 +0000 Subject: [PATCH 0080/1248] add timepoint_from_now to complement timepoint_ago in utils Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 2 +- src/core/utils/time.rs | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 4dbecf91..13221341 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -34,7 +34,7 @@ pub use self::{ stream::{IterStream, ReadyExt, Tools as StreamTools, TryReadyExt}, string::{str_from_bytes, string_from_bytes}, sys::available_parallelism, - time::now_millis as millis_since_unix_epoch, + time::{now_millis as millis_since_unix_epoch, timepoint_ago, timepoint_from_now}, }; #[inline] diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index 04f47ac3..f96a27d0 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -22,6 +22,13 @@ pub fn timepoint_ago(duration: Duration) -> Result { .ok_or_else(|| err!(Arithmetic("Duration {duration:?} is too 
large"))) } +#[inline] +pub fn timepoint_from_now(duration: Duration) -> Result { + SystemTime::now() + .checked_add(duration) + .ok_or_else(|| err!(Arithmetic("Duration {duration:?} is too large"))) +} + #[inline] pub fn parse_duration(duration: &str) -> Result { cyborgtime::parse_duration(duration) From 1a09eb0f0235a1dfe7c51f525f656cffad62b60d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 13 Oct 2024 00:57:08 +0000 Subject: [PATCH 0081/1248] use string::EMPTY; minor formatting and misc cleanups Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 12 ++++++++---- src/api/client/membership.rs | 11 +++++++---- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 350e08c6..fd8c39f7 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,7 @@ use std::{ }; use api::client::validate_and_add_event_id; -use conduit::{debug, debug_error, err, info, trace, utils, warn, Error, PduEvent, Result}; +use conduit::{debug, debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result}; use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::event::get_room_state}, @@ -57,7 +57,9 @@ pub(super) async fn get_auth_chain(&self, event_id: Box) -> Result Result { - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&EMPTY).trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. 
Add --help for details.", @@ -134,7 +136,9 @@ pub(super) async fn get_remote_pdu_list( )); } - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&EMPTY).trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. Add --help for details.", @@ -843,7 +847,7 @@ pub(super) async fn database_stats( &self, property: Option, map: Option, ) -> Result { let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); - let map_name = map.as_ref().map_or(utils::string::EMPTY, String::as_str); + let map_name = map.as_ref().map_or(EMPTY, String::as_str); let mut out = String::new(); for (name, map) in self.services.db.iter_maps() { diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 06035592..a7a5b166 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -658,13 +658,16 @@ pub async fn join_room_by_id_helper( }); } - if services + let server_in_room = services .rooms .state_cache .server_in_room(services.globals.server_name(), room_id) - .await || servers.is_empty() - || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])) - { + .await; + + let local_join = + server_in_room || servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_join { join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, third_party_signed, state_lock) .boxed() .await From d82ea331cfdcd51c2c746618deb26e1fd220abc0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 14 Oct 2024 05:16:18 +0000 Subject: [PATCH 0082/1248] add random shuffle util Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 2 +- src/core/utils/rand.rs | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 
13221341..3adecc6c 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -30,7 +30,7 @@ pub use self::{ json::{deserialize_from_str, to_canonical_object}, math::clamp, mutex_map::{Guard as MutexMapGuard, MutexMap}, - rand::string as random_string, + rand::{shuffle, string as random_string}, stream::{IterStream, ReadyExt, Tools as StreamTools, TryReadyExt}, string::{str_from_bytes, string_from_bytes}, sys::available_parallelism, diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs index d717c4bd..9e6fc7a8 100644 --- a/src/core/utils/rand.rs +++ b/src/core/utils/rand.rs @@ -4,7 +4,12 @@ use std::{ }; use arrayvec::ArrayString; -use rand::{thread_rng, Rng}; +use rand::{seq::SliceRandom, thread_rng, Rng}; + +pub fn shuffle(vec: &mut [T]) { + let mut rng = thread_rng(); + vec.shuffle(&mut rng); +} pub fn string(length: usize) -> String { thread_rng() From c0939c3e9a9d7c193e8092333cd9289499540463 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 11 Oct 2024 18:57:59 +0000 Subject: [PATCH 0083/1248] Refactor server_keys service/interface and related callsites Signed-off-by: Jason Volk Signed-off-by: strawberry --- Cargo.lock | 26 +- Cargo.toml | 2 +- src/admin/debug/commands.rs | 173 +++---- src/admin/debug/mod.rs | 17 +- src/admin/query/globals.rs | 13 +- src/api/client/membership.rs | 252 +++------ src/api/client/mod.rs | 2 +- src/api/router/args.rs | 2 +- src/api/router/auth.rs | 222 ++++---- src/api/server/invite.rs | 11 +- src/api/server/key.rs | 70 +-- src/api/server/send.rs | 22 +- src/api/server/send_join.rs | 24 +- src/api/server/send_leave.rs | 13 +- src/core/config/mod.rs | 28 - src/core/error/mod.rs | 4 + src/core/pdu/mod.rs | 25 +- src/service/globals/data.rs | 118 +---- src/service/globals/mod.rs | 50 +- src/service/rooms/event_handler/mod.rs | 105 ++-- src/service/rooms/timeline/mod.rs | 45 +- src/service/sending/mod.rs | 4 +- src/service/sending/send.rs | 24 +- src/service/server_keys/acquire.rs | 175 +++++++ 
src/service/server_keys/get.rs | 86 ++++ src/service/server_keys/keypair.rs | 64 +++ src/service/server_keys/mod.rs | 678 +++++-------------------- src/service/server_keys/request.rs | 97 ++++ src/service/server_keys/sign.rs | 18 + src/service/server_keys/verify.rs | 33 ++ 30 files changed, 1025 insertions(+), 1378 deletions(-) create mode 100644 src/service/server_keys/acquire.rs create mode 100644 src/service/server_keys/get.rs create mode 100644 src/service/server_keys/keypair.rs create mode 100644 src/service/server_keys/request.rs create mode 100644 src/service/server_keys/sign.rs create mode 100644 src/service/server_keys/verify.rs diff --git a/Cargo.lock b/Cargo.lock index db1394ce..4ac7cc35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2976,7 +2976,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "assign", "js_int", @@ -2998,7 +2998,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "js_int", "ruma-common", @@ -3010,7 +3010,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "as_variant", "assign", @@ -3033,7 
+3033,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "as_variant", "base64 0.22.1", @@ -3063,7 +3063,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3087,7 +3087,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "bytes", "http", @@ -3105,7 +3105,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "js_int", "thiserror", @@ -3114,7 +3114,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "js_int", "ruma-common", @@ -3124,7 +3124,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "cfg-if", "once_cell", @@ -3140,7 +3140,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "js_int", "ruma-common", @@ -3152,7 +3152,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "headers", "http", @@ -3165,7 +3165,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3181,7 +3181,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=3109496a1f91357c89cbb57cf86f179e2cb013e7#3109496a1f91357c89cbb57cf86f179e2cb013e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" dependencies = [ "futures-util", "itertools 0.13.0", diff --git a/Cargo.toml b/Cargo.toml index 0a98befd..966c2818 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -315,7 +315,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "3109496a1f91357c89cbb57cf86f179e2cb013e7" +rev = "d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" features = [ "compat", "rand", diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index fd8c39f7..7fe8addf 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -1,19 +1,17 @@ use std::{ - collections::{BTreeMap, HashMap}, + collections::HashMap, fmt::Write, sync::Arc, time::{Instant, SystemTime}, }; -use api::client::validate_and_add_event_id; -use conduit::{debug, debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result}; +use conduit::{debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result}; use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::event::get_room_state}, events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName, }; -use tokio::sync::RwLock; use tracing_subscriber::EnvFilter; use crate::admin_command; @@ -219,7 +217,7 @@ pub(super) async fn get_remote_pdu( })?; trace!("Attempting to parse PDU: {:?}", &response.pdu); - let parsed_pdu = { + let _parsed_pdu = { let parsed_result = self .services .rooms @@ -241,22 +239,11 @@ pub(super) async fn get_remote_pdu( vec![(event_id, value, room_id)] }; - let pub_key_map = RwLock::new(BTreeMap::new()); - - debug!("Attempting to fetch homeserver 
signing keys for {server}"); - self.services - .server_keys - .fetch_required_signing_keys(parsed_pdu.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map) - .await - .unwrap_or_else(|e| { - warn!("Could not fetch all signatures for PDUs from {server}: {e:?}"); - }); - info!("Attempting to handle event ID {event_id} as backfilled PDU"); self.services .rooms .timeline - .backfill_pdu(&server, response.pdu, &pub_key_map) + .backfill_pdu(&server, response.pdu) .await?; let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); @@ -433,12 +420,10 @@ pub(super) async fn sign_json(&self) -> Result { let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str(&string) { Ok(mut value) => { - ruma::signatures::sign_json( - self.services.globals.server_name().as_str(), - self.services.globals.keypair(), - &mut value, - ) - .expect("our request json is what ruma expects"); + self.services + .server_keys + .sign_json(&mut value) + .expect("our request json is what ruma expects"); let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json"); Ok(RoomMessageEventContent::text_plain(json_text)) }, @@ -456,27 +441,31 @@ pub(super) async fn verify_json(&self) -> Result { } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - let pub_key_map = RwLock::new(BTreeMap::new()); - - self.services - .server_keys - .fetch_required_signing_keys([&value], &pub_key_map) - .await?; - - let pub_key_map = pub_key_map.read().await; - match ruma::signatures::verify_json(&pub_key_map, &value) { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Signature verification failed: {e}" - ))), - } + match serde_json::from_str::(&string) { + Ok(value) => match self.services.server_keys.verify_json(&value, None).await { + 
Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Signature verification failed: {e}" + ))), }, Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } } +#[admin_command] +pub(super) async fn verify_pdu(&self, event_id: Box) -> Result { + let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; + + event.remove("event_id"); + let msg = match self.services.server_keys.verify_event(&event, None).await { + Ok(ruma::signatures::Verified::Signatures) => "signatures OK, but content hash failed (redaction).", + Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.", + Err(e) => return Err(e), + }; + + Ok(RoomMessageEventContent::notice_plain(msg)) +} + #[admin_command] #[tracing::instrument(skip(self))] pub(super) async fn first_pdu_in_room(&self, room_id: Box) -> Result { @@ -557,7 +546,6 @@ pub(super) async fn force_set_room_state_from_server( let room_version = self.services.rooms.state.get_room_version(&room_id).await?; let mut state: HashMap> = HashMap::new(); - let pub_key_map = RwLock::new(BTreeMap::new()); let remote_state_response = self .services @@ -571,38 +559,28 @@ pub(super) async fn force_set_room_state_from_server( ) .await?; - let mut events = Vec::with_capacity(remote_state_response.pdus.len()); - for pdu in remote_state_response.pdus.clone() { - events.push( - match self - .services - .rooms - .event_handler - .parse_incoming_pdu(&pdu) - .await - { - Ok(t) => t, - Err(e) => { - warn!("Could not parse PDU, ignoring: {e}"); - continue; - }, + match self + .services + .rooms + .event_handler + .parse_incoming_pdu(&pdu) + .await + { + Ok(t) => t, + Err(e) => { + warn!("Could not parse PDU, ignoring: {e}"); + continue; }, - ); + }; } - info!("Fetching required signing keys for all the state events we got"); - self.services - .server_keys - .fetch_required_signing_keys(events.iter().map(|(_event_id, event, _room_id)| 
event), &pub_key_map) - .await?; - info!("Going through room_state response PDUs"); - for result in remote_state_response - .pdus - .iter() - .map(|pdu| validate_and_add_event_id(self.services, pdu, &room_version, &pub_key_map)) - { + for result in remote_state_response.pdus.iter().map(|pdu| { + self.services + .server_keys + .validate_and_add_event_id(pdu, &room_version) + }) { let Ok((event_id, value)) = result.await else { continue; }; @@ -630,11 +608,11 @@ pub(super) async fn force_set_room_state_from_server( } info!("Going through auth_chain response"); - for result in remote_state_response - .auth_chain - .iter() - .map(|pdu| validate_and_add_event_id(self.services, pdu, &room_version, &pub_key_map)) - { + for result in remote_state_response.auth_chain.iter().map(|pdu| { + self.services + .server_keys + .validate_and_add_event_id(pdu, &room_version) + }) { let Ok((event_id, value)) = result.await else { continue; }; @@ -686,10 +664,33 @@ pub(super) async fn force_set_room_state_from_server( #[admin_command] pub(super) async fn get_signing_keys( - &self, server_name: Option>, _cached: bool, + &self, server_name: Option>, notary: Option>, query: bool, ) -> Result { let server_name = server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); - let signing_keys = self.services.globals.signing_keys_for(&server_name).await?; + + if let Some(notary) = notary { + let signing_keys = self + .services + .server_keys + .notary_request(¬ary, &server_name) + .await?; + + return Ok(RoomMessageEventContent::notice_markdown(format!( + "```rs\n{signing_keys:#?}\n```" + ))); + } + + let signing_keys = if query { + self.services + .server_keys + .server_request(&server_name) + .await? + } else { + self.services + .server_keys + .signing_keys_for(&server_name) + .await? 
+ }; Ok(RoomMessageEventContent::notice_markdown(format!( "```rs\n{signing_keys:#?}\n```" @@ -697,34 +698,20 @@ pub(super) async fn get_signing_keys( } #[admin_command] -#[allow(dead_code)] -pub(super) async fn get_verify_keys( - &self, server_name: Option>, cached: bool, -) -> Result { +pub(super) async fn get_verify_keys(&self, server_name: Option>) -> Result { let server_name = server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); - let mut out = String::new(); - if cached { - writeln!(out, "| Key ID | VerifyKey |")?; - writeln!(out, "| --- | --- |")?; - for (key_id, verify_key) in self.services.globals.verify_keys_for(&server_name).await? { - writeln!(out, "| {key_id} | {verify_key:?} |")?; - } - - return Ok(RoomMessageEventContent::notice_markdown(out)); - } - - let signature_ids: Vec = Vec::new(); let keys = self .services .server_keys - .fetch_signing_keys_for_server(&server_name, signature_ids) - .await?; + .verify_keys_for(&server_name) + .await; + let mut out = String::new(); writeln!(out, "| Key ID | Public Key |")?; writeln!(out, "| --- | --- |")?; for (key_id, key) in keys { - writeln!(out, "| {key_id} | {key} |")?; + writeln!(out, "| {key_id} | {key:?} |")?; } Ok(RoomMessageEventContent::notice_markdown(out)) diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 20ddbf2f..b74e9c36 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -80,8 +80,16 @@ pub(super) enum DebugCommand { GetSigningKeys { server_name: Option>, + #[arg(long)] + notary: Option>, + #[arg(short, long)] - cached: bool, + query: bool, + }, + + /// - Get and display signing keys from local cache or remote server. + GetVerifyKeys { + server_name: Option>, }, /// - Sends a federation request to the remote server's @@ -119,6 +127,13 @@ pub(super) enum DebugCommand { /// the command. VerifyJson, + /// - Verify PDU + /// + /// This re-verifies a PDU existing in the database found by ID. 
+ VerifyPdu { + event_id: Box, + }, + /// - Prints the very first PDU in the specified room (typically /// m.room.create) FirstPduInRoom { diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 150a213c..837d34e6 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -13,8 +13,6 @@ pub(crate) enum GlobalsCommand { LastCheckForUpdatesId, - LoadKeypair, - /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. SigningKeysFor { @@ -54,20 +52,11 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - GlobalsCommand::LoadKeypair => { - let timer = tokio::time::Instant::now(); - let results = services.globals.db.load_keypair(); - let query_time = timer.elapsed(); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) - }, GlobalsCommand::SigningKeysFor { origin, } => { let timer = tokio::time::Instant::now(); - let results = services.globals.db.verify_keys_for(&origin).await; + let results = services.server_keys.verify_keys_for(&origin).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index a7a5b166..2fa34ff7 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,17 +1,16 @@ use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, net::IpAddr, sync::Arc, - time::Instant, }; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduit::{ - debug, debug_error, debug_warn, err, error, info, + debug, debug_info, debug_warn, err, error, info, pdu, pdu::{gen_event_id_canonical_json, PduBuilder}, trace, utils, - utils::{math::continue_exponential_backoff_secs, IterStream, ReadyExt}, + utils::{IterStream, 
ReadyExt}, warn, Err, Error, PduEvent, Result, }; use futures::{FutureExt, StreamExt}; @@ -36,13 +35,10 @@ use ruma::{ }, StateEventType, }, - serde::Base64, - state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, + RoomVersionId, ServerName, UserId, }; -use serde_json::value::RawValue as RawJsonValue; use service::{appservice::RegistrationInfo, rooms::state::RoomMutexGuard, Services}; -use tokio::sync::RwLock; use crate::{client::full_user_deactivate, Ruma}; @@ -670,20 +666,22 @@ pub async fn join_room_by_id_helper( if local_join { join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, third_party_signed, state_lock) .boxed() - .await + .await?; } else { // Ask a remote server if we are not participating in this room join_room_by_id_helper_remote(services, sender_user, room_id, reason, servers, third_party_signed, state_lock) .boxed() - .await + .await?; } + + Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } #[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")] async fn join_room_by_id_helper_remote( services: &Services, sender_user: &UserId, room_id: &RoomId, reason: Option, servers: &[OwnedServerName], _third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard, -) -> Result { +) -> Result { info!("Joining {room_id} over federation."); let (make_join_response, remote_server) = make_join_request(services, sender_user, room_id, servers).await?; @@ -751,43 +749,33 @@ async fn join_room_by_id_helper_remote( // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present - ruma::signatures::hash_and_sign_event( - services.globals.server_name().as_str(), - services.globals.keypair(), - &mut join_event_stub, - &room_version_id, - ) - 
.expect("event is valid, we just created it"); + services + .server_keys + .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; // Generate event id - let event_id = format!( - "${}", - ruma::signatures::reference_hash(&join_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - ); - let event_id = <&EventId>::try_from(event_id.as_str()).expect("ruma's reference hashes are valid event ids"); + let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; // Add event_id back - join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); // It has enough fields to be called a proper event now let mut join_event = join_event_stub; info!("Asking {remote_server} for send_join in room {room_id}"); + let send_join_request = federation::membership::create_join_event::v2::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + omit_members: false, + pdu: services + .sending + .convert_to_outgoing_federation_event(join_event.clone()) + .await, + }; + let send_join_response = services .sending - .send_federation_request( - &remote_server, - federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.to_owned(), - omit_members: false, - pdu: services - .sending - .convert_to_outgoing_federation_event(join_event.clone()) - .await, - }, - ) + .send_federation_request(&remote_server, send_join_request) .await?; info!("send_join finished"); @@ -805,7 +793,7 @@ async fn join_room_by_id_helper_remote( // validate and send signatures _ => { if let Some(signed_raw) = &send_join_response.room_state.event { - info!( + debug_info!( "There is a signed event. This room is probably using restricted joins. 
Adding signature to \ our event" ); @@ -862,25 +850,25 @@ async fn join_room_by_id_helper_remote( .await; info!("Parsing join event"); - let parsed_join_pdu = PduEvent::from_id_val(event_id, join_event.clone()) + let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone()) .map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?; - let mut state = HashMap::new(); - let pub_key_map = RwLock::new(BTreeMap::new()); - - info!("Fetching join signing keys"); + info!("Acquiring server signing keys for response events"); + let resp_events = &send_join_response.room_state; + let resp_state = &resp_events.state; + let resp_auth = &resp_events.auth_chain; services .server_keys - .fetch_join_signing_keys(&send_join_response, &room_version_id, &pub_key_map) - .await?; + .acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter())) + .await; info!("Going through send_join response room_state"); - for result in send_join_response - .room_state - .state - .iter() - .map(|pdu| validate_and_add_event_id(services, pdu, &room_version_id, &pub_key_map)) - { + let mut state = HashMap::new(); + for result in send_join_response.room_state.state.iter().map(|pdu| { + services + .server_keys + .validate_and_add_event_id(pdu, &room_version_id) + }) { let Ok((event_id, value)) = result.await else { continue; }; @@ -902,12 +890,11 @@ async fn join_room_by_id_helper_remote( } info!("Going through send_join response auth_chain"); - for result in send_join_response - .room_state - .auth_chain - .iter() - .map(|pdu| validate_and_add_event_id(services, pdu, &room_version_id, &pub_key_map)) - { + for result in send_join_response.room_state.auth_chain.iter().map(|pdu| { + services + .server_keys + .validate_and_add_event_id(pdu, &room_version_id) + }) { let Ok((event_id, value)) = result.await else { continue; }; @@ -937,29 +924,22 @@ async fn join_room_by_id_helper_remote( return Err!(Request(Forbidden("Auth check failed"))); } - info!("Saving state from send_join"); 
+ info!("Compressing state from send_join"); + let compressed = state + .iter() + .stream() + .then(|(&k, id)| services.rooms.state_compressor.compress_state_event(k, id)) + .collect() + .await; + + debug!("Saving compressed state"); let (statehash_before_join, new, removed) = services .rooms .state_compressor - .save_state( - room_id, - Arc::new( - state - .into_iter() - .stream() - .then(|(k, id)| async move { - services - .rooms - .state_compressor - .compress_state_event(k, &id) - .await - }) - .collect() - .await, - ), - ) + .save_state(room_id, Arc::new(compressed)) .await?; + debug!("Forcing state for new room"); services .rooms .state @@ -1002,14 +982,14 @@ async fn join_room_by_id_helper_remote( .state .set_room_state(room_id, statehash_after_join, &state_lock); - Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) + Ok(()) } #[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")] async fn join_room_by_id_helper_local( services: &Services, sender_user: &UserId, room_id: &RoomId, reason: Option, servers: &[OwnedServerName], _third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard, -) -> Result { +) -> Result { debug!("We can join locally"); let join_rules_event_content = services @@ -1089,7 +1069,7 @@ async fn join_room_by_id_helper_local( ) .await { - Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())), + Ok(_) => return Ok(()), Err(e) => e, }; @@ -1159,24 +1139,15 @@ async fn join_room_by_id_helper_local( // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present - ruma::signatures::hash_and_sign_event( - services.globals.server_name().as_str(), - services.globals.keypair(), - &mut join_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); + services + .server_keys + .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; // Generate event id - let event_id = format!( - "${}", - 
ruma::signatures::reference_hash(&join_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - ); - let event_id = <&EventId>::try_from(event_id.as_str()).expect("ruma's reference hashes are valid event ids"); + let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; // Add event_id back - join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); // It has enough fields to be called a proper event now let join_event = join_event_stub; @@ -1187,7 +1158,7 @@ async fn join_room_by_id_helper_local( &remote_server, federation::membership::create_join_event::v2::Request { room_id: room_id.to_owned(), - event_id: event_id.to_owned(), + event_id: event_id.clone(), omit_members: false, pdu: services .sending @@ -1214,15 +1185,10 @@ async fn join_room_by_id_helper_local( } drop(state_lock); - let pub_key_map = RwLock::new(BTreeMap::new()); - services - .server_keys - .fetch_required_signing_keys([&signed_value], &pub_key_map) - .await?; services .rooms .event_handler - .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true, &pub_key_map) + .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true) .await?; } else { return Err(error); @@ -1231,7 +1197,7 @@ async fn join_room_by_id_helper_local( return Err(error); } - Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) + Ok(()) } async fn make_join_request( @@ -1301,62 +1267,6 @@ async fn make_join_request( make_join_response_and_server } -pub async fn validate_and_add_event_id( - services: &Services, pdu: &RawJsonValue, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, -) -> Result<(OwnedEventId, CanonicalJsonObject)> { - let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()) - .map_err(|e| err!(BadServerResponse(debug_error!("Invalid PDU in server response: 
{e:?}"))))?; - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&value, room_version).expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - let back_off = |id| async { - match services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(id) - { - Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - }, - Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)); - }, - } - }; - - if let Some((time, tries)) = services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(&event_id) - { - // Exponential backoff - const MIN: u64 = 60 * 5; - const MAX: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN, MAX, time.elapsed(), *tries) { - return Err!(BadServerResponse("bad event {event_id:?}, still backing off")); - } - } - - if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version) { - debug_error!("Event {event_id} failed verification {pdu:#?}"); - let e = Err!(BadServerResponse(debug_error!("Event {event_id} failed verification: {e:?}"))); - back_off(event_id).await; - return e; - } - - value.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); - - Ok((event_id, value)) -} - pub(crate) async fn invite_helper( services: &Services, sender_user: &UserId, user_id: &UserId, room_id: &RoomId, reason: Option, is_direct: bool, @@ -1423,8 +1333,6 @@ pub(crate) async fn invite_helper( ) .await?; - let pub_key_map = RwLock::new(BTreeMap::new()); - // We do not add the event_id field to the pdu here because of signature and // hashes checks let Ok((event_id, value)) = gen_event_id_canonical_json(&response.event, &room_version_id) else { @@ -1452,15 +1360,10 @@ pub(crate) async fn invite_helper( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - services - .server_keys - 
.fetch_required_signing_keys([&value], &pub_key_map) - .await?; - let pdu_id: Vec = services .rooms .event_handler - .handle_incoming_pdu(&origin, room_id, &event_id, value, true, &pub_key_map) + .handle_incoming_pdu(&origin, room_id, &event_id, value, true) .await? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -1714,24 +1617,15 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present - ruma::signatures::hash_and_sign_event( - services.globals.server_name().as_str(), - services.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); + services + .server_keys + .hash_and_sign_event(&mut leave_event_stub, &room_version_id)?; // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + let event_id = pdu::gen_event_id(&leave_event_stub, &room_version_id)?; // Add event_id back - leave_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + leave_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); // It has enough fields to be called a proper event now let leave_event = leave_event_stub; diff --git a/src/api/client/mod.rs b/src/api/client/mod.rs index 4b7b64b9..2928be87 100644 --- a/src/api/client/mod.rs +++ b/src/api/client/mod.rs @@ -52,7 +52,7 @@ pub(super) use keys::*; pub(super) use media::*; pub(super) use media_legacy::*; pub(super) use membership::*; -pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, validate_and_add_event_id}; +pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room}; pub(super) use message::*; pub(super) use openid::*; pub(super) use 
presence::*; diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 7381a55f..746e1cfc 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -48,7 +48,7 @@ where async fn from_request(request: hyper::Request, services: &State) -> Result { let mut request = request::from(services, request).await?; let mut json_body = serde_json::from_slice::(&request.body).ok(); - let auth = auth::auth(services, &mut request, &json_body, &T::METADATA).await?; + let auth = auth::auth(services, &mut request, json_body.as_ref(), &T::METADATA).await?; Ok(Self { body: make_body::(services, &mut request, &mut json_body, &auth)?, origin: auth.origin, diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 8d76b4be..6b90c5ff 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -1,19 +1,20 @@ -use std::collections::BTreeMap; - use axum::RequestPartsExt; use axum_extra::{ headers::{authorization::Bearer, Authorization}, typed_header::TypedHeaderRejectionReason, TypedHeader, }; -use conduit::{debug_info, warn, Err, Error, Result}; +use conduit::{debug_error, err, warn, Err, Error, Result}; use http::uri::PathAndQuery; use ruma::{ api::{client::error::ErrorKind, AuthScheme, Metadata}, server_util::authorization::XMatrix, - CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, + CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, +}; +use service::{ + server_keys::{PubKeyMap, PubKeys}, + Services, }; -use service::Services; use super::request::Request; use crate::service::appservice::RegistrationInfo; @@ -33,7 +34,7 @@ pub(super) struct Auth { } pub(super) async fn auth( - services: &Services, request: &mut Request, json_body: &Option, metadata: &Metadata, + services: &Services, request: &mut Request, json_body: Option<&CanonicalJsonValue>, metadata: &Metadata, ) -> Result { let bearer: Option>> = request.parts.extract().await?; let token = match &bearer { @@ -151,27 +152,24 @@ 
pub(super) async fn auth( } async fn auth_appservice(services: &Services, request: &Request, info: Box) -> Result { - let user_id = request + let user_id_default = + || UserId::parse_with_server_name(info.registration.sender_localpart.as_str(), services.globals.server_name()); + + let Ok(user_id) = request .query .user_id .clone() - .map_or_else( - || { - UserId::parse_with_server_name( - info.registration.sender_localpart.as_str(), - services.globals.server_name(), - ) - }, - UserId::parse, - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + .map_or_else(user_id_default, UserId::parse) + else { + return Err!(Request(InvalidUsername("Username is invalid."))); + }; if !info.is_user_match(&user_id) { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); + return Err!(Request(Exclusive("User is not in namespace."))); } if !services.users.exists(&user_id).await { - return Err(Error::BadRequest(ErrorKind::forbidden(), "User does not exist.")); + return Err!(Request(Forbidden("User does not exist."))); } Ok(Auth { @@ -182,118 +180,104 @@ async fn auth_appservice(services: &Services, request: &Request, info: Box, -) -> Result { +async fn auth_server(services: &Services, request: &mut Request, body: Option<&CanonicalJsonValue>) -> Result { + type Member = (String, CanonicalJsonValue); + type Object = CanonicalJsonObject; + type Value = CanonicalJsonValue; + + let x_matrix = parse_x_matrix(request).await?; + auth_server_checks(services, &x_matrix)?; + + let destination = services.globals.server_name(); + let origin = &x_matrix.origin; + #[allow(clippy::or_fun_call)] + let signature_uri = request + .parts + .uri + .path_and_query() + .unwrap_or(&PathAndQuery::from_static("/")) + .to_string(); + + let signature: [Member; 1] = [(x_matrix.key.to_string(), Value::String(x_matrix.sig.to_string()))]; + let signatures: [Member; 1] = [(origin.to_string(), Value::Object(signature.into()))]; + let authorization: 
[Member; 5] = [ + ("destination".into(), Value::String(destination.into())), + ("method".into(), Value::String(request.parts.method.to_string())), + ("origin".into(), Value::String(origin.to_string())), + ("signatures".into(), Value::Object(signatures.into())), + ("uri".into(), Value::String(signature_uri)), + ]; + + let mut authorization: Object = authorization.into(); + if let Some(body) = body { + authorization.insert("content".to_owned(), body.clone()); + } + + let key = services + .server_keys + .get_verify_key(origin, &x_matrix.key) + .await + .map_err(|e| err!(Request(Forbidden(warn!("Failed to fetch signing keys: {e}")))))?; + + let keys: PubKeys = [(x_matrix.key.to_string(), key.key)].into(); + let keys: PubKeyMap = [(origin.to_string(), keys)].into(); + if let Err(e) = ruma::signatures::verify_json(&keys, authorization) { + debug_error!("Failed to verify federation request from {origin}: {e}"); + if request.parts.uri.to_string().contains('@') { + warn!( + "Request uri contained '@' character. 
Make sure your reverse proxy gives Conduit the raw uri (apache: \ + use nocanon)" + ); + } + + return Err!(Request(Forbidden("Failed to verify X-Matrix signatures."))); + } + + Ok(Auth { + origin: origin.to_owned().into(), + sender_user: None, + sender_device: None, + appservice_info: None, + }) +} + +fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { if !services.server.config.allow_federation { return Err!(Config("allow_federation", "Federation is disabled.")); } - let TypedHeader(Authorization(x_matrix)) = request - .parts - .extract::>>() - .await - .map_err(|e| { - warn!("Missing or invalid Authorization header: {e}"); - - let msg = match e.reason() { - TypedHeaderRejectionReason::Missing => "Missing Authorization header.", - TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.", - _ => "Unknown header-related error", - }; - - Error::BadRequest(ErrorKind::forbidden(), msg) - })?; + let destination = services.globals.server_name(); + if x_matrix.destination.as_deref() != Some(destination) { + return Err!(Request(Forbidden("Invalid destination."))); + } let origin = &x_matrix.origin; - if services .server .config .forbidden_remote_server_names .contains(origin) { - debug_info!("Refusing to accept inbound federation request to {origin}"); - return Err!(Request(Forbidden("Federation with this homeserver is not allowed."))); + return Err!(Request(Forbidden(debug_warn!("Federation requests from {origin} denied.")))); } - let signatures = - BTreeMap::from_iter([(x_matrix.key.clone(), CanonicalJsonValue::String(x_matrix.sig.to_string()))]); - let signatures = BTreeMap::from_iter([( - origin.as_str().to_owned(), - CanonicalJsonValue::Object( - signatures - .into_iter() - .map(|(k, v)| (k.to_string(), v)) - .collect(), - ), - )]); - - let server_destination = services.globals.server_name().as_str().to_owned(); - if let Some(destination) = x_matrix.destination.as_ref() { - if destination != &server_destination { - return 
Err(Error::BadRequest(ErrorKind::forbidden(), "Invalid authorization.")); - } - } - - #[allow(clippy::or_fun_call)] - let signature_uri = CanonicalJsonValue::String( - request - .parts - .uri - .path_and_query() - .unwrap_or(&PathAndQuery::from_static("/")) - .to_string(), - ); - - let mut request_map = BTreeMap::from_iter([ - ( - "method".to_owned(), - CanonicalJsonValue::String(request.parts.method.to_string()), - ), - ("uri".to_owned(), signature_uri), - ("origin".to_owned(), CanonicalJsonValue::String(origin.as_str().to_owned())), - ("destination".to_owned(), CanonicalJsonValue::String(server_destination)), - ("signatures".to_owned(), CanonicalJsonValue::Object(signatures)), - ]); - - if let Some(json_body) = json_body { - request_map.insert("content".to_owned(), json_body.clone()); - }; - - let keys_result = services - .server_keys - .fetch_signing_keys_for_server(origin, vec![x_matrix.key.to_string()]) - .await; - - let keys = keys_result.map_err(|e| { - warn!("Failed to fetch signing keys: {e}"); - Error::BadRequest(ErrorKind::forbidden(), "Failed to fetch signing keys.") - })?; - - let pub_key_map = BTreeMap::from_iter([(origin.as_str().to_owned(), keys)]); - - match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => Ok(Auth { - origin: Some(origin.clone()), - sender_user: None, - sender_device: None, - appservice_info: None, - }), - Err(e) => { - warn!("Failed to verify json request from {origin}: {e}\n{request_map:?}"); - - if request.parts.uri.to_string().contains('@') { - warn!( - "Request uri contained '@' character. 
Make sure your reverse proxy gives Conduit the raw uri \ - (apache: use nocanon)" - ); - } - - Err(Error::BadRequest( - ErrorKind::forbidden(), - "Failed to verify X-Matrix signatures.", - )) - }, - } + Ok(()) +} + +async fn parse_x_matrix(request: &mut Request) -> Result { + let TypedHeader(Authorization(x_matrix)) = request + .parts + .extract::>>() + .await + .map_err(|e| { + let msg = match e.reason() { + TypedHeaderRejectionReason::Missing => "Missing Authorization header.", + TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.", + _ => "Unknown header-related error", + }; + + err!(Request(Forbidden(warn!("{msg}: {e}")))) + })?; + + Ok(x_matrix) } diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index f02655e6..a9e404c5 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -85,13 +85,10 @@ pub(crate) async fn create_invite_route( .acl_check(invited_user.server_name(), &body.room_id) .await?; - ruma::signatures::hash_and_sign_event( - services.globals.server_name().as_str(), - services.globals.keypair(), - &mut signed_event, - &body.room_version, - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + services + .server_keys + .hash_and_sign_event(&mut signed_event, &body.room_version) + .map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?; // Generate event id let event_id = EventId::parse(format!( diff --git a/src/api/server/key.rs b/src/api/server/key.rs index 686e4424..3913ce43 100644 --- a/src/api/server/key.rs +++ b/src/api/server/key.rs @@ -1,20 +1,16 @@ -use std::{ - collections::BTreeMap, - time::{Duration, SystemTime}, -}; +use std::{collections::BTreeMap, time::Duration}; use axum::{extract::State, response::IntoResponse, Json}; +use conduit::{utils::timepoint_from_now, Result}; use ruma::{ api::{ - federation::discovery::{get_server_keys, ServerSigningKeys, VerifyKey}, + federation::discovery::{get_server_keys, ServerSigningKeys}, 
OutgoingResponse, }, - serde::{Base64, Raw}, - MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, + serde::Raw, + MilliSecondsSinceUnixEpoch, }; -use crate::Result; - /// # `GET /_matrix/key/v2/server` /// /// Gets the public signing keys of this server. @@ -24,47 +20,33 @@ use crate::Result; // Response type for this endpoint is Json because we need to calculate a // signature for the response pub(crate) async fn get_server_keys_route(State(services): State) -> Result { - let verify_keys: BTreeMap = BTreeMap::from([( - format!("ed25519:{}", services.globals.keypair().version()) - .try_into() - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: Base64::new(services.globals.keypair().public_key().to_vec()), - }, - )]); + let server_name = services.globals.server_name(); + let verify_keys = services.server_keys.verify_keys_for(server_name).await; + let server_key = ServerSigningKeys { + verify_keys, + server_name: server_name.to_owned(), + valid_until_ts: valid_until_ts(), + old_verify_keys: BTreeMap::new(), + signatures: BTreeMap::new(), + }; - let mut response = serde_json::from_slice( - get_server_keys::v2::Response { - server_key: Raw::new(&ServerSigningKeys { - server_name: services.globals.server_name().to_owned(), - verify_keys, - old_verify_keys: BTreeMap::new(), - signatures: BTreeMap::new(), - valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(86400 * 7)) - .expect("valid_until_ts should not get this high"), - ) - .expect("time is valid"), - }) - .expect("static conversion, no errors"), - } - .try_into_http_response::>() - .unwrap() - .body(), - ) - .unwrap(); + let response = get_server_keys::v2::Response { + server_key: Raw::new(&server_key)?, + } + .try_into_http_response::>()?; - ruma::signatures::sign_json( - services.globals.server_name().as_str(), - services.globals.keypair(), - &mut response, - ) - .unwrap(); + let mut response = 
serde_json::from_slice(response.body())?; + services.server_keys.sign_json(&mut response)?; Ok(Json(response)) } +fn valid_until_ts() -> MilliSecondsSinceUnixEpoch { + let dur = Duration::from_secs(86400 * 7); + let timepoint = timepoint_from_now(dur).expect("SystemTime should not overflow"); + MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow") +} + /// # `GET /_matrix/key/v2/server/{keyId}` /// /// Gets the public signing keys of this server. diff --git a/src/api/server/send.rs b/src/api/server/send.rs index f6916ccf..40f9403b 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -21,7 +21,6 @@ use ruma::{ OwnedEventId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; -use tokio::sync::RwLock; use crate::{ services::Services, @@ -109,22 +108,6 @@ async fn handle_pdus( // and hashes checks } - // We go through all the signatures we see on the PDUs and fetch the - // corresponding signing keys - let pub_key_map = RwLock::new(BTreeMap::new()); - if !parsed_pdus.is_empty() { - services - .server_keys - .fetch_required_signing_keys(parsed_pdus.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map) - .await - .unwrap_or_else(|e| warn!("Could not fetch all signatures for PDUs from {origin}: {e:?}")); - - debug!( - elapsed = ?txn_start_time.elapsed(), - "Fetched signing keys" - ); - } - let mut resolved_map = BTreeMap::new(); for (event_id, value, room_id) in parsed_pdus { let pdu_start_time = Instant::now(); @@ -134,17 +117,18 @@ async fn handle_pdus( .mutex_federation .lock(&room_id) .await; + resolved_map.insert( event_id.clone(), services .rooms .event_handler - .handle_incoming_pdu(origin, &room_id, &event_id, value, true, &pub_key_map) + .handle_incoming_pdu(origin, &room_id, &event_id, value, true) .await .map(|_| ()), ); - drop(mutex_lock); + drop(mutex_lock); debug!( pdu_elapsed = ?pdu_start_time.elapsed(), txn_elapsed = ?txn_start_time.elapsed(), diff --git 
a/src/api/server/send_join.rs b/src/api/server/send_join.rs index f9257690..d888d75e 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -1,6 +1,6 @@ #![allow(deprecated)] -use std::{borrow::Borrow, collections::BTreeMap}; +use std::borrow::Borrow; use axum::extract::State; use conduit::{err, pdu::gen_event_id_canonical_json, utils::IterStream, warn, Error, Result}; @@ -15,7 +15,6 @@ use ruma::{ }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use service::Services; -use tokio::sync::RwLock; use crate::Ruma; @@ -43,9 +42,6 @@ async fn create_join_event( .await .map_err(|_| err!(Request(NotFound("Event state not found."))))?; - let pub_key_map = RwLock::new(BTreeMap::new()); - // let mut auth_cache = EventMap::new(); - // We do not add the event_id field to the pdu here because of signature and // hashes checks let room_version_id = services.rooms.state.get_room_version(room_id).await?; @@ -137,20 +133,12 @@ async fn create_join_event( .await .unwrap_or_default() { - ruma::signatures::hash_and_sign_event( - services.globals.server_name().as_str(), - services.globals.keypair(), - &mut value, - &room_version_id, - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + services + .server_keys + .hash_and_sign_event(&mut value, &room_version_id) + .map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?; } - services - .server_keys - .fetch_required_signing_keys([&value], &pub_key_map) - .await?; - let origin: OwnedServerName = serde_json::from_value( serde_json::to_value( value @@ -171,7 +159,7 @@ async fn create_join_event( let pdu_id: Vec = services .rooms .event_handler - .handle_incoming_pdu(&origin, room_id, &event_id, value.clone(), true, &pub_key_map) + .handle_incoming_pdu(&origin, room_id, &event_id, value.clone(), true) .await? 
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Could not accept as timeline event."))?; diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index 81f41af0..0530f9dd 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,7 +1,5 @@ #![allow(deprecated)] -use std::collections::BTreeMap; - use axum::extract::State; use conduit::{utils::ReadyExt, Error, Result}; use ruma::{ @@ -13,7 +11,6 @@ use ruma::{ OwnedServerName, OwnedUserId, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; -use tokio::sync::RwLock; use crate::{ service::{pdu::gen_event_id_canonical_json, Services}, @@ -60,8 +57,6 @@ async fn create_leave_event( .acl_check(origin, room_id) .await?; - let pub_key_map = RwLock::new(BTreeMap::new()); - // We do not add the event_id field to the pdu here because of signature and // hashes checks let room_version_id = services.rooms.state.get_room_version(room_id).await?; @@ -154,21 +149,17 @@ async fn create_leave_event( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?; - services - .server_keys - .fetch_required_signing_keys([&value], &pub_key_map) - .await?; - let mutex_lock = services .rooms .event_handler .mutex_federation .lock(room_id) .await; + let pdu_id: Vec = services .rooms .event_handler - .handle_incoming_pdu(&origin, room_id, &event_id, value, true, &pub_key_map) + .handle_incoming_pdu(&origin, room_id, &event_id, value, true) .await? 
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Could not accept as timeline event."))?; diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index b5e07da2..114c6e76 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -490,30 +490,6 @@ pub struct Config { #[serde(default = "default_trusted_servers")] pub trusted_servers: Vec, - /// Option to control whether conduwuit will query your list of trusted - /// notary key servers (`trusted_servers`) for remote homeserver signing - /// keys it doesn't know *first*, or query the individual servers first - /// before falling back to the trusted key servers. - /// - /// The former/default behaviour makes federated/remote rooms joins - /// generally faster because we're querying a single (or list of) server - /// that we know works, is reasonably fast, and is reliable for just about - /// all the homeserver signing keys in the room. Querying individual - /// servers may take longer depending on the general infrastructure of - /// everyone in there, how many dead servers there are, etc. - /// - /// However, this does create an increased reliance on one single or - /// multiple large entities as `trusted_servers` should generally - /// contain long-term and large servers who know a very large number of - /// homeservers. - /// - /// If you don't know what any of this means, leave this and - /// `trusted_servers` alone to their defaults. - /// - /// Defaults to true as this is the fastest option for federation. - #[serde(default = "true_fn")] - pub query_trusted_key_servers_first: bool, - /// max log level for conduwuit. 
allows debug, info, warn, or error /// see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives /// **Caveat**: @@ -1518,10 +1494,6 @@ impl fmt::Display for Config { .map(|server| server.host()) .join(", "), ); - line( - "Query Trusted Key Servers First", - &self.query_trusted_key_servers_first.to_string(), - ); line("OpenID Token TTL", &self.openid_token_ttl.to_string()); line( "TURN username", diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 39fa4340..42250a0c 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -85,6 +85,8 @@ pub enum Error { BadRequest(ruma::api::client::error::ErrorKind, &'static str), //TODO: remove #[error("{0}")] BadServerResponse(Cow<'static, str>), + #[error(transparent)] + CanonicalJson(#[from] ruma::CanonicalJsonError), #[error("There was a problem with the '{0}' directive in your configuration: {1}")] Config(&'static str, Cow<'static, str>), #[error("{0}")] @@ -110,6 +112,8 @@ pub enum Error { #[error(transparent)] Ruma(#[from] ruma::api::client::error::Error), #[error(transparent)] + Signatures(#[from] ruma::signatures::Error), + #[error(transparent)] StateRes(#[from] ruma::state_res::Error), #[error("uiaa")] Uiaa(ruma::api::client::uiaa::UiaaInfo), diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 5f50fe5b..274b96bd 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -408,10 +408,13 @@ impl PduEvent { serde_json::from_value(json).expect("Raw::from_value always works") } - pub fn from_id_val(event_id: &EventId, mut json: CanonicalJsonObject) -> Result { - json.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + pub fn from_id_val(event_id: &EventId, mut json: CanonicalJsonObject) -> Result { + json.insert("event_id".into(), CanonicalJsonValue::String(event_id.into())); - serde_json::from_value(serde_json::to_value(json).expect("valid JSON")) + let value = serde_json::to_value(json)?; + let pdu 
= serde_json::from_value(value)?; + + Ok(pdu) } } @@ -462,13 +465,15 @@ pub fn gen_event_id_canonical_json( let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) .map_err(|e| err!(BadServerResponse(warn!("Error parsing incoming event: {e:?}"))))?; - let event_id = format!( - "${}", - // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, room_version_id).expect("ruma can calculate reference hashes") - ) - .try_into() - .expect("ruma's reference hashes are valid event ids"); + let event_id = gen_event_id(&value, room_version_id)?; Ok((event_id, value)) } + +/// Generates a correct eventId for the incoming pdu. +pub fn gen_event_id(value: &CanonicalJsonObject, room_version_id: &RoomVersionId) -> Result { + let reference_hash = ruma::signatures::reference_hash(value, room_version_id)?; + let event_id: OwnedEventId = format!("${reference_hash}").try_into()?; + + Ok(event_id) +} diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 3638cb56..eea7597a 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,16 +1,9 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, RwLock}, -}; +use std::sync::{Arc, RwLock}; -use conduit::{trace, utils, utils::rand, Error, Result, Server}; -use database::{Database, Deserialized, Json, Map}; +use conduit::{trace, utils, Result, Server}; +use database::{Database, Deserialized, Map}; use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; -use ruma::{ - api::federation::discovery::{ServerSigningKeys, VerifyKey}, - signatures::Ed25519KeyPair, - DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId, -}; +use ruma::{DeviceId, UserId}; use crate::{rooms, Dep}; @@ -25,7 +18,6 @@ pub struct Data { pduid_pdu: Arc, keychangeid_userid: Arc, roomusertype_roomuserdataid: Arc, - server_signingkeys: Arc, readreceiptid_readreceipt: Arc, userid_lastonetimekeyupdate: Arc, counter: RwLock, @@ -56,7 +48,6 @@ impl Data 
{ pduid_pdu: db["pduid_pdu"].clone(), keychangeid_userid: db["keychangeid_userid"].clone(), roomusertype_roomuserdataid: db["roomusertype_roomuserdataid"].clone(), - server_signingkeys: db["server_signingkeys"].clone(), readreceiptid_readreceipt: db["readreceiptid_readreceipt"].clone(), userid_lastonetimekeyupdate: db["userid_lastonetimekeyupdate"].clone(), counter: RwLock::new(Self::stored_count(&db["global"]).expect("initialized global counter")), @@ -205,107 +196,6 @@ impl Data { Ok(()) } - pub fn load_keypair(&self) -> Result { - let generate = |_| { - let keypair = Ed25519KeyPair::generate().expect("Ed25519KeyPair generation always works (?)"); - - let mut value = rand::string(8).as_bytes().to_vec(); - value.push(0xFF); - value.extend_from_slice(&keypair); - - self.global.insert(b"keypair", &value); - value - }; - - let keypair_bytes: Vec = self - .global - .get_blocking(b"keypair") - .map_or_else(generate, Into::into); - - let mut parts = keypair_bytes.splitn(2, |&b| b == 0xFF); - utils::string_from_bytes( - // 1. version - parts - .next() - .expect("splitn always returns at least one element"), - ) - .map_err(|_| Error::bad_database("Invalid version bytes in keypair.")) - .and_then(|version| { - // 2. key - parts - .next() - .ok_or_else(|| Error::bad_database("Invalid keypair format in database.")) - .map(|key| (version, key)) - }) - .and_then(|(version, key)| { - Ed25519KeyPair::from_der(key, version) - .map_err(|_| Error::bad_database("Private or public keys are invalid.")) - }) - } - - #[inline] - pub fn remove_keypair(&self) -> Result<()> { - self.global.remove(b"keypair"); - Ok(()) - } - - /// TODO: the key valid until timestamp (`valid_until_ts`) is only honored - /// in room version > 4 - /// - /// Remove the outdated keys and insert the new ones. - /// - /// This doesn't actually check that the keys provided are newer than the - /// old set. 
- pub async fn add_signing_key( - &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> BTreeMap { - // (timo) Not atomic, but this is not critical - let mut keys: ServerSigningKeys = self - .server_signingkeys - .get(origin) - .await - .deserialized() - .unwrap_or_else(|_| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); - - keys.verify_keys.extend(new_keys.verify_keys); - keys.old_verify_keys.extend(new_keys.old_verify_keys); - - self.server_signingkeys.raw_put(origin, Json(&keys)); - - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - - tree - } - - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found - /// for the server. - pub async fn verify_keys_for(&self, origin: &ServerName) -> Result> { - self.signing_keys_for(origin).await.map_or_else( - |_| Ok(BTreeMap::new()), - |keys: ServerSigningKeys| { - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - Ok(tree) - }, - ) - } - - pub async fn signing_keys_for(&self, origin: &ServerName) -> Result { - self.server_signingkeys.get(origin).await.deserialized() - } - pub async fn database_version(&self) -> u64 { self.global .get(b"version") diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index fb970f07..7680007d 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -2,7 +2,7 @@ mod data; pub(super) mod migrations; use std::{ - collections::{BTreeMap, HashMap}, + collections::HashMap, fmt::Write, sync::{Arc, RwLock}, time::Instant, @@ -13,13 +13,8 @@ use data::Data; use ipaddress::IPAddress; use regex::RegexSet; use ruma::{ - api::{ - client::discovery::discover_support::ContactRole, - federation::discovery::{ServerSigningKeys, VerifyKey}, - }, - serde::Base64, - DeviceId, OwnedEventId, 
OwnedRoomAliasId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomAliasId, - RoomVersionId, ServerName, UserId, + api::client::discovery::discover_support::ContactRole, DeviceId, OwnedEventId, OwnedRoomAliasId, OwnedServerName, + OwnedUserId, RoomAliasId, RoomVersionId, ServerName, UserId, }; use tokio::sync::Mutex; use url::Url; @@ -31,7 +26,6 @@ pub struct Service { pub config: Config, pub cidr_range_denylist: Vec, - keypair: Arc, jwt_decoding_key: Option, pub stable_room_versions: Vec, pub unstable_room_versions: Vec, @@ -50,16 +44,6 @@ impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let db = Data::new(&args); let config = &args.server.config; - let keypair = db.load_keypair(); - - let keypair = match keypair { - Ok(k) => k, - Err(e) => { - error!("Keypair invalid. Deleting..."); - db.remove_keypair()?; - return Err(e); - }, - }; let jwt_decoding_key = config .jwt_secret @@ -115,7 +99,6 @@ impl crate::Service for Service { db, config: config.clone(), cidr_range_denylist, - keypair: Arc::new(keypair), jwt_decoding_key, stable_room_versions, unstable_room_versions, @@ -175,9 +158,6 @@ impl crate::Service for Service { } impl Service { - /// Returns this server's keypair. - pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { &self.keypair } - #[inline] pub fn next_count(&self) -> Result { self.db.next_count() } @@ -224,8 +204,6 @@ impl Service { pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } - pub fn query_trusted_key_servers_first(&self) -> bool { self.config.query_trusted_key_servers_first } - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { self.jwt_decoding_key.as_ref() } pub fn turn_password(&self) -> &String { &self.config.turn_password } @@ -302,28 +280,6 @@ impl Service { } } - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found - /// for the server. 
- pub async fn verify_keys_for(&self, origin: &ServerName) -> Result> { - let mut keys = self.db.verify_keys_for(origin).await?; - if origin == self.server_name() { - keys.insert( - format!("ed25519:{}", self.keypair().version()) - .try_into() - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: Base64::new(self.keypair.public_key().to_vec()), - }, - ); - } - - Ok(keys) - } - - pub async fn signing_keys_for(&self, origin: &ServerName) -> Result { - self.db.signing_keys_for(origin).await - } - pub fn well_known_client(&self) -> &Option { &self.config.well_known.client } pub fn well_known_server(&self) -> &Option { &self.config.well_known.server } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index f8042b67..8448404b 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -28,12 +28,10 @@ use ruma::{ StateEventType, TimelineEventType, }, int, - serde::Base64, state_res::{self, EventTypeExt, RoomVersion, StateMap}, - uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, - ServerName, UserId, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, + RoomId, RoomVersionId, ServerName, UserId, }; -use tokio::sync::RwLock; use super::state_compressor::CompressedStateEvent; use crate::{globals, rooms, sending, server_keys, Dep}; @@ -129,11 +127,10 @@ impl Service { /// 13. Use state resolution to find new room state /// 14. 
Check if the event passes auth based on the "current state" of the /// room, if not soft fail it - #[tracing::instrument(skip(self, origin, value, is_timeline_event, pub_key_map), name = "pdu")] + #[tracing::instrument(skip(self, origin, value, is_timeline_event), name = "pdu")] pub async fn handle_incoming_pdu<'a>( &self, origin: &'a ServerName, room_id: &'a RoomId, event_id: &'a EventId, value: BTreeMap, is_timeline_event: bool, - pub_key_map: &'a RwLock>>, ) -> Result>> { // 1. Skip the PDU if we already have it as a timeline event if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await { @@ -177,7 +174,7 @@ impl Service { let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; let (incoming_pdu, val) = self - .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false, pub_key_map) + .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) .boxed() .await?; @@ -200,7 +197,6 @@ impl Service { &create_event, room_id, &room_version_id, - pub_key_map, incoming_pdu.prev_events.clone(), ) .await?; @@ -212,7 +208,6 @@ impl Service { origin, event_id, room_id, - pub_key_map, &mut eventid_info, &create_event, &first_pdu_in_room, @@ -250,7 +245,7 @@ impl Service { .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); let r = self - .upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id, pub_key_map) + .upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id) .await; self.federation_handletime @@ -264,12 +259,11 @@ impl Service { #[allow(clippy::type_complexity)] #[allow(clippy::too_many_arguments)] #[tracing::instrument( - skip(self, origin, event_id, room_id, pub_key_map, eventid_info, create_event, first_pdu_in_room), + skip(self, origin, event_id, room_id, eventid_info, create_event, first_pdu_in_room), name = "prev" )] pub async fn handle_prev_pdu<'a>( &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - 
pub_key_map: &'a RwLock>>, eventid_info: &mut HashMap, (Arc, BTreeMap)>, create_event: &Arc, first_pdu_in_room: &Arc, prev_id: &EventId, ) -> Result<()> { @@ -318,7 +312,7 @@ impl Service { .expect("locked") .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id, pub_key_map) + self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) .await?; self.federation_handletime @@ -338,8 +332,7 @@ impl Service { #[allow(clippy::too_many_arguments)] async fn handle_outlier_pdu<'a>( &self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, - mut value: BTreeMap, auth_events_known: bool, - pub_key_map: &'a RwLock>>, + mut value: CanonicalJsonObject, auth_events_known: bool, ) -> Result<(Arc, BTreeMap)> { // 1. Remove unsigned field value.remove("unsigned"); @@ -349,14 +342,13 @@ impl Service { // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match let room_version_id = Self::get_room_version_id(create_event)?; - - let guard = pub_key_map.read().await; - let mut val = match ruma::signatures::verify_event(&guard, &value, &room_version_id) { - Err(e) => { - // Drop - warn!("Dropping bad event {event_id}: {e}"); - return Err!(Request(InvalidParam("Signature verification failed"))); - }, + let mut val = match self + .services + .server_keys + .verify_event(&value, Some(&room_version_id)) + .await + { + Ok(ruma::signatures::Verified::All) => value, Ok(ruma::signatures::Verified::Signatures) => { // Redact debug_info!("Calculated hash does not match (redaction): {event_id}"); @@ -371,11 +363,13 @@ impl Service { obj }, - Ok(ruma::signatures::Verified::All) => value, + Err(e) => { + return Err!(Request(InvalidParam(debug_error!( + "Signature verification failed for {event_id}: {e}" + )))) + }, }; - drop(guard); - // Now that we have checked the signature and hashes we can add the eventID and // 
convert to our PduEvent type val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); @@ -404,7 +398,6 @@ impl Service { create_event, room_id, &room_version_id, - pub_key_map, ), ) .await; @@ -487,7 +480,7 @@ impl Service { pub async fn upgrade_outlier_to_timeline_pdu( &self, incoming_pdu: Arc, val: BTreeMap, create_event: &PduEvent, - origin: &ServerName, room_id: &RoomId, pub_key_map: &RwLock>>, + origin: &ServerName, room_id: &RoomId, ) -> Result>> { // Skip the PDU if we already have it as a timeline event if let Ok(pduid) = self @@ -526,14 +519,7 @@ impl Service { if state_at_incoming_event.is_none() { state_at_incoming_event = self - .fetch_state( - origin, - create_event, - room_id, - &room_version_id, - pub_key_map, - &incoming_pdu.event_id, - ) + .fetch_state(origin, create_event, room_id, &room_version_id, &incoming_pdu.event_id) .await?; } @@ -1021,10 +1007,10 @@ impl Service { /// Call /state_ids to find out what the state at this pdu is. We trust the /// server's response to some extend (sic), but we still do a lot of checks /// on the events - #[tracing::instrument(skip(self, pub_key_map, create_event, room_version_id))] + #[tracing::instrument(skip(self, create_event, room_version_id))] async fn fetch_state( &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, - pub_key_map: &RwLock>>, event_id: &EventId, + event_id: &EventId, ) -> Result>>> { debug!("Fetching state ids"); let res = self @@ -1048,7 +1034,7 @@ impl Service { .collect::>(); let state_vec = self - .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id, pub_key_map) + .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id) .boxed() .await; @@ -1102,7 +1088,7 @@ impl Service { /// d. TODO: Ask other servers over federation? 
pub async fn fetch_and_handle_outliers<'a>( &self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, - room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock>>, + room_version_id: &'a RoomVersionId, ) -> Vec<(Arc, Option>)> { let back_off = |id| match self .services @@ -1222,22 +1208,6 @@ impl Service { events_with_auth_events.push((id, None, events_in_reverse_order)); } - // We go through all the signatures we see on the PDUs and their unresolved - // dependencies and fetch the corresponding signing keys - self.services - .server_keys - .fetch_required_signing_keys( - events_with_auth_events - .iter() - .flat_map(|(_id, _local_pdu, events)| events) - .map(|(_event_id, event)| event), - pub_key_map, - ) - .await - .unwrap_or_else(|e| { - warn!("Could not fetch all signatures for PDUs from {origin}: {e:?}"); - }); - let mut pdus = Vec::with_capacity(events_with_auth_events.len()); for (id, local_pdu, events_in_reverse_order) in events_with_auth_events { // a. 
Look in the main timeline (pduid_pdu tree) @@ -1266,16 +1236,8 @@ impl Service { } } - match Box::pin(self.handle_outlier_pdu( - origin, - create_event, - &next_id, - room_id, - value.clone(), - true, - pub_key_map, - )) - .await + match Box::pin(self.handle_outlier_pdu(origin, create_event, &next_id, room_id, value.clone(), true)) + .await { Ok((pdu, json)) => { if next_id == *id { @@ -1296,7 +1258,7 @@ impl Service { #[tracing::instrument(skip_all)] async fn fetch_prev( &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, - pub_key_map: &RwLock>>, initial_set: Vec>, + initial_set: Vec>, ) -> Result<( Vec>, HashMap, (Arc, BTreeMap)>, @@ -1311,14 +1273,7 @@ impl Service { while let Some(prev_event_id) = todo_outlier_stack.pop() { if let Some((pdu, mut json_opt)) = self - .fetch_and_handle_outliers( - origin, - &[prev_event_id.clone()], - create_event, - room_id, - room_version_id, - pub_key_map, - ) + .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id, room_version_id) .boxed() .await .pop() diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 21e5395d..902e50ff 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -16,7 +16,7 @@ use conduit::{ }; use futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryStreamExt}; use ruma::{ - api::{client::error::ErrorKind, federation}, + api::federation, canonical_json::to_canonical_value, events::{ push_rules::PushRulesEvent, @@ -30,14 +30,12 @@ use ruma::{ GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - serde::Base64, state_res::{self, Event, RoomVersion}, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use 
tokio::sync::RwLock; use self::data::Data; pub use self::data::PdusIterItem; @@ -784,21 +782,15 @@ impl Service { to_canonical_value(self.services.globals.server_name()).expect("server name is a valid CanonicalJsonValue"), ); - match ruma::signatures::hash_and_sign_event( - self.services.globals.server_name().as_str(), - self.services.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(()) => {}, - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => { - Err(Error::BadRequest(ErrorKind::TooLarge, "Message is too long")) - }, - _ => Err(Error::BadRequest(ErrorKind::Unknown, "Signing event failed")), - } - }, + if let Err(e) = self + .services + .server_keys + .hash_and_sign_event(&mut pdu_json, &room_version_id) + { + return match e { + Error::Signatures(ruma::signatures::Error::PduSize) => Err!(Request(TooLarge("Message is too long"))), + _ => Err!(Request(Unknown("Signing event failed"))), + }; } // Generate event id @@ -1106,9 +1098,8 @@ impl Service { .await; match response { Ok(response) => { - let pub_key_map = RwLock::new(BTreeMap::new()); for pdu in response.pdus { - if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await { + if let Err(e) = self.backfill_pdu(backfill_server, pdu).await { warn!("Failed to add backfilled pdu in room {room_id}: {e}"); } } @@ -1124,11 +1115,8 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(self, pdu, pub_key_map))] - pub async fn backfill_pdu( - &self, origin: &ServerName, pdu: Box, - pub_key_map: &RwLock>>, - ) -> Result<()> { + #[tracing::instrument(skip(self, pdu))] + pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { let (event_id, value, room_id) = self.services.event_handler.parse_incoming_pdu(&pdu).await?; // Lock so we cannot backfill the same pdu twice at the same time @@ -1146,14 +1134,9 @@ impl Service { return Ok(()); } - self.services - .server_keys - .fetch_required_signing_keys([&value], pub_key_map) - .await?; - self.services 
.event_handler - .handle_incoming_pdu(origin, &room_id, &event_id, value, false, pub_key_map) + .handle_incoming_pdu(origin, &room_id, &event_id, value, false) .await?; let value = self diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index e3582f2e..5970c383 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -17,7 +17,7 @@ use tokio::sync::Mutex; use self::data::Data; pub use self::dest::Destination; -use crate::{account_data, client, globals, presence, pusher, resolver, rooms, users, Dep}; +use crate::{account_data, client, globals, presence, pusher, resolver, rooms, server_keys, users, Dep}; pub struct Service { server: Arc, @@ -41,6 +41,7 @@ struct Services { account_data: Dep, appservice: Dep, pusher: Dep, + server_keys: Dep, } #[derive(Clone, Debug, PartialEq, Eq)] @@ -78,6 +79,7 @@ impl crate::Service for Service { account_data: args.depend::("account_data"), appservice: args.depend::("appservice"), pusher: args.depend::("pusher"), + server_keys: args.depend::("server_keys"), }, db: Data::new(&args), sender, diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 9a8f408b..73b6a468 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -1,8 +1,8 @@ use std::{fmt::Debug, mem}; use conduit::{ - debug, debug_error, debug_info, debug_warn, err, error::inspect_debug_log, trace, utils::string::EMPTY, Err, Error, - Result, + debug, debug_error, debug_info, debug_warn, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, + Err, Error, Result, }; use http::{header::AUTHORIZATION, HeaderValue}; use ipaddress::IPAddress; @@ -18,7 +18,7 @@ use ruma::{ }; use crate::{ - globals, resolver, + resolver, resolver::{actual::ActualDest, cache::CachedDest}, }; @@ -75,7 +75,7 @@ impl super::Service { .try_into_http_request::>(&actual.string, SATIR, &VERSIONS) .map_err(|e| err!(BadServerResponse("Invalid destination: {e:?}")))?; - sign_request::(&self.services.globals, dest, 
&mut http_request); + self.sign_request::(dest, &mut http_request); let request = Request::try_from(http_request)?; self.validate_url(request.url())?; @@ -178,7 +178,8 @@ where Err(e.into()) } -fn sign_request(globals: &globals::Service, dest: &ServerName, http_request: &mut http::Request>) +#[implement(super::Service)] +fn sign_request(&self, dest: &ServerName, http_request: &mut http::Request>) where T: OutgoingRequest + Debug + Send, { @@ -200,11 +201,13 @@ where .to_string() .into(), ); - req_map.insert("origin".to_owned(), globals.server_name().as_str().into()); + req_map.insert("origin".to_owned(), self.services.globals.server_name().to_string().into()); req_map.insert("destination".to_owned(), dest.as_str().into()); let mut req_json = serde_json::from_value(req_map.into()).expect("valid JSON is valid BTreeMap"); - ruma::signatures::sign_json(globals.server_name().as_str(), globals.keypair(), &mut req_json) + self.services + .server_keys + .sign_json(&mut req_json) .expect("our request json is what ruma expects"); let req_json: serde_json::Map = @@ -231,7 +234,12 @@ where http_request.headers_mut().insert( AUTHORIZATION, - HeaderValue::from(&XMatrix::new(globals.config.server_name.clone(), dest.to_owned(), key, sig)), + HeaderValue::from(&XMatrix::new( + self.services.globals.server_name().to_owned(), + dest.to_owned(), + key, + sig, + )), ); } } diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs new file mode 100644 index 00000000..2b170040 --- /dev/null +++ b/src/service/server_keys/acquire.rs @@ -0,0 +1,175 @@ +use std::{ + borrow::Borrow, + collections::{BTreeMap, BTreeSet}, +}; + +use conduit::{debug, debug_warn, error, implement, result::FlatOk, warn}; +use futures::{stream::FuturesUnordered, StreamExt}; +use ruma::{ + api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, OwnedServerName, + OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, +}; +use serde_json::value::RawValue as 
RawJsonValue; + +use super::key_exists; + +type Batch = BTreeMap>; + +#[implement(super::Service)] +pub async fn acquire_events_pubkeys<'a, I>(&self, events: I) +where + I: Iterator> + Send, +{ + type Batch = BTreeMap>; + type Signatures = BTreeMap>; + + let mut batch = Batch::new(); + events + .cloned() + .map(Raw::::from_json) + .map(|event| event.get_field::("signatures")) + .filter_map(FlatOk::flat_ok) + .flat_map(IntoIterator::into_iter) + .for_each(|(server, sigs)| { + batch.entry(server).or_default().extend(sigs.into_keys()); + }); + + let batch = batch + .iter() + .map(|(server, keys)| (server.borrow(), keys.iter().map(Borrow::borrow))); + + self.acquire_pubkeys(batch).await; +} + +#[implement(super::Service)] +pub async fn acquire_pubkeys<'a, S, K>(&self, batch: S) +where + S: Iterator + Send + Clone, + K: Iterator + Send + Clone, +{ + let requested_servers = batch.clone().count(); + let requested_keys = batch.clone().flat_map(|(_, key_ids)| key_ids).count(); + + debug!("acquire {requested_keys} keys from {requested_servers}"); + + let missing = self.acquire_locals(batch).await; + let missing_keys = keys_count(&missing); + let missing_servers = missing.len(); + if missing_servers == 0 { + return; + } + + debug!("missing {missing_keys} keys for {missing_servers} servers locally"); + + let missing = self.acquire_origins(missing.into_iter()).await; + let missing_keys = keys_count(&missing); + let missing_servers = missing.len(); + if missing_servers == 0 { + return; + } + + debug_warn!("missing {missing_keys} keys for {missing_servers} servers unreachable"); + + let missing = self.acquire_notary(missing.into_iter()).await; + let missing_keys = keys_count(&missing); + let missing_servers = missing.len(); + if missing_keys > 0 { + debug_warn!("still missing {missing_keys} keys for {missing_servers} servers from all notaries"); + warn!("did not obtain {missing_keys} of {requested_keys} keys; some events may not be accepted"); + } +} + 
+#[implement(super::Service)] +async fn acquire_locals<'a, S, K>(&self, batch: S) -> Batch +where + S: Iterator + Send, + K: Iterator + Send, +{ + let mut missing = Batch::new(); + for (server, key_ids) in batch { + for key_id in key_ids { + if !self.verify_key_exists(server, key_id).await { + missing + .entry(server.into()) + .or_default() + .push(key_id.into()); + } + } + } + + missing +} + +#[implement(super::Service)] +async fn acquire_origins(&self, batch: I) -> Batch +where + I: Iterator)> + Send, +{ + let mut requests: FuturesUnordered<_> = batch + .map(|(origin, key_ids)| self.acquire_origin(origin, key_ids)) + .collect(); + + let mut missing = Batch::new(); + while let Some((origin, key_ids)) = requests.next().await { + if !key_ids.is_empty() { + missing.insert(origin, key_ids); + } + } + + missing +} + +#[implement(super::Service)] +async fn acquire_origin( + &self, origin: OwnedServerName, mut key_ids: Vec, +) -> (OwnedServerName, Vec) { + if let Ok(server_keys) = self.server_request(&origin).await { + self.add_signing_keys(server_keys.clone()).await; + key_ids.retain(|key_id| !key_exists(&server_keys, key_id)); + } + + (origin, key_ids) +} + +#[implement(super::Service)] +async fn acquire_notary(&self, batch: I) -> Batch +where + I: Iterator)> + Send, +{ + let mut missing: Batch = batch.collect(); + for notary in self.services.globals.trusted_servers() { + let missing_keys = keys_count(&missing); + let missing_servers = missing.len(); + debug!("Asking notary {notary} for {missing_keys} missing keys from {missing_servers} servers"); + + let batch = missing + .iter() + .map(|(server, keys)| (server.borrow(), keys.iter().map(Borrow::borrow))); + + match self.batch_notary_request(notary, batch).await { + Err(e) => error!("Failed to contact notary {notary:?}: {e}"), + Ok(results) => { + for server_keys in results { + self.acquire_notary_result(&mut missing, server_keys).await; + } + }, + } + } + + missing +} + +#[implement(super::Service)] +async fn 
acquire_notary_result(&self, missing: &mut Batch, server_keys: ServerSigningKeys) { + let server = &server_keys.server_name; + self.add_signing_keys(server_keys.clone()).await; + + if let Some(key_ids) = missing.get_mut(server) { + key_ids.retain(|key_id| key_exists(&server_keys, key_id)); + if key_ids.is_empty() { + missing.remove(server); + } + } +} + +fn keys_count(batch: &Batch) -> usize { batch.iter().flat_map(|(_, key_ids)| key_ids.iter()).count() } diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs new file mode 100644 index 00000000..0f449b46 --- /dev/null +++ b/src/service/server_keys/get.rs @@ -0,0 +1,86 @@ +use std::borrow::Borrow; + +use conduit::{implement, Err, Result}; +use ruma::{api::federation::discovery::VerifyKey, CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId}; + +use super::{extract_key, PubKeyMap, PubKeys}; + +#[implement(super::Service)] +pub async fn get_event_keys(&self, object: &CanonicalJsonObject, version: &RoomVersionId) -> Result { + use ruma::signatures::required_keys; + + let required = match required_keys(object, version) { + Ok(required) => required, + Err(e) => return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")), + }; + + let batch = required + .iter() + .map(|(s, ids)| (s.borrow(), ids.iter().map(Borrow::borrow))); + + Ok(self.get_pubkeys(batch).await) +} + +#[implement(super::Service)] +pub async fn get_pubkeys<'a, S, K>(&self, batch: S) -> PubKeyMap +where + S: Iterator + Send, + K: Iterator + Send, +{ + let mut keys = PubKeyMap::new(); + for (server, key_ids) in batch { + let pubkeys = self.get_pubkeys_for(server, key_ids).await; + keys.insert(server.into(), pubkeys); + } + + keys +} + +#[implement(super::Service)] +pub async fn get_pubkeys_for<'a, I>(&self, origin: &ServerName, key_ids: I) -> PubKeys +where + I: Iterator + Send, +{ + let mut keys = PubKeys::new(); + for key_id in key_ids { + if let Ok(verify_key) = self.get_verify_key(origin, 
key_id).await { + keys.insert(key_id.into(), verify_key.key); + } + } + + keys +} + +#[implement(super::Service)] +pub async fn get_verify_key(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> Result { + if let Some(result) = self.verify_keys_for(origin).await.remove(key_id) { + return Ok(result); + } + + if let Ok(server_key) = self.server_request(origin).await { + self.add_signing_keys(server_key.clone()).await; + if let Some(result) = extract_key(server_key, key_id) { + return Ok(result); + } + } + + for notary in self.services.globals.trusted_servers() { + if let Ok(server_keys) = self.notary_request(notary, origin).await { + for server_key in &server_keys { + self.add_signing_keys(server_key.clone()).await; + } + + for server_key in server_keys { + if let Some(result) = extract_key(server_key, key_id) { + return Ok(result); + } + } + } + } + + Err!(BadServerResponse(debug_error!( + ?key_id, + ?origin, + "Failed to fetch federation signing-key" + ))) +} diff --git a/src/service/server_keys/keypair.rs b/src/service/server_keys/keypair.rs new file mode 100644 index 00000000..31a24cdf --- /dev/null +++ b/src/service/server_keys/keypair.rs @@ -0,0 +1,64 @@ +use std::sync::Arc; + +use conduit::{debug, debug_info, err, error, utils, utils::string_from_bytes, Result}; +use database::Database; +use ruma::{api::federation::discovery::VerifyKey, serde::Base64, signatures::Ed25519KeyPair}; + +use super::VerifyKeys; + +pub(super) fn init(db: &Arc) -> Result<(Box, VerifyKeys)> { + let keypair = load(db).inspect_err(|_e| { + error!("Keypair invalid. 
Deleting..."); + remove(db); + })?; + + let verify_key = VerifyKey { + key: Base64::new(keypair.public_key().to_vec()), + }; + + let id = format!("ed25519:{}", keypair.version()); + let verify_keys: VerifyKeys = [(id.try_into()?, verify_key)].into(); + + Ok((keypair, verify_keys)) +} + +fn load(db: &Arc) -> Result> { + let (version, key) = db["global"] + .get_blocking(b"keypair") + .map(|ref val| { + // database deserializer is having trouble with this so it's manual for now + let mut elems = val.split(|&b| b == b'\xFF'); + let vlen = elems.next().expect("invalid keypair entry").len(); + let ver = string_from_bytes(&val[..vlen]).expect("invalid keypair version"); + let der = val[vlen.saturating_add(1)..].to_vec(); + debug!("Found existing Ed25519 keypair: {ver:?}"); + (ver, der) + }) + .or_else(|e| { + assert!(e.is_not_found(), "unexpected error fetching keypair"); + create(db) + })?; + + let key = + Ed25519KeyPair::from_der(&key, version).map_err(|e| err!("Failed to load ed25519 keypair from der: {e:?}"))?; + + Ok(Box::new(key)) +} + +fn create(db: &Arc) -> Result<(String, Vec)> { + let keypair = Ed25519KeyPair::generate().map_err(|e| err!("Failed to generate new ed25519 keypair: {e:?}"))?; + + let id = utils::rand::string(8); + debug_info!("Generated new Ed25519 keypair: {id:?}"); + + let value: (String, Vec) = (id, keypair.to_vec()); + db["global"].raw_put(b"keypair", &value); + + Ok(value) +} + +#[inline] +fn remove(db: &Arc) { + let global = &db["global"]; + global.remove(b"keypair"); +} diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index ae2b8c3c..c3b84cb3 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -1,31 +1,30 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - sync::Arc, - time::{Duration, SystemTime}, -}; +mod acquire; +mod get; +mod keypair; +mod request; +mod sign; +mod verify; -use conduit::{debug, debug_error, debug_warn, err, error, info, trace, warn, Err, Result}; -use 
futures::{stream::FuturesUnordered, StreamExt}; +use std::{collections::BTreeMap, sync::Arc, time::Duration}; + +use conduit::{implement, utils::time::timepoint_from_now, Result}; +use database::{Deserialized, Json, Map}; use ruma::{ - api::federation::{ - discovery::{ - get_remote_server_keys, - get_remote_server_keys_batch::{self, v2::QueryCriteria}, - get_server_keys, - }, - membership::create_join_event, - }, - serde::Base64, - CanonicalJsonObject, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedServerSigningKeyId, - RoomVersionId, ServerName, + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + serde::Raw, + signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet}, + MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; -use tokio::sync::{RwLock, RwLockWriteGuard}; use crate::{globals, sending, Dep}; pub struct Service { + keypair: Box, + verify_keys: VerifyKeys, + minimum_valid: Duration, services: Services, + db: Data, } struct Services { @@ -33,546 +32,135 @@ struct Services { sending: Dep, } +struct Data { + server_signingkeys: Arc, +} + +pub type VerifyKeys = BTreeMap; +pub type PubKeyMap = PublicKeyMap; +pub type PubKeys = PublicKeySet; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { + let minimum_valid = Duration::from_secs(3600); + let (keypair, verify_keys) = keypair::init(args.db)?; + Ok(Arc::new(Self { + keypair, + verify_keys, + minimum_valid, services: Services { globals: args.depend::("globals"), sending: args.depend::("sending"), }, + db: Data { + server_signingkeys: args.db["server_signingkeys"].clone(), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - pub async fn fetch_required_signing_keys<'a, E>( - &'a self, events: E, pub_key_map: &RwLock>>, - ) -> Result<()> - where - E: IntoIterator> + Send, - { - let mut server_key_ids = 
HashMap::new(); - for event in events { - for (signature_server, signature) in event - .get("signatures") - .ok_or(err!(BadServerResponse("No signatures in server response pdu.")))? - .as_object() - .ok_or(err!(BadServerResponse("Invalid signatures object in server response pdu.")))? - { - let signature_object = signature.as_object().ok_or(err!(BadServerResponse( - "Invalid signatures content object in server response pdu.", - )))?; +#[implement(Service)] +#[inline] +pub fn keypair(&self) -> &Ed25519KeyPair { &self.keypair } - for signature_id in signature_object.keys() { - server_key_ids - .entry(signature_server.clone()) - .or_insert_with(HashSet::new) - .insert(signature_id.clone()); - } - } - } +#[implement(Service)] +async fn add_signing_keys(&self, new_keys: ServerSigningKeys) { + let origin = &new_keys.server_name; - if server_key_ids.is_empty() { - // Nothing to do, can exit early - trace!("server_key_ids is empty, not fetching any keys"); - return Ok(()); - } + // (timo) Not atomic, but this is not critical + let mut keys: ServerSigningKeys = self + .db + .server_signingkeys + .get(origin) + .await + .deserialized() + .unwrap_or_else(|_| { + // Just insert "now", it doesn't matter + ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) + }); - trace!( - "Fetch keys for {}", - server_key_ids - .keys() - .cloned() - .collect::>() - .join(", ") - ); - - let mut server_keys: FuturesUnordered<_> = server_key_ids - .into_iter() - .map(|(signature_server, signature_ids)| async { - let fetch_res = self - .fetch_signing_keys_for_server( - signature_server.as_str().try_into().map_err(|e| { - ( - signature_server.clone(), - err!(BadServerResponse( - "Invalid servername in signatures of server response pdu: {e:?}" - )), - ) - })?, - signature_ids.into_iter().collect(), // HashSet to Vec - ) - .await; - - match fetch_res { - Ok(keys) => Ok((signature_server, keys)), - Err(e) => { - debug_error!( - "Signature verification failed: Could not fetch 
signing key for {signature_server}: {e}", - ); - Err((signature_server, e)) - }, - } - }) - .collect(); - - while let Some(fetch_res) = server_keys.next().await { - match fetch_res { - Ok((signature_server, keys)) => { - pub_key_map - .write() - .await - .insert(signature_server.clone(), keys); - }, - Err((signature_server, e)) => { - debug_warn!("Failed to fetch keys for {signature_server}: {e:?}"); - }, - } - } - - Ok(()) - } - - // Gets a list of servers for which we don't have the signing key yet. We go - // over the PDUs and either cache the key or add it to the list that needs to be - // retrieved. - async fn get_server_keys_from_cache( - &self, pdu: &RawJsonValue, - servers: &mut BTreeMap>, - _room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, - ) -> Result<()> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - debug_error!("Invalid PDU in server response: {pdu:#?}"); - err!(BadServerResponse(error!("Invalid PDU in server response: {e:?}"))) - })?; - - let signatures = value - .get("signatures") - .ok_or(err!(BadServerResponse("No signatures in server response pdu.")))? 
- .as_object() - .ok_or(err!(BadServerResponse("Invalid signatures object in server response pdu.")))?; - - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(err!(BadServerResponse( - "Invalid signatures content object in server response pdu.", - )))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|e| { - err!(BadServerResponse( - "Invalid servername in signatures of server response pdu: {e:?}" - )) - })?; - - if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { - continue; - } - - debug!("Loading signing keys for {origin}"); - let result: BTreeMap<_, _> = self - .services - .globals - .verify_keys_for(origin) - .await? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if !contains_all_ids(&result) { - debug_warn!("Signing key not loaded for {origin}"); - servers.insert(origin.to_owned(), BTreeMap::new()); - } - - pub_key_map.insert(origin.to_string(), result); - } - - Ok(()) - } - - /// Batch requests homeserver signing keys from trusted notary key servers - /// (`trusted_servers` config option) - async fn batch_request_signing_keys( - &self, mut servers: BTreeMap>, - pub_key_map: &RwLock>>, - ) -> Result<()> { - for server in self.services.globals.trusted_servers() { - debug!("Asking batch signing keys from trusted server {server}"); - match self - .services - .sending - .send_federation_request( - server, - get_remote_server_keys_batch::v2::Request { - server_keys: servers.clone(), - }, - ) - .await - { - Ok(keys) => { - debug!("Got signing keys: {keys:?}"); - let mut pkm = pub_key_map.write().await; - for k in keys.server_keys { - let k = match k.deserialize() { - Ok(key) => key, - Err(e) => { - warn!( - "Received error {e} while fetching keys from trusted server 
{server}: {:#?}", - k.into_json() - ); - continue; - }, - }; - - // TODO: Check signature from trusted server? - servers.remove(&k.server_name); - - let result = self - .services - .globals - .db - .add_signing_key(&k.server_name, k.clone()) - .await - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); - - pkm.insert(k.server_name.to_string(), result); - } - }, - Err(e) => error!( - "Failed sending batched key request to trusted key server {server} for the remote servers \ - {servers:?}: {e}" - ), - } - } - - Ok(()) - } - - /// Requests multiple homeserver signing keys from individual servers (not - /// trused notary servers) - async fn request_signing_keys( - &self, servers: BTreeMap>, - pub_key_map: &RwLock>>, - ) -> Result<()> { - debug!("Asking individual servers for signing keys: {servers:?}"); - let mut futures: FuturesUnordered<_> = servers - .into_keys() - .map(|server| async move { - ( - self.services - .sending - .send_federation_request(&server, get_server_keys::v2::Request::new()) - .await, - server, - ) - }) - .collect(); - - while let Some(result) = futures.next().await { - debug!("Received new Future result"); - if let (Ok(get_keys_response), origin) = result { - debug!("Result is from {origin}"); - if let Ok(key) = get_keys_response.server_key.deserialize() { - let result: BTreeMap<_, _> = self - .services - .globals - .db - .add_signing_key(&origin, key) - .await - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - pub_key_map.write().await.insert(origin.to_string(), result); - } - } - debug!("Done handling Future result"); - } - - Ok(()) - } - - pub async fn fetch_join_signing_keys( - &self, event: &create_join_event::v2::Response, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, - ) -> Result<()> { - let mut servers: BTreeMap> = BTreeMap::new(); - - { - let mut pkm = pub_key_map.write().await; - - // Try to fetch keys, failure is okay. 
Servers we couldn't find in the cache - // will be added to `servers` - for pdu in event - .room_state - .state - .iter() - .chain(&event.room_state.auth_chain) - { - if let Err(error) = self - .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm) - .await - { - debug!(%error, "failed to get server keys from cache"); - }; - } - - drop(pkm); - }; - - if servers.is_empty() { - trace!("We had all keys cached locally, not fetching any keys from remote servers"); - return Ok(()); - } - - if self.services.globals.query_trusted_key_servers_first() { - info!( - "query_trusted_key_servers_first is set to true, querying notary trusted key servers first for \ - homeserver signing keys." - ); - - self.batch_request_signing_keys(servers.clone(), pub_key_map) - .await?; - - if servers.is_empty() { - debug!("Trusted server supplied all signing keys, no more keys to fetch"); - return Ok(()); - } - - debug!("Remaining servers left that the notary/trusted servers did not provide: {servers:?}"); - - self.request_signing_keys(servers.clone(), pub_key_map) - .await?; - } else { - debug!("query_trusted_key_servers_first is set to false, querying individual homeservers first"); - - self.request_signing_keys(servers.clone(), pub_key_map) - .await?; - - if servers.is_empty() { - debug!("Individual homeservers supplied all signing keys, no more keys to fetch"); - return Ok(()); - } - - debug!("Remaining servers left the individual homeservers did not provide: {servers:?}"); - - self.batch_request_signing_keys(servers.clone(), pub_key_map) - .await?; - } - - debug!("Search for signing keys done"); - - /*if servers.is_empty() { - warn!("Failed to find homeserver signing keys for the remaining servers: {servers:?}"); - }*/ - - Ok(()) - } - - /// Search the DB for the signing keys of the given server, if we don't have - /// them fetch them from the server and save to our DB. 
- #[tracing::instrument(skip_all)] - pub async fn fetch_signing_keys_for_server( - &self, origin: &ServerName, signature_ids: Vec, - ) -> Result> { - let contains_all_ids = |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let mut result: BTreeMap<_, _> = self - .services - .globals - .verify_keys_for(origin) - .await? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if contains_all_ids(&result) { - trace!("We have all homeserver signing keys locally for {origin}, not fetching any remotely"); - return Ok(result); - } - - // i didnt split this out into their own functions because it's relatively small - if self.services.globals.query_trusted_key_servers_first() { - info!( - "query_trusted_key_servers_first is set to true, querying notary trusted servers first for {origin} \ - keys" - ); - - for server in self.services.globals.trusted_servers() { - debug!("Asking notary server {server} for {origin}'s signing key"); - if let Some(server_keys) = self - .services - .sending - .send_federation_request( - server, - get_remote_server_keys::v2::Request::new( - origin.to_owned(), - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime too large"), - ) - .expect("time is valid"), - ), - ) - .await - .ok() - .map(|resp| { - resp.server_keys - .into_iter() - .filter_map(|e| e.deserialize().ok()) - .collect::>() - }) { - debug!("Got signing keys: {:?}", server_keys); - for k in server_keys { - self.services - .globals - .db - .add_signing_key(origin, k.clone()) - .await; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - - if contains_all_ids(&result) { - return Ok(result); - } - } - } - - debug!("Asking {origin} for their signing keys over federation"); - if let Some(server_key) = self - .services - .sending - 
.send_federation_request(origin, get_server_keys::v2::Request::new()) - .await - .ok() - .and_then(|resp| resp.server_key.deserialize().ok()) - { - self.services - .globals - .db - .add_signing_key(origin, server_key.clone()) - .await; - - result.extend( - server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - - if contains_all_ids(&result) { - return Ok(result); - } - } - } else { - info!("query_trusted_key_servers_first is set to false, querying {origin} first"); - debug!("Asking {origin} for their signing keys over federation"); - if let Some(server_key) = self - .services - .sending - .send_federation_request(origin, get_server_keys::v2::Request::new()) - .await - .ok() - .and_then(|resp| resp.server_key.deserialize().ok()) - { - self.services - .globals - .db - .add_signing_key(origin, server_key.clone()) - .await; - - result.extend( - server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - - if contains_all_ids(&result) { - return Ok(result); - } - } - - for server in self.services.globals.trusted_servers() { - debug!("Asking notary server {server} for {origin}'s signing key"); - if let Some(server_keys) = self - .services - .sending - .send_federation_request( - server, - get_remote_server_keys::v2::Request::new( - origin.to_owned(), - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime too large"), - ) - .expect("time is valid"), - ), - ) - .await - .ok() - .map(|resp| { - resp.server_keys - .into_iter() - .filter_map(|e| e.deserialize().ok()) - .collect::>() - }) { - debug!("Got signing keys: {server_keys:?}"); - for k in server_keys { - self.services - .globals - .db - .add_signing_key(origin, 
k.clone()) - .await; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - - if contains_all_ids(&result) { - return Ok(result); - } - } - } - } - - Err!(BadServerResponse(warn!("Failed to find public key for server {origin:?}"))) - } + keys.verify_keys.extend(new_keys.verify_keys); + keys.old_verify_keys.extend(new_keys.old_verify_keys); + self.db.server_signingkeys.raw_put(origin, Json(&keys)); +} + +#[implement(Service)] +async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool { + type KeysMap<'a> = BTreeMap<&'a ServerSigningKeyId, &'a RawJsonValue>; + + let Ok(keys) = self + .db + .server_signingkeys + .get(origin) + .await + .deserialized::>() + else { + return false; + }; + + if let Ok(Some(verify_keys)) = keys.get_field::>("verify_keys") { + if verify_keys.contains_key(key_id) { + return true; + } + } + + if let Ok(Some(old_verify_keys)) = keys.get_field::>("old_verify_keys") { + if old_verify_keys.contains_key(key_id) { + return true; + } + } + + false +} + +#[implement(Service)] +pub async fn verify_keys_for(&self, origin: &ServerName) -> VerifyKeys { + let mut keys = self + .signing_keys_for(origin) + .await + .map(|keys| merge_old_keys(keys).verify_keys) + .unwrap_or(BTreeMap::new()); + + if self.services.globals.server_is_ours(origin) { + keys.extend(self.verify_keys.clone().into_iter()); + } + + keys +} + +#[implement(Service)] +pub async fn signing_keys_for(&self, origin: &ServerName) -> Result { + self.db.server_signingkeys.get(origin).await.deserialized() +} + +#[implement(Service)] +fn minimum_valid_ts(&self) -> MilliSecondsSinceUnixEpoch { + let timepoint = timepoint_from_now(self.minimum_valid).expect("SystemTime should not overflow"); + MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow") +} + +fn merge_old_keys(mut keys: 
ServerSigningKeys) -> ServerSigningKeys { + keys.verify_keys.extend( + keys.old_verify_keys + .clone() + .into_iter() + .map(|(key_id, old)| (key_id, VerifyKey::new(old.key))), + ); + + keys +} + +fn extract_key(mut keys: ServerSigningKeys, key_id: &ServerSigningKeyId) -> Option { + keys.verify_keys.remove(key_id).or_else(|| { + keys.old_verify_keys + .remove(key_id) + .map(|old| VerifyKey::new(old.key)) + }) +} + +fn key_exists(keys: &ServerSigningKeys, key_id: &ServerSigningKeyId) -> bool { + keys.verify_keys.contains_key(key_id) || keys.old_verify_keys.contains_key(key_id) } diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs new file mode 100644 index 00000000..84dd2871 --- /dev/null +++ b/src/service/server_keys/request.rs @@ -0,0 +1,97 @@ +use std::collections::BTreeMap; + +use conduit::{implement, Err, Result}; +use ruma::{ + api::federation::discovery::{ + get_remote_server_keys, + get_remote_server_keys_batch::{self, v2::QueryCriteria}, + get_server_keys, ServerSigningKeys, + }, + OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, +}; + +#[implement(super::Service)] +pub(super) async fn batch_notary_request<'a, S, K>( + &self, notary: &ServerName, batch: S, +) -> Result> +where + S: Iterator + Send, + K: Iterator + Send, +{ + use get_remote_server_keys_batch::v2::Request; + type RumaBatch = BTreeMap>; + + let criteria = QueryCriteria { + minimum_valid_until_ts: Some(self.minimum_valid_ts()), + }; + + let mut server_keys = RumaBatch::new(); + for (server, key_ids) in batch { + let entry = server_keys.entry(server.into()).or_default(); + for key_id in key_ids { + entry.insert(key_id.into(), criteria.clone()); + } + } + + debug_assert!(!server_keys.is_empty(), "empty batch request to notary"); + let request = Request { + server_keys, + }; + + self.services + .sending + .send_federation_request(notary, request) + .await + .map(|response| response.server_keys) + .map(|keys| { + keys.into_iter() + 
.map(|key| key.deserialize()) + .filter_map(Result::ok) + .collect() + }) +} + +#[implement(super::Service)] +pub async fn notary_request(&self, notary: &ServerName, target: &ServerName) -> Result> { + use get_remote_server_keys::v2::Request; + + let request = Request { + server_name: target.into(), + minimum_valid_until_ts: self.minimum_valid_ts(), + }; + + self.services + .sending + .send_federation_request(notary, request) + .await + .map(|response| response.server_keys) + .map(|keys| { + keys.into_iter() + .map(|key| key.deserialize()) + .filter_map(Result::ok) + .collect() + }) +} + +#[implement(super::Service)] +pub async fn server_request(&self, target: &ServerName) -> Result { + use get_server_keys::v2::Request; + + let server_signing_key = self + .services + .sending + .send_federation_request(target, Request::new()) + .await + .map(|response| response.server_key) + .and_then(|key| key.deserialize().map_err(Into::into))?; + + if server_signing_key.server_name != target { + return Err!(BadServerResponse(debug_warn!( + requested = ?target, + response = ?server_signing_key.server_name, + "Server responded with bogus server_name" + ))); + } + + Ok(server_signing_key) +} diff --git a/src/service/server_keys/sign.rs b/src/service/server_keys/sign.rs new file mode 100644 index 00000000..28fd7e80 --- /dev/null +++ b/src/service/server_keys/sign.rs @@ -0,0 +1,18 @@ +use conduit::{implement, Result}; +use ruma::{CanonicalJsonObject, RoomVersionId}; + +#[implement(super::Service)] +pub fn sign_json(&self, object: &mut CanonicalJsonObject) -> Result { + use ruma::signatures::sign_json; + + let server_name = self.services.globals.server_name().as_str(); + sign_json(server_name, self.keypair(), object).map_err(Into::into) +} + +#[implement(super::Service)] +pub fn hash_and_sign_event(&self, object: &mut CanonicalJsonObject, room_version: &RoomVersionId) -> Result { + use ruma::signatures::hash_and_sign_event; + + let server_name = 
self.services.globals.server_name().as_str(); + hash_and_sign_event(server_name, self.keypair(), object, room_version).map_err(Into::into) +} diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs new file mode 100644 index 00000000..ad20fec7 --- /dev/null +++ b/src/service/server_keys/verify.rs @@ -0,0 +1,33 @@ +use conduit::{implement, pdu::gen_event_id_canonical_json, Err, Result}; +use ruma::{signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId}; +use serde_json::value::RawValue as RawJsonValue; + +#[implement(super::Service)] +pub async fn validate_and_add_event_id( + &self, pdu: &RawJsonValue, room_version: &RoomVersionId, +) -> Result<(OwnedEventId, CanonicalJsonObject)> { + let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?; + if let Err(e) = self.verify_event(&value, Some(room_version)).await { + return Err!(BadServerResponse(debug_error!("Event {event_id} failed verification: {e:?}"))); + } + + value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into())); + + Ok((event_id, value)) +} + +#[implement(super::Service)] +pub async fn verify_event( + &self, event: &CanonicalJsonObject, room_version: Option<&RoomVersionId>, +) -> Result { + let room_version = room_version.unwrap_or(&RoomVersionId::V11); + let keys = self.get_event_keys(event, room_version).await?; + ruma::signatures::verify_event(&keys, event, room_version).map_err(Into::into) +} + +#[implement(super::Service)] +pub async fn verify_json(&self, event: &CanonicalJsonObject, room_version: Option<&RoomVersionId>) -> Result { + let room_version = room_version.unwrap_or(&RoomVersionId::V11); + let keys = self.get_event_keys(event, room_version).await?; + ruma::signatures::verify_json(&keys, event.clone()).map_err(Into::into) +} From b4ec1e9d3cbc58f68c3733061c11c55700ff3018 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 14 Oct 2024 03:58:25 +0000 Subject: [PATCH 0084/1248] add 
federation client for select high-timeout requests Signed-off-by: Jason Volk --- src/api/client/membership.rs | 4 ++-- src/core/config/mod.rs | 2 +- src/service/client/mod.rs | 9 ++++++++- src/service/rooms/event_handler/mod.rs | 2 +- src/service/sending/mod.rs | 11 +++++++++++ 5 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 2fa34ff7..31fd9076 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -775,7 +775,7 @@ async fn join_room_by_id_helper_remote( let send_join_response = services .sending - .send_federation_request(&remote_server, send_join_request) + .send_synapse_request(&remote_server, send_join_request) .await?; info!("send_join finished"); @@ -1154,7 +1154,7 @@ async fn join_room_by_id_helper_local( let send_join_response = services .sending - .send_federation_request( + .send_synapse_request( &remote_server, federation::membership::create_join_event::v2::Request { room_id: room_id.to_owned(), diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 114c6e76..02b277d0 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1775,7 +1775,7 @@ fn default_well_known_conn_timeout() -> u64 { 6 } fn default_well_known_timeout() -> u64 { 10 } -fn default_federation_timeout() -> u64 { 300 } +fn default_federation_timeout() -> u64 { 25 } fn default_federation_idle_timeout() -> u64 { 25 } diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index b21f9dab..f9a89e99 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -11,6 +11,7 @@ pub struct Service { pub extern_media: reqwest::Client, pub well_known: reqwest::Client, pub federation: reqwest::Client, + pub synapse: reqwest::Client, pub sender: reqwest::Client, pub appservice: reqwest::Client, pub pusher: reqwest::Client, @@ -48,12 +49,18 @@ impl crate::Service for Service { federation: base(config)? 
.dns_resolver(resolver.resolver.hooked.clone()) .read_timeout(Duration::from_secs(config.federation_timeout)) - .timeout(Duration::from_secs(config.federation_timeout)) .pool_max_idle_per_host(config.federation_idle_per_host.into()) .pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout)) .redirect(redirect::Policy::limited(3)) .build()?, + synapse: base(config)? + .dns_resolver(resolver.resolver.hooked.clone()) + .read_timeout(Duration::from_secs(305)) + .pool_max_idle_per_host(0) + .redirect(redirect::Policy::limited(3)) + .build()?, + sender: base(config)? .dns_resolver(resolver.resolver.hooked.clone()) .read_timeout(Duration::from_secs(config.sender_timeout)) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 8448404b..0ffd9659 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1016,7 +1016,7 @@ impl Service { let res = self .services .sending - .send_federation_request( + .send_synapse_request( origin, get_room_state_ids::v1::Request { room_id: room_id.to_owned(), diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 5970c383..63c5e655 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -245,6 +245,7 @@ impl Service { .await } + /// Sends a request to a federation server #[tracing::instrument(skip_all, name = "request")] pub async fn send_federation_request(&self, dest: &ServerName, request: T) -> Result where @@ -254,6 +255,16 @@ impl Service { self.send(client, dest, request).await } + /// Like send_federation_request() but with a very large timeout + #[tracing::instrument(skip_all, name = "synapse")] + pub async fn send_synapse_request(&self, dest: &ServerName, request: T) -> Result + where + T: OutgoingRequest + Debug + Send, + { + let client = &self.services.client.synapse; + self.send(client, dest, request).await + } + /// Sends a request to an appservice /// /// Only returns None if there is no url 
specified in the appservice From d0ee4b6d253079ceef7ee9094db9e3f70f1ed048 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 14 Oct 2024 01:01:12 +0000 Subject: [PATCH 0085/1248] add resolve_with_servers() to alias service; simplify api Signed-off-by: Jason Volk --- src/admin/user/commands.rs | 9 ++- src/api/client/alias.rs | 16 ++--- src/api/client/membership.rs | 66 +++++++++-------- src/service/rooms/alias/mod.rs | 47 +++++++----- src/service/rooms/alias/remote.rs | 116 ++++++++++++++---------------- 5 files changed, 131 insertions(+), 123 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index df393833..fb6d2bf1 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -381,13 +381,18 @@ pub(super) async fn force_join_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, ) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; - let room_id = self.services.rooms.alias.resolve(&room_id).await?; + let (room_id, servers) = self + .services + .rooms + .alias + .resolve_with_servers(&room_id, None) + .await?; assert!( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); - join_room_by_id_helper(self.services, &user_id, &room_id, None, &[], None, &None).await?; + join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None).await?; Ok(RoomMessageEventContent::notice_markdown(format!( "{user_id} has been joined to {room_id}.", diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 2399a355..83f3291d 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -86,25 +86,19 @@ pub(crate) async fn get_alias_route( State(services): State, body: Ruma, ) -> Result { let room_alias = body.body.room_alias; - let servers = None; - let Ok((room_id, pre_servers)) = services - .rooms - .alias - .resolve_alias(&room_alias, servers.as_ref()) - .await - else { + let Ok((room_id, servers)) = 
services.rooms.alias.resolve_alias(&room_alias, None).await else { return Err!(Request(NotFound("Room with alias not found."))); }; - let servers = room_available_servers(&services, &room_id, &room_alias, &pre_servers).await; + let servers = room_available_servers(&services, &room_id, &room_alias, servers).await; debug!(?room_alias, ?room_id, "available servers: {servers:?}"); Ok(get_alias::v3::Response::new(room_id, servers)) } async fn room_available_servers( - services: &Services, room_id: &RoomId, room_alias: &RoomAliasId, pre_servers: &Option>, + services: &Services, room_id: &RoomId, room_alias: &RoomAliasId, pre_servers: Vec, ) -> Vec { // find active servers in room state cache to suggest let mut servers: Vec = services @@ -117,9 +111,7 @@ async fn room_available_servers( // push any servers we want in the list already (e.g. responded remote alias // servers, room alias server itself) - if let Some(pre_servers) = pre_servers { - servers.extend(pre_servers.clone()); - }; + servers.extend(pre_servers); servers.sort_unstable(); servers.dedup(); diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 31fd9076..27de60c6 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,8 +9,9 @@ use axum_client_ip::InsecureClientIp; use conduit::{ debug, debug_info, debug_warn, err, error, info, pdu, pdu::{gen_event_id_canonical_json, PduBuilder}, + result::FlatOk, trace, utils, - utils::{IterStream, ReadyExt}, + utils::{shuffle, IterStream, ReadyExt}, warn, Err, Error, PduEvent, Result, }; use futures::{FutureExt, StreamExt}; @@ -188,6 +189,10 @@ pub(crate) async fn join_room_by_id_route( servers.push(server.into()); } + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + join_room_by_id_helper( &services, sender_user, @@ -251,45 +256,48 @@ pub(crate) async fn join_room_by_id_or_alias_route( servers.push(server.to_owned()); } + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + 
(servers, room_id) }, Err(room_alias) => { - let response = services + let (room_id, mut servers) = services .rooms .alias - .resolve_alias(&room_alias, Some(&body.via.clone())) + .resolve_alias(&room_alias, Some(body.via.clone())) .await?; - let (room_id, mut pre_servers) = response; banned_room_check(&services, sender_user, Some(&room_id), Some(room_alias.server_name()), client).await?; - let mut servers = body.via; - if let Some(pre_servers) = &mut pre_servers { - servers.append(pre_servers); - } + let addl_via_servers = services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned); - servers.extend( - services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); + let addl_state_servers = services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default(); - servers.extend( - services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default() - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); + let mut addl_servers: Vec<_> = addl_state_servers + .iter() + .map(|event| event.get_field("sender")) + .filter_map(FlatOk::flat_ok) + .map(|user: &UserId| user.server_name().to_owned()) + .stream() + .chain(addl_via_servers) + .collect() + .await; + + addl_servers.sort_unstable(); + addl_servers.dedup(); + shuffle(&mut addl_servers); + servers.append(&mut addl_servers); (servers, room_id) }, diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 3f944729..0cdec8ee 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -112,40 +112,51 @@ impl Service { Ok(()) } + #[inline] pub async fn resolve(&self, room: &RoomOrAliasId) -> Result { + self.resolve_with_servers(room, None) + .await + .map(|(room_id, _)| room_id) + } + + pub 
async fn resolve_with_servers( + &self, room: &RoomOrAliasId, servers: Option>, + ) -> Result<(OwnedRoomId, Vec)> { if room.is_room_id() { - let room_id: &RoomId = &RoomId::parse(room).expect("valid RoomId"); - Ok(room_id.to_owned()) + let room_id = RoomId::parse(room).expect("valid RoomId"); + Ok((room_id, servers.unwrap_or_default())) } else { - let alias: &RoomAliasId = &RoomAliasId::parse(room).expect("valid RoomAliasId"); - Ok(self.resolve_alias(alias, None).await?.0) + let alias = &RoomAliasId::parse(room).expect("valid RoomAliasId"); + self.resolve_alias(alias, servers).await } } #[tracing::instrument(skip(self), name = "resolve")] pub async fn resolve_alias( - &self, room_alias: &RoomAliasId, servers: Option<&Vec>, - ) -> Result<(OwnedRoomId, Option>)> { - if !self - .services - .globals - .server_is_ours(room_alias.server_name()) - && (!servers + &self, room_alias: &RoomAliasId, servers: Option>, + ) -> Result<(OwnedRoomId, Vec)> { + let server_name = room_alias.server_name(); + let server_is_ours = self.services.globals.server_is_ours(server_name); + let servers_contains_ours = || { + servers .as_ref() - .is_some_and(|servers| servers.contains(&self.services.globals.server_name().to_owned())) - || servers.as_ref().is_none()) - { - return self.remote_resolve(room_alias, servers).await; + .is_some_and(|servers| servers.contains(&self.services.globals.config.server_name)) + }; + + if !server_is_ours && !servers_contains_ours() { + return self + .remote_resolve(room_alias, servers.unwrap_or_default()) + .await; } - let room_id: Option = match self.resolve_local_alias(room_alias).await { + let room_id = match self.resolve_local_alias(room_alias).await { Ok(r) => Some(r), Err(_) => self.resolve_appservice_alias(room_alias).await?, }; room_id.map_or_else( - || Err(Error::BadRequest(ErrorKind::NotFound, "Room with alias not found.")), - |room_id| Ok((room_id, None)), + || Err!(Request(NotFound("Room with alias not found."))), + |room_id| Ok((room_id, 
Vec::new())), ) } diff --git a/src/service/rooms/alias/remote.rs b/src/service/rooms/alias/remote.rs index 5d835240..d9acccc9 100644 --- a/src/service/rooms/alias/remote.rs +++ b/src/service/rooms/alias/remote.rs @@ -1,75 +1,67 @@ -use conduit::{debug, debug_warn, Error, Result}; -use ruma::{ - api::{client::error::ErrorKind, federation}, - OwnedRoomId, OwnedServerName, RoomAliasId, -}; +use std::iter::once; -impl super::Service { - pub(super) async fn remote_resolve( - &self, room_alias: &RoomAliasId, servers: Option<&Vec>, - ) -> Result<(OwnedRoomId, Option>)> { - debug!(?room_alias, ?servers, "resolve"); +use conduit::{debug, debug_error, err, implement, Result}; +use federation::query::get_room_information::v1::Response; +use ruma::{api::federation, OwnedRoomId, OwnedServerName, RoomAliasId, ServerName}; - let mut response = self - .services - .sending - .send_federation_request( - room_alias.server_name(), - federation::query::get_room_information::v1::Request { - room_alias: room_alias.to_owned(), - }, - ) - .await; +#[implement(super::Service)] +pub(super) async fn remote_resolve( + &self, room_alias: &RoomAliasId, servers: Vec, +) -> Result<(OwnedRoomId, Vec)> { + debug!(?room_alias, servers = ?servers, "resolve"); + let servers = once(room_alias.server_name()) + .map(ToOwned::to_owned) + .chain(servers.into_iter()); - debug!("room alias server_name get_alias_helper response: {response:?}"); + let mut resolved_servers = Vec::new(); + let mut resolved_room_id: Option = None; + for server in servers { + match self.remote_request(room_alias, &server).await { + Err(e) => debug_error!("Failed to query for {room_alias:?} from {server}: {e}"), + Ok(Response { + room_id, + servers, + }) => { + debug!("Server {server} answered with {room_id:?} for {room_alias:?} servers: {servers:?}"); - if let Err(ref e) = response { - debug_warn!( - "Server {} of the original room alias failed to assist in resolving room alias: {e}", - room_alias.server_name(), - ); - } + 
resolved_room_id.get_or_insert(room_id); + add_server(&mut resolved_servers, server); - if response.as_ref().is_ok_and(|resp| resp.servers.is_empty()) || response.as_ref().is_err() { - if let Some(servers) = servers { - for server in servers { - response = self - .services - .sending - .send_federation_request( - server, - federation::query::get_room_information::v1::Request { - room_alias: room_alias.to_owned(), - }, - ) - .await; - debug!("Got response from server {server} for room aliases: {response:?}"); - - if let Ok(ref response) = response { - if !response.servers.is_empty() { - break; - } - debug_warn!( - "Server {server} responded with room aliases, but was empty? Response: {response:?}" - ); - } + if !servers.is_empty() { + add_servers(&mut resolved_servers, servers); + break; } - } + }, } + } - if let Ok(response) = response { - let room_id = response.room_id; + resolved_room_id + .map(|room_id| (room_id, resolved_servers)) + .ok_or_else(|| err!(Request(NotFound("No servers could assist in resolving the room alias")))) +} - let mut pre_servers = response.servers; - // since the room alis server responded, insert it into the list - pre_servers.push(room_alias.server_name().into()); +#[implement(super::Service)] +async fn remote_request(&self, room_alias: &RoomAliasId, server: &ServerName) -> Result { + use federation::query::get_room_information::v1::Request; - return Ok((room_id, Some(pre_servers))); - } + let request = Request { + room_alias: room_alias.to_owned(), + }; - Err(Error::BadRequest( - ErrorKind::NotFound, - "No servers could assist in resolving the room alias", - )) + self.services + .sending + .send_federation_request(server, request) + .await +} + +fn add_servers(servers: &mut Vec, new: Vec) { + for server in new { + add_server(servers, server); + } +} + +fn add_server(servers: &mut Vec, server: OwnedServerName) { + if !servers.contains(&server) { + servers.push(server); } } From ed5b5d7877996f0ca4862ee3b08cfebd35744904 Mon Sep 17 00:00:00 
2001 From: Jason Volk Date: Tue, 15 Oct 2024 09:34:43 +0000 Subject: [PATCH 0086/1248] merge rooms state service and data Signed-off-by: Jason Volk --- src/service/rooms/state/data.rs | 65 ----------------------------- src/service/rooms/state/mod.rs | 73 +++++++++++++++++++++++---------- 2 files changed, 52 insertions(+), 86 deletions(-) delete mode 100644 src/service/rooms/state/data.rs diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs deleted file mode 100644 index 813f48ae..00000000 --- a/src/service/rooms/state/data.rs +++ /dev/null @@ -1,65 +0,0 @@ -use std::sync::Arc; - -use conduit::{ - utils::{stream::TryIgnore, ReadyExt}, - Result, -}; -use database::{Database, Deserialized, Interfix, Map}; -use ruma::{OwnedEventId, RoomId}; - -use super::RoomMutexGuard; - -pub(super) struct Data { - shorteventid_shortstatehash: Arc, - roomid_shortstatehash: Arc, - pub(super) roomid_pduleaves: Arc, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - shorteventid_shortstatehash: db["shorteventid_shortstatehash"].clone(), - roomid_shortstatehash: db["roomid_shortstatehash"].clone(), - roomid_pduleaves: db["roomid_pduleaves"].clone(), - } - } - - pub(super) async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result { - self.roomid_shortstatehash.get(room_id).await.deserialized() - } - - #[inline] - pub(super) fn set_room_state( - &self, - room_id: &RoomId, - new_shortstatehash: u64, - _mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) { - self.roomid_shortstatehash - .raw_put(room_id, new_shortstatehash); - } - - pub(super) fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) { - self.shorteventid_shortstatehash - .put(shorteventid, shortstatehash); - } - - pub(super) async fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: Vec, - _mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) { - let prefix = 
(room_id, Interfix); - self.roomid_pduleaves - .keys_prefix_raw(&prefix) - .ignore_err() - .ready_for_each(|key| self.roomid_pduleaves.remove(key)) - .await; - - for event_id in &event_ids { - let key = (room_id, event_id); - self.roomid_pduleaves.put_raw(key, event_id); - } - } -} diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index cfcb2da6..6abaa198 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,5 +1,3 @@ -mod data; - use std::{ collections::{HashMap, HashSet}, fmt::Write, @@ -10,11 +8,10 @@ use std::{ use conduit::{ err, result::FlatOk, - utils::{calculate_hash, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard}, + utils::{calculate_hash, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, warn, PduEvent, Result, }; -use data::Data; -use database::{Ignore, Interfix}; +use database::{Deserialized, Ignore, Interfix, Map}; use futures::{future::join_all, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ events::{ @@ -30,9 +27,9 @@ use super::state_compressor::CompressedStateEvent; use crate::{globals, rooms, Dep}; pub struct Service { + pub mutex: RoomMutexMap, services: Services, db: Data, - pub mutex: RoomMutexMap, } struct Services { @@ -45,12 +42,19 @@ struct Services { timeline: Dep, } +struct Data { + shorteventid_shortstatehash: Arc, + roomid_shortstatehash: Arc, + roomid_pduleaves: Arc, +} + type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { + mutex: RoomMutexMap::new(), services: Services { globals: args.depend::("globals"), short: args.depend::("rooms::short"), @@ -60,12 +64,15 @@ impl crate::Service for Service { state_compressor: args.depend::("rooms::state_compressor"), timeline: args.depend::("rooms::timeline"), }, - db: Data::new(args.db), - mutex: RoomMutexMap::new(), + db: Data { + shorteventid_shortstatehash: 
args.db["shorteventid_shortstatehash"].clone(), + roomid_shortstatehash: args.db["roomid_shortstatehash"].clone(), + roomid_pduleaves: args.db["roomid_pduleaves"].clone(), + }, })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + fn memory_usage(&self, out: &mut dyn Write) -> Result { let mutex = self.mutex.len(); writeln!(out, "state_mutex: {mutex}")?; @@ -84,7 +91,7 @@ impl Service { statediffnew: Arc>, _statediffremoved: Arc>, state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) -> Result { let event_ids = statediffnew.iter().stream().filter_map(|new| { self.services .state_compressor @@ -127,7 +134,7 @@ impl Service { self.services.state_cache.update_joined_count(room_id).await; - self.db.set_room_state(room_id, shortstatehash, state_lock); + self.set_room_state(room_id, shortstatehash, state_lock); Ok(()) } @@ -140,13 +147,15 @@ impl Service { pub async fn set_event_state( &self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: Arc>, ) -> Result { + const BUFSIZE: usize = size_of::(); + let shorteventid = self .services .short .get_or_create_shorteventid(event_id) .await; - let previous_shortstatehash = self.db.get_room_shortstatehash(room_id).await; + let previous_shortstatehash = self.get_room_shortstatehash(room_id).await; let state_hash = calculate_hash( &state_ids_compressed @@ -196,7 +205,9 @@ impl Service { )?; } - self.db.set_event_state(shorteventid, shortstatehash); + self.db + .shorteventid_shortstatehash + .aput::(shorteventid, shortstatehash); Ok(shortstatehash) } @@ -207,6 +218,8 @@ impl Service { /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
#[tracing::instrument(skip(self, new_pdu), level = "debug")] pub async fn append_to_state(&self, new_pdu: &PduEvent) -> Result { + const BUFSIZE: usize = size_of::(); + let shorteventid = self .services .short @@ -216,7 +229,9 @@ impl Service { let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id).await; if let Ok(p) = previous_shortstatehash { - self.db.set_event_state(shorteventid, p); + self.db + .shorteventid_shortstatehash + .aput::(shorteventid, p); } if let Some(state_key) = &new_pdu.state_key { @@ -306,14 +321,18 @@ impl Service { } /// Set the state hash to a new version, but does not update state_cache. - #[tracing::instrument(skip(self, mutex_lock), level = "debug")] + #[tracing::instrument(skip(self, _mutex_lock), level = "debug")] pub fn set_room_state( &self, room_id: &RoomId, shortstatehash: u64, - mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + _mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex ) { - self.db.set_room_state(room_id, shortstatehash, mutex_lock); + const BUFSIZE: usize = size_of::(); + + self.db + .roomid_shortstatehash + .raw_aput::(room_id, shortstatehash); } /// Returns the room's version. 
@@ -327,9 +346,12 @@ impl Service { .map_err(|e| err!(Request(NotFound("No create event found: {e:?}")))) } - #[inline] pub async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result { - self.db.get_room_shortstatehash(room_id).await + self.db + .roomid_shortstatehash + .get(room_id) + .await + .deserialized() } pub fn get_forward_extremities<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + '_ { @@ -346,11 +368,20 @@ impl Service { &self, room_id: &RoomId, event_ids: Vec, - state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + _state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex ) { + let prefix = (room_id, Interfix); self.db - .set_forward_extremities(room_id, event_ids, state_lock) + .roomid_pduleaves + .keys_prefix_raw(&prefix) + .ignore_err() + .ready_for_each(|key| self.db.roomid_pduleaves.remove(key)) .await; + + for event_id in &event_ids { + let key = (room_id, event_id); + self.db.roomid_pduleaves.put_raw(key, event_id); + } } /// This fetches auth events from the current state. 
From 4576313a7c4a2f89fc2c2b1f04ad739dbb546b0a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 15 Oct 2024 09:54:20 +0000 Subject: [PATCH 0087/1248] merge rooms user service and data Signed-off-by: Jason Volk --- src/service/rooms/user/data.rs | 108 --------------------- src/service/rooms/user/mod.rs | 168 +++++++++++++++++++++++---------- 2 files changed, 118 insertions(+), 158 deletions(-) delete mode 100644 src/service/rooms/user/data.rs diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs deleted file mode 100644 index 96b009f8..00000000 --- a/src/service/rooms/user/data.rs +++ /dev/null @@ -1,108 +0,0 @@ -use std::sync::Arc; - -use conduit::Result; -use database::{Deserialized, Map}; -use futures::{Stream, StreamExt}; -use ruma::{RoomId, UserId}; - -use crate::{globals, rooms, Dep}; - -pub(super) struct Data { - userroomid_notificationcount: Arc, - userroomid_highlightcount: Arc, - roomuserid_lastnotificationread: Arc, - roomsynctoken_shortstatehash: Arc, - services: Services, -} - -struct Services { - globals: Dep, - short: Dep, - state_cache: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - userroomid_notificationcount: db["userroomid_notificationcount"].clone(), - userroomid_highlightcount: db["userroomid_highlightcount"].clone(), - roomuserid_lastnotificationread: db["userroomid_highlightcount"].clone(), //< NOTE: known bug from conduit - roomsynctoken_shortstatehash: db["roomsynctoken_shortstatehash"].clone(), - services: Services { - globals: args.depend::("globals"), - short: args.depend::("rooms::short"), - state_cache: args.depend::("rooms::state_cache"), - }, - } - } - - pub(super) fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) { - let userroom_id = (user_id, room_id); - self.userroomid_highlightcount.put(userroom_id, 0_u64); - self.userroomid_notificationcount.put(userroom_id, 0_u64); - - let roomuser_id = (room_id, user_id); - let count 
= self.services.globals.next_count().unwrap(); - self.roomuserid_lastnotificationread.put(roomuser_id, count); - } - - pub(super) async fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - let key = (user_id, room_id); - self.userroomid_notificationcount - .qry(&key) - .await - .deserialized() - .unwrap_or(0) - } - - pub(super) async fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - let key = (user_id, room_id); - self.userroomid_highlightcount - .qry(&key) - .await - .deserialized() - .unwrap_or(0) - } - - pub(super) async fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - let key = (room_id, user_id); - self.roomuserid_lastnotificationread - .qry(&key) - .await - .deserialized() - .unwrap_or(0) - } - - pub(super) async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) { - let shortroomid = self - .services - .short - .get_shortroomid(room_id) - .await - .expect("room exists"); - - let key: &[u64] = &[shortroomid, token]; - self.roomsynctoken_shortstatehash.put(key, shortstatehash); - } - - pub(super) async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { - let shortroomid = self.services.short.get_shortroomid(room_id).await?; - - let key: &[u64] = &[shortroomid, token]; - self.roomsynctoken_shortstatehash - .qry(key) - .await - .deserialized() - } - - //TODO: optimize; replace point-queries with dual iteration - pub(super) fn get_shared_rooms<'a>( - &'a self, user_a: &'a UserId, user_b: &'a UserId, - ) -> impl Stream + Send + 'a { - self.services - .state_cache - .rooms_joined(user_a) - .filter(|room_id| self.services.state_cache.is_joined(user_b, room_id)) - } -} diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index d9d90ecf..e484203d 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,71 +1,139 @@ -mod data; - use std::sync::Arc; -use conduit::Result; +use 
conduit::{implement, Result}; +use database::{Deserialized, Map}; use futures::{pin_mut, Stream, StreamExt}; use ruma::{RoomId, UserId}; -use self::data::Data; +use crate::{globals, rooms, Dep}; pub struct Service { db: Data, + services: Services, +} + +struct Data { + userroomid_notificationcount: Arc, + userroomid_highlightcount: Arc, + roomuserid_lastnotificationread: Arc, + roomsynctoken_shortstatehash: Arc, +} + +struct Services { + globals: Dep, + short: Dep, + state_cache: Dep, } impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(&args), + db: Data { + userroomid_notificationcount: args.db["userroomid_notificationcount"].clone(), + userroomid_highlightcount: args.db["userroomid_highlightcount"].clone(), + roomuserid_lastnotificationread: args.db["userroomid_highlightcount"].clone(), //< NOTE: known bug from conduit + roomsynctoken_shortstatehash: args.db["roomsynctoken_shortstatehash"].clone(), + }, + + services: Services { + globals: args.depend::("globals"), + short: args.depend::("rooms::short"), + state_cache: args.depend::("rooms::state_cache"), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - #[inline] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) { - self.db.reset_notification_counts(user_id, room_id); - } +#[implement(Service)] +pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) { + let userroom_id = (user_id, room_id); + self.db.userroomid_highlightcount.put(userroom_id, 0_u64); + self.db.userroomid_notificationcount.put(userroom_id, 0_u64); - #[inline] - pub async fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - self.db.notification_count(user_id, room_id).await - } - - #[inline] - pub async fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - self.db.highlight_count(user_id, room_id).await - } - - #[inline] - pub async fn 
last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - self.db.last_notification_read(user_id, room_id).await - } - - #[inline] - pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) { - self.db - .associate_token_shortstatehash(room_id, token, shortstatehash) - .await; - } - - #[inline] - pub async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { - self.db.get_token_shortstatehash(room_id, token).await - } - - #[inline] - pub fn get_shared_rooms<'a>( - &'a self, user_a: &'a UserId, user_b: &'a UserId, - ) -> impl Stream + Send + 'a { - self.db.get_shared_rooms(user_a, user_b) - } - - pub async fn has_shared_rooms<'a>(&'a self, user_a: &'a UserId, user_b: &'a UserId) -> bool { - let get_shared_rooms = self.get_shared_rooms(user_a, user_b); - - pin_mut!(get_shared_rooms); - get_shared_rooms.next().await.is_some() - } + let roomuser_id = (room_id, user_id); + let count = self.services.globals.next_count().unwrap(); + self.db + .roomuserid_lastnotificationread + .put(roomuser_id, count); +} + +#[implement(Service)] +pub async fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + let key = (user_id, room_id); + self.db + .userroomid_notificationcount + .qry(&key) + .await + .deserialized() + .unwrap_or(0) +} + +#[implement(Service)] +pub async fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + let key = (user_id, room_id); + self.db + .userroomid_highlightcount + .qry(&key) + .await + .deserialized() + .unwrap_or(0) +} + +#[implement(Service)] +pub async fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + let key = (room_id, user_id); + self.db + .roomuserid_lastnotificationread + .qry(&key) + .await + .deserialized() + .unwrap_or(0) +} + +#[implement(Service)] +pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) { + let shortroomid = self + .services 
+ .short + .get_shortroomid(room_id) + .await + .expect("room exists"); + + let key: &[u64] = &[shortroomid, token]; + self.db + .roomsynctoken_shortstatehash + .put(key, shortstatehash); +} + +#[implement(Service)] +pub async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { + let shortroomid = self.services.short.get_shortroomid(room_id).await?; + + let key: &[u64] = &[shortroomid, token]; + self.db + .roomsynctoken_shortstatehash + .qry(key) + .await + .deserialized() +} + +#[implement(Service)] +pub async fn has_shared_rooms<'a>(&'a self, user_a: &'a UserId, user_b: &'a UserId) -> bool { + let get_shared_rooms = self.get_shared_rooms(user_a, user_b); + + pin_mut!(get_shared_rooms); + get_shared_rooms.next().await.is_some() +} + +//TODO: optimize; replace point-queries with dual iteration +#[implement(Service)] +pub fn get_shared_rooms<'a>( + &'a self, user_a: &'a UserId, user_b: &'a UserId, +) -> impl Stream + Send + 'a { + self.services + .state_cache + .rooms_joined(user_a) + .filter(|room_id| self.services.state_cache.is_joined(user_b, room_id)) } From 0b085ea84fae3125d956e874cf082532e9f57ca8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 15 Oct 2024 10:34:37 +0000 Subject: [PATCH 0088/1248] merge remaining rooms state_cache data and service Signed-off-by: Jason Volk --- src/service/rooms/state_cache/data.rs | 179 ------------------------- src/service/rooms/state_cache/mod.rs | 184 ++++++++++++++++++++------ 2 files changed, 147 insertions(+), 216 deletions(-) delete mode 100644 src/service/rooms/state_cache/data.rs diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs deleted file mode 100644 index c06c8107..00000000 --- a/src/service/rooms/state_cache/data.rs +++ /dev/null @@ -1,179 +0,0 @@ -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; - -use conduit::{utils::stream::TryIgnore, Result}; -use database::{serialize_to_vec, Deserialized, Interfix, Json, Map}; -use 
futures::{Stream, StreamExt}; -use ruma::{ - events::{AnyStrippedStateEvent, AnySyncStateEvent}, - serde::Raw, - OwnedRoomId, RoomId, UserId, -}; - -use crate::{globals, Dep}; - -type AppServiceInRoomCache = RwLock>>; -type StrippedStateEventItem = (OwnedRoomId, Vec>); -type SyncStateEventItem = (OwnedRoomId, Vec>); - -pub(super) struct Data { - pub(super) appservice_in_room_cache: AppServiceInRoomCache, - pub(super) roomid_invitedcount: Arc, - pub(super) roomid_inviteviaservers: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomserverids: Arc, - pub(super) roomuserid_invitecount: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomuserid_leftcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) serverroomids: Arc, - pub(super) userroomid_invitestate: Arc, - pub(super) userroomid_joined: Arc, - pub(super) userroomid_leftstate: Arc, - services: Services, -} - -struct Services { - globals: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - appservice_in_room_cache: RwLock::new(HashMap::new()), - roomid_invitedcount: db["roomid_invitedcount"].clone(), - roomid_inviteviaservers: db["roomid_inviteviaservers"].clone(), - roomid_joinedcount: db["roomid_joinedcount"].clone(), - roomserverids: db["roomserverids"].clone(), - roomuserid_invitecount: db["roomuserid_invitecount"].clone(), - roomuserid_joined: db["roomuserid_joined"].clone(), - roomuserid_leftcount: db["roomuserid_leftcount"].clone(), - roomuseroncejoinedids: db["roomuseroncejoinedids"].clone(), - serverroomids: db["serverroomids"].clone(), - userroomid_invitestate: db["userroomid_invitestate"].clone(), - userroomid_joined: db["userroomid_joined"].clone(), - userroomid_leftstate: db["userroomid_leftstate"].clone(), - services: Services { - globals: args.depend::("globals"), - }, - } - } - - pub(super) fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) { - let key = (user_id, room_id); - - 
self.roomuseroncejoinedids.put_raw(key, []); - } - - pub(super) fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { - let userroom_id = (user_id, room_id); - let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); - - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); - - self.userroomid_joined.insert(&userroom_id, []); - self.roomuserid_joined.insert(&roomuser_id, []); - - self.userroomid_invitestate.remove(&userroom_id); - self.roomuserid_invitecount.remove(&roomuser_id); - - self.userroomid_leftstate.remove(&userroom_id); - self.roomuserid_leftcount.remove(&roomuser_id); - - self.roomid_inviteviaservers.remove(room_id); - } - - pub(super) fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { - let userroom_id = (user_id, room_id); - let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); - - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); - - // (timo) TODO - let leftstate = Vec::>::new(); - let count = self.services.globals.next_count().unwrap(); - - self.userroomid_leftstate - .raw_put(&userroom_id, Json(leftstate)); - self.roomuserid_leftcount.raw_put(&roomuser_id, count); - - self.userroomid_joined.remove(&userroom_id); - self.roomuserid_joined.remove(&roomuser_id); - - self.userroomid_invitestate.remove(&userroom_id); - self.roomuserid_invitecount.remove(&roomuser_id); - - self.roomid_inviteviaservers.remove(room_id); - } - - /// Makes a user forget a room. 
- #[tracing::instrument(skip(self), level = "debug")] - pub(super) fn forget(&self, room_id: &RoomId, user_id: &UserId) { - let userroom_id = (user_id, room_id); - let roomuser_id = (room_id, user_id); - - self.userroomid_leftstate.del(userroom_id); - self.roomuserid_leftcount.del(roomuser_id); - } - - /// Returns an iterator over all rooms a user was invited to. - #[inline] - pub(super) fn rooms_invited<'a>( - &'a self, user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - type Key<'a> = (&'a UserId, &'a RoomId); - type KeyVal<'a> = (Key<'a>, Raw>); - - let prefix = (user_id, Interfix); - self.userroomid_invitestate - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) - .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) - .ignore_err() - } - - /// Returns an iterator over all rooms a user left. - #[inline] - pub(super) fn rooms_left<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { - type Key<'a> = (&'a UserId, &'a RoomId); - type KeyVal<'a> = (Key<'a>, Raw>>); - - let prefix = (user_id, Interfix); - self.userroomid_leftstate - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) - .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) - .ignore_err() - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) async fn invite_state( - &self, user_id: &UserId, room_id: &RoomId, - ) -> Result>> { - let key = (user_id, room_id); - self.userroomid_invitestate - .qry(&key) - .await - .deserialized() - .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub(super) async fn left_state( - &self, user_id: &UserId, room_id: &RoomId, - ) -> Result>> { - let key = (user_id, room_id); - self.userroomid_leftstate - .qry(&key) - .await - .deserialized() - .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) - } -} diff --git 
a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 077eee10..4f4ff264 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,14 +1,14 @@ -mod data; - -use std::{collections::HashSet, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::{Arc, RwLock}, +}; use conduit::{ err, is_not_empty, utils::{stream::TryIgnore, ReadyExt, StreamTools}, warn, Result, }; -use data::Data; -use database::{serialize_to_vec, Deserialized, Ignore, Interfix, Json}; +use database::{serialize_to_vec, Deserialized, Ignore, Interfix, Json, Map}; use futures::{stream::iter, Stream, StreamExt}; use itertools::Itertools; use ruma::{ @@ -29,6 +29,7 @@ use ruma::{ use crate::{account_data, appservice::RegistrationInfo, globals, rooms, users, Dep}; pub struct Service { + appservice_in_room_cache: AppServiceInRoomCache, services: Services, db: Data, } @@ -40,16 +41,49 @@ struct Services { users: Dep, } +struct Data { + roomid_invitedcount: Arc, + roomid_inviteviaservers: Arc, + roomid_joinedcount: Arc, + roomserverids: Arc, + roomuserid_invitecount: Arc, + roomuserid_joined: Arc, + roomuserid_leftcount: Arc, + roomuseroncejoinedids: Arc, + serverroomids: Arc, + userroomid_invitestate: Arc, + userroomid_joined: Arc, + userroomid_leftstate: Arc, +} + +type AppServiceInRoomCache = RwLock>>; +type StrippedStateEventItem = (OwnedRoomId, Vec>); +type SyncStateEventItem = (OwnedRoomId, Vec>); + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { + appservice_in_room_cache: RwLock::new(HashMap::new()), services: Services { account_data: args.depend::("account_data"), globals: args.depend::("globals"), state_accessor: args.depend::("rooms::state_accessor"), users: args.depend::("users"), }, - db: Data::new(&args), + db: Data { + roomid_invitedcount: args.db["roomid_invitedcount"].clone(), + roomid_inviteviaservers: args.db["roomid_inviteviaservers"].clone(), + 
roomid_joinedcount: args.db["roomid_joinedcount"].clone(), + roomserverids: args.db["roomserverids"].clone(), + roomuserid_invitecount: args.db["roomuserid_invitecount"].clone(), + roomuserid_joined: args.db["roomuserid_joined"].clone(), + roomuserid_leftcount: args.db["roomuserid_leftcount"].clone(), + roomuseroncejoinedids: args.db["roomuseroncejoinedids"].clone(), + serverroomids: args.db["serverroomids"].clone(), + userroomid_invitestate: args.db["userroomid_invitestate"].clone(), + userroomid_joined: args.db["userroomid_joined"].clone(), + userroomid_leftstate: args.db["userroomid_leftstate"].clone(), + }, })) } @@ -107,7 +141,7 @@ impl Service { // Check if the user never joined this room if !self.once_joined(user_id, room_id).await { // Add the user ID to the join list then - self.db.mark_as_once_joined(user_id, room_id); + self.mark_as_once_joined(user_id, room_id); // Check if the room has a predecessor if let Ok(Some(predecessor)) = self @@ -186,7 +220,7 @@ impl Service { } } - self.db.mark_as_joined(user_id, room_id); + self.mark_as_joined(user_id, room_id); }, MembershipState::Invite => { // We want to know if the sender is ignored by the receiver @@ -198,7 +232,7 @@ impl Service { .await; }, MembershipState::Leave | MembershipState::Ban => { - self.db.mark_as_left(user_id, room_id); + self.mark_as_left(user_id, room_id); }, _ => {}, } @@ -213,10 +247,9 @@ impl Service { #[tracing::instrument(skip(self, room_id, appservice), level = "debug")] pub async fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> bool { let maybe = self - .db .appservice_in_room_cache .read() - .unwrap() + .expect("locked") .get(room_id) .and_then(|map| map.get(&appservice.registration.id)) .copied(); @@ -242,10 +275,9 @@ impl Service { .ready_any(|userid| appservice.users.is_match(userid.as_str())) .await; - self.db - .appservice_in_room_cache + self.appservice_in_room_cache .write() - .unwrap() + .expect("locked") .entry(room_id.to_owned()) 
.or_default() .insert(appservice.registration.id.clone(), in_room); @@ -254,21 +286,67 @@ impl Service { } } - /// Direct DB function to directly mark a user as left. It is not - /// recommended to use this directly. You most likely should use - /// `update_membership` instead - #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { self.db.mark_as_left(user_id, room_id); } - /// Direct DB function to directly mark a user as joined. It is not /// recommended to use this directly. You most likely should use /// `update_membership` instead #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { self.db.mark_as_joined(user_id, room_id); } + pub fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { + let userroom_id = (user_id, room_id); + let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); + + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); + + self.db.userroomid_joined.insert(&userroom_id, []); + self.db.roomuserid_joined.insert(&roomuser_id, []); + + self.db.userroomid_invitestate.remove(&userroom_id); + self.db.roomuserid_invitecount.remove(&roomuser_id); + + self.db.userroomid_leftstate.remove(&userroom_id); + self.db.roomuserid_leftcount.remove(&roomuser_id); + + self.db.roomid_inviteviaservers.remove(room_id); + } + + /// Direct DB function to directly mark a user as left. It is not + /// recommended to use this directly. 
You most likely should use + /// `update_membership` instead + #[tracing::instrument(skip(self), level = "debug")] + pub fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { + let userroom_id = (user_id, room_id); + let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); + + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); + + // (timo) TODO + let leftstate = Vec::>::new(); + let count = self.services.globals.next_count().unwrap(); + + self.db + .userroomid_leftstate + .raw_put(&userroom_id, Json(leftstate)); + self.db.roomuserid_leftcount.raw_put(&roomuser_id, count); + + self.db.userroomid_joined.remove(&userroom_id); + self.db.roomuserid_joined.remove(&roomuser_id); + + self.db.userroomid_invitestate.remove(&userroom_id); + self.db.roomuserid_invitecount.remove(&roomuser_id); + + self.db.roomid_inviteviaservers.remove(room_id); + } /// Makes a user forget a room. #[tracing::instrument(skip(self), level = "debug")] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) { self.db.forget(room_id, user_id); } + pub fn forget(&self, room_id: &RoomId, user_id: &UserId) { + let userroom_id = (user_id, room_id); + let roomuser_id = (room_id, user_id); + + self.db.userroomid_leftstate.del(userroom_id); + self.db.roomuserid_leftcount.del(roomuser_id); + } /// Returns an iterator of all servers participating in this room. #[tracing::instrument(skip(self), level = "debug")] @@ -415,28 +493,56 @@ impl Service { /// Returns an iterator over all rooms a user was invited to. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_invited<'a>( - &'a self, user_id: &'a UserId, - ) -> impl Stream>)> + Send + 'a { - self.db.rooms_invited(user_id) + pub fn rooms_invited<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Key<'a>, Raw>); + type Key<'a> = (&'a UserId, &'a RoomId); + + let prefix = (user_id, Interfix); + self.db + .userroomid_invitestate + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() } #[tracing::instrument(skip(self), level = "debug")] pub async fn invite_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>> { - self.db.invite_state(user_id, room_id).await + let key = (user_id, room_id); + self.db + .userroomid_invitestate + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) } #[tracing::instrument(skip(self), level = "debug")] pub async fn left_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>> { - self.db.left_state(user_id, room_id).await + let key = (user_id, room_id); + self.db + .userroomid_leftstate + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) } /// Returns an iterator over all rooms a user left. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_left<'a>( - &'a self, user_id: &'a UserId, - ) -> impl Stream>)> + Send + 'a { - self.db.rooms_left(user_id) + pub fn rooms_left<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Key<'a>, Raw>>); + type Key<'a> = (&'a UserId, &'a RoomId); + + let prefix = (user_id, Interfix); + self.db + .userroomid_leftstate + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() } #[tracing::instrument(skip(self), level = "debug")] @@ -515,13 +621,13 @@ impl Service { } pub fn get_appservice_in_room_cache_usage(&self) -> (usize, usize) { - let cache = self.db.appservice_in_room_cache.read().expect("locked"); + let cache = self.appservice_in_room_cache.read().expect("locked"); + (cache.len(), cache.capacity()) } pub fn clear_appservice_in_room_cache(&self) { - self.db - .appservice_in_room_cache + self.appservice_in_room_cache .write() .expect("locked") .clear(); @@ -574,13 +680,17 @@ impl Service { self.db.serverroomids.put_raw(serverroom_id, []); } - self.db - .appservice_in_room_cache + self.appservice_in_room_cache .write() .expect("locked") .remove(room_id); } + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) { + let key = (user_id, room_id); + self.db.roomuseroncejoinedids.put_raw(key, []); + } + pub async fn mark_as_invited( &self, user_id: &UserId, room_id: &RoomId, last_state: Option>>, invite_via: Option>, From 84191656fb1e8340afb1ace819e48537a77c053e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 16 Oct 2024 02:31:36 +0000 Subject: [PATCH 0089/1248] slightly cleanup appservice_in_room Signed-off-by: Jason Volk --- src/service/rooms/state_cache/mod.rs | 61 +++++++++++++--------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/src/service/rooms/state_cache/mod.rs 
b/src/service/rooms/state_cache/mod.rs index 4f4ff264..11684eab 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -5,6 +5,7 @@ use std::{ use conduit::{ err, is_not_empty, + result::LogErr, utils::{stream::TryIgnore, ReadyExt, StreamTools}, warn, Result, }; @@ -246,44 +247,40 @@ impl Service { #[tracing::instrument(skip(self, room_id, appservice), level = "debug")] pub async fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> bool { - let maybe = self + if let Some(cached) = self .appservice_in_room_cache .read() .expect("locked") .get(room_id) .and_then(|map| map.get(&appservice.registration.id)) - .copied(); - - if let Some(b) = maybe { - b - } else { - let bridge_user_id = UserId::parse_with_server_name( - appservice.registration.sender_localpart.as_str(), - self.services.globals.server_name(), - ) - .ok(); - - let in_room = if let Some(id) = &bridge_user_id { - self.is_joined(id, room_id).await - } else { - false - }; - - let in_room = in_room - || self - .room_members(room_id) - .ready_any(|userid| appservice.users.is_match(userid.as_str())) - .await; - - self.appservice_in_room_cache - .write() - .expect("locked") - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.registration.id.clone(), in_room); - - in_room + .copied() + { + return cached; } + + let bridge_user_id = UserId::parse_with_server_name( + appservice.registration.sender_localpart.as_str(), + self.services.globals.server_name(), + ); + + let Ok(bridge_user_id) = bridge_user_id.log_err() else { + return false; + }; + + let in_room = self.is_joined(&bridge_user_id, room_id).await + || self + .room_members(room_id) + .ready_any(|user_id| appservice.users.is_match(user_id.as_str())) + .await; + + self.appservice_in_room_cache + .write() + .expect("locked") + .entry(room_id.into()) + .or_default() + .insert(appservice.registration.id.clone(), in_room); + + in_room } /// Direct DB function to directly mark a user 
as joined. It is not From 55b8908894303f1e784a881e879d5f8d6773abaf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 16 Oct 2024 03:12:30 +0000 Subject: [PATCH 0090/1248] merge rooms state_compressor service and data Signed-off-by: Jason Volk --- src/service/rooms/state_compressor/data.rs | 84 ------------ src/service/rooms/state_compressor/mod.rs | 142 ++++++++++++++------- 2 files changed, 97 insertions(+), 129 deletions(-) delete mode 100644 src/service/rooms/state_compressor/data.rs diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs deleted file mode 100644 index cb020470..00000000 --- a/src/service/rooms/state_compressor/data.rs +++ /dev/null @@ -1,84 +0,0 @@ -use std::{collections::HashSet, mem::size_of, sync::Arc}; - -use conduit::{err, expected, utils, Result}; -use database::{Database, Map}; - -use super::CompressedStateEvent; - -pub(super) struct StateDiff { - pub(super) parent: Option, - pub(super) added: Arc>, - pub(super) removed: Arc>, -} - -pub(super) struct Data { - shortstatehash_statediff: Arc, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - shortstatehash_statediff: db["shortstatehash_statediff"].clone(), - } - } - - pub(super) async fn get_statediff(&self, shortstatehash: u64) -> Result { - const BUFSIZE: usize = size_of::(); - - let value = self - .shortstatehash_statediff - .aqry::(&shortstatehash) - .await - .map_err(|e| err!(Database("Failed to find StateDiff from short {shortstatehash:?}: {e}")))?; - - let parent = utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - let parent = if parent != 0 { - Some(parent) - } else { - None - }; - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let stride = size_of::(); - let mut i = stride; - while let Some(v) = value.get(i..expected!(i + 2 * stride)) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i = expected!(i + 
stride); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i = expected!(i + 2 * stride); - } - - Ok(StateDiff { - parent, - added: Arc::new(added), - removed: Arc::new(removed), - }) - } - - pub(super) fn save_statediff(&self, shortstatehash: u64, diff: &StateDiff) { - let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); - for new in diff.added.iter() { - value.extend_from_slice(&new[..]); - } - - if !diff.removed.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in diff.removed.iter() { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value); - } -} diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index cd3f2f73..be66c597 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,53 +1,21 @@ -mod data; - use std::{ collections::HashSet, fmt::Write, mem::size_of, - sync::{Arc, Mutex as StdMutex, Mutex}, + sync::{Arc, Mutex}, }; -use conduit::{checked, utils, utils::math::usize_from_f64, Result}; -use data::Data; +use conduit::{checked, err, expected, utils, utils::math::usize_from_f64, Result}; +use database::Map; use lru_cache::LruCache; use ruma::{EventId, RoomId}; -use self::data::StateDiff; use crate::{rooms, Dep}; -type StateInfoLruCache = Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed - )>, - >, ->; - -type ShortStateInfoResult = Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed -)>; - -type ParentStatesVec = Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed -)>; - -type HashSetCompressStateEvent = (u64, Arc>, Arc>); -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - pub struct 
Service { + pub stateinfo_cache: Mutex, db: Data, services: Services, - pub stateinfo_cache: StateInfoLruCache, } struct Services { @@ -55,17 +23,42 @@ struct Services { state: Dep, } +struct Data { + shortstatehash_statediff: Arc, +} + +struct StateDiff { + parent: Option, + added: Arc>, + removed: Arc>, +} + +type StateInfoLruCache = LruCache; +type ShortStateInfoVec = Vec; +type ParentStatesVec = Vec; +type ShortStateInfo = ( + u64, // sstatehash + Arc>, // full state + Arc>, // added + Arc>, // removed +); + +type HashSetCompressStateEvent = (u64, Arc>, Arc>); +pub type CompressedStateEvent = [u8; 2 * size_of::()]; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; let cache_capacity = f64::from(config.stateinfo_cache_capacity) * config.cache_capacity_modifier; Ok(Arc::new(Self { - db: Data::new(args.db), + stateinfo_cache: LruCache::new(usize_from_f64(cache_capacity)?).into(), + db: Data { + shortstatehash_statediff: args.db["shortstatehash_statediff"].clone(), + }, services: Services { short: args.depend::("rooms::short"), state: args.depend::("rooms::state"), }, - stateinfo_cache: StdMutex::new(LruCache::new(usize_from_f64(cache_capacity)?)), })) } @@ -84,7 +77,7 @@ impl crate::Service for Service { impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and /// removed diff for the selected shortstatehash and each parent layer. 
- pub async fn load_shortstatehash_info(&self, shortstatehash: u64) -> Result { + pub async fn load_shortstatehash_info(&self, shortstatehash: u64) -> Result { if let Some(r) = self .stateinfo_cache .lock() @@ -98,7 +91,7 @@ impl Service { parent, added, removed, - } = self.db.get_statediff(shortstatehash).await?; + } = self.get_statediff(shortstatehash).await?; if let Some(parent) = parent { let mut response = Box::pin(self.load_shortstatehash_info(parent)).await?; @@ -177,12 +170,12 @@ impl Service { /// for this layer /// * `parent_states` - A stack with info on shortstatehash, full state, /// added diff and removed diff for each parent layer - #[tracing::instrument(skip(self, statediffnew, statediffremoved, diff_to_sibling, parent_states), level = "debug")] + #[tracing::instrument(skip_all, level = "debug")] pub fn save_state_from_diff( &self, shortstatehash: u64, statediffnew: Arc>, statediffremoved: Arc>, diff_to_sibling: usize, mut parent_states: ParentStatesVec, - ) -> Result<()> { + ) -> Result { let statediffnew_len = statediffnew.len(); let statediffremoved_len = statediffremoved.len(); let diffsum = checked!(statediffnew_len + statediffremoved_len)?; @@ -226,7 +219,7 @@ impl Service { if parent_states.is_empty() { // There is no parent layer, create a new state - self.db.save_statediff( + self.save_statediff( shortstatehash, &StateDiff { parent: None, @@ -279,7 +272,7 @@ impl Service { )?; } else { // Diff small enough, we add diff as layer on top of parent - self.db.save_statediff( + self.save_statediff( shortstatehash, &StateDiff { parent: Some(parent.0), @@ -324,7 +317,7 @@ impl Service { let states_parents = if let Some(p) = previous_shortstatehash { self.load_shortstatehash_info(p).await.unwrap_or_default() } else { - ShortStateInfoResult::new() + ShortStateInfoVec::new() }; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -356,4 +349,63 @@ impl Service { Ok((new_shortstatehash, statediffnew, 
statediffremoved)) } + + async fn get_statediff(&self, shortstatehash: u64) -> Result { + const BUFSIZE: usize = size_of::(); + const STRIDE: usize = size_of::(); + + let value = self + .db + .shortstatehash_statediff + .aqry::(&shortstatehash) + .await + .map_err(|e| err!(Database("Failed to find StateDiff from short {shortstatehash:?}: {e}")))?; + + let parent = utils::u64_from_bytes(&value[0..size_of::()]) + .ok() + .take_if(|parent| *parent != 0); + + let mut add_mode = true; + let mut added = HashSet::new(); + let mut removed = HashSet::new(); + + let mut i = STRIDE; + while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { + if add_mode && v.starts_with(&0_u64.to_be_bytes()) { + add_mode = false; + i = expected!(i + STRIDE); + continue; + } + if add_mode { + added.insert(v.try_into()?); + } else { + removed.insert(v.try_into()?); + } + i = expected!(i + 2 * STRIDE); + } + + Ok(StateDiff { + parent, + added: Arc::new(added), + removed: Arc::new(removed), + }) + } + + fn save_statediff(&self, shortstatehash: u64, diff: &StateDiff) { + let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); + for new in diff.added.iter() { + value.extend_from_slice(&new[..]); + } + + if !diff.removed.is_empty() { + value.extend_from_slice(&0_u64.to_be_bytes()); + for removed in diff.removed.iter() { + value.extend_from_slice(&removed[..]); + } + } + + self.db + .shortstatehash_statediff + .insert(&shortstatehash.to_be_bytes(), &value); + } } From 828cb96ba9dd8e323a37c0e33d8a6e1c23b84ac5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 16 Oct 2024 05:32:27 +0000 Subject: [PATCH 0091/1248] split client/sync Signed-off-by: Jason Volk --- src/api/client/sync/mod.rs | 67 ++ src/api/client/{sync.rs => sync/v3.rs} | 835 +------------------------ src/api/client/sync/v4.rs | 784 +++++++++++++++++++++++ src/core/utils/mod.rs | 10 + 4 files changed, 870 insertions(+), 826 deletions(-) create mode 100644 src/api/client/sync/mod.rs rename src/api/client/{sync.rs => 
sync/v3.rs} (56%) create mode 100644 src/api/client/sync/v4.rs diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs new file mode 100644 index 00000000..0cfc7b8b --- /dev/null +++ b/src/api/client/sync/mod.rs @@ -0,0 +1,67 @@ +mod v3; +mod v4; + +use conduit::{ + utils::{math::usize_from_u64_truncated, ReadyExt}, + PduCount, +}; +use futures::StreamExt; +use ruma::{RoomId, UserId}; + +pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route}; +use crate::{service::Services, Error, PduEvent, Result}; + +async fn load_timeline( + services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64, +) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { + let timeline_pdus; + let limited = if services + .rooms + .timeline + .last_timeline_count(sender_user, room_id) + .await? + > roomsincecount + { + let mut non_timeline_pdus = services + .rooms + .timeline + .pdus_until(sender_user, room_id, PduCount::max()) + .await? + .ready_take_while(|(pducount, _)| pducount > &roomsincecount); + + // Take the last events for the timeline + timeline_pdus = non_timeline_pdus + .by_ref() + .take(usize_from_u64_truncated(limit)) + .collect::>() + .await + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output + // is limited unless there are events in non_timeline_pdus + non_timeline_pdus.next().await.is_some() + } else { + timeline_pdus = Vec::new(); + false + }; + Ok((timeline_pdus, limited)) +} + +async fn share_encrypted_room( + services: &Services, sender_user: &UserId, user_id: &UserId, ignore_room: Option<&RoomId>, +) -> bool { + services + .rooms + .user + .get_shared_rooms(sender_user, user_id) + .ready_filter(|&room_id| Some(room_id) != ignore_room) + .any(|other_room_id| { + services + .rooms + .state_accessor + .is_encrypted_room(other_room_id) + }) + .await +} diff --git a/src/api/client/sync.rs b/src/api/client/sync/v3.rs similarity index 56% 
rename from src/api/client/sync.rs rename to src/api/client/sync/v3.rs index 65af775d..f29fe220 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync/v3.rs @@ -1,23 +1,19 @@ use std::{ - cmp::{self, Ordering}, - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, + cmp::{self}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, time::Duration, }; use axum::extract::State; use conduit::{ - debug, err, error, is_equal_to, + err, error, extract_variant, is_equal_to, result::FlatOk, - utils::{ - math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, - BoolExt, IterStream, ReadyExt, TryFutureExtExt, - }, - warn, PduCount, + utils::{math::ruma_from_u64, BoolExt, IterStream, ReadyExt, TryFutureExtExt}, + PduCount, }; -use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt}; +use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt}; use ruma::{ api::client::{ - error::ErrorKind, filter::{FilterDefinition, LazyLoadOptions}, sync::sync_events::{ self, @@ -25,43 +21,27 @@ use ruma::{ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice, }, - v4::{SlidingOp, SlidingSyncRoomHero}, DeviceLists, UnreadNotificationsCount, }, uiaa::UiaaResponse, }, - directory::RoomTypeFilter, events::{ presence::PresenceEvent, room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, StateEventType, - TimelineEventType::{self, *}, + TimelineEventType::*, }, serde::Raw, - state_res::Event, - uint, DeviceId, EventId, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, + uint, DeviceId, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId, }; -use service::rooms::read_receipt::pack_receipts; use tracing::{Instrument as _, Span}; +use super::{load_timeline, share_encrypted_room}; use crate::{ service::{pdu::EventHash, Services}, utils, Error, 
PduEvent, Result, Ruma, RumaResponse, }; -const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; -const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = - &[RoomMessage, RoomEncrypted, Sticker, CallInvite, PollStart, Beacon]; - -macro_rules! extract_variant { - ($e:expr, $variant:path) => { - match $e { - $variant(value) => Some(value), - _ => None, - } - }; -} - /// # `GET /_matrix/client/r0/sync` /// /// Synchronize the client's state with the latest state on the server. @@ -1085,800 +1065,3 @@ async fn load_joined_room( unread_thread_notifications: BTreeMap::new(), }) } - -async fn load_timeline( - services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64, -) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { - let timeline_pdus; - let limited = if services - .rooms - .timeline - .last_timeline_count(sender_user, room_id) - .await? - > roomsincecount - { - let mut non_timeline_pdus = services - .rooms - .timeline - .pdus_until(sender_user, room_id, PduCount::max()) - .await? 
- .ready_take_while(|(pducount, _)| pducount > &roomsincecount); - - // Take the last events for the timeline - timeline_pdus = non_timeline_pdus - .by_ref() - .take(usize_from_u64_truncated(limit)) - .collect::>() - .await - .into_iter() - .rev() - .collect::>(); - - // They /sync response doesn't always return all messages, so we say the output - // is limited unless there are events in non_timeline_pdus - non_timeline_pdus.next().await.is_some() - } else { - timeline_pdus = Vec::new(); - false - }; - Ok((timeline_pdus, limited)) -} - -async fn share_encrypted_room( - services: &Services, sender_user: &UserId, user_id: &UserId, ignore_room: Option<&RoomId>, -) -> bool { - services - .rooms - .user - .get_shared_rooms(sender_user, user_id) - .ready_filter(|&room_id| Some(room_id) != ignore_room) - .any(|other_room_id| { - services - .rooms - .state_accessor - .is_encrypted_room(other_room_id) - }) - .await -} - -/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` -/// -/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`) -pub(crate) async fn sync_events_v4_route( - State(services): State, body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); - let mut body = body.body; - // Setup watchers, so if there's no response, we can wait for them - let watcher = services.globals.watch(sender_user, &sender_device); - - let next_batch = services.globals.next_count()?; - - let conn_id = body - .conn_id - .clone() - .unwrap_or_else(|| SINGLE_CONNECTION_SYNC.to_owned()); - - let globalsince = body - .pos - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - - if globalsince != 0 - && !services - .sync - .remembered(sender_user.clone(), sender_device.clone(), conn_id.clone()) - { - debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - 
"Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); - } - - if globalsince == 0 { - services - .sync - .forget_sync_request_connection(sender_user.clone(), sender_device.clone(), conn_id.clone()); - } - - // Get sticky parameters from cache - let known_rooms = - services - .sync - .update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body); - - let all_joined_rooms: Vec<_> = services - .rooms - .state_cache - .rooms_joined(sender_user) - .map(ToOwned::to_owned) - .collect() - .await; - - let all_invited_rooms: Vec<_> = services - .rooms - .state_cache - .rooms_invited(sender_user) - .map(|r| r.0) - .collect() - .await; - - let all_rooms = all_joined_rooms - .iter() - .chain(all_invited_rooms.iter()) - .map(Clone::clone) - .collect(); - - if body.extensions.to_device.enabled.unwrap_or(false) { - services - .users - .remove_to_device_events(sender_user, &sender_device, globalsince) - .await; - } - - let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in - let mut device_list_changes = HashSet::new(); - let mut device_list_left = HashSet::new(); - - let mut receipts = sync_events::v4::Receipts { - rooms: BTreeMap::new(), - }; - - let mut account_data = sync_events::v4::AccountData { - global: Vec::new(), - rooms: BTreeMap::new(), - }; - if body.extensions.account_data.enabled.unwrap_or(false) { - account_data.global = services - .account_data - .changes_since(None, sender_user, globalsince) - .await? - .into_iter() - .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) - .collect(); - - if let Some(rooms) = body.extensions.account_data.rooms { - for room in rooms { - account_data.rooms.insert( - room.clone(), - services - .account_data - .changes_since(Some(&room), sender_user, globalsince) - .await? 
- .into_iter() - .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect(), - ); - } - } - } - - if body.extensions.e2ee.enabled.unwrap_or(false) { - // Look for device list updates of this account - device_list_changes.extend( - services - .users - .keys_changed(sender_user.as_ref(), globalsince, None) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - for room_id in &all_joined_rooms { - let Ok(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id).await else { - error!("Room {room_id} has no state"); - continue; - }; - - let since_shortstatehash = services - .rooms - .user - .get_token_shortstatehash(room_id, globalsince) - .await - .ok(); - - let encrypted_room = services - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .await - .is_ok(); - - if let Some(since_shortstatehash) = since_shortstatehash { - // Skip if there are only timeline changes - if since_shortstatehash == current_shortstatehash { - continue; - } - - let since_encryption = services - .rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") - .await; - - let since_sender_member: Option = services - .rooms - .state_accessor - .state_get_content(since_shortstatehash, &StateEventType::RoomMember, sender_user.as_str()) - .ok() - .await; - - let joined_since_last_sync = - since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); - - let new_encrypted_room = encrypted_room && since_encryption.is_err(); - - if encrypted_room { - let current_state_ids = services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let since_state_ids = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, id) in current_state_ids { - if since_state_ids.get(&key) != Some(&id) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state 
not found: {id}"); - continue; - }; - if pdu.kind == RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::parse(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == *sender_user { - continue; - } - - let content: RoomMemberEventContent = pdu.get_content()?; - match content.membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(&services, sender_user, &user_id, Some(room_id)) - .await - { - device_list_changes.insert(user_id); - } - }, - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - }, - _ => {}, - } - } - } - } - } - if joined_since_last_sync || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_changes.extend( - services - .rooms - .state_cache - .room_members(room_id) - // Don't send key updates from the sender to the sender - .ready_filter(|user_id| sender_user != user_id) - // Only send keys if the sender doesn't share an encrypted room with the target - // already - .filter_map(|user_id| { - share_encrypted_room(&services, sender_user, user_id, Some(room_id)) - .map(|res| res.or_some(user_id.to_owned())) - }) - .collect::>() - .await, - ); - } - } - } - // Look for device list updates in this room - device_list_changes.extend( - services - .users - .keys_changed(room_id.as_ref(), globalsince, None) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - } - - for user_id in left_encrypted_users { - let dont_share_encrypted_room = !share_encrypted_room(&services, sender_user, &user_id, None).await; - - // If the user doesn't share an encrypted room with the target anymore, we need - // to tell them - if dont_share_encrypted_room { - device_list_left.insert(user_id); - } - } - } - - let mut lists = BTreeMap::new(); - let mut todo_rooms = BTreeMap::new(); // and required state - - for 
(list_id, list) in &body.lists { - let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { - Some(true) => &all_invited_rooms, - Some(false) => &all_joined_rooms, - None => &all_rooms, - }; - - let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { - Some(filter) if filter.is_empty() => active_rooms.clone(), - Some(value) => filter_rooms(active_rooms, State(services), &value, true).await, - None => active_rooms.clone(), - }; - - let active_rooms = match list.filters.clone().map(|f| f.room_types) { - Some(filter) if filter.is_empty() => active_rooms.clone(), - Some(value) => filter_rooms(&active_rooms, State(services), &value, false).await, - None => active_rooms, - }; - - let mut new_known_rooms = BTreeSet::new(); - - let ranges = list.ranges.clone(); - lists.insert( - list_id.clone(), - sync_events::v4::SyncList { - ops: ranges - .into_iter() - .map(|mut r| { - r.0 = r.0.clamp( - uint!(0), - UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX), - ); - r.1 = - r.1.clamp(r.0, UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX)); - - let room_ids = if !active_rooms.is_empty() { - active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec() - } else { - Vec::new() - }; - - new_known_rooms.extend(room_ids.iter().cloned()); - for room_id in &room_ids { - let todo_room = todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0, u64::MAX)); - - let limit = list - .room_details - .timeline_limit - .map_or(10, u64::from) - .min(100); - - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); - - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get(list_id.as_str()) - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - } - sync_events::v4::SyncOp { - op: SlidingOp::Sync, - range: Some(r), - index: None, - room_ids, - room_id: None, - } - }) - .collect(), - 
count: ruma_from_usize(active_rooms.len()), - }, - ); - - if let Some(conn_id) = &body.conn_id { - services.sync.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - list_id.clone(), - new_known_rooms, - globalsince, - ); - } - } - - let mut known_subscription_rooms = BTreeSet::new(); - for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { - continue; - } - let todo_room = todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0, u64::MAX)); - let limit = room.timeline_limit.map_or(10, u64::from).min(100); - todo_room.0.extend(room.required_state.iter().cloned()); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get("subscriptions") - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - known_subscription_rooms.insert(room_id.clone()); - } - - for r in body.unsubscribe_rooms { - known_subscription_rooms.remove(&r); - body.room_subscriptions.remove(&r); - } - - if let Some(conn_id) = &body.conn_id { - services.sync.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - "subscriptions".to_owned(), - known_subscription_rooms, - globalsince, - ); - } - - if let Some(conn_id) = &body.conn_id { - services.sync.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); - } - - let mut rooms = BTreeMap::new(); - for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { - let roomsincecount = PduCount::Normal(*roomsince); - - let mut timestamp: Option<_> = None; - let mut invite_state = None; - let (timeline_pdus, limited); - if all_invited_rooms.contains(room_id) { - // TODO: figure out a timestamp we can use for remote invites - invite_state = services - .rooms - .state_cache - .invite_state(sender_user, room_id) - .await - .ok(); - - 
(timeline_pdus, limited) = (Vec::new(), true); - } else { - (timeline_pdus, limited) = - match load_timeline(&services, sender_user, room_id, roomsincecount, *timeline_limit).await { - Ok(value) => value, - Err(err) => { - warn!("Encountered missing timeline in {}, error {}", room_id, err); - continue; - }, - }; - } - - account_data.rooms.insert( - room_id.clone(), - services - .account_data - .changes_since(Some(room_id), sender_user, *roomsince) - .await? - .into_iter() - .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect(), - ); - - let vector: Vec<_> = services - .rooms - .read_receipt - .readreceipts_since(room_id, *roomsince) - .filter_map(|(read_user, ts, v)| async move { - (!services - .users - .user_is_ignored(&read_user, sender_user) - .await) - .then_some((read_user, ts, v)) - }) - .collect() - .await; - - let receipt_size = vector.len(); - receipts - .rooms - .insert(room_id.clone(), pack_receipts(Box::new(vector.into_iter()))); - - if roomsince != &0 - && timeline_pdus.is_empty() - && account_data.rooms.get(room_id).is_some_and(Vec::is_empty) - && receipt_size == 0 - { - continue; - } - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { - error!("timeline in backfill state?!"); - "0".to_owned() - }, - PduCount::Normal(c) => c.to_string(), - })) - })? 
- .or_else(|| { - if roomsince != &0 { - Some(roomsince.to_string()) - } else { - None - } - }); - - let room_events: Vec<_> = timeline_pdus - .iter() - .stream() - .filter_map(|(_, pdu)| async move { - // list of safe and common non-state events to ignore - if matches!( - &pdu.kind, - RoomMessage - | Sticker | CallInvite - | CallNotify | RoomEncrypted - | Image | File | Audio - | Voice | Video | UnstablePollStart - | PollStart | KeyVerificationStart - | Reaction | Emote | Location - ) && services - .users - .user_is_ignored(&pdu.sender, sender_user) - .await - { - return None; - } - - Some(pdu.to_sync_room_event()) - }) - .collect() - .await; - - for (_, pdu) in timeline_pdus { - let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts); - if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) && !timestamp.is_some_and(|time| time > ts) { - timestamp = Some(ts); - } - } - - let required_state = required_state_request - .iter() - .stream() - .filter_map(|state| async move { - services - .rooms - .state_accessor - .room_state_get(room_id, &state.0, &state.1) - .await - .map(|s| s.to_sync_state_event()) - .ok() - }) - .collect() - .await; - - // Heroes - let heroes: Vec<_> = services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|member| member != sender_user) - .filter_map(|user_id| { - services - .rooms - .state_accessor - .get_member(room_id, user_id) - .map_ok(|memberevent| SlidingSyncRoomHero { - user_id: user_id.into(), - name: memberevent.displayname, - avatar: memberevent.avatar_url, - }) - .ok() - }) - .take(5) - .collect() - .await; - - let name = match heroes.len().cmp(&(1_usize)) { - Ordering::Greater => { - let firsts = heroes[1..] 
- .iter() - .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) - .collect::>() - .join(", "); - - let last = heroes[0] - .name - .clone() - .unwrap_or_else(|| heroes[0].user_id.to_string()); - - Some(format!("{firsts} and {last}")) - }, - Ordering::Equal => Some( - heroes[0] - .name - .clone() - .unwrap_or_else(|| heroes[0].user_id.to_string()), - ), - Ordering::Less => None, - }; - - let heroes_avatar = if heroes.len() == 1 { - heroes[0].avatar.clone() - } else { - None - }; - - rooms.insert( - room_id.clone(), - sync_events::v4::SlidingSyncRoom { - name: services - .rooms - .state_accessor - .get_name(room_id) - .await - .ok() - .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { - ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), - ruma::JsOption::Null => ruma::JsOption::Null, - ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } - }, - initial: Some(roomsince == &0), - is_dm: None, - invite_state, - unread_notifications: UnreadNotificationsCount { - highlight_count: Some( - services - .rooms - .user - .highlight_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), - ), - notification_count: Some( - services - .rooms - .user - .notification_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), - ), - }, - timeline: room_events, - required_state, - prev_batch, - limited, - joined_count: Some( - services - .rooms - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0) - .try_into() - .unwrap_or_else(|_| uint!(0)), - ), - invited_count: Some( - services - .rooms - .state_cache - .room_invited_count(room_id) - .await - .unwrap_or(0) - .try_into() - .unwrap_or_else(|_| uint!(0)), - ), - num_live: None, // Count events in timeline greater than global sync counter - timestamp, - heroes: 
Some(heroes), - }, - ); - } - - if rooms - .iter() - .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) - { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let default = Duration::from_secs(30); - let duration = cmp::min(body.timeout.unwrap_or(default), default); - _ = tokio::time::timeout(duration, watcher).await; - } - - Ok(sync_events::v4::Response { - initial: globalsince == 0, - txn_id: body.txn_id.clone(), - pos: next_batch.to_string(), - lists, - rooms, - extensions: sync_events::v4::Extensions { - to_device: if body.extensions.to_device.enabled.unwrap_or(false) { - Some(sync_events::v4::ToDevice { - events: services - .users - .get_to_device_events(sender_user, &sender_device) - .collect() - .await, - next_batch: next_batch.to_string(), - }) - } else { - None - }, - e2ee: sync_events::v4::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - device_one_time_keys_count: services - .users - .count_one_time_keys(sender_user, &sender_device) - .await, - // Fallback keys are not yet supported - device_unused_fallback_key_types: None, - }, - account_data, - receipts, - typing: sync_events::v4::Typing { - rooms: BTreeMap::new(), - }, - }, - delta_token: None, - }) -} - -async fn filter_rooms( - rooms: &[OwnedRoomId], State(services): State, filter: &[RoomTypeFilter], negate: bool, -) -> Vec { - rooms - .iter() - .stream() - .filter_map(|r| async move { - let room_type = services.rooms.state_accessor.get_room_type(r).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; - - include.then_some(r.to_owned()) - }) - .collect() - .await -} diff --git a/src/api/client/sync/v4.rs 
b/src/api/client/sync/v4.rs new file mode 100644 index 00000000..2adb3b71 --- /dev/null +++ b/src/api/client/sync/v4.rs @@ -0,0 +1,784 @@ +use std::{ + cmp::{self, Ordering}, + collections::{BTreeMap, BTreeSet, HashSet}, + time::Duration, +}; + +use axum::extract::State; +use conduit::{ + debug, error, extract_variant, + utils::{ + math::{ruma_from_usize, usize_from_ruma}, + BoolExt, IterStream, ReadyExt, TryFutureExtExt, + }, + warn, Error, PduCount, Result, +}; +use futures::{FutureExt, StreamExt, TryFutureExt}; +use ruma::{ + api::client::{ + error::ErrorKind, + sync::sync_events::{ + self, + v4::{SlidingOp, SlidingSyncRoomHero}, + DeviceLists, UnreadNotificationsCount, + }, + }, + directory::RoomTypeFilter, + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + AnyRawAccountDataEvent, StateEventType, + TimelineEventType::{self, *}, + }, + state_res::Event, + uint, MilliSecondsSinceUnixEpoch, OwnedRoomId, UInt, UserId, +}; +use service::{rooms::read_receipt::pack_receipts, Services}; + +use super::{load_timeline, share_encrypted_room}; +use crate::Ruma; + +const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; +const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = + &[RoomMessage, RoomEncrypted, Sticker, CallInvite, PollStart, Beacon]; + +/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` +/// +/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`) +pub(crate) async fn sync_events_v4_route( + State(services): State, body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.expect("user is authenticated"); + let mut body = body.body; + // Setup watchers, so if there's no response, we can wait for them + let watcher = services.globals.watch(sender_user, &sender_device); + + let next_batch = services.globals.next_count()?; + + let conn_id = body + .conn_id + .clone() + .unwrap_or_else(|| SINGLE_CONNECTION_SYNC.to_owned()); + + 
let globalsince = body + .pos + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + + if globalsince != 0 + && !services + .sync + .remembered(sender_user.clone(), sender_device.clone(), conn_id.clone()) + { + debug!("Restarting sync stream because it was gone from the database"); + return Err(Error::Request( + ErrorKind::UnknownPos, + "Connection data lost since last time".into(), + http::StatusCode::BAD_REQUEST, + )); + } + + if globalsince == 0 { + services + .sync + .forget_sync_request_connection(sender_user.clone(), sender_device.clone(), conn_id.clone()); + } + + // Get sticky parameters from cache + let known_rooms = + services + .sync + .update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body); + + let all_joined_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_joined(sender_user) + .map(ToOwned::to_owned) + .collect() + .await; + + let all_invited_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_invited(sender_user) + .map(|r| r.0) + .collect() + .await; + + let all_rooms = all_joined_rooms + .iter() + .chain(all_invited_rooms.iter()) + .map(Clone::clone) + .collect(); + + if body.extensions.to_device.enabled.unwrap_or(false) { + services + .users + .remove_to_device_events(sender_user, &sender_device, globalsince) + .await; + } + + let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in + let mut device_list_changes = HashSet::new(); + let mut device_list_left = HashSet::new(); + + let mut receipts = sync_events::v4::Receipts { + rooms: BTreeMap::new(), + }; + + let mut account_data = sync_events::v4::AccountData { + global: Vec::new(), + rooms: BTreeMap::new(), + }; + if body.extensions.account_data.enabled.unwrap_or(false) { + account_data.global = services + .account_data + .changes_since(None, sender_user, globalsince) + .await? 
+ .into_iter() + .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) + .collect(); + + if let Some(rooms) = body.extensions.account_data.rooms { + for room in rooms { + account_data.rooms.insert( + room.clone(), + services + .account_data + .changes_since(Some(&room), sender_user, globalsince) + .await? + .into_iter() + .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect(), + ); + } + } + } + + if body.extensions.e2ee.enabled.unwrap_or(false) { + // Look for device list updates of this account + device_list_changes.extend( + services + .users + .keys_changed(sender_user.as_ref(), globalsince, None) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + for room_id in &all_joined_rooms { + let Ok(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id).await else { + error!("Room {room_id} has no state"); + continue; + }; + + let since_shortstatehash = services + .rooms + .user + .get_token_shortstatehash(room_id, globalsince) + .await + .ok(); + + let encrypted_room = services + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .await + .is_ok(); + + if let Some(since_shortstatehash) = since_shortstatehash { + // Skip if there are only timeline changes + if since_shortstatehash == current_shortstatehash { + continue; + } + + let since_encryption = services + .rooms + .state_accessor + .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") + .await; + + let since_sender_member: Option = services + .rooms + .state_accessor + .state_get_content(since_shortstatehash, &StateEventType::RoomMember, sender_user.as_str()) + .ok() + .await; + + let joined_since_last_sync = + since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + + let new_encrypted_room = encrypted_room && since_encryption.is_err(); + + if encrypted_room { + let current_state_ids = services + .rooms + .state_accessor + 
.state_full_ids(current_shortstatehash) + .await?; + + let since_state_ids = services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if since_state_ids.get(&key) != Some(&id) { + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); + continue; + }; + if pdu.kind == RoomMember { + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + if user_id == *sender_user { + continue; + } + + let content: RoomMemberEventContent = pdu.get_content()?; + match content.membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(&services, sender_user, &user_id, Some(room_id)) + .await + { + device_list_changes.insert(user_id); + } + }, + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + }, + _ => {}, + } + } + } + } + } + if joined_since_last_sync || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_changes.extend( + services + .rooms + .state_cache + .room_members(room_id) + // Don't send key updates from the sender to the sender + .ready_filter(|user_id| sender_user != user_id) + // Only send keys if the sender doesn't share an encrypted room with the target + // already + .filter_map(|user_id| { + share_encrypted_room(&services, sender_user, user_id, Some(room_id)) + .map(|res| res.or_some(user_id.to_owned())) + }) + .collect::>() + .await, + ); + } + } + } + // Look for device list updates in this room + device_list_changes.extend( + services + .users + .keys_changed(room_id.as_ref(), globalsince, None) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + } + + for user_id in left_encrypted_users { + let dont_share_encrypted_room = 
!share_encrypted_room(&services, sender_user, &user_id, None).await; + + // If the user doesn't share an encrypted room with the target anymore, we need + // to tell them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + } + + let mut lists = BTreeMap::new(); + let mut todo_rooms = BTreeMap::new(); // and required state + + for (list_id, list) in &body.lists { + let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { + Some(true) => &all_invited_rooms, + Some(false) => &all_joined_rooms, + None => &all_rooms, + }; + + let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { + Some(filter) if filter.is_empty() => active_rooms.clone(), + Some(value) => filter_rooms(&services, active_rooms, &value, true).await, + None => active_rooms.clone(), + }; + + let active_rooms = match list.filters.clone().map(|f| f.room_types) { + Some(filter) if filter.is_empty() => active_rooms.clone(), + Some(value) => filter_rooms(&services, &active_rooms, &value, false).await, + None => active_rooms, + }; + + let mut new_known_rooms = BTreeSet::new(); + + let ranges = list.ranges.clone(); + lists.insert( + list_id.clone(), + sync_events::v4::SyncList { + ops: ranges + .into_iter() + .map(|mut r| { + r.0 = r.0.clamp( + uint!(0), + UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX), + ); + r.1 = + r.1.clamp(r.0, UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX)); + + let room_ids = if !active_rooms.is_empty() { + active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec() + } else { + Vec::new() + }; + + new_known_rooms.extend(room_ids.iter().cloned()); + for room_id in &room_ids { + let todo_room = todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0, u64::MAX)); + + let limit = list + .room_details + .timeline_limit + .map_or(10, u64::from) + .min(100); + + todo_room + .0 + .extend(list.room_details.required_state.iter().cloned()); + + todo_room.1 = 
todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get(list_id.as_str()) + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + } + sync_events::v4::SyncOp { + op: SlidingOp::Sync, + range: Some(r), + index: None, + room_ids, + room_id: None, + } + }) + .collect(), + count: ruma_from_usize(active_rooms.len()), + }, + ); + + if let Some(conn_id) = &body.conn_id { + services.sync.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + list_id.clone(), + new_known_rooms, + globalsince, + ); + } + } + + let mut known_subscription_rooms = BTreeSet::new(); + for (room_id, room) in &body.room_subscriptions { + if !services.rooms.metadata.exists(room_id).await { + continue; + } + let todo_room = todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0, u64::MAX)); + let limit = room.timeline_limit.map_or(10, u64::from).min(100); + todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get("subscriptions") + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + known_subscription_rooms.insert(room_id.clone()); + } + + for r in body.unsubscribe_rooms { + known_subscription_rooms.remove(&r); + body.room_subscriptions.remove(&r); + } + + if let Some(conn_id) = &body.conn_id { + services.sync.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + globalsince, + ); + } + + if let Some(conn_id) = &body.conn_id { + services.sync.update_sync_subscriptions( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + body.room_subscriptions, + ); + } + + let mut rooms = BTreeMap::new(); + for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { + let roomsincecount = 
PduCount::Normal(*roomsince); + + let mut timestamp: Option<_> = None; + let mut invite_state = None; + let (timeline_pdus, limited); + if all_invited_rooms.contains(room_id) { + // TODO: figure out a timestamp we can use for remote invites + invite_state = services + .rooms + .state_cache + .invite_state(sender_user, room_id) + .await + .ok(); + + (timeline_pdus, limited) = (Vec::new(), true); + } else { + (timeline_pdus, limited) = + match load_timeline(&services, sender_user, room_id, roomsincecount, *timeline_limit).await { + Ok(value) => value, + Err(err) => { + warn!("Encountered missing timeline in {}, error {}", room_id, err); + continue; + }, + }; + } + + account_data.rooms.insert( + room_id.clone(), + services + .account_data + .changes_since(Some(room_id), sender_user, *roomsince) + .await? + .into_iter() + .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect(), + ); + + let vector: Vec<_> = services + .rooms + .read_receipt + .readreceipts_since(room_id, *roomsince) + .filter_map(|(read_user, ts, v)| async move { + (!services + .users + .user_is_ignored(&read_user, sender_user) + .await) + .then_some((read_user, ts, v)) + }) + .collect() + .await; + + let receipt_size = vector.len(); + receipts + .rooms + .insert(room_id.clone(), pack_receipts(Box::new(vector.into_iter()))); + + if roomsince != &0 + && timeline_pdus.is_empty() + && account_data.rooms.get(room_id).is_some_and(Vec::is_empty) + && receipt_size == 0 + { + continue; + } + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + PduCount::Backfilled(_) => { + error!("timeline in backfill state?!"); + "0".to_owned() + }, + PduCount::Normal(c) => c.to_string(), + })) + })? 
+ .or_else(|| { + if roomsince != &0 { + Some(roomsince.to_string()) + } else { + None + } + }); + + let room_events: Vec<_> = timeline_pdus + .iter() + .stream() + .filter_map(|(_, pdu)| async move { + // list of safe and common non-state events to ignore + if matches!( + &pdu.kind, + RoomMessage + | Sticker | CallInvite + | CallNotify | RoomEncrypted + | Image | File | Audio + | Voice | Video | UnstablePollStart + | PollStart | KeyVerificationStart + | Reaction | Emote | Location + ) && services + .users + .user_is_ignored(&pdu.sender, sender_user) + .await + { + return None; + } + + Some(pdu.to_sync_room_event()) + }) + .collect() + .await; + + for (_, pdu) in timeline_pdus { + let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts); + if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) && !timestamp.is_some_and(|time| time > ts) { + timestamp = Some(ts); + } + } + + let required_state = required_state_request + .iter() + .stream() + .filter_map(|state| async move { + services + .rooms + .state_accessor + .room_state_get(room_id, &state.0, &state.1) + .await + .map(|s| s.to_sync_state_event()) + .ok() + }) + .collect() + .await; + + // Heroes + let heroes: Vec<_> = services + .rooms + .state_cache + .room_members(room_id) + .ready_filter(|member| member != sender_user) + .filter_map(|user_id| { + services + .rooms + .state_accessor + .get_member(room_id, user_id) + .map_ok(|memberevent| SlidingSyncRoomHero { + user_id: user_id.into(), + name: memberevent.displayname, + avatar: memberevent.avatar_url, + }) + .ok() + }) + .take(5) + .collect() + .await; + + let name = match heroes.len().cmp(&(1_usize)) { + Ordering::Greater => { + let firsts = heroes[1..] 
+ .iter() + .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) + .collect::>() + .join(", "); + + let last = heroes[0] + .name + .clone() + .unwrap_or_else(|| heroes[0].user_id.to_string()); + + Some(format!("{firsts} and {last}")) + }, + Ordering::Equal => Some( + heroes[0] + .name + .clone() + .unwrap_or_else(|| heroes[0].user_id.to_string()), + ), + Ordering::Less => None, + }; + + let heroes_avatar = if heroes.len() == 1 { + heroes[0].avatar.clone() + } else { + None + }; + + rooms.insert( + room_id.clone(), + sync_events::v4::SlidingSyncRoom { + name: services + .rooms + .state_accessor + .get_name(room_id) + .await + .ok() + .or(name), + avatar: if let Some(heroes_avatar) = heroes_avatar { + ruma::JsOption::Some(heroes_avatar) + } else { + match services.rooms.state_accessor.get_avatar(room_id).await { + ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), + ruma::JsOption::Null => ruma::JsOption::Null, + ruma::JsOption::Undefined => ruma::JsOption::Undefined, + } + }, + initial: Some(roomsince == &0), + is_dm: None, + invite_state, + unread_notifications: UnreadNotificationsCount { + highlight_count: Some( + services + .rooms + .user + .highlight_count(sender_user, room_id) + .await + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services + .rooms + .user + .notification_count(sender_user, room_id) + .await + .try_into() + .expect("notification count can't go that high"), + ), + }, + timeline: room_events, + required_state, + prev_batch, + limited, + joined_count: Some( + services + .rooms + .state_cache + .room_joined_count(room_id) + .await + .unwrap_or(0) + .try_into() + .unwrap_or_else(|_| uint!(0)), + ), + invited_count: Some( + services + .rooms + .state_cache + .room_invited_count(room_id) + .await + .unwrap_or(0) + .try_into() + .unwrap_or_else(|_| uint!(0)), + ), + num_live: None, // Count events in timeline greater than global sync counter + timestamp, + heroes: 
Some(heroes), + }, + ); + } + + if rooms + .iter() + .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let default = Duration::from_secs(30); + let duration = cmp::min(body.timeout.unwrap_or(default), default); + _ = tokio::time::timeout(duration, watcher).await; + } + + Ok(sync_events::v4::Response { + initial: globalsince == 0, + txn_id: body.txn_id.clone(), + pos: next_batch.to_string(), + lists, + rooms, + extensions: sync_events::v4::Extensions { + to_device: if body.extensions.to_device.enabled.unwrap_or(false) { + Some(sync_events::v4::ToDevice { + events: services + .users + .get_to_device_events(sender_user, &sender_device) + .collect() + .await, + next_batch: next_batch.to_string(), + }) + } else { + None + }, + e2ee: sync_events::v4::E2EE { + device_lists: DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count: services + .users + .count_one_time_keys(sender_user, &sender_device) + .await, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, + }, + account_data, + receipts, + typing: sync_events::v4::Typing { + rooms: BTreeMap::new(), + }, + }, + delta_token: None, + }) +} + +async fn filter_rooms( + services: &Services, rooms: &[OwnedRoomId], filter: &[RoomTypeFilter], negate: bool, +) -> Vec { + rooms + .iter() + .stream() + .filter_map(|r| async move { + let room_type = services.rooms.state_accessor.get_room_type(r).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(r.to_owned()) + }) + .collect() + .await +} diff --git a/src/core/utils/mod.rs 
b/src/core/utils/mod.rs index 3adecc6c..96a98537 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -40,6 +40,16 @@ pub use self::{ #[inline] pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, source) } +#[macro_export] +macro_rules! extract_variant { + ($e:expr, $variant:path) => { + match $e { + $variant(value) => Some(value), + _ => None, + } + }; +} + #[macro_export] macro_rules! at { ($idx:tt) => { From 1fdcab0319f2461ae000a0876823848f9e5af921 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 16 Oct 2024 06:58:37 +0000 Subject: [PATCH 0092/1248] additional sync cleanup Signed-off-by: Jason Volk --- src/api/client/sync/mod.rs | 51 +++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 0cfc7b8b..ed22010c 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -14,38 +14,37 @@ use crate::{service::Services, Error, PduEvent, Result}; async fn load_timeline( services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64, ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { - let timeline_pdus; - let limited = if services + let last_timeline_count = services .rooms .timeline .last_timeline_count(sender_user, room_id) + .await?; + + if last_timeline_count <= roomsincecount { + return Ok((Vec::new(), false)); + } + + let mut non_timeline_pdus = services + .rooms + .timeline + .pdus_until(sender_user, room_id, PduCount::max()) .await? - > roomsincecount - { - let mut non_timeline_pdus = services - .rooms - .timeline - .pdus_until(sender_user, room_id, PduCount::max()) - .await? 
- .ready_take_while(|(pducount, _)| pducount > &roomsincecount); + .ready_take_while(|(pducount, _)| pducount > &roomsincecount); - // Take the last events for the timeline - timeline_pdus = non_timeline_pdus - .by_ref() - .take(usize_from_u64_truncated(limit)) - .collect::>() - .await - .into_iter() - .rev() - .collect::>(); + // Take the last events for the timeline + let timeline_pdus: Vec<_> = non_timeline_pdus + .by_ref() + .take(usize_from_u64_truncated(limit)) + .collect::>() + .await + .into_iter() + .rev() + .collect(); + + // They /sync response doesn't always return all messages, so we say the output + // is limited unless there are events in non_timeline_pdus + let limited = non_timeline_pdus.next().await.is_some(); - // They /sync response doesn't always return all messages, so we say the output - // is limited unless there are events in non_timeline_pdus - non_timeline_pdus.next().await.is_some() - } else { - timeline_pdus = Vec::new(); - false - }; Ok((timeline_pdus, limited)) } From 93130fbb85e4f5645d1b98aeddd8e1828b1b936c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 21 Oct 2024 20:21:00 +0000 Subject: [PATCH 0093/1248] add is_ok to futures TryExtExt utils Signed-off-by: Jason Volk --- src/core/utils/future/try_ext_ext.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index d30d2cac..7c0b36a2 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -10,6 +10,17 @@ pub trait TryExtExt where Self: TryFuture + Send, { + /// Resolves to a bool for whether the TryFuture (Future of a Result) + /// resolved to Ok or Err. + /// + /// is_ok() has to consume *self rather than borrow. The intent of this + /// extension is therefor for a caller only ever caring about result status + /// while discarding all contents. 
+ #[allow(clippy::wrong_self_convention)] + fn is_ok(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> + where + Self: Sized; + fn map_ok_or( self, default: U, f: F, ) -> MapOkOrElse U, impl FnOnce(Self::Error) -> U> @@ -32,6 +43,14 @@ impl TryExtExt for Fut where Fut: TryFuture + Send, { + #[inline] + fn is_ok(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> + where + Self: Sized, + { + self.map_ok_or(false, |_| true) + } + #[inline] fn map_ok_or( self, default: U, f: F, From ac75ebee8afd9874d02a2c33b86ab388e7ab289b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 16 Oct 2024 11:33:24 +0000 Subject: [PATCH 0094/1248] event_handler/timeline service cleanups Signed-off-by: Jason Volk --- src/service/rooms/event_handler/mod.rs | 67 ++++++++++++++------------ src/service/rooms/timeline/data.rs | 29 ++++------- 2 files changed, 45 insertions(+), 51 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 0ffd9659..41ab79f1 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -169,7 +169,7 @@ impl Service { .await?; // Procure the room version - let room_version_id = Self::get_room_version_id(&create_event)?; + let room_version_id = get_room_version_id(&create_event)?; let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; @@ -178,7 +178,7 @@ impl Service { .boxed() .await?; - Self::check_room_id(room_id, &incoming_pdu)?; + check_room_id(room_id, &incoming_pdu)?; // 8. if not timeline event: stop if !is_timeline_event { @@ -341,7 +341,7 @@ impl Service { // 2. Check signatures, otherwise drop // 3. 
check content hash, redact if doesn't match - let room_version_id = Self::get_room_version_id(create_event)?; + let room_version_id = get_room_version_id(create_event)?; let mut val = match self .services .server_keys @@ -378,7 +378,7 @@ impl Service { ) .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; - Self::check_room_id(room_id, &incoming_pdu)?; + check_room_id(room_id, &incoming_pdu)?; if !auth_events_known { // 4. fetch any missing auth events doing all checks listed here starting at 1. @@ -414,7 +414,7 @@ impl Service { continue; }; - Self::check_room_id(room_id, &auth_event)?; + check_room_id(room_id, &auth_event)?; match auth_events.entry(( auth_event.kind.to_string().into(), @@ -454,7 +454,7 @@ impl Service { }; let auth_check = state_res::event_auth::auth_check( - &Self::to_room_version(&room_version_id), + &to_room_version(&room_version_id), &incoming_pdu, None, // TODO: third party invite state_fetch, @@ -502,8 +502,8 @@ impl Service { } debug!("Upgrading to timeline pdu"); - let timer = tokio::time::Instant::now(); - let room_version_id = Self::get_room_version_id(create_event)?; + let timer = Instant::now(); + let room_version_id = get_room_version_id(create_event)?; // 10. Fetch missing state and auth chain events by calling /state_ids at // backwards extremities doing all the checks in this list starting at 1. @@ -524,7 +524,7 @@ impl Service { } let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); - let room_version = Self::to_room_version(&room_version_id); + let room_version = to_room_version(&room_version_id); debug!("Performing auth check"); // 11. 
Check the auth of the event passes based on the state of the event @@ -1278,7 +1278,7 @@ impl Service { .await .pop() { - Self::check_room_id(room_id, &pdu)?; + check_room_id(room_id, &pdu)?; let limit = self.services.globals.max_fetch_prev_events(); if amount > limit { @@ -1370,31 +1370,34 @@ impl Service { } } - fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> Result<()> { - if pdu.room_id != room_id { - return Err!(Request(InvalidParam( - warn!(pdu_event_id = ?pdu.event_id, pdu_room_id = ?pdu.room_id, ?room_id, "Found event from room in room") - ))); - } - - Ok(()) - } - - fn get_room_version_id(create_event: &PduEvent) -> Result { - let content: RoomCreateEventContent = create_event.get_content()?; - let room_version = content.room_version; - - Ok(room_version) - } - - #[inline] - fn to_room_version(room_version_id: &RoomVersionId) -> RoomVersion { - RoomVersion::new(room_version_id).expect("room version is supported") - } - async fn event_exists(&self, event_id: Arc) -> bool { self.services.timeline.pdu_exists(&event_id).await } async fn event_fetch(&self, event_id: Arc) -> Option> { self.services.timeline.get_pdu(&event_id).await.ok() } } + +fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> Result { + if pdu.room_id != room_id { + return Err!(Request(InvalidParam(error!( + pdu_event_id = ?pdu.event_id, + pdu_room_id = ?pdu.room_id, + ?room_id, + "Found event from room in room", + )))); + } + + Ok(()) +} + +fn get_room_version_id(create_event: &PduEvent) -> Result { + let content: RoomCreateEventContent = create_event.get_content()?; + let room_version = content.room_version; + + Ok(room_version) +} + +#[inline] +fn to_room_version(room_version_id: &RoomVersionId) -> RoomVersion { + RoomVersion::new(room_version_id).expect("room version is supported") +} diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index c51b7856..5428a3b9 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs 
@@ -8,11 +8,11 @@ use conduit::{ err, expected, result::{LogErr, NotFound}, utils, - utils::{stream::TryIgnore, u64_from_u8, ReadyExt}, + utils::{future::TryExtExt, stream::TryIgnore, u64_from_u8, ReadyExt}, Err, PduCount, PduEvent, Result, }; use database::{Database, Deserialized, Json, KeyVal, Map}; -use futures::{FutureExt, Stream, StreamExt}; +use futures::{Stream, StreamExt}; use ruma::{CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use tokio::sync::Mutex; @@ -115,12 +115,10 @@ impl Data { /// Like get_non_outlier_pdu(), but without the expense of fetching and /// parsing the PduEvent - pub(super) async fn non_outlier_pdu_exists(&self, event_id: &EventId) -> Result<()> { + pub(super) async fn non_outlier_pdu_exists(&self, event_id: &EventId) -> Result { let pduid = self.get_pdu_id(event_id).await?; - self.pduid_pdu.get(&pduid).await?; - - Ok(()) + self.pduid_pdu.get(&pduid).await.map(|_| ()) } /// Returns the pdu. @@ -140,16 +138,14 @@ impl Data { /// Like get_non_outlier_pdu(), but without the expense of fetching and /// parsing the PduEvent - pub(super) async fn outlier_pdu_exists(&self, event_id: &EventId) -> Result<()> { - self.eventid_outlierpdu.get(event_id).await?; - - Ok(()) + pub(super) async fn outlier_pdu_exists(&self, event_id: &EventId) -> Result { + self.eventid_outlierpdu.get(event_id).await.map(|_| ()) } /// Like get_pdu(), but without the expense of fetching and parsing the data pub(super) async fn pdu_exists(&self, event_id: &EventId) -> bool { - let non_outlier = self.non_outlier_pdu_exists(event_id).map(|res| res.is_ok()); - let outlier = self.outlier_pdu_exists(event_id).map(|res| res.is_ok()); + let non_outlier = self.non_outlier_pdu_exists(event_id).is_ok(); + let outlier = self.outlier_pdu_exists(event_id).is_ok(); //TODO: parallelize non_outlier.await || outlier.await @@ -169,7 +165,6 @@ impl Data { pub(super) async fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) { 
self.pduid_pdu.raw_put(pdu_id, Json(json)); - self.lasttimelinecount_cache .lock() .await @@ -181,21 +176,17 @@ impl Data { pub(super) fn prepend_backfill_pdu(&self, pdu_id: &[u8], event_id: &EventId, json: &CanonicalJsonObject) { self.pduid_pdu.raw_put(pdu_id, Json(json)); - self.eventid_pduid.insert(event_id, pdu_id); self.eventid_outlierpdu.remove(event_id); } /// Removes a pdu and creates a new one with the same id. - pub(super) async fn replace_pdu( - &self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, _pdu: &PduEvent, - ) -> Result<()> { + pub(super) async fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, _pdu: &PduEvent) -> Result { if self.pduid_pdu.get(pdu_id).await.is_not_found() { return Err!(Request(NotFound("PDU does not exist."))); } - let pdu = serde_json::to_vec(pdu_json)?; - self.pduid_pdu.insert(pdu_id, &pdu); + self.pduid_pdu.raw_put(pdu_id, Json(pdu_json)); Ok(()) } From b505f0d0d7a8ec2accc4b38dfe3391c9f780ba25 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 21 Oct 2024 22:00:39 +0000 Subject: [PATCH 0095/1248] add (back) query_trusted_key_servers_first w/ additional configuration detail Signed-off-by: Jason Volk --- src/core/config/mod.rs | 29 +++++++++++++++ src/service/server_keys/acquire.rs | 59 +++++++++++++++++++++++------- src/service/server_keys/get.rs | 47 ++++++++++++++++++++---- src/service/server_keys/mod.rs | 4 +- 4 files changed, 116 insertions(+), 23 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 02b277d0..52ce8a01 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -490,6 +490,35 @@ pub struct Config { #[serde(default = "default_trusted_servers")] pub trusted_servers: Vec, + /// Whether to query the servers listed in trusted_servers first or query + /// the origin server first. For best security, querying the origin server + /// first is advised to minimize the exposure to a compromised trusted + /// server. 
For maximum performance this can be set to true, however other + /// options exist to query trusted servers first under specific high-load + /// circumstances and should be evaluated before setting this to true. + #[serde(default)] + pub query_trusted_key_servers_first: bool, + + /// Whether to query the servers listed in trusted_servers first + /// specifically on room joins. This option limits the exposure to a + /// compromised trusted server to room joins only. The join operation + /// requires gathering keys from many origin servers which can cause + /// significant delays. Therefore this defaults to true to mitigate + /// unexpected delays out-of-the-box. The security-paranoid or those + /// willing to tolerate delays are advised to set this to false. Note that + /// setting query_trusted_key_servers_first to true causes this option to + /// be ignored. + #[serde(default = "true_fn")] + pub query_trusted_key_servers_first_on_join: bool, + + /// Only query trusted servers for keys and never the origin server. This is + /// intended for clusters or custom deployments using their trusted_servers + /// as forwarding-agents to cache and deduplicate requests. Notary servers + /// do not act as forwarding-agents by default, therefore do not enable this + /// unless you know exactly what you are doing. + #[serde(default)] + pub only_query_trusted_key_servers: bool, + + /// max log level for conduwuit.
allows debug, info, warn, or error /// see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives /// **Caveat**: diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 2b170040..25b676b8 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -47,35 +47,66 @@ where S: Iterator + Send + Clone, K: Iterator + Send + Clone, { + let notary_only = self.services.server.config.only_query_trusted_key_servers; + let notary_first_always = self.services.server.config.query_trusted_key_servers_first; + let notary_first_on_join = self + .services + .server + .config + .query_trusted_key_servers_first_on_join; + let requested_servers = batch.clone().count(); let requested_keys = batch.clone().flat_map(|(_, key_ids)| key_ids).count(); debug!("acquire {requested_keys} keys from {requested_servers}"); - let missing = self.acquire_locals(batch).await; - let missing_keys = keys_count(&missing); - let missing_servers = missing.len(); + let mut missing = self.acquire_locals(batch).await; + let mut missing_keys = keys_count(&missing); + let mut missing_servers = missing.len(); if missing_servers == 0 { return; } debug!("missing {missing_keys} keys for {missing_servers} servers locally"); - let missing = self.acquire_origins(missing.into_iter()).await; - let missing_keys = keys_count(&missing); - let missing_servers = missing.len(); - if missing_servers == 0 { - return; + if notary_first_always || notary_first_on_join { + missing = self.acquire_notary(missing.into_iter()).await; + missing_keys = keys_count(&missing); + missing_servers = missing.len(); + if missing_keys == 0 { + return; + } + + debug_warn!("missing {missing_keys} keys for {missing_servers} servers from all notaries first"); } - debug_warn!("missing {missing_keys} keys for {missing_servers} servers unreachable"); + if !notary_only { + missing = self.acquire_origins(missing.into_iter()).await; + 
missing_keys = keys_count(&missing); + missing_servers = missing.len(); + if missing_keys == 0 { + return; + } + + debug_warn!("missing {missing_keys} keys for {missing_servers} servers unreachable"); + } + + if !notary_first_always && !notary_first_on_join { + missing = self.acquire_notary(missing.into_iter()).await; + missing_keys = keys_count(&missing); + missing_servers = missing.len(); + if missing_keys == 0 { + return; + } + + debug_warn!("still missing {missing_keys} keys for {missing_servers} servers from all notaries."); + } - let missing = self.acquire_notary(missing.into_iter()).await; - let missing_keys = keys_count(&missing); - let missing_servers = missing.len(); if missing_keys > 0 { - debug_warn!("still missing {missing_keys} keys for {missing_servers} servers from all notaries"); - warn!("did not obtain {missing_keys} of {requested_keys} keys; some events may not be accepted"); + warn!( + "did not obtain {missing_keys} keys for {missing_servers} servers out of {requested_keys} total keys for \ + {requested_servers} total servers; some events may not be verifiable" + ); } } diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 0f449b46..441e33d4 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -53,17 +53,40 @@ where #[implement(super::Service)] pub async fn get_verify_key(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> Result { + let notary_first = self.services.server.config.query_trusted_key_servers_first; + let notary_only = self.services.server.config.only_query_trusted_key_servers; + if let Some(result) = self.verify_keys_for(origin).await.remove(key_id) { return Ok(result); } - if let Ok(server_key) = self.server_request(origin).await { - self.add_signing_keys(server_key.clone()).await; - if let Some(result) = extract_key(server_key, key_id) { + if notary_first { + if let Ok(result) = self.get_verify_key_from_notaries(origin, key_id).await { return Ok(result); } } + if 
!notary_only { + if let Ok(result) = self.get_verify_key_from_origin(origin, key_id).await { + return Ok(result); + } + } + + if !notary_first { + if let Ok(result) = self.get_verify_key_from_notaries(origin, key_id).await { + return Ok(result); + } + } + + Err!(BadServerResponse(debug_error!( + ?key_id, + ?origin, + "Failed to fetch federation signing-key" + ))) +} + +#[implement(super::Service)] +async fn get_verify_key_from_notaries(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> Result { for notary in self.services.globals.trusted_servers() { if let Ok(server_keys) = self.notary_request(notary, origin).await { for server_key in &server_keys { @@ -78,9 +101,17 @@ pub async fn get_verify_key(&self, origin: &ServerName, key_id: &ServerSigningKe } } - Err!(BadServerResponse(debug_error!( - ?key_id, - ?origin, - "Failed to fetch federation signing-key" - ))) + Err!(Request(NotFound("Failed to fetch signing-key from notaries"))) +} + +#[implement(super::Service)] +async fn get_verify_key_from_origin(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> Result { + if let Ok(server_key) = self.server_request(origin).await { + self.add_signing_keys(server_key.clone()).await; + if let Some(result) = extract_key(server_key, key_id) { + return Ok(result); + } + } + + Err!(Request(NotFound("Failed to fetch signing-key from origin"))) } diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index c3b84cb3..dc09703c 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -7,7 +7,7 @@ mod verify; use std::{collections::BTreeMap, sync::Arc, time::Duration}; -use conduit::{implement, utils::time::timepoint_from_now, Result}; +use conduit::{implement, utils::time::timepoint_from_now, Result, Server}; use database::{Deserialized, Json, Map}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, @@ -30,6 +30,7 @@ pub struct Service { struct Services { globals: Dep, sending: Dep, + server: Arc, } 
struct Data { @@ -52,6 +53,7 @@ impl crate::Service for Service { services: Services { globals: args.depend::("globals"), sending: args.depend::("sending"), + server: args.server.clone(), }, db: Data { server_signingkeys: args.db["server_signingkeys"].clone(), From 0e55fa2de24a945e55469ec496f85e29e5f10d5b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 21 Oct 2024 23:54:54 +0000 Subject: [PATCH 0096/1248] add ready_try_for_each to TryReadyExt extension utils Signed-off-by: Jason Volk --- src/core/utils/stream/try_ready.rs | 18 +++++++++++++++++- src/service/sending/mod.rs | 14 +++++++++----- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index ab37d9b3..df356456 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -2,7 +2,7 @@ use futures::{ future::{ready, Ready}, - stream::{AndThen, TryStream, TryStreamExt}, + stream::{AndThen, TryForEach, TryStream, TryStreamExt}, }; use crate::Result; @@ -18,6 +18,12 @@ where fn ready_and_then(self, f: F) -> AndThen>, impl FnMut(S::Ok) -> Ready>> where F: Fn(S::Ok) -> Result; + + fn ready_try_for_each( + self, f: F, + ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> + where + F: Fn(S::Ok) -> Result<(), E>; } impl TryReadyExt for S @@ -32,4 +38,14 @@ where { self.and_then(move |t| ready(f(t))) } + + #[inline] + fn ready_try_for_each( + self, f: F, + ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> + where + F: Fn(S::Ok) -> Result<(), E>, + { + self.try_for_each(move |t| ready(f(t))) + } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 63c5e655..a1d5f692 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -7,8 +7,12 @@ mod sender; use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; -use conduit::{err, utils::ReadyExt, warn, Result, Server}; -use futures::{future::ready, Stream, StreamExt, TryStreamExt}; +use conduit::{ + err, + 
utils::{ReadyExt, TryReadyExt}, + warn, Result, Server, +}; +use futures::{Stream, StreamExt}; use ruma::{ api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, @@ -235,12 +239,12 @@ impl Service { .map(ToOwned::to_owned) .map(Destination::Normal) .map(Ok) - .try_for_each(|dest| { - ready(self.dispatch(Msg { + .ready_try_for_each(|dest| { + self.dispatch(Msg { dest, event: SendingEvent::Flush, queue_id: Vec::::new(), - })) + }) }) .await } From 167807e0a6e333a4a8f7be9b8ed0da46831ce234 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 00:09:55 +0000 Subject: [PATCH 0097/1248] de-wrapper max_fetch_prev_event; increase default config Signed-off-by: Jason Volk --- src/core/config/mod.rs | 2 +- src/service/globals/mod.rs | 2 -- src/service/rooms/event_handler/mod.rs | 6 ++++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 52ce8a01..23d35424 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1822,7 +1822,7 @@ fn default_appservice_idle_timeout() -> u64 { 300 } fn default_pusher_idle_timeout() -> u64 { 15 } -fn default_max_fetch_prev_events() -> u16 { 100_u16 } +fn default_max_fetch_prev_events() -> u16 { 192_u16 } fn default_tracing_flame_filter() -> String { cfg!(debug_assertions) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 7680007d..329a6583 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -171,8 +171,6 @@ impl Service { #[inline] pub fn server_name(&self) -> &ServerName { self.config.server_name.as_ref() } - pub fn max_fetch_prev_events(&self) -> u16 { self.config.max_fetch_prev_events } - pub fn allow_registration(&self) -> bool { self.config.allow_registration } pub fn allow_guest_registration(&self) -> bool { self.config.allow_guest_registration } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 41ab79f1..8f96f68e 100644 --- 
a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -13,7 +13,7 @@ use conduit::{ result::LogErr, trace, utils::{math::continue_exponential_backoff_secs, IterStream, MutexMap}, - warn, Err, Error, PduEvent, Result, + warn, Err, Error, PduEvent, Result, Server, }; use futures::{future, future::ready, FutureExt, StreamExt, TryFutureExt}; use ruma::{ @@ -55,6 +55,7 @@ struct Services { state_accessor: Dep, state_compressor: Dep, timeline: Dep, + server: Arc, } type RoomMutexMap = MutexMap; @@ -76,6 +77,7 @@ impl crate::Service for Service { state_accessor: args.depend::("rooms::state_accessor"), state_compressor: args.depend::("rooms::state_compressor"), timeline: args.depend::("rooms::timeline"), + server: args.server.clone(), }, federation_handletime: HandleTimeMap::new().into(), mutex_federation: RoomMutexMap::new(), @@ -1280,7 +1282,7 @@ impl Service { { check_room_id(room_id, &pdu)?; - let limit = self.services.globals.max_fetch_prev_events(); + let limit = self.services.server.config.max_fetch_prev_events; if amount > limit { debug_warn!("Max prev event limit reached! 
Limit: {limit}"); graph.insert(prev_event_id.clone(), HashSet::new()); From c06f560913ce637419f9a825ca8b6ffaca698bc8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 03:21:56 +0000 Subject: [PATCH 0098/1248] add some additional database::de test cases Signed-off-by: Jason Volk --- src/database/tests.rs | 62 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 47dfb32c..bfab99ef 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -10,7 +10,7 @@ use serde::Serialize; use crate::{ de, ser, ser::{serialize_to_vec, Json}, - Interfix, + Ignore, Interfix, }; #[test] @@ -187,6 +187,66 @@ fn de_tuple() { assert_eq!(b, room_id, "deserialized room_id does not match"); } +#[test] +#[should_panic(expected = "failed to deserialize")] +fn de_tuple_invalid() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let raw: &[u8] = b"@user:example.com\xFF@user:example.com"; + let (a, b): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); + + assert_eq!(a, user_id, "deserialized user_id does not match"); + assert_eq!(b, room_id, "deserialized room_id does not match"); +} + +#[test] +#[should_panic(expected = "failed to deserialize")] +fn de_tuple_incomplete() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let raw: &[u8] = b"@user:example.com"; + let (a, _): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); + + assert_eq!(a, user_id, "deserialized user_id does not match"); +} + +#[test] +#[should_panic(expected = "failed to deserialize")] +fn de_tuple_incomplete_with_sep() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let raw: &[u8] = b"@user:example.com\xFF"; + let (a, _): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); + + assert_eq!(a, user_id, 
"deserialized user_id does not match"); +} + +#[test] +#[should_panic(expected = "deserialization failed to consume trailing bytes")] +fn de_tuple_unfinished() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let raw: &[u8] = b"@user:example.com\xFF!room:example.com\xFF@user:example.com"; + let (a, b): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); + + assert_eq!(a, user_id, "deserialized user_id does not match"); + assert_eq!(b, room_id, "deserialized room_id does not match"); +} + +#[test] +fn de_tuple_ignore() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let raw: &[u8] = b"@user:example.com\xFF@user2:example.net\xFF!room:example.com"; + let (a, _, c): (&UserId, Ignore, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); + + assert_eq!(a, user_id, "deserialized user_id does not match"); + assert_eq!(c, room_id, "deserialized room_id does not match"); +} + #[test] fn de_json_array() { let a = &["foo", "bar", "baz"]; From 0e0438e1f9b49a3fa1b8fc0dece769d91c2bafbf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 03:28:45 +0000 Subject: [PATCH 0099/1248] further optimize presence_since iteration Signed-off-by: Jason Volk --- src/admin/query/presence.rs | 10 +++++++--- src/api/client/sync/v3.rs | 8 ++++---- src/service/presence/data.rs | 11 ++++++----- src/service/presence/mod.rs | 3 +-- src/service/sending/sender.rs | 8 +++++--- 5 files changed, 23 insertions(+), 17 deletions(-) diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 6189270c..0963429e 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -42,12 +42,16 @@ pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) since, } => { let timer = tokio::time::Instant::now(); - let results = 
services.presence.db.presence_since(since); - let presence_since: Vec<(_, _, _)> = results.collect().await; + let results: Vec<(_, _, _)> = services + .presence + .presence_since(since) + .map(|(user_id, count, bytes)| (user_id.to_owned(), count, bytes.to_vec())) + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{presence_since:#?}\n```" + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, } diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index f29fe220..2bd318df 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -488,7 +488,7 @@ async fn process_presence_updates( if !services .rooms .state_cache - .user_sees_user(syncing_user, &user_id) + .user_sees_user(syncing_user, user_id) .await { continue; @@ -496,10 +496,10 @@ async fn process_presence_updates( let presence_event = services .presence - .from_json_bytes_to_event(&presence_bytes, &user_id) + .from_json_bytes_to_event(presence_bytes, user_id) .await?; - match presence_updates.entry(user_id) { + match presence_updates.entry(user_id.into()) { Entry::Vacant(slot) => { slot.insert(presence_event); }, @@ -524,7 +524,7 @@ async fn process_presence_updates( .currently_active .or(curr_content.currently_active); }, - } + }; } Ok(()) diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 8522746f..68b2c3fe 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -7,7 +7,7 @@ use conduit::{ }; use database::{Deserialized, Json, Map}; use futures::Stream; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; +use ruma::{events::presence::PresenceEvent, presence::PresenceState, UInt, UserId}; use super::Presence; use crate::{globals, users, Dep}; @@ -137,13 +137,14 @@ impl Data { self.userid_presenceid.remove(user_id); } - pub fn presence_since(&self, since: u64) -> 
impl Stream)> + Send + '_ { + #[inline] + pub(super) fn presence_since(&self, since: u64) -> impl Stream + Send + '_ { self.presenceid_presence .raw_stream() .ignore_err() - .ready_filter_map(move |(key, presence_bytes)| { - let (count, user_id) = presenceid_parse(key).expect("invalid presenceid_parse"); - (count > since).then(|| (user_id.to_owned(), count, presence_bytes.to_vec())) + .ready_filter_map(move |(key, presence)| { + let (count, user_id) = presenceid_parse(key).ok()?; + (count > since).then_some((user_id, count, presence)) }) } } diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index 82a99bd5..b2106f3f 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -162,8 +162,7 @@ impl Service { /// Returns the most recent presence updates that happened after the event /// with id `since`. - #[inline] - pub fn presence_since(&self, since: u64) -> impl Stream)> + Send + '_ { + pub fn presence_since(&self, since: u64) -> impl Stream + Send + '_ { self.db.presence_since(since) } diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 5c0a324b..a57d4aea 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -7,7 +7,9 @@ use std::{ use base64::{engine::general_purpose, Engine as _}; use conduit::{ - debug, debug_warn, err, trace, + debug, debug_warn, err, + result::LogErr, + trace, utils::{calculate_hash, math::continue_exponential_backoff_secs, ReadyExt}, warn, Error, Result, }; @@ -315,14 +317,14 @@ impl Service { while let Some((user_id, count, presence_bytes)) = presence_since.next().await { *max_edu_count = cmp::max(count, *max_edu_count); - if !self.services.globals.user_is_local(&user_id) { + if !self.services.globals.user_is_local(user_id) { continue; } if !self .services .state_cache - .server_sees_user(server_name, &user_id) + .server_sees_user(server_name, user_id) .await { continue; From a74461fc9a9bd8f5a237662b399431cacc3f29e6 Mon Sep 17 00:00:00 2001 From: 
Jason Volk Date: Tue, 22 Oct 2024 04:03:07 +0000 Subject: [PATCH 0100/1248] split keys_changed for stronger-type overloads Signed-off-by: Jason Volk --- src/api/client/keys.rs | 5 +++-- src/api/client/sync/v3.rs | 5 +++-- src/api/client/sync/v4.rs | 5 +++-- src/api/mod.rs | 1 - src/service/users/mod.rs | 21 ++++++++++++++++++--- 5 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 254d92cc..44d9164c 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -232,7 +232,7 @@ pub(crate) async fn get_key_changes_route( device_list_updates.extend( services .users - .keys_changed(sender_user.as_str(), from, Some(to)) + .keys_changed(sender_user, from, Some(to)) .map(ToOwned::to_owned) .collect::>() .await, @@ -244,7 +244,8 @@ pub(crate) async fn get_key_changes_route( device_list_updates.extend( services .users - .keys_changed(room_id.as_str(), from, Some(to)) + .room_keys_changed(room_id, from, Some(to)) + .map(|(user_id, _)| user_id) .map(ToOwned::to_owned) .collect::>() .await, diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 2bd318df..ccca1f85 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -138,7 +138,7 @@ pub(crate) async fn sync_events_route( device_list_updates.extend( services .users - .keys_changed(sender_user.as_ref(), since, None) + .keys_changed(&sender_user, since, None) .map(ToOwned::to_owned) .collect::>() .await, @@ -917,7 +917,8 @@ async fn load_joined_room( device_list_updates.extend( services .users - .keys_changed(room_id.as_ref(), since, None) + .room_keys_changed(room_id, since, None) + .map(|(user_id, _)| user_id) .map(ToOwned::to_owned) .collect::>() .await, diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 2adb3b71..4f8323e6 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -162,7 +162,7 @@ pub(crate) async fn sync_events_v4_route( device_list_changes.extend( services .users - 
.keys_changed(sender_user.as_ref(), globalsince, None) + .keys_changed(sender_user, globalsince, None) .map(ToOwned::to_owned) .collect::>() .await, @@ -285,7 +285,8 @@ pub(crate) async fn sync_events_v4_route( device_list_changes.extend( services .users - .keys_changed(room_id.as_ref(), globalsince, None) + .room_keys_changed(room_id, globalsince, None) + .map(|(user_id, _)| user_id) .map(ToOwned::to_owned) .collect::>() .await, diff --git a/src/api/mod.rs b/src/api/mod.rs index 96837470..ed8aacf2 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -6,7 +6,6 @@ extern crate conduit_core as conduit; extern crate conduit_service as service; pub(crate) use conduit::{debug_info, pdu::PduEvent, utils, Error, Result}; -pub(crate) use service::services; pub(crate) use self::router::{Ruma, RumaResponse, State}; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 589aee8a..b9183e12 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -13,7 +13,7 @@ use ruma::{ events::{ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType}, serde::Raw, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId, - OwnedMxcUri, OwnedUserId, UInt, UserId, + OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, }; use serde_json::json; @@ -585,9 +585,24 @@ impl Service { Ok(()) } + #[inline] pub fn keys_changed<'a>( - &'a self, user_or_room_id: &'a str, from: u64, to: Option, + &'a self, user_id: &'a UserId, from: u64, to: Option, ) -> impl Stream + Send + 'a { + self.keys_changed_user_or_room(user_id.as_str(), from, to) + .map(|(user_id, ..)| user_id) + } + + #[inline] + pub fn room_keys_changed<'a>( + &'a self, room_id: &'a RoomId, from: u64, to: Option, + ) -> impl Stream + Send + 'a { + self.keys_changed_user_or_room(room_id.as_str(), from, to) + } + + fn keys_changed_user_or_room<'a>( + &'a self, user_or_room_id: &'a str, from: u64, to: Option, + ) -> impl Stream + Send + 'a { type 
KeyVal<'a> = ((&'a str, u64), &'a UserId); let to = to.unwrap_or(u64::MAX); @@ -597,7 +612,7 @@ impl Service { .stream_from(&start) .ignore_err() .ready_take_while(move |((prefix, count), _): &KeyVal<'_>| *prefix == user_or_room_id && *count <= to) - .map(|((..), user_id): KeyVal<'_>| user_id) + .map(|((_, count), user_id): KeyVal<'_>| (user_id, count)) } pub async fn mark_device_key_update(&self, user_id: &UserId) { From d35376a90cb521b578aff75a1441699e10695bac Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 05:30:28 +0000 Subject: [PATCH 0101/1248] aggregate receipts into single edu; dedup presence; refactor selection limits etc Signed-off-by: Jason Volk --- src/api/server/send.rs | 8 +- src/service/sending/mod.rs | 5 +- src/service/sending/sender.rs | 307 ++++++++++++++++++++-------------- 3 files changed, 190 insertions(+), 130 deletions(-) diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 40f9403b..e2100a0f 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -21,16 +21,16 @@ use ruma::{ OwnedEventId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; +use service::{ + sending::{EDU_LIMIT, PDU_LIMIT}, + Services, +}; use crate::{ - services::Services, utils::{self}, Ruma, }; -const PDU_LIMIT: usize = 50; -const EDU_LIMIT: usize = 100; - type ResolvedMap = BTreeMap>; /// # `PUT /_matrix/federation/v1/send/{txnId}` diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index a1d5f692..ea266883 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -20,7 +20,10 @@ use ruma::{ use tokio::sync::Mutex; use self::data::Data; -pub use self::dest::Destination; +pub use self::{ + dest::Destination, + sender::{EDU_LIMIT, PDU_LIMIT}, +}; use crate::{account_data, client, globals, presence, pusher, resolver, rooms, server_keys, users, Dep}; pub struct Service { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index a57d4aea..d9087d44 100644 --- 
a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -7,7 +7,7 @@ use std::{ use base64::{engine::general_purpose, Engine as _}; use conduit::{ - debug, debug_warn, err, + debug, debug_warn, err, error, result::LogErr, trace, utils::{calculate_hash, math::continue_exponential_backoff_secs, ReadyExt}, @@ -26,8 +26,8 @@ use ruma::{ }, device_id, events::{push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, - push, uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, - ServerName, UInt, + push, uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, + RoomVersionId, ServerName, UInt, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use tokio::time::sleep_until; @@ -47,10 +47,16 @@ type SendingFuture<'a> = BoxFuture<'a, SendingResult>; type SendingFutures<'a> = FuturesUnordered>; type CurTransactionStatus = HashMap; -const DEQUEUE_LIMIT: usize = 48; -const SELECT_EDU_LIMIT: usize = 16; const CLEANUP_TIMEOUT_MS: u64 = 3500; +const SELECT_PRESENCE_LIMIT: usize = 256; +const SELECT_RECEIPT_LIMIT: usize = 256; +const SELECT_EDU_LIMIT: usize = EDU_LIMIT - 2; +const DEQUEUE_LIMIT: usize = 48; + +pub const PDU_LIMIT: usize = 50; +pub const EDU_LIMIT: usize = 100; + impl Service { #[tracing::instrument(skip_all, name = "sender")] pub(super) async fn sender(&self) -> Result<()> { @@ -216,6 +222,7 @@ impl Service { // Add EDU's into the transaction if let Destination::Normal(server_name) = dest { if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { + debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); events.extend(select_edus.into_iter().map(SendingEvent::Edu)); self.db.set_latest_educount(server_name, last_count); } @@ -254,69 +261,176 @@ impl Service { async fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { // u64: count 
of last edu let since = self.db.get_latest_educount(server_name).await; - let mut events = Vec::new(); let mut max_edu_count = since; - let mut device_list_changes = HashSet::new(); + let mut events = Vec::new(); - let server_rooms = self.services.state_cache.server_rooms(server_name); + self.select_edus_device_changes(server_name, since, &mut max_edu_count, &mut events) + .await; - pin_mut!(server_rooms); - while let Some(room_id) = server_rooms.next().await { - // Look for device list updates in this room - device_list_changes.extend( - self.services - .users - .keys_changed(room_id.as_str(), since, None) - .ready_filter(|user_id| self.services.globals.user_is_local(user_id)) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - if self.server.config.allow_outgoing_read_receipts - && !self - .select_edus_receipts(room_id, since, &mut max_edu_count, &mut events) - .await? - { - break; - } - } - - for user_id in device_list_changes { - // Empty prev id forces synapse to resync; because synapse resyncs, - // we can just insert placeholder data - let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { - user_id, - device_id: device_id!("placeholder").to_owned(), - device_display_name: Some("Placeholder".to_owned()), - stream_id: uint!(1), - prev_id: Vec::new(), - deleted: None, - keys: None, - }); - - events.push(serde_json::to_vec(&edu).expect("json can be serialized")); + if self.server.config.allow_outgoing_read_receipts { + self.select_edus_receipts(server_name, since, &mut max_edu_count, &mut events) + .await; } if self.server.config.allow_outgoing_presence { self.select_edus_presence(server_name, since, &mut max_edu_count, &mut events) - .await?; + .await; } Ok((events, max_edu_count)) } + /// Look for presence + async fn select_edus_device_changes( + &self, server_name: &ServerName, since: u64, max_edu_count: &mut u64, events: &mut Vec>, + ) { + debug_assert!(events.len() < SELECT_EDU_LIMIT, "called when edu limit reached"); + + let server_rooms = 
self.services.state_cache.server_rooms(server_name); + + pin_mut!(server_rooms); + let mut device_list_changes = HashSet::::new(); + while let Some(room_id) = server_rooms.next().await { + let keys_changed = self + .services + .users + .room_keys_changed(room_id, since, None) + .ready_filter(|(user_id, _)| self.services.globals.user_is_local(user_id)); + + pin_mut!(keys_changed); + while let Some((user_id, count)) = keys_changed.next().await { + *max_edu_count = cmp::max(count, *max_edu_count); + if !device_list_changes.insert(user_id.into()) { + continue; + } + + // Empty prev id forces synapse to resync; because synapse resyncs, + // we can just insert placeholder data + let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { + user_id: user_id.into(), + device_id: device_id!("placeholder").to_owned(), + device_display_name: Some("Placeholder".to_owned()), + stream_id: uint!(1), + prev_id: Vec::new(), + deleted: None, + keys: None, + }); + + let edu = serde_json::to_vec(&edu).expect("failed to serialize device list update to JSON"); + + events.push(edu); + if events.len() >= SELECT_EDU_LIMIT { + return; + } + } + } + } + + /// Look for read receipts in this room + async fn select_edus_receipts( + &self, server_name: &ServerName, since: u64, max_edu_count: &mut u64, events: &mut Vec>, + ) { + debug_assert!(events.len() < EDU_LIMIT, "called when edu limit reached"); + + let server_rooms = self.services.state_cache.server_rooms(server_name); + + pin_mut!(server_rooms); + let mut num = 0; + let mut receipts = BTreeMap::::new(); + while let Some(room_id) = server_rooms.next().await { + let receipt_map = self + .select_edus_receipts_room(room_id, since, max_edu_count, &mut num) + .await; + + if !receipt_map.read.is_empty() { + receipts.insert(room_id.into(), receipt_map); + } + } + + if receipts.is_empty() { + return; + } + + let receipt_content = Edu::Receipt(ReceiptContent { + receipts, + }); + + let receipt_content = + 
serde_json::to_vec(&receipt_content).expect("Failed to serialize Receipt EDU to JSON vec"); + + events.push(receipt_content); + } + + /// Look for read receipts in this room + async fn select_edus_receipts_room( + &self, room_id: &RoomId, since: u64, max_edu_count: &mut u64, num: &mut usize, + ) -> ReceiptMap { + let receipts = self + .services + .read_receipt + .readreceipts_since(room_id, since); + + pin_mut!(receipts); + let mut read = BTreeMap::::new(); + while let Some((user_id, count, read_receipt)) = receipts.next().await { + *max_edu_count = cmp::max(count, *max_edu_count); + if !self.services.globals.user_is_local(&user_id) { + continue; + } + + let Ok(event) = serde_json::from_str(read_receipt.json().get()) else { + error!(?user_id, ?count, ?read_receipt, "Invalid edu event in read_receipts."); + continue; + }; + + let AnySyncEphemeralRoomEvent::Receipt(r) = event else { + error!(?user_id, ?count, ?event, "Invalid event type in read_receipts"); + continue; + }; + + let (event_id, mut receipt) = r + .content + .0 + .into_iter() + .next() + .expect("we only use one event per read receipt"); + + let receipt = receipt + .remove(&ReceiptType::Read) + .expect("our read receipts always set this") + .remove(&user_id) + .expect("our read receipts always have the user here"); + + let receipt_data = ReceiptData { + data: receipt, + event_ids: vec![event_id.clone()], + }; + + if read.insert(user_id, receipt_data).is_none() { + *num = num.saturating_add(1); + if *num >= SELECT_RECEIPT_LIMIT { + break; + } + } + } + + ReceiptMap { + read, + } + } + /// Look for presence async fn select_edus_presence( &self, server_name: &ServerName, since: u64, max_edu_count: &mut u64, events: &mut Vec>, - ) -> Result { + ) { + debug_assert!(events.len() < EDU_LIMIT, "called when edu limit reached"); + let presence_since = self.services.presence.presence_since(since); pin_mut!(presence_since); - let mut presence_updates = Vec::new(); + let mut presence_updates = HashMap::::new(); while 
let Some((user_id, count, presence_bytes)) = presence_since.next().await { *max_edu_count = cmp::max(count, *max_edu_count); - if !self.services.globals.user_is_local(user_id) { continue; } @@ -330,101 +444,44 @@ impl Service { continue; } - let presence_event = self + let Ok(presence_event) = self .services .presence - .from_json_bytes_to_event(&presence_bytes, &user_id) - .await?; + .from_json_bytes_to_event(presence_bytes, user_id) + .await + .log_err() + else { + continue; + }; - presence_updates.push(PresenceUpdate { - user_id, + let update = PresenceUpdate { + user_id: user_id.into(), presence: presence_event.content.presence, currently_active: presence_event.content.currently_active.unwrap_or(false), + status_msg: presence_event.content.status_msg, last_active_ago: presence_event .content .last_active_ago .unwrap_or_else(|| uint!(0)), - status_msg: presence_event.content.status_msg, - }); + }; - if presence_updates.len() >= SELECT_EDU_LIMIT { + presence_updates.insert(user_id.into(), update); + if presence_updates.len() >= SELECT_PRESENCE_LIMIT { break; } } - if !presence_updates.is_empty() { - let presence_content = Edu::Presence(PresenceContent::new(presence_updates)); - events.push(serde_json::to_vec(&presence_content).expect("PresenceEvent can be serialized")); + if presence_updates.is_empty() { + return; } - Ok(true) - } + let presence_content = Edu::Presence(PresenceContent { + push: presence_updates.into_values().collect(), + }); - /// Look for read receipts in this room - async fn select_edus_receipts( - &self, room_id: &RoomId, since: u64, max_edu_count: &mut u64, events: &mut Vec>, - ) -> Result { - let receipts = self - .services - .read_receipt - .readreceipts_since(room_id, since); + let presence_content = serde_json::to_vec(&presence_content).expect("failed to serialize Presence EDU to JSON"); - pin_mut!(receipts); - while let Some((user_id, count, read_receipt)) = receipts.next().await { - *max_edu_count = cmp::max(count, *max_edu_count); - if 
!self.services.globals.user_is_local(&user_id) { - continue; - } - - let event = serde_json::from_str(read_receipt.json().get()) - .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; - - let federation_event = if let AnySyncEphemeralRoomEvent::Receipt(r) = event { - let mut read = BTreeMap::new(); - let (event_id, mut receipt) = r - .content - .0 - .into_iter() - .next() - .expect("we only use one event per read receipt"); - - let receipt = receipt - .remove(&ReceiptType::Read) - .expect("our read receipts always set this") - .remove(&user_id) - .expect("our read receipts always have the user here"); - - read.insert( - user_id, - ReceiptData { - data: receipt.clone(), - event_ids: vec![event_id.clone()], - }, - ); - - let receipt_map = ReceiptMap { - read, - }; - - let mut receipts = BTreeMap::new(); - receipts.insert(room_id.to_owned(), receipt_map); - - Edu::Receipt(ReceiptContent { - receipts, - }) - } else { - Error::bad_database("Invalid event type in read_receipts"); - continue; - }; - - events.push(serde_json::to_vec(&federation_event).expect("json can be serialized")); - - if events.len() >= SELECT_EDU_LIMIT { - return Ok(false); - } - } - - Ok(true) + events.push(presence_content); } async fn send_events(&self, dest: Destination, events: Vec) -> SendingResult { From ca57dc79288e563e5e090f6699b115bc49b9d27f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 06:21:57 +0000 Subject: [PATCH 0102/1248] optimize config denylists Signed-off-by: Jason Volk --- src/api/client/directory.rs | 20 ++++++++------------ src/core/config/mod.rs | 14 +++++++------- src/service/globals/mod.rs | 4 ---- src/service/media/remote.rs | 3 +-- src/service/sending/send.rs | 9 ++++----- 5 files changed, 20 insertions(+), 30 deletions(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index ea499545..6cf7b13f 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -37,14 +37,12 @@ pub(crate) async fn 
get_public_rooms_filtered_route( ) -> Result { if let Some(server) = &body.server { if services - .globals - .forbidden_remote_room_directory_server_names() + .server + .config + .forbidden_remote_room_directory_server_names .contains(server) { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } @@ -77,14 +75,12 @@ pub(crate) async fn get_public_rooms_route( ) -> Result { if let Some(server) = &body.server { if services - .globals - .forbidden_remote_room_directory_server_names() + .server + .config + .forbidden_remote_room_directory_server_names .contains(server) { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 23d35424..59ddd7c7 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2,7 +2,7 @@ pub mod check; pub mod proxy; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, HashSet}, fmt, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, path::PathBuf, @@ -983,8 +983,8 @@ pub struct Config { /// Vector list of servers that conduwuit will refuse to download remote /// media from. No default. - #[serde(default = "Vec::new")] - pub prevent_media_downloads_from: Vec, + #[serde(default = "HashSet::new")] + pub prevent_media_downloads_from: HashSet, /// List of forbidden server names that we will block incoming AND outgoing /// federation with, and block client room joins / remote user invites. @@ -994,14 +994,14 @@ pub struct Config { /// outbound federation handler. /// /// Basically "global" ACLs. No default. 
- #[serde(default = "Vec::new")] - pub forbidden_remote_server_names: Vec, + #[serde(default = "HashSet::new")] + pub forbidden_remote_server_names: HashSet, /// List of forbidden server names that we will block all outgoing federated /// room directory requests for. Useful for preventing our users from /// wandering into bad servers or spaces. No default. - #[serde(default = "Vec::new")] - pub forbidden_remote_room_directory_server_names: Vec, + #[serde(default = "HashSet::new")] + pub forbidden_remote_room_directory_server_names: HashSet, /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you /// do not want conduwuit to send outbound requests to. Defaults to diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 329a6583..157c3944 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -252,10 +252,6 @@ impl Service { pub fn allow_outgoing_read_receipts(&self) -> bool { self.config.allow_outgoing_read_receipts } - pub fn forbidden_remote_room_directory_server_names(&self) -> &[OwnedServerName] { - &self.config.forbidden_remote_room_directory_server_names - } - pub fn well_known_support_page(&self) -> &Option { &self.config.well_known.support_page } pub fn well_known_support_role(&self) -> &Option { &self.config.well_known.support_role } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 59846b8e..1c6c9ca0 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -382,8 +382,7 @@ fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> { .server .config .prevent_media_downloads_from - .iter() - .any(|entry| entry == mxc.server_name) + .contains(mxc.server_name) { // we'll lie to the client and say the blocked server's media was not found and // log. the client has no way of telling anyways so this is a security bonus. 
diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 73b6a468..62da59ef 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -1,8 +1,8 @@ use std::{fmt::Debug, mem}; use conduit::{ - debug, debug_error, debug_info, debug_warn, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, - Err, Error, Result, + debug, debug_error, debug_warn, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, Err, Error, + Result, }; use http::{header::AUTHORIZATION, HeaderValue}; use ipaddress::IPAddress; @@ -36,10 +36,9 @@ impl super::Service { .server .config .forbidden_remote_server_names - .contains(&dest.to_owned()) + .contains(dest) { - debug_info!("Refusing to send outbound federation request to {dest}"); - return Err!(Request(Forbidden("Federation with this homeserver is not allowed."))); + return Err!(Request(Forbidden(debug_warn!("Federation with this {dest} is not allowed.")))); } let actual = self.services.resolver.get_actual_dest(dest).await?; From b8260e0104860eee3b8dfcb6f9091e3ad87ae2de Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 06:37:09 +0000 Subject: [PATCH 0103/1248] optimize for pdu_exists; remove a yield thing Signed-off-by: Jason Volk --- src/service/rooms/event_handler/mod.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 8f96f68e..24c2692d 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -359,7 +359,7 @@ impl Service { }; // Skip the PDU if it is redacted and we already have it as an outlier event - if self.services.timeline.get_pdu_json(event_id).await.is_ok() { + if self.services.timeline.pdu_exists(event_id).await { return Err!(Request(InvalidParam("Event was redacted and we already knew about it"))); } @@ -1123,7 +1123,6 @@ impl Service { let mut todo_auth_events = vec![Arc::clone(id)]; let 
mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); let mut events_all = HashSet::with_capacity(todo_auth_events.len()); - let mut i: u64 = 0; while let Some(next_id) = todo_auth_events.pop() { if let Some((time, tries)) = self .services @@ -1146,12 +1145,7 @@ impl Service { continue; } - i = i.saturating_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } - - if self.services.timeline.get_pdu(&next_id).await.is_ok() { + if self.services.timeline.pdu_exists(&next_id).await { trace!("Found {next_id} in db"); continue; } From dd6621a720b03ca18a7e0fca6881923c373e3cac Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 07:07:42 +0000 Subject: [PATCH 0104/1248] reduce unnecessary clone in pdu handler Signed-off-by: Jason Volk --- src/api/server/send.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/api/server/send.rs b/src/api/server/send.rs index e2100a0f..4f526052 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -118,15 +118,12 @@ async fn handle_pdus( .lock(&room_id) .await; - resolved_map.insert( - event_id.clone(), - services - .rooms - .event_handler - .handle_incoming_pdu(origin, &room_id, &event_id, value, true) - .await - .map(|_| ()), - ); + let result = services + .rooms + .event_handler + .handle_incoming_pdu(origin, &room_id, &event_id, value, true) + .await + .map(|_| ()); drop(mutex_lock); debug!( @@ -134,12 +131,14 @@ async fn handle_pdus( txn_elapsed = ?txn_start_time.elapsed(), "Finished PDU {event_id}", ); + + resolved_map.insert(event_id, result); } - for pdu in &resolved_map { - if let Err(e) = pdu.1 { + for (id, result) in &resolved_map { + if let Err(e) = result { if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { - warn!("Incoming PDU failed {pdu:?}"); + warn!("Incoming PDU failed {id}: {e:?}"); } } } From b08c1241a89514046f666fc21f817af2feb8bce2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 07:15:28 
+0000 Subject: [PATCH 0105/1248] add some interruption points in recursive event handling to prevent shutdown hangs Signed-off-by: Jason Volk --- src/api/server/send.rs | 7 ++++--- src/core/server.rs | 9 ++++++++- src/service/rooms/event_handler/mod.rs | 3 +++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 4f526052..d5d3ffbb 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -71,7 +71,7 @@ pub(crate) async fn send_transaction_message_route( "Starting txn", ); - let resolved_map = handle_pdus(&services, &client, &body.pdus, origin, &txn_start_time).await; + let resolved_map = handle_pdus(&services, &client, &body.pdus, origin, &txn_start_time).await?; handle_edus(&services, &client, &body.edus, origin).await; debug!( @@ -93,7 +93,7 @@ pub(crate) async fn send_transaction_message_route( async fn handle_pdus( services: &Services, _client: &IpAddr, pdus: &[Box], origin: &ServerName, txn_start_time: &Instant, -) -> ResolvedMap { +) -> Result { let mut parsed_pdus = Vec::with_capacity(pdus.len()); for pdu in pdus { parsed_pdus.push(match services.rooms.event_handler.parse_incoming_pdu(pdu).await { @@ -110,6 +110,7 @@ async fn handle_pdus( let mut resolved_map = BTreeMap::new(); for (event_id, value, room_id) in parsed_pdus { + services.server.check_running()?; let pdu_start_time = Instant::now(); let mutex_lock = services .rooms @@ -143,7 +144,7 @@ async fn handle_pdus( } } - resolved_map + Ok(resolved_map) } async fn handle_edus(services: &Services, client: &IpAddr, edus: &[Raw], origin: &ServerName) { diff --git a/src/core/server.rs b/src/core/server.rs index 89f1dea5..627e125d 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -5,7 +5,7 @@ use std::{ use tokio::{runtime, sync::broadcast}; -use crate::{config::Config, log::Log, metrics::Metrics, Err, Result}; +use crate::{config::Config, err, log::Log, metrics::Metrics, Err, Result}; /// Server runtime state; public portion 
pub struct Server { @@ -107,6 +107,13 @@ impl Server { .expect("runtime handle available in Server") } + #[inline] + pub fn check_running(&self) -> Result { + self.running() + .then_some(()) + .ok_or_else(|| err!(debug_warn!("Server is shutting down."))) + } + #[inline] pub fn running(&self) -> bool { !self.stopping.load(Ordering::Acquire) } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 24c2692d..0b2bbf73 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -205,6 +205,7 @@ impl Service { debug!(events = ?sorted_prev_events, "Got previous events"); for prev_id in sorted_prev_events { + self.services.server.check_running()?; match self .handle_prev_pdu( origin, @@ -1268,6 +1269,8 @@ impl Service { let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop() { + self.services.server.check_running()?; + if let Some((pdu, mut json_opt)) = self .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id, room_version_id) .boxed() From 339654216857dc5caa492f0b9a0aa442af56c0f9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 09:09:20 +0000 Subject: [PATCH 0106/1248] complete the example-config generator macro Signed-off-by: Jason Volk --- src/macros/config.rs | 157 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 149 insertions(+), 8 deletions(-) diff --git a/src/macros/config.rs b/src/macros/config.rs index 6d29c21f..3c93bd08 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -1,11 +1,19 @@ -use std::fmt::Write; +use std::{fmt::Write as _, fs::File, io::Write as _}; use proc_macro::TokenStream; +use proc_macro2::Span; use quote::ToTokens; -use syn::{Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaNameValue, Type, TypePath}; +use syn::{ + parse::Parser, punctuated::Punctuated, Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, + MetaList, MetaNameValue, 
Type, TypePath, +}; use crate::{utils::is_cargo_build, Result}; +const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it."; +const HEADER: &str = "## Conduwuit Configuration\n##\n## THIS FILE IS GENERATED. Changes to documentation and \ + defaults must\n## be made within the code found at src/core/config/\n"; + #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { if is_cargo_build() { @@ -18,6 +26,12 @@ pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result Result<()> { + let mut file = File::create("conduwuit-example.toml") + .map_err(|e| Error::new(Span::call_site(), format!("Failed to open config file for generation: {e}")))?; + + file.write_all(HEADER.as_bytes()) + .expect("written to config file"); + if let Fields::Named(FieldsNamed { named, .. @@ -28,21 +42,143 @@ fn generate_example(input: &ItemStruct, _args: &[Meta]) -> Result<()> { continue; }; - let Some(doc) = get_doc_comment(field) else { - continue; - }; - let Some(type_name) = get_type_name(field) else { continue; }; - //println!("{:?} {type_name:?}\n{doc}", ident.to_string()); + let doc = get_doc_comment(field) + .unwrap_or_else(|| UNDOCUMENTED.into()) + .trim_end() + .to_owned(); + + let doc = if doc.ends_with('#') { + format!("{doc}\n") + } else { + format!("{doc}\n#\n") + }; + + let default = get_doc_default(field) + .or_else(|| get_default(field)) + .unwrap_or_default(); + + let default = if !default.is_empty() { + format!(" {default}") + } else { + default + }; + + file.write_fmt(format_args!("\n{doc}")) + .expect("written to config file"); + + file.write_fmt(format_args!("#{ident} ={default}\n")) + .expect("written to config file"); } } Ok(()) } +fn get_default(field: &Field) -> Option { + for attr in &field.attrs { + let Meta::List(MetaList { + path, + tokens, + .. 
+ }) = &attr.meta + else { + continue; + }; + + if !path + .segments + .iter() + .next() + .is_some_and(|s| s.ident == "serde") + { + continue; + } + + let Some(arg) = Punctuated::::parse_terminated + .parse(tokens.clone().into()) + .ok()? + .iter() + .next() + .cloned() + else { + continue; + }; + + match arg { + Meta::NameValue(MetaNameValue { + value: Expr::Lit(ExprLit { + lit: Lit::Str(str), + .. + }), + .. + }) => { + match str.value().as_str() { + "HashSet::new" | "Vec::new" | "RegexSet::empty" => Some("[]".to_owned()), + "true_fn" => return Some("true".to_owned()), + _ => return None, + }; + }, + Meta::Path { + .. + } => return Some("false".to_owned()), + _ => return None, + }; + } + + None +} + +fn get_doc_default(field: &Field) -> Option { + for attr in &field.attrs { + let Meta::NameValue(MetaNameValue { + path, + value, + .. + }) = &attr.meta + else { + continue; + }; + + if !path + .segments + .iter() + .next() + .is_some_and(|s| s.ident == "doc") + { + continue; + } + + let Expr::Lit(ExprLit { + lit, + .. 
+ }) = &value + else { + continue; + }; + + let Lit::Str(token) = &lit else { + continue; + }; + + let value = token.value(); + if !value.trim().starts_with("default:") { + continue; + } + + return value + .split_once(':') + .map(|(_, v)| v) + .map(str::trim) + .map(ToOwned::to_owned); + } + + None +} + fn get_doc_comment(field: &Field) -> Option { let mut out = String::new(); for attr in &field.attrs { @@ -76,7 +212,12 @@ fn get_doc_comment(field: &Field) -> Option { continue; }; - writeln!(&mut out, "# {}", token.value()).expect("wrote to output string buffer"); + let value = token.value(); + if value.trim().starts_with("default:") { + continue; + } + + writeln!(&mut out, "#{value}").expect("wrote to output string buffer"); } (!out.is_empty()).then_some(out) From 367d1533801d8d8c0b53aa07992cbac5f267db5c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 10:09:14 +0000 Subject: [PATCH 0107/1248] add default-directives to config document comments Signed-off-by: Jason Volk --- src/core/config/mod.rs | 249 ++++++++++++++++++++++++----------------- 1 file changed, 144 insertions(+), 105 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 59ddd7c7..919bb486 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -54,9 +54,9 @@ pub struct Config { /// want this to be localhost (127.0.0.1 / ::1). If you are using Docker or /// a container NAT networking setup, you likely need this to be 0.0.0.0. /// To listen multiple addresses, specify a vector e.g. ["127.0.0.1", "::1"] + /// Default if unspecified is both IPv4 and IPv6 localhost. /// - /// default if unspecified is both IPv4 and IPv6 localhost: ["127.0.0.1", - /// "::1"] + /// default: ["127.0.0.1", "::1"] #[serde(default = "default_address")] address: ListeningAddr, @@ -67,7 +67,7 @@ pub struct Config { /// port to this. To listen on multiple ports, specify a vector e.g. 
[8080, /// 8448] /// - /// default if unspecified is 8008 + /// default: 8008 #[serde(default = "default_port")] port: ListeningPort, @@ -80,9 +80,11 @@ pub struct Config { /// (666 minimum). pub unix_socket_path: Option, + /// default: 660 #[serde(default = "default_unix_socket_perms")] pub unix_socket_perms: u32, + /// default: rocksdb #[serde(default = "default_database_backend")] pub database_backend: String, @@ -98,7 +100,9 @@ pub struct Config { /// Set this to any float value in megabytes for conduwuit to tell the /// database engine that this much memory is available for database-related /// caches. May be useful if you have significant memory to spare to - /// increase performance. Defaults to 256.0 + /// increase performance. + /// + /// default: 256.0 #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, @@ -107,6 +111,8 @@ pub struct Config { /// lightning bolt emoji option, just replaced with support for adding your /// own custom text or emojis. To disable, set this to "" (an empty string) /// Defaults to "🏳️⚧️" (trans pride flag) + /// + /// default: 🏳️⚧️ #[serde(default = "default_new_user_displayname_suffix")] pub new_user_displayname_suffix: String, @@ -123,11 +129,10 @@ pub struct Config { /// Set this to any float value to multiply conduwuit's in-memory LRU caches /// with. May be useful if you have significant memory to spare to increase - /// performance. + /// performance. This was previously called + /// `conduit_cache_capacity_modifier`. /// - /// This was previously called `conduit_cache_capacity_modifier` - /// - /// Defaults to 1.0. + /// default: 1.0. #[serde(default = "default_cache_capacity_modifier", alias = "conduit_cache_capacity_modifier")] pub cache_capacity_modifier: f64, @@ -197,11 +202,9 @@ pub struct Config { pub dns_tcp_fallback: bool, /// Enable to query all nameservers until the domain is found. 
Referred to - /// as "trust_negative_responses" in hickory_reso> This can avoid useless - /// DNS queries if the first nameserver responds with NXDOMAIN or an empty - /// NOERROR response. - /// - /// The default is to query one nameserver and stop (false). + /// as "trust_negative_responses" in hickory_resolver. This can avoid + /// useless DNS queries if the first nameserver responds with NXDOMAIN or + /// an empty NOERROR response. #[serde(default = "true_fn")] pub query_all_nameservers: bool, @@ -230,116 +233,121 @@ pub struct Config { /// /// Defaults to 5 - Ipv4ThenIpv6 as this is the most compatible and IPv4 /// networking is currently the most prevalent. + /// + /// default: 5 #[serde(default = "default_ip_lookup_strategy")] pub ip_lookup_strategy: u8, /// Max request size for file uploads + /// + /// default: 20971520 #[serde(default = "default_max_request_size")] pub max_request_size: usize, #[serde(default = "default_max_fetch_prev_events")] pub max_fetch_prev_events: u16, - /// Default/base connection timeout. - /// This is used only by URL previews and update/news endpoint checks + /// Default/base connection timeout (seconds). This is used only by URL + /// previews and update/news endpoint checks. /// - /// Defaults to 10 seconds + /// default: 10 #[serde(default = "default_request_conn_timeout")] pub request_conn_timeout: u64, - /// Default/base request timeout. The time waiting to receive more data from - /// another server. This is used only by URL previews, update/news, and - /// misc endpoint checks + /// Default/base request timeout (seconds). The time waiting to receive more + /// data from another server. This is used only by URL previews, + /// update/news, and misc endpoint checks. /// - /// Defaults to 35 seconds + /// default: 35 #[serde(default = "default_request_timeout")] pub request_timeout: u64, - /// Default/base request total timeout. The time limit for a whole request. 
- /// This is set very high to not cancel healthy requests while serving as a - /// backstop. This is used only by URL previews and update/news endpoint - /// checks + /// Default/base request total timeout (seconds). The time limit for a whole + /// request. This is set very high to not cancel healthy requests while + /// serving as a backstop. This is used only by URL previews and + /// update/news endpoint checks. /// - /// Defaults to 320 seconds + /// default: 320 #[serde(default = "default_request_total_timeout")] pub request_total_timeout: u64, - /// Default/base idle connection pool timeout - /// This is used only by URL previews and update/news endpoint checks + /// Default/base idle connection pool timeout (seconds). This is used only + /// by URL previews and update/news endpoint checks. /// - /// Defaults to 5 seconds + /// default: 5 #[serde(default = "default_request_idle_timeout")] pub request_idle_timeout: u64, - /// Default/base max idle connections per host - /// This is used only by URL previews and update/news endpoint checks + /// Default/base max idle connections per host. This is used only by URL + /// previews and update/news endpoint checks. Defaults to 1 as generally the + /// same open connection can be re-used. 
/// - /// Defaults to 1 as generally the same open connection can be re-used + /// default: 1 #[serde(default = "default_request_idle_per_host")] pub request_idle_per_host: u16, - /// Federation well-known resolution connection timeout + /// Federation well-known resolution connection timeout (seconds) /// - /// Defaults to 6 seconds + /// default: 6 #[serde(default = "default_well_known_conn_timeout")] pub well_known_conn_timeout: u64, - /// Federation HTTP well-known resolution request timeout + /// Federation HTTP well-known resolution request timeout (seconds) /// - /// Defaults to 10 seconds + /// default: 10 #[serde(default = "default_well_known_timeout")] pub well_known_timeout: u64, - /// Federation client request timeout - /// You most definitely want this to be high to account for extremely large - /// room joins, slow homeservers, your own resources etc. + /// Federation client request timeout (seconds). You most definitely want + /// this to be high to account for extremely large room joins, slow + /// homeservers, your own resources etc. /// - /// Defaults to 300 seconds + /// default: 300 #[serde(default = "default_federation_timeout")] pub federation_timeout: u64, - /// Federation client idle connection pool timeout + /// Federation client idle connection pool timeout (seconds) /// - /// Defaults to 25 seconds + /// default: 25 #[serde(default = "default_federation_idle_timeout")] pub federation_idle_timeout: u64, - /// Federation client max idle connections per host + /// Federation client max idle connections per host. Defaults to 1 as + /// generally the same open connection can be re-used /// - /// Defaults to 1 as generally the same open connection can be re-used + /// default: 1 #[serde(default = "default_federation_idle_per_host")] pub federation_idle_per_host: u16, - /// Federation sender request timeout - /// The time it takes for the remote server to process sent transactions can - /// take a while. 
+ /// Federation sender request timeout (seconds). The time it takes for the + /// remote server to process sent transactions can take a while. /// - /// Defaults to 180 seconds + /// default: 180 #[serde(default = "default_sender_timeout")] pub sender_timeout: u64, - /// Federation sender idle connection pool timeout + /// Federation sender idle connection pool timeout (seconds) /// - /// Defaults to 180 seconds + /// default: 180 #[serde(default = "default_sender_idle_timeout")] pub sender_idle_timeout: u64, - /// Federation sender transaction retry backoff limit + /// Federation sender transaction retry backoff limit (seconds) /// - /// Defaults to 86400 seconds + /// default: 86400 #[serde(default = "default_sender_retry_backoff_limit")] pub sender_retry_backoff_limit: u64, - /// Appservice URL request connection timeout + /// Appservice URL request connection timeout. Defaults to 35 seconds as + /// generally appservices are hosted within the same network. /// - /// Defaults to 35 seconds as generally appservices are hosted within the - /// same network + /// default: 35 #[serde(default = "default_appservice_timeout")] pub appservice_timeout: u64, - /// Appservice URL idle connection pool timeout + /// Appservice URL idle connection pool timeout (seconds) /// - /// Defaults to 300 seconds + /// default: 300 #[serde(default = "default_appservice_idle_timeout")] pub appservice_idle_timeout: u64, @@ -377,12 +385,11 @@ pub struct Config { /// no default pub registration_token_file: Option, - /// controls whether encrypted rooms and events are allowed (default true) + /// Controls whether encrypted rooms and events are allowed. #[serde(default = "true_fn")] pub allow_encryption: bool, - /// controls whether federation is allowed or not - /// defaults to true + /// Controls whether federation is allowed or not. 
#[serde(default = "true_fn")] pub allow_federation: bool, @@ -487,6 +494,8 @@ pub struct Config { /// /// (Currently, conduwuit doesn't support batched key requests, so this list /// should only contain other Synapse servers) Defaults to `matrix.org` + /// + /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] pub trusted_servers: Vec, @@ -527,13 +536,13 @@ pub struct Config { /// binary from trace macros. For debug builds, this restriction is not /// applied. /// - /// Defaults to "info" + /// default: "info" #[serde(default = "default_log")] pub log: String, /// controls whether logs will be outputted with ANSI colours /// - /// defaults to true + /// default: true #[serde(default = "true_fn", alias = "log_colours")] pub log_colors: bool, @@ -542,7 +551,7 @@ pub struct Config { /// These are the OpenID tokens that are primarily used for Matrix account /// integrations, *not* OIDC/OpenID Connect/etc /// - /// Defaults to 3600 (1 hour) + /// default: 3600 #[serde(default = "default_openid_token_ttl")] pub openid_token_ttl: u64, @@ -585,9 +594,9 @@ pub struct Config { /// no default pub turn_secret_file: Option, - /// TURN TTL + /// TURN TTL in seconds /// - /// Default is 86400 seconds + /// default: 86400 #[serde(default = "default_turn_ttl")] pub turn_ttl: u64, @@ -629,10 +638,14 @@ pub struct Config { pub rocksdb_log_stderr: bool, /// Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB. + /// + /// default: 4194304 #[serde(default = "default_rocksdb_max_log_file_size")] pub rocksdb_max_log_file_size: usize, - /// Time in seconds before RocksDB will forcibly rotate logs. Defaults to 0. + /// Time in seconds before RocksDB will forcibly rotate logs. 
+ /// + /// default: 0 #[serde(default = "default_rocksdb_log_time_to_roll")] pub rocksdb_log_time_to_roll: usize, @@ -649,8 +662,6 @@ pub struct Config { /// RocksDB issues, try enabling this option as it turns off Direct IO and /// feel free to report in the conduwuit Matrix room if this option fixes /// your DB issues. See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information. - /// - /// Defaults to false #[serde(default)] pub rocksdb_optimize_for_spinning_disks: bool, @@ -662,14 +673,16 @@ pub struct Config { /// Amount of threads that RocksDB will use for parallelism on database /// operatons such as cleanup, sync, flush, compaction, etc. Set to 0 to use - /// all your logical threads. + /// all your logical threads. Defaults to your CPU logical thread count. /// - /// Defaults to your CPU logical thread count. + /// default: 0 #[serde(default = "default_rocksdb_parallelism_threads")] pub rocksdb_parallelism_threads: usize, /// Maximum number of LOG files RocksDB will keep. This must *not* be set to /// 0. It must be at least 1. Defaults to 3 as these are not very useful. + /// + /// default: 3 #[serde(default = "default_rocksdb_max_log_files")] pub rocksdb_max_log_files: usize, @@ -682,7 +695,7 @@ pub struct Config { /// /// "none" will disable compression. /// - /// Defaults to "zstd" + /// default: "zstd" #[serde(default = "default_rocksdb_compression_algo")] pub rocksdb_compression_algo: String, @@ -746,6 +759,8 @@ pub struct Config { /// See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information /// /// Defaults to 1 (TolerateCorruptedTailRecords) + /// + /// default: 1 #[serde(default = "default_rocksdb_recovery_mode")] pub rocksdb_recovery_mode: u8, @@ -760,8 +775,6 @@ pub struct Config { /// repair. /// - Disabling repair mode and restarting the server is recommended after /// running the repair. 
- /// - /// Defaults to false #[serde(default)] pub rocksdb_repair: bool, @@ -798,6 +811,8 @@ pub struct Config { /// 6 = All statistics. /// /// Defaults to 1 (No statistics, except in debug-mode) + /// + /// default: 1 #[serde(default = "default_rocksdb_stats_level")] pub rocksdb_stats_level: u8, @@ -831,11 +846,15 @@ pub struct Config { /// Config option to control how many seconds before presence updates that /// you are idle. Defaults to 5 minutes. + /// + /// default: 300 #[serde(default = "default_presence_idle_timeout_s")] pub presence_idle_timeout_s: u64, /// Config option to control how many seconds before presence updates that /// you are offline. Defaults to 30 minutes. + /// + /// default: 1800 #[serde(default = "default_presence_offline_timeout_s")] pub presence_offline_timeout_s: u64, @@ -843,42 +862,46 @@ pub struct Config { /// Disabling is offered as an optimization for servers participating in /// many large rooms or when resources are limited. Disabling it may cause /// incorrect presence states (i.e. stuck online) to be seen for some - /// remote users. Defaults to true. + /// remote users. #[serde(default = "true_fn")] pub presence_timeout_remote_users: bool, /// Config option to control whether we should receive remote incoming read - /// receipts. Defaults to true. + /// receipts. #[serde(default = "true_fn")] pub allow_incoming_read_receipts: bool, /// Config option to control whether we should send read receipts to remote - /// servers. Defaults to true. + /// servers. #[serde(default = "true_fn")] pub allow_outgoing_read_receipts: bool, - /// Config option to control outgoing typing updates to federation. Defaults - /// to true. + /// Config option to control outgoing typing updates to federation. #[serde(default = "true_fn")] pub allow_outgoing_typing: bool, /// Config option to control incoming typing updates from federation. - /// Defaults to true. 
#[serde(default = "true_fn")] pub allow_incoming_typing: bool, /// Config option to control maximum time federation user can indicate /// typing. + /// + /// default: 30 #[serde(default = "default_typing_federation_timeout_s")] pub typing_federation_timeout_s: u64, /// Config option to control minimum time local client can indicate typing. /// This does not override a client's request to stop typing. It only /// enforces a minimum value in case of no stop request. + /// + /// default: 15 #[serde(default = "default_typing_client_timeout_min_s")] pub typing_client_timeout_min_s: u64, /// Config option to control maximum time local client can indicate typing. + /// + /// default: 45 #[serde(default = "default_typing_client_timeout_max_s")] pub typing_client_timeout_max_s: u64, @@ -910,7 +933,7 @@ pub struct Config { pub brotli_compression: bool, /// Set to true to allow user type "guest" registrations. Element attempts - /// to register guest users automatically. Defaults to false + /// to register guest users automatically. Defaults to false. #[serde(default)] pub allow_guest_registration: bool, @@ -920,7 +943,7 @@ pub struct Config { pub log_guest_registrations: bool, /// Set to true to allow guest registrations/users to auto join any rooms - /// specified in `auto_join_rooms` Defaults to false + /// specified in `auto_join_rooms` Defaults to false. #[serde(default)] pub allow_guests_auto_join_rooms: bool, @@ -964,9 +987,7 @@ pub struct Config { /// is now disabled by default. You may still return to upstream Conduit /// but you have to run Conduwuit at least once with this set to true and /// allow the media_startup_check to take place before shutting - /// down to return to Conduit. - /// - /// Disabled by default. + /// down to return to Conduit. Disabled by default. #[serde(default)] pub media_compat_file_link: bool, @@ -975,9 +996,7 @@ pub struct Config { /// corresponding entries will be removed from the database. 
This is /// disabled by default because if the media directory is accidentally moved /// or inaccessible the metadata entries in the database will be lost with - /// sadness. - /// - /// Disabled by default. + /// sadness. Disabled by default. #[serde(default)] pub prune_missing_media: bool, @@ -1008,12 +1027,35 @@ pub struct Config { /// RFC1918, unroutable, loopback, multicast, and testnet addresses for /// security. /// - /// To disable, set this to be an empty vector (`[]`). /// Please be aware that this is *not* a guarantee. You should be using a /// firewall with zones as doing this on the application layer may have /// bypasses. /// /// Currently this does not account for proxies in use like Synapse does. + /// + /// To disable, set this to be an empty vector (`[]`). + /// The default is: + /// [ + /// "127.0.0.0/8", + /// "10.0.0.0/8", + /// "172.16.0.0/12", + /// "192.168.0.0/16", + /// "100.64.0.0/10", + /// "192.0.0.0/24", + /// "169.254.0.0/16", + /// "192.88.99.0/24", + /// "198.18.0.0/15", + /// "192.0.2.0/24", + /// "198.51.100.0/24", + /// "203.0.113.0/24", + /// "224.0.0.0/4", + /// "::1/128", + /// "fe80::/10", + /// "fc00::/7", + /// "2001:db8::/32", + /// "ff00::/8", + /// "fec0::/10", + /// ] #[serde(default = "default_ip_range_denylist")] pub ip_range_denylist: Vec, @@ -1060,7 +1102,9 @@ pub struct Config { pub url_preview_url_contains_allowlist: Vec, /// Maximum amount of bytes allowed in a URL preview body size when - /// spidering. Defaults to 384KB (384_000 bytes) + /// spidering. Defaults to 384KB. + /// + /// defaukt: 384000 #[serde(default = "default_url_preview_max_spider_size")] pub url_preview_max_spider_size: usize, @@ -1109,27 +1153,27 @@ pub struct Config { /// reattempt every message without trimming the queues; this may consume /// significant disk. Set this value to 0 to drop all messages without any /// attempt at redelivery. 
+ /// + /// default: 50 #[serde(default = "default_startup_netburst_keep")] pub startup_netburst_keep: i64, /// controls whether non-admin local users are forbidden from sending room /// invites (local and remote), and if non-admin users can receive remote /// room invites. admins are always allowed to send and receive all room - /// invites. defaults to false + /// invites. #[serde(default)] pub block_non_admin_invites: bool, /// Allows admins to enter commands in rooms other than #admins by prefixing /// with \!admin. The reply will be publicly visible to the room, - /// originating from the sender. defaults to true + /// originating from the sender. #[serde(default = "true_fn")] pub admin_escape_commands: bool, /// Controls whether the conduwuit admin room console / CLI will immediately /// activate on startup. This option can also be enabled with `--console` - /// conduwuit argument - /// - /// Defaults to false + /// conduwuit argument. #[serde(default)] pub admin_console_automatic: bool, @@ -1145,21 +1189,20 @@ pub struct Config { /// Such example could be: `./conduwuit --execute "server admin-notice /// conduwuit has started up at $(date)"` /// - /// Defaults to nothing. + /// default: [] #[serde(default)] pub admin_execute: Vec, /// Controls whether conduwuit should error and fail to start if an admin - /// execute command (`--execute` / `admin_execute`) fails - /// - /// Defaults to false + /// execute command (`--execute` / `admin_execute`) fails. #[serde(default)] pub admin_execute_errors_ignore: bool, /// Controls the max log level for admin command log captures (logs - /// generated from running admin commands) + /// generated from running admin commands). Defaults to "info" on release + /// builds, else "debug" on debug builds. 
/// - /// Defaults to "info" on release builds, else "debug" on debug builds + /// default: "info" #[serde(default = "default_admin_log_capture")] pub admin_log_capture: String, @@ -1169,8 +1212,6 @@ pub struct Config { /// Sentry.io crash/panic reporting, performance monitoring/metrics, etc. /// This is NOT enabled by default. conduwuit's default Sentry reporting /// endpoint is o4506996327251968.ingest.us.sentry.io - /// - /// Defaults to *false* #[serde(default)] pub sentry: bool, @@ -1182,8 +1223,6 @@ pub struct Config { pub sentry_endpoint: Option, /// Report your Conduwuit server_name in Sentry.io crash reports and metrics - /// - /// Defaults to false #[serde(default)] pub sentry_send_server_name: bool, @@ -1191,9 +1230,9 @@ pub struct Config { /// /// Note that too high values may impact performance, and can be disabled by /// setting it to 0.0 (0%) This value is read as a percentage to Sentry, - /// represented as a decimal + /// represented as a decimal. Defaults to 15% of traces (0.15) /// - /// Defaults to 15% of traces (0.15) + /// default: 0.15 #[serde(default = "default_sentry_traces_sample_rate")] pub sentry_traces_sample_rate: f32, From 5cb0a5f67668828b7c47b8a8efc3f8c834c1d7f2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Oct 2024 22:16:59 +0000 Subject: [PATCH 0108/1248] add config generator controls via attribute metadatas Signed-off-by: Jason Volk --- src/core/config/mod.rs | 45 +++++++++++++++++++- src/macros/config.rs | 93 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 127 insertions(+), 11 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 919bb486..ff214420 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -28,10 +28,19 @@ use self::proxy::ProxyConfig; use crate::{err, error::Error, utils::sys, Result}; /// all the config options for conduwuit -#[config_example_generator] -#[derive(Clone, Debug, Deserialize)] #[allow(clippy::struct_excessive_bools)] 
#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] +#[derive(Clone, Debug, Deserialize)] +#[config_example_generator( + filename = "conduwuit-example.toml", + section = "global", + undocumented = "# This item is undocumented. Please contribute documentation for it.", + header = "### Conduwuit Configuration\n###\n### THIS FILE IS GENERATED. YOUR CHANGES WILL BE OVERWRITTEN!\n### \ + You should rename this file before configuring your server. Changes\n### to documentation and defaults \ + can be contributed in sourcecode at\n### src/core/config/mod.rs. This file is generated when \ + building.\n###\n", + ignore = "catchall well_known tls" +)] pub struct Config { /// The server_name is the pretty name of this server. It is used as a /// suffix for user and room ids. Examples: matrix.org, conduit.rs @@ -71,6 +80,7 @@ pub struct Config { #[serde(default = "default_port")] port: ListeningPort, + // external structure; separate section pub tls: Option, /// Uncomment unix_socket_path to listen on a UNIX socket at the specified @@ -458,15 +468,18 @@ pub struct Config { #[serde(default = "true_fn")] pub allow_unstable_room_versions: bool, + /// default: 10 #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, + // external structure; separate section #[serde(default)] pub well_known: WellKnownConfig, #[serde(default)] pub allow_jaeger: bool, + /// default: "info" #[serde(default = "default_jaeger_filter")] pub jaeger_filter: String, @@ -478,12 +491,38 @@ pub struct Config { #[serde(default)] pub tracing_flame: bool, + /// default: "info" #[serde(default = "default_tracing_flame_filter")] pub tracing_flame_filter: String, + /// default: "./tracing.folded" #[serde(default = "default_tracing_flame_output_path")] pub tracing_flame_output_path: String, + /// Examples: + /// - No proxy (default): + /// proxy ="none" + /// + /// - For global proxy, create the section at the bottom of this file: + /// [global.proxy] + /// global = { 
url = "socks5h://localhost:9050" } + /// + /// - To proxy some domains: + /// [global.proxy] + /// [[global.proxy.by_domain]] + /// url = "socks5h://localhost:9050" + /// include = ["*.onion", "matrix.myspecial.onion"] + /// exclude = ["*.myspecial.onion"] + /// + /// Include vs. Exclude: + /// - If include is an empty list, it is assumed to be `["*"]`. + /// - If a domain matches both the exclude and include list, the proxy will + /// only be used if it was included because of a more specific rule than + /// it was excluded. In the above example, the proxy would be used for + /// `ordinary.onion`, `matrix.myspecial.onion`, but not + /// `hello.myspecial.onion`. + /// + /// default: "none" #[serde(default)] pub proxy: ProxyConfig, @@ -1278,6 +1317,7 @@ pub struct Config { } #[derive(Clone, Debug, Deserialize)] +#[config_example_generator(filename = "conduwuit-example.toml", section = "global.tls")] pub struct TlsConfig { pub certs: String, pub key: String, @@ -1287,6 +1327,7 @@ pub struct TlsConfig { } #[derive(Clone, Debug, Deserialize, Default)] +#[config_example_generator(filename = "conduwuit-example.toml", section = "global.well_known")] pub struct WellKnownConfig { pub client: Option, pub server: Option, diff --git a/src/macros/config.rs b/src/macros/config.rs index 3c93bd08..f8616352 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -1,18 +1,21 @@ -use std::{fmt::Write as _, fs::File, io::Write as _}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Write as _, + fs::OpenOptions, + io::Write as _, +}; use proc_macro::TokenStream; use proc_macro2::Span; use quote::ToTokens; use syn::{ - parse::Parser, punctuated::Punctuated, Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, - MetaList, MetaNameValue, Type, TypePath, + parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, FieldsNamed, + ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, }; use 
crate::{utils::is_cargo_build, Result}; const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it."; -const HEADER: &str = "## Conduwuit Configuration\n##\n## THIS FILE IS GENERATED. Changes to documentation and \ - defaults must\n## be made within the code found at src/core/config/\n"; #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { @@ -25,11 +28,41 @@ pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result Result<()> { - let mut file = File::create("conduwuit-example.toml") +fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { + let settings = get_settings(args); + + let filename = settings + .get("filename") + .ok_or_else(|| Error::new(args[0].span(), "missing required 'filename' attribute argument"))?; + + let undocumented = settings + .get("undocumented") + .map_or(UNDOCUMENTED, String::as_str); + + let ignore: HashSet<&str> = settings + .get("ignore") + .map_or("", String::as_str) + .split(' ') + .collect(); + + let section = settings + .get("section") + .ok_or_else(|| Error::new(args[0].span(), "missing required 'section' attribute argument"))?; + + let mut file = OpenOptions::new() + .write(true) + .create(section == "global") + .truncate(section == "global") + .append(section != "global") + .open(filename) .map_err(|e| Error::new(Span::call_site(), format!("Failed to open config file for generation: {e}")))?; - file.write_all(HEADER.as_bytes()) + if let Some(header) = settings.get("header") { + file.write_all(header.as_bytes()) + .expect("written to config file"); + } + + file.write_fmt(format_args!("\n[{section}]\n")) .expect("written to config file"); if let Fields::Named(FieldsNamed { @@ -42,12 +75,16 @@ fn generate_example(input: &ItemStruct, _args: &[Meta]) -> Result<()> { continue; }; + if ignore.contains(ident.to_string().as_str()) { + continue; + } + let Some(type_name) = get_type_name(field) else { 
continue; }; let doc = get_doc_comment(field) - .unwrap_or_else(|| UNDOCUMENTED.into()) + .unwrap_or_else(|| undocumented.into()) .trim_end() .to_owned(); @@ -75,9 +112,47 @@ fn generate_example(input: &ItemStruct, _args: &[Meta]) -> Result<()> { } } + if let Some(footer) = settings.get("footer") { + file.write_all(footer.as_bytes()) + .expect("written to config file"); + } + Ok(()) } +fn get_settings(args: &[Meta]) -> HashMap { + let mut map = HashMap::new(); + for arg in args { + let Meta::NameValue(MetaNameValue { + path, + value, + .. + }) = arg + else { + continue; + }; + + let Expr::Lit( + ExprLit { + lit: Lit::Str(str), + .. + }, + .., + ) = value + else { + continue; + }; + + let Some(key) = path.segments.iter().next().map(|s| s.ident.clone()) else { + continue; + }; + + map.insert(key.to_string(), str.value()); + } + + map +} + fn get_default(field: &Field) -> Option { for attr in &field.attrs { let Meta::List(MetaList { From c769fcc3471dcc4c976569be57aea65109105f92 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 24 Oct 2024 01:31:30 +0000 Subject: [PATCH 0109/1248] move core result into core utils Signed-off-by: Jason Volk --- src/core/mod.rs | 4 +--- src/core/utils/mod.rs | 1 + src/core/{ => utils}/result.rs | 0 src/core/{ => utils}/result/debug_inspect.rs | 0 src/core/{ => utils}/result/flat_ok.rs | 0 src/core/{ => utils}/result/inspect_log.rs | 0 src/core/{ => utils}/result/into_is_ok.rs | 0 src/core/{ => utils}/result/log_debug_err.rs | 0 src/core/{ => utils}/result/log_err.rs | 0 src/core/{ => utils}/result/map_expect.rs | 0 src/core/{ => utils}/result/not_found.rs | 0 src/core/{ => utils}/result/unwrap_infallible.rs | 0 12 files changed, 2 insertions(+), 3 deletions(-) rename src/core/{ => utils}/result.rs (100%) rename src/core/{ => utils}/result/debug_inspect.rs (100%) rename src/core/{ => utils}/result/flat_ok.rs (100%) rename src/core/{ => utils}/result/inspect_log.rs (100%) rename src/core/{ => utils}/result/into_is_ok.rs (100%) rename 
src/core/{ => utils}/result/log_debug_err.rs (100%) rename src/core/{ => utils}/result/log_err.rs (100%) rename src/core/{ => utils}/result/map_expect.rs (100%) rename src/core/{ => utils}/result/not_found.rs (100%) rename src/core/{ => utils}/result/unwrap_infallible.rs (100%) diff --git a/src/core/mod.rs b/src/core/mod.rs index 491d8b4c..79052554 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -7,7 +7,6 @@ pub mod log; pub mod metrics; pub mod mods; pub mod pdu; -pub mod result; pub mod server; pub mod utils; @@ -19,9 +18,8 @@ pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; pub use pdu::{PduBuilder, PduCount, PduEvent}; -pub use result::Result; pub use server::Server; -pub use utils::{ctor, dtor, implement}; +pub use utils::{ctor, dtor, implement, result, result::Result}; pub use crate as conduit_core; diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 96a98537..3943a8da 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -10,6 +10,7 @@ pub mod json; pub mod math; pub mod mutex_map; pub mod rand; +pub mod result; pub mod set; pub mod stream; pub mod string; diff --git a/src/core/result.rs b/src/core/utils/result.rs similarity index 100% rename from src/core/result.rs rename to src/core/utils/result.rs diff --git a/src/core/result/debug_inspect.rs b/src/core/utils/result/debug_inspect.rs similarity index 100% rename from src/core/result/debug_inspect.rs rename to src/core/utils/result/debug_inspect.rs diff --git a/src/core/result/flat_ok.rs b/src/core/utils/result/flat_ok.rs similarity index 100% rename from src/core/result/flat_ok.rs rename to src/core/utils/result/flat_ok.rs diff --git a/src/core/result/inspect_log.rs b/src/core/utils/result/inspect_log.rs similarity index 100% rename from src/core/result/inspect_log.rs rename to src/core/utils/result/inspect_log.rs diff --git a/src/core/result/into_is_ok.rs b/src/core/utils/result/into_is_ok.rs similarity index 100% 
rename from src/core/result/into_is_ok.rs rename to src/core/utils/result/into_is_ok.rs diff --git a/src/core/result/log_debug_err.rs b/src/core/utils/result/log_debug_err.rs similarity index 100% rename from src/core/result/log_debug_err.rs rename to src/core/utils/result/log_debug_err.rs diff --git a/src/core/result/log_err.rs b/src/core/utils/result/log_err.rs similarity index 100% rename from src/core/result/log_err.rs rename to src/core/utils/result/log_err.rs diff --git a/src/core/result/map_expect.rs b/src/core/utils/result/map_expect.rs similarity index 100% rename from src/core/result/map_expect.rs rename to src/core/utils/result/map_expect.rs diff --git a/src/core/result/not_found.rs b/src/core/utils/result/not_found.rs similarity index 100% rename from src/core/result/not_found.rs rename to src/core/utils/result/not_found.rs diff --git a/src/core/result/unwrap_infallible.rs b/src/core/utils/result/unwrap_infallible.rs similarity index 100% rename from src/core/result/unwrap_infallible.rs rename to src/core/utils/result/unwrap_infallible.rs From aa768b5dec1338a6dbdd1b3a9a00bc6ec9d53090 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 24 Oct 2024 05:03:31 +0000 Subject: [PATCH 0110/1248] distill active and old keys for federation key/server response Signed-off-by: Jason Volk --- src/api/server/key.rs | 40 +++++++++++++++++++++++++--------- src/service/server_keys/mod.rs | 17 +++++++++++++++ 2 files changed, 47 insertions(+), 10 deletions(-) diff --git a/src/api/server/key.rs b/src/api/server/key.rs index 3913ce43..5284593d 100644 --- a/src/api/server/key.rs +++ b/src/api/server/key.rs @@ -1,10 +1,14 @@ -use std::{collections::BTreeMap, time::Duration}; +use std::{ + collections::BTreeMap, + mem::take, + time::{Duration, SystemTime}, +}; use axum::{extract::State, response::IntoResponse, Json}; use conduit::{utils::timepoint_from_now, Result}; use ruma::{ api::{ - federation::discovery::{get_server_keys, ServerSigningKeys}, + 
federation::discovery::{get_server_keys, OldVerifyKey, ServerSigningKeys}, OutgoingResponse, }, serde::Raw, @@ -21,21 +25,32 @@ use ruma::{ // signature for the response pub(crate) async fn get_server_keys_route(State(services): State) -> Result { let server_name = services.globals.server_name(); - let verify_keys = services.server_keys.verify_keys_for(server_name).await; + let active_key_id = services.server_keys.active_key_id(); + let mut all_keys = services.server_keys.verify_keys_for(server_name).await; + + let verify_keys = all_keys + .remove_entry(active_key_id) + .expect("active verify_key is missing"); + + let old_verify_keys = all_keys + .into_iter() + .map(|(id, key)| (id, OldVerifyKey::new(expires_ts(), key.key))) + .collect(); + let server_key = ServerSigningKeys { - verify_keys, + verify_keys: [verify_keys].into(), + old_verify_keys, server_name: server_name.to_owned(), valid_until_ts: valid_until_ts(), - old_verify_keys: BTreeMap::new(), signatures: BTreeMap::new(), }; - let response = get_server_keys::v2::Response { - server_key: Raw::new(&server_key)?, - } - .try_into_http_response::>()?; + let server_key = Raw::new(&server_key)?; + let mut response = get_server_keys::v2::Response::new(server_key) + .try_into_http_response::>() + .map(|mut response| take(response.body_mut())) + .and_then(|body| serde_json::from_slice(&body).map_err(Into::into))?; - let mut response = serde_json::from_slice(response.body())?; services.server_keys.sign_json(&mut response)?; Ok(Json(response)) @@ -47,6 +62,11 @@ fn valid_until_ts() -> MilliSecondsSinceUnixEpoch { MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow") } +fn expires_ts() -> MilliSecondsSinceUnixEpoch { + let timepoint = SystemTime::now(); + MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow") +} + /// # `GET /_matrix/key/v2/server/{keyId}` /// /// Gets the public signing keys of this server. 
diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index dc09703c..dae45a51 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -44,7 +44,9 @@ pub type PubKeys = PublicKeySet; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let minimum_valid = Duration::from_secs(3600); + let (keypair, verify_keys) = keypair::init(args.db)?; + debug_assert!(verify_keys.len() == 1, "only one active verify_key supported"); Ok(Arc::new(Self { keypair, @@ -68,6 +70,21 @@ impl crate::Service for Service { #[inline] pub fn keypair(&self) -> &Ed25519KeyPair { &self.keypair } +#[implement(Service)] +#[inline] +pub fn active_key_id(&self) -> &ServerSigningKeyId { self.active_verify_key().0 } + +#[implement(Service)] +#[inline] +pub fn active_verify_key(&self) -> (&ServerSigningKeyId, &VerifyKey) { + debug_assert!(self.verify_keys.len() <= 1, "more than one active verify_key"); + self.verify_keys + .iter() + .next() + .map(|(id, key)| (id.as_ref(), key)) + .expect("missing active verify_key") +} + #[implement(Service)] async fn add_signing_keys(&self, new_keys: ServerSigningKeys) { let origin = &new_keys.server_name; From 89cc865868102697415802e45f4ce19bbaad33d2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 25 Oct 2024 14:45:22 -0400 Subject: [PATCH 0111/1248] bump conduwuit to 0.5.0 Signed-off-by: strawberry --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4ac7cc35..31339b27 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -592,7 +592,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.4.7" +version = "0.5.0" dependencies = [ "clap", "conduit_admin", @@ -621,7 +621,7 @@ dependencies = [ [[package]] name = "conduit_admin" -version = "0.4.7" +version = "0.5.0" dependencies = [ "clap", "conduit_api", @@ -642,7 +642,7 @@ dependencies = 
[ [[package]] name = "conduit_api" -version = "0.4.7" +version = "0.5.0" dependencies = [ "axum", "axum-client-ip", @@ -674,7 +674,7 @@ dependencies = [ [[package]] name = "conduit_core" -version = "0.4.7" +version = "0.5.0" dependencies = [ "argon2", "arrayvec", @@ -725,7 +725,7 @@ dependencies = [ [[package]] name = "conduit_database" -version = "0.4.7" +version = "0.5.0" dependencies = [ "arrayvec", "conduit_core", @@ -741,7 +741,7 @@ dependencies = [ [[package]] name = "conduit_macros" -version = "0.4.7" +version = "0.5.0" dependencies = [ "itertools 0.13.0", "proc-macro2", @@ -751,7 +751,7 @@ dependencies = [ [[package]] name = "conduit_router" -version = "0.4.7" +version = "0.5.0" dependencies = [ "axum", "axum-client-ip", @@ -784,7 +784,7 @@ dependencies = [ [[package]] name = "conduit_service" -version = "0.4.7" +version = "0.5.0" dependencies = [ "async-trait", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index 966c2818..64cd8ba3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ license = "Apache-2.0" readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" rust-version = "1.82.0" -version = "0.4.7" +version = "0.5.0" [workspace.metadata.crane] name = "conduit" From f29879288d00e24ec04d6a42bab6ef91e8bafda7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 25 Oct 2024 20:47:30 -0400 Subject: [PATCH 0112/1248] document conduwuit k8s helm chart Signed-off-by: strawberry --- docs/deploying/kubernetes.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 docs/deploying/kubernetes.md diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md new file mode 100644 index 00000000..2a1bcb51 --- /dev/null +++ b/docs/deploying/kubernetes.md @@ -0,0 +1,4 @@ +# conduwuit for Kubernetes + +conduwuit doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run conduwuit on Kubernetes: + From 652b04b9b6bc30f55b286645bb8cd706d429056c 
Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 25 Oct 2024 20:48:14 -0400 Subject: [PATCH 0113/1248] update conduwuit freebsd docs Signed-off-by: strawberry --- docs/deploying/freebsd.md | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/docs/deploying/freebsd.md b/docs/deploying/freebsd.md index 4ac83515..65b40204 100644 --- a/docs/deploying/freebsd.md +++ b/docs/deploying/freebsd.md @@ -1,11 +1,5 @@ # conduwuit for FreeBSD -conduwuit at the moment does not provide FreeBSD builds. Building conduwuit on -FreeBSD requires a specific environment variable to use the system prebuilt -RocksDB library instead of rust-rocksdb / rust-librocksdb-sys which does *not* -work and will cause a build error or coredump. +conduwuit at the moment does not provide FreeBSD builds or have FreeBSD packaging, however conduwuit does build and work on FreeBSD using the system-provided RocksDB. -Use the following environment variable: `ROCKSDB_LIB_DIR=/usr/local/lib` - -Such example commandline with it can be: `ROCKSDB_LIB_DIR=/usr/local/lib cargo -build --release` +Contributions for getting conduwuit packaged are welcome. 
From 2ce91f33afbd08a722684f6d0e3928cc9a497696 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 25 Oct 2024 21:08:34 -0400 Subject: [PATCH 0114/1248] log method on tracing req spans, fix path sometimes being truncated Signed-off-by: strawberry --- src/api/router/auth.rs | 4 +--- src/router/layers.rs | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 6b90c5ff..28d6bc55 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -5,7 +5,6 @@ use axum_extra::{ TypedHeader, }; use conduit::{debug_error, err, warn, Err, Error, Result}; -use http::uri::PathAndQuery; use ruma::{ api::{client::error::ErrorKind, AuthScheme, Metadata}, server_util::authorization::XMatrix, @@ -190,12 +189,11 @@ async fn auth_server(services: &Services, request: &mut Request, body: Option<&C let destination = services.globals.server_name(); let origin = &x_matrix.origin; - #[allow(clippy::or_fun_call)] let signature_uri = request .parts .uri .path_and_query() - .unwrap_or(&PathAndQuery::from_static("/")) + .expect("all requests have a path") .to_string(); let signature: [Member; 1] = [(x_matrix.key.to_string(), Value::String(x_matrix.sig.to_string()))]; diff --git a/src/router/layers.rs b/src/router/layers.rs index a1a70bb8..908105d8 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -184,12 +184,20 @@ fn catch_panic(err: Box) -> http::Response(request: &http::Request) -> tracing::Span { - let path = request - .extensions() - .get::() - .map_or_else(|| request.uri().path(), truncated_matched_path); + let path = request.extensions().get::().map_or_else( + || { + request + .uri() + .path_and_query() + .expect("all requests have a path") + .as_str() + }, + truncated_matched_path, + ); - tracing::info_span!("router:", %path) + let method = request.method(); + + tracing::info_span!("router:", %method, %path) } fn truncated_matched_path(path: &MatchedPath) -> &str { From 
0efe24a028f5954e9aa4969f533ed89a51115bbc Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 25 Oct 2024 21:13:14 -0400 Subject: [PATCH 0115/1248] remove spaces from CSP header to save a few bytes Signed-off-by: strawberry --- src/router/layers.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/router/layers.rs b/src/router/layers.rs index 908105d8..fd68cc36 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -24,15 +24,15 @@ use tracing::Level; use crate::{request, router}; -const CONDUWUIT_CSP: &[&str] = &[ - "sandbox", +const CONDUWUIT_CSP: &[&str; 5] = &[ "default-src 'none'", "frame-ancestors 'none'", "form-action 'none'", "base-uri 'none'", + "sandbox", ]; -const CONDUWUIT_PERMISSIONS_POLICY: &[&str] = &["interest-cohort=()", "browsing-topics=()"]; +const CONDUWUIT_PERMISSIONS_POLICY: &[&str; 2] = &["interest-cohort=()", "browsing-topics=()"]; pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { let server = &services.server; @@ -78,7 +78,7 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { )) .layer(SetResponseHeaderLayer::if_not_present( header::CONTENT_SECURITY_POLICY, - HeaderValue::from_str(&CONDUWUIT_CSP.join("; "))?, + HeaderValue::from_str(&CONDUWUIT_CSP.join(";"))?, )) .layer(cors_layer(server)) .layer(body_limit_layer(server)) From d6991611f0d79d1ad4a1e3cdb5d1372a79b87ac7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 26 Oct 2024 12:32:47 -0400 Subject: [PATCH 0116/1248] add `require_auth_for_profile_requests` config option, check endpoint metadata instead of request string Signed-off-by: strawberry --- src/api/router/auth.rs | 36 ++++++++++++++++++++++++++++++------ src/core/config/mod.rs | 11 ++++++++++- 2 files changed, 40 insertions(+), 7 deletions(-) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 28d6bc55..6b1bb1a9 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -6,7 +6,15 @@ use axum_extra::{ }; use conduit::{debug_error, err, 
warn, Err, Error, Result}; use ruma::{ - api::{client::error::ErrorKind, AuthScheme, Metadata}, + api::{ + client::{ + directory::get_public_rooms, + error::ErrorKind, + profile::{get_avatar_url, get_display_name, get_profile, get_profile_key, get_timezone_key}, + voip::get_turn_server_info, + }, + AuthScheme, IncomingRequest, Metadata, + }, server_util::authorization::XMatrix, CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, }; @@ -54,9 +62,8 @@ pub(super) async fn auth( }; if metadata.authentication == AuthScheme::None { - match request.parts.uri.path() { - // TODO: can we check this better? - "/_matrix/client/v3/publicRooms" | "/_matrix/client/r0/publicRooms" => { + match metadata { + &get_public_rooms::v3::Request::METADATA => { if !services .globals .config @@ -73,6 +80,23 @@ pub(super) async fn auth( } } }, + &get_profile::v3::Request::METADATA + | &get_profile_key::unstable::Request::METADATA + | &get_display_name::v3::Request::METADATA + | &get_avatar_url::v3::Request::METADATA + | &get_timezone_key::unstable::Request::METADATA => { + if services.globals.config.require_auth_for_profile_requests { + match token { + Token::Appservice(_) | Token::User(_) => { + // we should have validated the token above + // already + }, + Token::None | Token::Invalid => { + return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing or invalid access token.")); + }, + } + } + }, _ => {}, }; } @@ -107,9 +131,9 @@ pub(super) async fn auth( appservice_info: Some(*info), }) }, - (AuthScheme::AccessToken, Token::None) => match request.parts.uri.path() { + (AuthScheme::AccessToken, Token::None) => match metadata { // TODO: can we check this better? 
- "/_matrix/client/v3/voip/turnServer" | "/_matrix/client/r0/voip/turnServer" => { + &get_turn_server_info::v3::Request::METADATA => { if services.globals.config.turn_allow_guests { Ok(Auth { origin: None, diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index ff214420..04e44fd7 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -58,7 +58,6 @@ pub struct Config { /// YOU NEED TO EDIT THIS pub server_name: OwnedServerName, - /// Database backend: Only rocksdb is supported. /// default address (IPv4 or IPv6) conduwuit will listen on. Generally you /// want this to be localhost (127.0.0.1 / ::1). If you are using Docker or /// a container NAT networking setup, you likely need this to be 0.0.0.0. @@ -94,6 +93,8 @@ pub struct Config { #[serde(default = "default_unix_socket_perms")] pub unix_socket_perms: u32, + /// Database backend: Only rocksdb is supported. + /// /// default: rocksdb #[serde(default = "default_database_backend")] pub database_backend: String, @@ -406,6 +407,14 @@ pub struct Config { #[serde(default)] pub federation_loopback: bool, + /// Set this to true to require authentication on the normally + /// unauthenticated profile retrieval endpoints (GET) + /// "/_matrix/client/v3/profile/{userId}". + /// + /// This can prevent profile scraping. + #[serde(default)] + pub require_auth_for_profile_requests: bool, + /// Set this to true to allow your server's public room directory to be /// federated. 
Set this to false to protect against /publicRooms spiders, /// but will forbid external users from viewing your server's public room From 60d84195c51c523b965c17d75ebca861290260e5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 26 Oct 2024 17:26:50 -0400 Subject: [PATCH 0117/1248] implement MSC4210, bump ruwuma Signed-off-by: strawberry --- Cargo.lock | 26 +++---- Cargo.toml | 3 +- src/admin/debug/commands.rs | 1 + src/api/client/push.rs | 100 +++++++++++++------------ src/service/rooms/event_handler/mod.rs | 1 + 5 files changed, 69 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 31339b27..c64d3cc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2976,7 +2976,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "assign", "js_int", @@ -2998,7 +2998,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "js_int", "ruma-common", @@ -3010,7 +3010,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "as_variant", "assign", @@ -3033,7 +3033,7 @@ dependencies = [ [[package]] name = "ruma-common" version = 
"0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "as_variant", "base64 0.22.1", @@ -3063,7 +3063,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3087,7 +3087,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "bytes", "http", @@ -3105,7 +3105,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "js_int", "thiserror", @@ -3114,7 +3114,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ 
"js_int", "ruma-common", @@ -3124,7 +3124,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "cfg-if", "once_cell", @@ -3140,7 +3140,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "js_int", "ruma-common", @@ -3152,7 +3152,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "headers", "http", @@ -3165,7 +3165,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3181,7 +3181,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73#d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" dependencies = [ "futures-util", "itertools 0.13.0", diff --git a/Cargo.toml b/Cargo.toml index 64cd8ba3..73f16daf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -315,7 +315,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "d7baeb7e5c3ae28e79ad3fe81c5e8b207a26cc73" +rev = "39c1addd37a4eed612ac1135edc2cccd9d331d5e" features = [ "compat", "rand", @@ -346,6 +346,7 @@ features = [ "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", + "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", ] diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 7fe8addf..0fd3c91b 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -203,6 +203,7 @@ pub(super) async fn get_remote_pdu( &server, ruma::api::federation::event::get_event::v1::Request { event_id: event_id.clone().into(), + include_unredacted_content: None, }, ) .await diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 103c0c5e..de280b32 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,18 +1,18 @@ use axum::extract::State; -use conduit::err; +use conduit::{err, Err}; use ruma::{ api::client::{ error::ErrorKind, push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, - set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleScope, + set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, }, }, events::{ push_rules::{PushRulesEvent, PushRulesEventContent}, GlobalAccountDataEventType, }, - push::{InsertPushRuleError, RemovePushRuleError, Ruleset}, + push::{InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId, RemovePushRuleError, Ruleset}, CanonicalJsonObject, CanonicalJsonValue, }; use 
service::Services; @@ -43,7 +43,24 @@ pub(crate) async fn get_pushrules_all_route( let account_data_content = serde_json::from_value::(content_value.into()) .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; - let global_ruleset: Ruleset = account_data_content.global; + let mut global_ruleset = account_data_content.global; + + // remove old deprecated mentions push rules as per MSC4210 + #[allow(deprecated)] + { + use ruma::push::RuleKind::*; + + global_ruleset + .remove(Override, PredefinedOverrideRuleId::ContainsDisplayName) + .ok(); + global_ruleset + .remove(Override, PredefinedOverrideRuleId::RoomNotif) + .ok(); + + global_ruleset + .remove(Content, PredefinedContentRuleId::ContainsUserName) + .ok(); + }; Ok(get_pushrules_all::v3::Response { global: global_ruleset, @@ -58,6 +75,15 @@ pub(crate) async fn get_pushrule_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + // remove old deprecated mentions push rules as per MSC4210 + #[allow(deprecated)] + if body.rule_id.as_str() == PredefinedContentRuleId::ContainsUserName.as_str() + || body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str() + || body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str() + { + return Err!(Request(NotFound("Push rule not found."))); + } + let event: PushRulesEvent = services .account_data .get_global(sender_user, GlobalAccountDataEventType::PushRules) @@ -79,7 +105,7 @@ pub(crate) async fn get_pushrule_route( } } -/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` +/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}` /// /// Creates a single specified push rule for this user. 
pub(crate) async fn set_pushrule_route( @@ -88,13 +114,6 @@ pub(crate) async fn set_pushrule_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let mut account_data: PushRulesEvent = services .account_data .get_global(sender_user, GlobalAccountDataEventType::PushRules) @@ -145,7 +164,7 @@ pub(crate) async fn set_pushrule_route( Ok(set_pushrule::v3::Response {}) } -/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` +/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions` /// /// Gets the actions of a single specified push rule for this user. pub(crate) async fn get_pushrule_actions_route( @@ -153,11 +172,13 @@ pub(crate) async fn get_pushrule_actions_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); + // remove old deprecated mentions push rules as per MSC4210 + #[allow(deprecated)] + if body.rule_id.as_str() == PredefinedContentRuleId::ContainsUserName.as_str() + || body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str() + || body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str() + { + return Err!(Request(NotFound("Push rule not found."))); } let event: PushRulesEvent = services @@ -178,7 +199,7 @@ pub(crate) async fn get_pushrule_actions_route( }) } -/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` +/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. 
pub(crate) async fn set_pushrule_actions_route( @@ -186,13 +207,6 @@ pub(crate) async fn set_pushrule_actions_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let mut account_data: PushRulesEvent = services .account_data .get_global(sender_user, GlobalAccountDataEventType::PushRules) @@ -221,7 +235,7 @@ pub(crate) async fn set_pushrule_actions_route( Ok(set_pushrule_actions::v3::Response {}) } -/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` +/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled` /// /// Gets the enabled status of a single specified push rule for this user. pub(crate) async fn get_pushrule_enabled_route( @@ -229,11 +243,15 @@ pub(crate) async fn get_pushrule_enabled_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); + // remove old deprecated mentions push rules as per MSC4210 + #[allow(deprecated)] + if body.rule_id.as_str() == PredefinedContentRuleId::ContainsUserName.as_str() + || body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str() + || body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str() + { + return Ok(get_pushrule_enabled::v3::Response { + enabled: false, + }); } let event: PushRulesEvent = services @@ -254,7 +272,7 @@ pub(crate) async fn get_pushrule_enabled_route( }) } -/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` +/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled` /// /// Sets the enabled status of a single specified push rule for this user. 
pub(crate) async fn set_pushrule_enabled_route( @@ -262,13 +280,6 @@ pub(crate) async fn set_pushrule_enabled_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let mut account_data: PushRulesEvent = services .account_data .get_global(sender_user, GlobalAccountDataEventType::PushRules) @@ -297,7 +308,7 @@ pub(crate) async fn set_pushrule_enabled_route( Ok(set_pushrule_enabled::v3::Response {}) } -/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` +/// # `DELETE /_matrix/client/r0/pushrules/global/{kind}/{ruleId}` /// /// Deletes a single specified push rule for this user. pub(crate) async fn delete_pushrule_route( @@ -305,13 +316,6 @@ pub(crate) async fn delete_pushrule_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let mut account_data: PushRulesEvent = services .account_data .get_global(sender_user, GlobalAccountDataEventType::PushRules) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 0b2bbf73..026c5a4c 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1159,6 +1159,7 @@ impl Service { origin, get_event::v1::Request { event_id: (*next_id).to_owned(), + include_unredacted_content: None, }, ) .await From b921983a795f042dac0f348f1a832c73bd44de7f Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 26 Oct 2024 17:39:27 -0400 Subject: [PATCH 0118/1248] send room alias on pusher notification Signed-off-by: strawberry --- src/service/pusher/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/service/pusher/mod.rs 
b/src/service/pusher/mod.rs index af15e332..2b90319e 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -332,6 +332,13 @@ impl Service { .await .ok(); + notifi.room_alias = self + .services + .state_accessor + .get_canonical_alias(&event.room_id) + .await + .ok(); + self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) .await?; } From 49343281d477cf414cfec737a00c150d5db34ba3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 24 Oct 2024 06:01:53 +0000 Subject: [PATCH 0119/1248] additional bool extensions Signed-off-by: Jason Volk --- src/core/utils/bool.rs | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/src/core/utils/bool.rs b/src/core/utils/bool.rs index d7ce78fe..d5fa85aa 100644 --- a/src/core/utils/bool.rs +++ b/src/core/utils/bool.rs @@ -2,12 +2,49 @@ /// Boolean extensions and chain.starters pub trait BoolExt { + fn map T>(self, f: F) -> T + where + Self: Sized; + + fn map_ok_or T>(self, err: E, f: F) -> Result; + + fn map_or T>(self, err: T, f: F) -> T; + + fn map_or_else T>(self, err: F, f: F) -> T; + + fn ok_or(self, err: E) -> Result<(), E>; + + fn ok_or_else E>(self, err: F) -> Result<(), E>; + fn or T>(self, f: F) -> Option; fn or_some(self, t: T) -> Option; } impl BoolExt for bool { + #[inline] + fn map T>(self, f: F) -> T + where + Self: Sized, + { + f(self) + } + + #[inline] + fn map_ok_or T>(self, err: E, f: F) -> Result { self.ok_or(err).map(|()| f()) } + + #[inline] + fn map_or T>(self, err: T, f: F) -> T { self.then(f).unwrap_or(err) } + + #[inline] + fn map_or_else T>(self, err: F, f: F) -> T { self.then(f).unwrap_or_else(err) } + + #[inline] + fn ok_or(self, err: E) -> Result<(), E> { self.then_some(()).ok_or(err) } + + #[inline] + fn ok_or_else E>(self, err: F) -> Result<(), E> { self.then_some(()).ok_or_else(err) } + #[inline] fn or T>(self, f: F) -> Option { (!self).then(f) } From efb28c1a9944840143af37bff65bd475f38df717 Mon Sep 17 00:00:00 2001 From: 
Jason Volk Date: Thu, 24 Oct 2024 06:03:45 +0000 Subject: [PATCH 0120/1248] add a Map::contains suite to db Signed-off-by: Jason Volk --- src/database/map.rs | 1 + src/database/map/contains.rs | 88 ++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 src/database/map/contains.rs diff --git a/src/database/map.rs b/src/database/map.rs index cac20d6a..d6b8bf38 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,3 +1,4 @@ +mod contains; mod count; mod get; mod insert; diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs new file mode 100644 index 00000000..a98fe7c5 --- /dev/null +++ b/src/database/map/contains.rs @@ -0,0 +1,88 @@ +use std::{convert::AsRef, fmt::Debug, future::Future, io::Write}; + +use arrayvec::ArrayVec; +use conduit::{implement, utils::TryFutureExtExt, Err, Result}; +use futures::future::ready; +use serde::Serialize; + +use crate::{ser, util}; + +/// Returns true if the map contains the key. +/// - key is serialized into allocated buffer +/// - harder errors may not be reported +#[implement(super::Map)] +pub fn contains(&self, key: &K) -> impl Future + Send +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = Vec::::with_capacity(64); + self.bcontains(key, &mut buf) +} + +/// Returns true if the map contains the key. +/// - key is serialized into stack-buffer +/// - harder errors will panic +#[implement(super::Map)] +pub fn acontains(&self, key: &K) -> impl Future + Send +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = ArrayVec::::new(); + self.bcontains(key, &mut buf) +} + +/// Returns true if the map contains the key. 
+/// - key is serialized into provided buffer +/// - harder errors will panic +#[implement(super::Map)] +#[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] +pub fn bcontains(&self, key: &K, buf: &mut B) -> impl Future + Send +where + K: Serialize + ?Sized + Debug, + B: Write + AsRef<[u8]>, +{ + let key = ser::serialize(buf, key).expect("failed to serialize query key"); + self.exists(key).is_ok() +} + +/// Returns Ok if the map contains the key. +/// - key is raw +#[implement(super::Map)] +pub fn exists(&self, key: &K) -> impl Future> + Send +where + K: AsRef<[u8]> + ?Sized + Debug, +{ + ready(self.exists_blocking(key)) +} + +/// Returns Ok if the map contains the key; NotFound otherwise. Harder errors +/// may not always be reported properly. +#[implement(super::Map)] +#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] +pub fn exists_blocking(&self, key: &K) -> Result<()> +where + K: AsRef<[u8]> + ?Sized + Debug, +{ + if self.maybe_exists_blocking(key) + && self + .db + .db + .get_pinned_cf_opt(&self.cf(), key, &self.read_options) + .map_err(util::map_err)? 
+ .is_some() + { + Ok(()) + } else { + Err!(Request(NotFound("Not found in database"))) + } +} + +#[implement(super::Map)] +fn maybe_exists_blocking(&self, key: &K) -> bool +where + K: AsRef<[u8]> + ?Sized, +{ + self.db + .db + .key_may_exist_cf_opt(&self.cf(), key, &self.read_options) +} From 9438dc89e612ada5e5e44b48315877055498313b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 24 Oct 2024 10:58:08 +0000 Subject: [PATCH 0121/1248] merge and resplit/cleanup appservice service Signed-off-by: Jason Volk --- src/admin/query/appservice.rs | 6 +- src/service/appservice/data.rs | 50 ------ src/service/appservice/mod.rs | 173 ++++++-------------- src/service/appservice/namespace_regex.rs | 70 ++++++++ src/service/appservice/registration_info.rs | 39 +++++ 5 files changed, 157 insertions(+), 181 deletions(-) delete mode 100644 src/service/appservice/data.rs create mode 100644 src/service/appservice/namespace_regex.rs create mode 100644 src/service/appservice/registration_info.rs diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 4b97ef4e..02e89e7a 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -26,11 +26,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_> appservice_id, } => { let timer = tokio::time::Instant::now(); - let results = services - .appservice - .db - .get_registration(appservice_id.as_ref()) - .await; + let results = services.appservice.get_registration(&appservice_id).await; let query_time = timer.elapsed(); diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs deleted file mode 100644 index 8fb7d958..00000000 --- a/src/service/appservice/data.rs +++ /dev/null @@ -1,50 +0,0 @@ -use std::sync::Arc; - -use conduit::{err, utils::stream::TryIgnore, Result}; -use database::{Database, Map}; -use futures::Stream; -use ruma::api::appservice::Registration; - -pub struct Data { - id_appserviceregistrations: Arc, -} - -impl Data { - pub(super) 
fn new(db: &Arc) -> Self { - Self { - id_appserviceregistrations: db["id_appserviceregistrations"].clone(), - } - } - - /// Registers an appservice and returns the ID to the caller - pub(super) fn register_appservice(&self, yaml: &Registration) -> Result { - let id = yaml.id.as_str(); - self.id_appserviceregistrations - .insert(id.as_bytes(), serde_yaml::to_string(&yaml).unwrap().as_bytes()); - - Ok(id.to_owned()) - } - - /// Remove an appservice registration - /// - /// # Arguments - /// - /// * `service_name` - the name you send to register the service previously - pub(super) fn unregister_appservice(&self, service_name: &str) -> Result<()> { - self.id_appserviceregistrations - .remove(service_name.as_bytes()); - Ok(()) - } - - pub async fn get_registration(&self, id: &str) -> Result { - self.id_appserviceregistrations - .get(id) - .await - .and_then(|ref bytes| serde_yaml::from_slice(bytes).map_err(Into::into)) - .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) - } - - pub(super) fn iter_ids(&self) -> impl Stream + Send + '_ { - self.id_appserviceregistrations.keys().ignore_err() - } -} diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 7e2dc738..1617e6e6 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,147 +1,49 @@ -mod data; +mod namespace_regex; +mod registration_info; use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; -use conduit::{err, Result}; -use data::Data; +use conduit::{err, utils::stream::TryIgnore, Result}; +use database::Map; use futures::{Future, StreamExt, TryStreamExt}; -use regex::RegexSet; -use ruma::{ - api::appservice::{Namespace, Registration}, - RoomAliasId, RoomId, UserId, -}; +use ruma::{api::appservice::Registration, RoomAliasId, RoomId, UserId}; use tokio::sync::RwLock; +pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; use crate::{sending, Dep}; -/// Compiled regular expressions 
for a namespace -#[derive(Clone, Debug)] -pub struct NamespaceRegex { - pub exclusive: Option, - pub non_exclusive: Option, -} - -impl NamespaceRegex { - /// Checks if this namespace has rights to a namespace - #[inline] - #[must_use] - pub fn is_match(&self, heystack: &str) -> bool { - if self.is_exclusive_match(heystack) { - return true; - } - - if let Some(non_exclusive) = &self.non_exclusive { - if non_exclusive.is_match(heystack) { - return true; - } - } - false - } - - /// Checks if this namespace has exlusive rights to a namespace - #[inline] - #[must_use] - pub fn is_exclusive_match(&self, heystack: &str) -> bool { - if let Some(exclusive) = &self.exclusive { - if exclusive.is_match(heystack) { - return true; - } - } - false - } -} - -impl RegistrationInfo { - #[must_use] - pub fn is_user_match(&self, user_id: &UserId) -> bool { - self.users.is_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() - } - - #[inline] - #[must_use] - pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { - self.users.is_exclusive_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() - } -} - -impl TryFrom> for NamespaceRegex { - type Error = regex::Error; - - fn try_from(value: Vec) -> Result { - let mut exclusive = Vec::with_capacity(value.len()); - let mut non_exclusive = Vec::with_capacity(value.len()); - - for namespace in value { - if namespace.exclusive { - exclusive.push(namespace.regex); - } else { - non_exclusive.push(namespace.regex); - } - } - - Ok(Self { - exclusive: if exclusive.is_empty() { - None - } else { - Some(RegexSet::new(exclusive)?) - }, - non_exclusive: if non_exclusive.is_empty() { - None - } else { - Some(RegexSet::new(non_exclusive)?) - }, - }) - } -} - -/// Appservice registration combined with its compiled regular expressions. 
-#[derive(Clone, Debug)] -pub struct RegistrationInfo { - pub registration: Registration, - pub users: NamespaceRegex, - pub aliases: NamespaceRegex, - pub rooms: NamespaceRegex, -} - -impl TryFrom for RegistrationInfo { - type Error = regex::Error; - - fn try_from(value: Registration) -> Result { - Ok(Self { - users: value.namespaces.users.clone().try_into()?, - aliases: value.namespaces.aliases.clone().try_into()?, - rooms: value.namespaces.rooms.clone().try_into()?, - registration: value, - }) - } -} - pub struct Service { - pub db: Data, - services: Services, registration_info: RwLock>, + services: Services, + db: Data, } struct Services { sending: Dep, } +struct Data { + id_appserviceregistrations: Arc, +} + #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(args.db), + registration_info: RwLock::new(BTreeMap::new()), services: Services { sending: args.depend::("sending"), }, - registration_info: RwLock::new(BTreeMap::new()), + db: Data { + id_appserviceregistrations: args.db["id_appserviceregistrations"].clone(), + }, })) } async fn worker(self: Arc) -> Result<()> { // Inserting registrations into cache - for appservice in iter_ids(&self.db).await? { + for appservice in self.iter_db_ids().await? 
{ self.registration_info.write().await.insert( appservice.0, appservice @@ -158,9 +60,6 @@ impl crate::Service for Service { } impl Service { - #[inline] - pub async fn all(&self) -> Result> { iter_ids(&self.db).await } - /// Registers an appservice and returns the ID to the caller pub async fn register_appservice(&self, yaml: Registration) -> Result { //TODO: Check for collisions between exclusive appservice namespaces @@ -169,7 +68,11 @@ impl Service { .await .insert(yaml.id.clone(), yaml.clone().try_into()?); - self.db.register_appservice(&yaml) + let id = yaml.id.as_str(); + let yaml = serde_yaml::to_string(&yaml)?; + self.db.id_appserviceregistrations.insert(id, yaml); + + Ok(id.to_owned()) } /// Remove an appservice registration @@ -186,7 +89,7 @@ impl Service { .ok_or(err!("Appservice not found"))?; // remove the appservice from the database - self.db.unregister_appservice(service_name)?; + self.db.id_appserviceregistrations.remove(service_name); // deletes all active requests for the appservice if there are any so we stop // sending to the URL @@ -254,11 +157,29 @@ impl Service { pub fn read(&self) -> impl Future>> { self.registration_info.read() } -} -async fn iter_ids(db: &Data) -> Result> { - db.iter_ids() - .then(|id| async move { Ok((id.clone(), db.get_registration(&id).await?)) }) - .try_collect() - .await + #[inline] + pub async fn all(&self) -> Result> { self.iter_db_ids().await } + + pub async fn get_db_registration(&self, id: &str) -> Result { + self.db + .id_appserviceregistrations + .get(id) + .await + .and_then(|ref bytes| serde_yaml::from_slice(bytes).map_err(Into::into)) + .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) + } + + async fn iter_db_ids(&self) -> Result> { + self.db + .id_appserviceregistrations + .keys() + .ignore_err() + .then(|id: String| async move { + let reg = self.get_db_registration(&id).await?; + Ok((id, reg)) + }) + .try_collect() + .await + } } diff --git 
a/src/service/appservice/namespace_regex.rs b/src/service/appservice/namespace_regex.rs new file mode 100644 index 00000000..3529fc0e --- /dev/null +++ b/src/service/appservice/namespace_regex.rs @@ -0,0 +1,70 @@ +use conduit::Result; +use regex::RegexSet; +use ruma::api::appservice::Namespace; + +/// Compiled regular expressions for a namespace +#[derive(Clone, Debug)] +pub struct NamespaceRegex { + pub exclusive: Option, + pub non_exclusive: Option, +} + +impl NamespaceRegex { + /// Checks if this namespace has rights to a namespace + #[inline] + #[must_use] + pub fn is_match(&self, heystack: &str) -> bool { + if self.is_exclusive_match(heystack) { + return true; + } + + if let Some(non_exclusive) = &self.non_exclusive { + if non_exclusive.is_match(heystack) { + return true; + } + } + false + } + + /// Checks if this namespace has exlusive rights to a namespace + #[inline] + #[must_use] + pub fn is_exclusive_match(&self, heystack: &str) -> bool { + if let Some(exclusive) = &self.exclusive { + if exclusive.is_match(heystack) { + return true; + } + } + false + } +} + +impl TryFrom> for NamespaceRegex { + type Error = regex::Error; + + fn try_from(value: Vec) -> Result { + let mut exclusive = Vec::with_capacity(value.len()); + let mut non_exclusive = Vec::with_capacity(value.len()); + + for namespace in value { + if namespace.exclusive { + exclusive.push(namespace.regex); + } else { + non_exclusive.push(namespace.regex); + } + } + + Ok(Self { + exclusive: if exclusive.is_empty() { + None + } else { + Some(RegexSet::new(exclusive)?) + }, + non_exclusive: if non_exclusive.is_empty() { + None + } else { + Some(RegexSet::new(non_exclusive)?) 
+ }, + }) + } +} diff --git a/src/service/appservice/registration_info.rs b/src/service/appservice/registration_info.rs new file mode 100644 index 00000000..2c8595b1 --- /dev/null +++ b/src/service/appservice/registration_info.rs @@ -0,0 +1,39 @@ +use conduit::Result; +use ruma::{api::appservice::Registration, UserId}; + +use super::NamespaceRegex; + +/// Appservice registration combined with its compiled regular expressions. +#[derive(Clone, Debug)] +pub struct RegistrationInfo { + pub registration: Registration, + pub users: NamespaceRegex, + pub aliases: NamespaceRegex, + pub rooms: NamespaceRegex, +} + +impl RegistrationInfo { + #[must_use] + pub fn is_user_match(&self, user_id: &UserId) -> bool { + self.users.is_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() + } + + #[inline] + #[must_use] + pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { + self.users.is_exclusive_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() + } +} + +impl TryFrom for RegistrationInfo { + type Error = regex::Error; + + fn try_from(value: Registration) -> Result { + Ok(Self { + users: value.namespaces.users.clone().try_into()?, + aliases: value.namespaces.aliases.clone().try_into()?, + rooms: value.namespaces.rooms.clone().try_into()?, + registration: value, + }) + } +} From 0e616f1d1267481ed97e9adc6d779d29fcf9ade2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 24 Oct 2024 11:16:44 +0000 Subject: [PATCH 0122/1248] add event macro log wrapper suite Signed-off-by: Jason Volk --- src/core/log/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 04d250a6..1cba236f 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -27,6 +27,11 @@ pub struct Log { // necessary but discouraged. Remember debug_ log macros are also exported to // the crate namespace like these. +#[macro_export] +macro_rules! 
event { + ( $level:expr, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } +} + #[macro_export] macro_rules! error { ( $($x:tt)+ ) => { ::tracing::error!( $($x)+ ) } From e175b7d28dffbea663c25e66babd3184e7fc5b1f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 24 Oct 2024 11:24:03 +0000 Subject: [PATCH 0123/1248] slightly cleanup prev_event eval loop Signed-off-by: Jason Volk --- src/service/rooms/event_handler/mod.rs | 41 +++++++++++++------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 026c5a4c..ec04e748 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -206,7 +206,7 @@ impl Service { debug!(events = ?sorted_prev_events, "Got previous events"); for prev_id in sorted_prev_events { self.services.server.check_running()?; - match self + if let Err(e) = self .handle_prev_pdu( origin, event_id, @@ -218,25 +218,26 @@ impl Service { ) .await { - Ok(()) => continue, - Err(e) => { - warn!("Prev event {prev_id} failed: {e}"); - match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry((*prev_id).to_owned()) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - }, - hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)); - }, - }; - }, + use hash_map::Entry; + + let now = Instant::now(); + warn!("Prev event {prev_id} failed: {e}"); + + match self + .services + .globals + .bad_event_ratelimiter + .write() + .expect("locked") + .entry(prev_id.into()) + { + Entry::Vacant(e) => { + e.insert((now, 1)); + }, + Entry::Occupied(mut e) => { + *e.get_mut() = (now, e.get().1.saturating_add(1)); + }, + }; } } From 60cc07134f3d80f0ba25d4bc1b6736c30494f947 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 24 Oct 2024 11:24:37 +0000 Subject: [PATCH 0124/1248] log error for auth_chain corruption immediately 
Signed-off-by: Jason Volk --- src/service/rooms/auth_chain/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index f3861ca3..1387bc7d 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -167,10 +167,12 @@ impl Service { Err(e) => debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"), Ok(pdu) => { if pdu.room_id != room_id { - return Err!(Request(Forbidden( - "auth event {event_id:?} for incorrect room {} which is not {room_id}", - pdu.room_id, - ))); + return Err!(Request(Forbidden(error!( + ?event_id, + ?room_id, + wrong_room_id = ?pdu.room_id, + "auth event for incorrect room" + )))); } for auth_event in &pdu.auth_events { From ee92a33a4de8db924ee5e203f5b3c64dade8dcc6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 24 Oct 2024 12:03:56 +0000 Subject: [PATCH 0125/1248] add some accessors to Ar for common patterns Signed-off-by: Jason Volk --- src/api/client/backup.rs | 107 ++++++++++++--------------- src/api/router/args.rs | 18 +++-- src/api/router/handler.rs | 2 +- src/api/server/backfill.rs | 7 +- src/api/server/event.rs | 6 +- src/api/server/event_auth.rs | 6 +- src/api/server/get_missing_events.rs | 8 +- src/api/server/hierarchy.rs | 4 +- src/api/server/invite.rs | 9 +-- src/api/server/make_join.rs | 13 ++-- src/api/server/make_leave.rs | 5 +- src/api/server/send.rs | 12 ++- src/api/server/send_join.rs | 19 ++--- src/api/server/send_leave.rs | 22 +----- src/api/server/state.rs | 6 +- src/api/server/state_ids.rs | 6 +- src/api/server/user.rs | 6 +- 17 files changed, 109 insertions(+), 147 deletions(-) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index d52da80a..f435e086 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -18,10 +18,9 @@ use crate::{Result, Ruma}; pub(crate) async fn create_backup_version_route( State(services): State, body: Ruma, ) 
-> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = services .key_backups - .create_backup(sender_user, &body.algorithm)?; + .create_backup(body.sender_user(), &body.algorithm)?; Ok(create_backup_version::v3::Response { version, @@ -35,10 +34,9 @@ pub(crate) async fn create_backup_version_route( pub(crate) async fn update_backup_version_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); services .key_backups - .update_backup(sender_user, &body.version, &body.algorithm) + .update_backup(body.sender_user(), &body.version, &body.algorithm) .await?; Ok(update_backup_version::v3::Response {}) @@ -50,19 +48,25 @@ pub(crate) async fn update_backup_version_route( pub(crate) async fn get_latest_backup_info_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let (version, algorithm) = services .key_backups - .get_latest_backup(sender_user) + .get_latest_backup(body.sender_user()) .await .map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?; Ok(get_latest_backup_info::v3::Response { algorithm, - count: (UInt::try_from(services.key_backups.count_keys(sender_user, &version).await) - .expect("user backup keys count should not be that high")), - etag: services.key_backups.get_etag(sender_user, &version).await, + count: (UInt::try_from( + services + .key_backups + .count_keys(body.sender_user(), &version) + .await, + ) + .expect("user backup keys count should not be that high")), + etag: services + .key_backups + .get_etag(body.sender_user(), &version) + .await, version, }) } @@ -73,10 +77,9 @@ pub(crate) async fn get_latest_backup_info_route( pub(crate) async fn get_backup_info_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = services 
.key_backups - .get_backup(sender_user, &body.version) + .get_backup(body.sender_user(), &body.version) .await .map_err(|_| err!(Request(NotFound("Key backup does not exist at version {:?}", body.version))))?; @@ -84,12 +87,12 @@ pub(crate) async fn get_backup_info_route( algorithm, count: services .key_backups - .count_keys(sender_user, &body.version) + .count_keys(body.sender_user(), &body.version) .await .try_into()?, etag: services .key_backups - .get_etag(sender_user, &body.version) + .get_etag(body.sender_user(), &body.version) .await, version: body.version.clone(), }) @@ -104,11 +107,9 @@ pub(crate) async fn get_backup_info_route( pub(crate) async fn delete_backup_version_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services .key_backups - .delete_backup(sender_user, &body.version) + .delete_backup(body.sender_user(), &body.version) .await; Ok(delete_backup_version::v3::Response {}) @@ -125,11 +126,9 @@ pub(crate) async fn delete_backup_version_route( pub(crate) async fn add_backup_keys_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if services .key_backups - .get_latest_backup_version(sender_user) + .get_latest_backup_version(body.sender_user()) .await .is_ok_and(|version| version != body.version) { @@ -142,7 +141,7 @@ pub(crate) async fn add_backup_keys_route( for (session_id, key_data) in &room.sessions { services .key_backups - .add_key(sender_user, &body.version, room_id, session_id, key_data) + .add_key(body.sender_user(), &body.version, room_id, session_id, key_data) .await?; } } @@ -150,12 +149,12 @@ pub(crate) async fn add_backup_keys_route( Ok(add_backup_keys::v3::Response { count: services .key_backups - .count_keys(sender_user, &body.version) + .count_keys(body.sender_user(), &body.version) .await .try_into()?, etag: services .key_backups - .get_etag(sender_user, 
&body.version) + .get_etag(body.sender_user(), &body.version) .await, }) } @@ -171,11 +170,9 @@ pub(crate) async fn add_backup_keys_route( pub(crate) async fn add_backup_keys_for_room_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if services .key_backups - .get_latest_backup_version(sender_user) + .get_latest_backup_version(body.sender_user()) .await .is_ok_and(|version| version != body.version) { @@ -187,19 +184,19 @@ pub(crate) async fn add_backup_keys_for_room_route( for (session_id, key_data) in &body.sessions { services .key_backups - .add_key(sender_user, &body.version, &body.room_id, session_id, key_data) + .add_key(body.sender_user(), &body.version, &body.room_id, session_id, key_data) .await?; } Ok(add_backup_keys_for_room::v3::Response { count: services .key_backups - .count_keys(sender_user, &body.version) + .count_keys(body.sender_user(), &body.version) .await .try_into()?, etag: services .key_backups - .get_etag(sender_user, &body.version) + .get_etag(body.sender_user(), &body.version) .await, }) } @@ -215,11 +212,9 @@ pub(crate) async fn add_backup_keys_for_room_route( pub(crate) async fn add_backup_keys_for_session_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if services .key_backups - .get_latest_backup_version(sender_user) + .get_latest_backup_version(body.sender_user()) .await .is_ok_and(|version| version != body.version) { @@ -230,18 +225,24 @@ pub(crate) async fn add_backup_keys_for_session_route( services .key_backups - .add_key(sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data) + .add_key( + body.sender_user(), + &body.version, + &body.room_id, + &body.session_id, + &body.session_data, + ) .await?; Ok(add_backup_keys_for_session::v3::Response { count: services .key_backups - .count_keys(sender_user, &body.version) + 
.count_keys(body.sender_user(), &body.version) .await .try_into()?, etag: services .key_backups - .get_etag(sender_user, &body.version) + .get_etag(body.sender_user(), &body.version) .await, }) } @@ -252,11 +253,9 @@ pub(crate) async fn add_backup_keys_for_session_route( pub(crate) async fn get_backup_keys_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let rooms = services .key_backups - .get_all(sender_user, &body.version) + .get_all(body.sender_user(), &body.version) .await; Ok(get_backup_keys::v3::Response { @@ -270,11 +269,9 @@ pub(crate) async fn get_backup_keys_route( pub(crate) async fn get_backup_keys_for_room_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sessions = services .key_backups - .get_room(sender_user, &body.version, &body.room_id) + .get_room(body.sender_user(), &body.version, &body.room_id) .await; Ok(get_backup_keys_for_room::v3::Response { @@ -288,11 +285,9 @@ pub(crate) async fn get_backup_keys_for_room_route( pub(crate) async fn get_backup_keys_for_session_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let key_data = services .key_backups - .get_session(sender_user, &body.version, &body.room_id, &body.session_id) + .get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id) .await .map_err(|_| err!(Request(NotFound(debug_error!("Backup key not found for this user's session.")))))?; @@ -307,22 +302,20 @@ pub(crate) async fn get_backup_keys_for_session_route( pub(crate) async fn delete_backup_keys_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services .key_backups - .delete_all_keys(sender_user, &body.version) + .delete_all_keys(body.sender_user(), 
&body.version) .await; Ok(delete_backup_keys::v3::Response { count: services .key_backups - .count_keys(sender_user, &body.version) + .count_keys(body.sender_user(), &body.version) .await .try_into()?, etag: services .key_backups - .get_etag(sender_user, &body.version) + .get_etag(body.sender_user(), &body.version) .await, }) } @@ -333,22 +326,20 @@ pub(crate) async fn delete_backup_keys_route( pub(crate) async fn delete_backup_keys_for_room_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services .key_backups - .delete_room_keys(sender_user, &body.version, &body.room_id) + .delete_room_keys(body.sender_user(), &body.version, &body.room_id) .await; Ok(delete_backup_keys_for_room::v3::Response { count: services .key_backups - .count_keys(sender_user, &body.version) + .count_keys(body.sender_user(), &body.version) .await .try_into()?, etag: services .key_backups - .get_etag(sender_user, &body.version) + .get_etag(body.sender_user(), &body.version) .await, }) } @@ -359,22 +350,20 @@ pub(crate) async fn delete_backup_keys_for_room_route( pub(crate) async fn delete_backup_keys_for_session_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services .key_backups - .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id) + .delete_room_key(body.sender_user(), &body.version, &body.room_id, &body.session_id) .await; Ok(delete_backup_keys_for_session::v3::Response { count: services .key_backups - .count_keys(sender_user, &body.version) + .count_keys(body.sender_user(), &body.version) .await .try_into()?, etag: services .key_backups - .get_etag(sender_user, &body.version) + .get_etag(body.sender_user(), &body.version) .await, }) } diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 746e1cfc..cefacac1 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ 
-3,17 +3,14 @@ use std::{mem, ops::Deref}; use axum::{async_trait, body::Body, extract::FromRequest}; use bytes::{BufMut, BytesMut}; use conduit::{debug, err, trace, utils::string::EMPTY, Error, Result}; -use ruma::{api::IncomingRequest, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId}; +use ruma::{api::IncomingRequest, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, ServerName, UserId}; use service::Services; use super::{auth, auth::Auth, request, request::Request}; use crate::{service::appservice::RegistrationInfo, State}; /// Extractor for Ruma request structs -pub(crate) struct Args -where - T: IncomingRequest + Send + Sync + 'static, -{ +pub(crate) struct Args { /// Request struct body pub(crate) body: T, @@ -38,6 +35,17 @@ where pub(crate) json_body: Option, } +impl Args +where + T: IncomingRequest + Send + Sync + 'static, +{ + #[inline] + pub(crate) fn sender_user(&self) -> &UserId { self.sender_user.as_deref().expect("user is authenticated") } + + #[inline] + pub(crate) fn origin(&self) -> &ServerName { self.origin.as_deref().expect("server is authenticated") } +} + #[async_trait] impl FromRequest for Args where diff --git a/src/api/router/handler.rs b/src/api/router/handler.rs index 3b7b1eeb..0022f06a 100644 --- a/src/api/router/handler.rs +++ b/src/api/router/handler.rs @@ -38,7 +38,7 @@ macro_rules! 
ruma_handler { where Fun: Fn($($tx,)* Ruma,) -> Fut + Send + Sync + 'static, Fut: Future> + Send, - Req: IncomingRequest + Send + Sync, + Req: IncomingRequest + Send + Sync + 'static, Err: IntoResponse + Send, ::OutgoingResponse: Send, $( $tx: FromRequestParts + Send + Sync + 'static, )* diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 2bbc95ca..088b891a 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -18,12 +18,10 @@ use crate::Ruma; pub(crate) async fn get_backfill_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - services .rooms .event_handler - .acl_check(origin, &body.room_id) + .acl_check(body.origin(), &body.room_id) .await?; if !services @@ -33,7 +31,7 @@ pub(crate) async fn get_backfill_route( .await && !services .rooms .state_cache - .server_in_room(origin, &body.room_id) + .server_in_room(body.origin(), &body.room_id) .await { return Err!(Request(Forbidden("Server is not in room."))); @@ -59,6 +57,7 @@ pub(crate) async fn get_backfill_route( .try_into() .expect("UInt could not be converted to usize"); + let origin = body.origin(); let pdus = services .rooms .timeline diff --git a/src/api/server/event.rs b/src/api/server/event.rs index e4eac794..64ce3e40 100644 --- a/src/api/server/event.rs +++ b/src/api/server/event.rs @@ -13,8 +13,6 @@ use crate::Ruma; pub(crate) async fn get_event_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - let event = services .rooms .timeline @@ -37,7 +35,7 @@ pub(crate) async fn get_event_route( .await && !services .rooms .state_cache - .server_in_room(origin, room_id) + .server_in_room(body.origin(), room_id) .await { return Err!(Request(Forbidden("Server is not in room."))); @@ -46,7 +44,7 @@ pub(crate) async fn get_event_route( if !services .rooms .state_accessor - .server_can_see_event(origin, room_id, 
&body.event_id) + .server_can_see_event(body.origin(), room_id, &body.event_id) .await? { return Err!(Request(Forbidden("Server is not allowed to see event."))); diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 8307a4ad..8fe96f81 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -18,12 +18,10 @@ use crate::Ruma; pub(crate) async fn get_event_authorization_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - services .rooms .event_handler - .acl_check(origin, &body.room_id) + .acl_check(body.origin(), &body.room_id) .await?; if !services @@ -33,7 +31,7 @@ pub(crate) async fn get_event_authorization_route( .await && !services .rooms .state_cache - .server_in_room(origin, &body.room_id) + .server_in_room(body.origin(), &body.room_id) .await { return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index e267898f..aee4fbe9 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -13,12 +13,10 @@ use crate::Ruma; pub(crate) async fn get_missing_events_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - services .rooms .event_handler - .acl_check(origin, &body.room_id) + .acl_check(body.origin(), &body.room_id) .await?; if !services @@ -28,7 +26,7 @@ pub(crate) async fn get_missing_events_route( .await && !services .rooms .state_cache - .server_in_room(origin, &body.room_id) + .server_in_room(body.origin(), &body.room_id) .await { return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room")); @@ -71,7 +69,7 @@ pub(crate) async fn get_missing_events_route( if !services .rooms .state_accessor - .server_can_see_event(origin, &body.room_id, &queued_events[i]) + .server_can_see_event(body.origin(), 
&body.room_id, &queued_events[i]) .await? { i = i.saturating_add(1); diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index 002bd763..e3ce7108 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -10,13 +10,11 @@ use crate::{Error, Result, Ruma}; pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - if services.rooms.metadata.exists(&body.room_id).await { services .rooms .spaces - .get_federation_hierarchy(&body.room_id, origin, body.suggested_only) + .get_federation_hierarchy(&body.room_id, body.origin(), body.suggested_only) .await } else { Err(Error::BadRequest(ErrorKind::NotFound, "Room does not exist.")) diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index a9e404c5..b30a1b58 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -18,13 +18,11 @@ pub(crate) async fn create_invite_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - // ACL check origin services .rooms .event_handler - .acl_check(origin, &body.room_id) + .acl_check(body.origin(), &body.room_id) .await?; if !services @@ -55,10 +53,11 @@ pub(crate) async fn create_invite_route( .globals .config .forbidden_remote_server_names - .contains(origin) + .contains(body.origin()) { warn!( - "Received federated/remote invite from banned server {origin} for room ID {}. Rejecting.", + "Received federated/remote invite from banned server {} for room ID {}. 
Rejecting.", + body.origin(), body.room_id ); diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index 85668038..c3524f0e 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -30,8 +30,7 @@ pub(crate) async fn create_join_event_template_route( return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); } - let origin = body.origin.as_ref().expect("server is authenticated"); - if body.user_id.server_name() != origin { + if body.user_id.server_name() != body.origin() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Not allowed to join on behalf of another server/user", @@ -42,19 +41,21 @@ pub(crate) async fn create_join_event_template_route( services .rooms .event_handler - .acl_check(origin, &body.room_id) + .acl_check(body.origin(), &body.room_id) .await?; if services .globals .config .forbidden_remote_server_names - .contains(origin) + .contains(body.origin()) { warn!( - "Server {origin} for remote user {} tried joining room ID {} which has a server name that is globally \ + "Server {} for remote user {} tried joining room ID {} which has a server name that is globally \ forbidden. 
Rejecting.", - &body.user_id, &body.room_id, + body.origin(), + &body.user_id, + &body.room_id, ); return Err(Error::BadRequest( ErrorKind::forbidden(), diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 81a32c86..33a94560 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -19,8 +19,7 @@ pub(crate) async fn create_leave_event_template_route( return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); } - let origin = body.origin.as_ref().expect("server is authenticated"); - if body.user_id.server_name() != origin { + if body.user_id.server_name() != body.origin() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Not allowed to leave on behalf of another server/user", @@ -31,7 +30,7 @@ pub(crate) async fn create_leave_event_template_route( services .rooms .event_handler - .acl_check(origin, &body.room_id) + .acl_check(body.origin(), &body.room_id) .await?; let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; diff --git a/src/api/server/send.rs b/src/api/server/send.rs index d5d3ffbb..2da99c93 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -41,9 +41,7 @@ pub(crate) async fn send_transaction_message_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - - if *origin != body.body.origin { + if body.origin() != body.body.origin { return Err!(Request(Forbidden( "Not allowed to send transactions on behalf of other servers" ))); @@ -67,19 +65,19 @@ pub(crate) async fn send_transaction_message_route( edus = ?body.edus.len(), elapsed = ?txn_start_time.elapsed(), id = ?body.transaction_id, - origin =?body.origin, + origin =?body.origin(), "Starting txn", ); - let resolved_map = handle_pdus(&services, &client, &body.pdus, origin, &txn_start_time).await?; - handle_edus(&services, &client, &body.edus, origin).await; + let 
resolved_map = handle_pdus(&services, &client, &body.pdus, body.origin(), &txn_start_time).await?; + handle_edus(&services, &client, &body.edus, body.origin()).await; debug!( pdus = ?body.pdus.len(), edus = ?body.edus.len(), elapsed = ?txn_start_time.elapsed(), id = ?body.transaction_id, - origin =?body.origin, + origin =?body.origin(), "Finished txn", ); diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index d888d75e..c3273baf 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -217,16 +217,15 @@ async fn create_join_event( pub(crate) async fn create_join_event_v1_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - if services .globals .config .forbidden_remote_server_names - .contains(origin) + .contains(body.origin()) { warn!( - "Server {origin} tried joining room ID {} who has a server name that is globally forbidden. Rejecting.", + "Server {} tried joining room ID {} who has a server name that is globally forbidden. Rejecting.", + body.origin(), &body.room_id, ); return Err(Error::BadRequest( @@ -243,8 +242,8 @@ pub(crate) async fn create_join_event_v1_route( .contains(&server.to_owned()) { warn!( - "Server {origin} tried joining room ID {} which has a server name that is globally forbidden. \ - Rejecting.", + "Server {} tried joining room ID {} which has a server name that is globally forbidden. 
Rejecting.", + body.origin(), &body.room_id, ); return Err(Error::BadRequest( @@ -254,7 +253,7 @@ pub(crate) async fn create_join_event_v1_route( } } - let room_state = create_join_event(&services, origin, &body.room_id, &body.pdu).await?; + let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu).await?; Ok(create_join_event::v1::Response { room_state, @@ -267,13 +266,11 @@ pub(crate) async fn create_join_event_v1_route( pub(crate) async fn create_join_event_v2_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - if services .globals .config .forbidden_remote_server_names - .contains(origin) + .contains(body.origin()) { return Err(Error::BadRequest( ErrorKind::forbidden(), @@ -299,7 +296,7 @@ pub(crate) async fn create_join_event_v2_route( auth_chain, state, event, - } = create_join_event(&services, origin, &body.room_id, &body.pdu).await?; + } = create_join_event(&services, body.origin(), &body.room_id, &body.pdu).await?; let room_state = create_join_event::v2::RoomState { members_omitted: false, auth_chain, diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index 0530f9dd..7b4a8aee 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -8,7 +8,7 @@ use ruma::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, }, - OwnedServerName, OwnedUserId, RoomId, ServerName, + OwnedUserId, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; @@ -23,9 +23,7 @@ use crate::{ pub(crate) async fn create_leave_event_v1_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - - create_leave_event(&services, origin, &body.room_id, &body.pdu).await?; + create_leave_event(&services, body.origin(), &body.room_id, &body.pdu).await?; Ok(create_leave_event::v1::Response::new()) } @@ -36,9 +34,7 @@ pub(crate) async fn 
create_leave_event_v1_route( pub(crate) async fn create_leave_event_v2_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - - create_leave_event(&services, origin, &body.room_id, &body.pdu).await?; + create_leave_event(&services, body.origin(), &body.room_id, &body.pdu).await?; Ok(create_leave_event::v2::Response::new()) } @@ -139,16 +135,6 @@ async fn create_leave_event( )); } - let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing origin property."))?, - ) - .expect("CanonicalJson is valid json value"), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?; - let mutex_lock = services .rooms .event_handler @@ -159,7 +145,7 @@ async fn create_leave_event( let pdu_id: Vec = services .rooms .event_handler - .handle_incoming_pdu(&origin, room_id, &event_id, value, true) + .handle_incoming_pdu(origin, room_id, &event_id, value, true) .await? 
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Could not accept as timeline event."))?; diff --git a/src/api/server/state.rs b/src/api/server/state.rs index 3a27cd0a..59bb6c7b 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -13,12 +13,10 @@ use crate::Ruma; pub(crate) async fn get_room_state_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - services .rooms .event_handler - .acl_check(origin, &body.room_id) + .acl_check(body.origin(), &body.room_id) .await?; if !services @@ -28,7 +26,7 @@ pub(crate) async fn get_room_state_route( .await && !services .rooms .state_cache - .server_in_room(origin, &body.room_id) + .server_in_room(body.origin(), &body.room_id) .await { return Err!(Request(Forbidden("Server is not in room."))); diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index b026abf1..957a2a86 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -14,12 +14,10 @@ use crate::{Result, Ruma}; pub(crate) async fn get_room_state_ids_route( State(services): State, body: Ruma, ) -> Result { - let origin = body.origin.as_ref().expect("server is authenticated"); - services .rooms .event_handler - .acl_check(origin, &body.room_id) + .acl_check(body.origin(), &body.room_id) .await?; if !services @@ -29,7 +27,7 @@ pub(crate) async fn get_room_state_ids_route( .await && !services .rooms .state_cache - .server_in_room(origin, &body.room_id) + .server_in_room(body.origin(), &body.room_id) .await { return Err!(Request(Forbidden("Server is not in room."))); diff --git a/src/api/server/user.rs b/src/api/server/user.rs index 0718da58..40f330a1 100644 --- a/src/api/server/user.rs +++ b/src/api/server/user.rs @@ -27,8 +27,6 @@ pub(crate) async fn get_devices_route( )); } - let origin = body.origin.as_ref().expect("server is authenticated"); - let user_id = &body.user_id; Ok(get_devices::v1::Response { user_id: user_id.clone(), 
@@ -66,12 +64,12 @@ pub(crate) async fn get_devices_route( .await, master_key: services .users - .get_master_key(None, &body.user_id, &|u| u.server_name() == origin) + .get_master_key(None, &body.user_id, &|u| u.server_name() == body.origin()) .await .ok(), self_signing_key: services .users - .get_self_signing_key(None, &body.user_id, &|u| u.server_name() == origin) + .get_self_signing_key(None, &body.user_id, &|u| u.server_name() == body.origin()) .await .ok(), }) From 8742266ff0422fb678c86306d2d7384ff7081fe4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 25 Oct 2024 01:16:01 +0000 Subject: [PATCH 0126/1248] split up core/pdu Signed-off-by: Jason Volk --- src/core/pdu/content.rs | 20 ++ src/core/pdu/id.rs | 27 +++ src/core/pdu/mod.rs | 456 +++----------------------------------- src/core/pdu/redact.rs | 93 ++++++++ src/core/pdu/state_res.rs | 30 +++ src/core/pdu/strip.rs | 208 +++++++++++++++++ src/core/pdu/unsigned.rs | 83 +++++++ 7 files changed, 492 insertions(+), 425 deletions(-) create mode 100644 src/core/pdu/content.rs create mode 100644 src/core/pdu/id.rs create mode 100644 src/core/pdu/redact.rs create mode 100644 src/core/pdu/state_res.rs create mode 100644 src/core/pdu/strip.rs create mode 100644 src/core/pdu/unsigned.rs diff --git a/src/core/pdu/content.rs b/src/core/pdu/content.rs new file mode 100644 index 00000000..a6d86554 --- /dev/null +++ b/src/core/pdu/content.rs @@ -0,0 +1,20 @@ +use serde::Deserialize; +use serde_json::value::Value as JsonValue; + +use crate::{err, implement, Result}; + +#[must_use] +#[implement(super::PduEvent)] +pub fn get_content_as_value(&self) -> JsonValue { + self.get_content() + .expect("pdu content must be a valid JSON value") +} + +#[implement(super::PduEvent)] +pub fn get_content(&self) -> Result +where + T: for<'de> Deserialize<'de>, +{ + serde_json::from_str(self.content.get()) + .map_err(|e| err!(Database("Failed to deserialize pdu content into type: {e}"))) +} diff --git a/src/core/pdu/id.rs 
b/src/core/pdu/id.rs new file mode 100644 index 00000000..ae5b85f9 --- /dev/null +++ b/src/core/pdu/id.rs @@ -0,0 +1,27 @@ +use ruma::{CanonicalJsonObject, OwnedEventId, RoomVersionId}; +use serde_json::value::RawValue as RawJsonValue; + +use crate::{err, Result}; + +/// Generates a correct eventId for the incoming pdu. +/// +/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. +pub fn gen_event_id_canonical_json( + pdu: &RawJsonValue, room_version_id: &RoomVersionId, +) -> Result<(OwnedEventId, CanonicalJsonObject)> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) + .map_err(|e| err!(BadServerResponse(warn!("Error parsing incoming event: {e:?}"))))?; + + let event_id = gen_event_id(&value, room_version_id)?; + + Ok((event_id, value)) +} + +/// Generates a correct eventId for the incoming pdu. +pub fn gen_event_id(value: &CanonicalJsonObject, room_version_id: &RoomVersionId) -> Result { + let reference_hash = ruma::signatures::reference_hash(value, room_version_id)?; + let event_id: OwnedEventId = format!("${reference_hash}").try_into()?; + + Ok(event_id) +} diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 274b96bd..9970c39e 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -1,44 +1,28 @@ mod builder; +mod content; mod count; +mod id; +mod redact; +mod state_res; +mod strip; +mod unsigned; -use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; +use std::{cmp::Ordering, sync::Arc}; use ruma::{ - canonical_json::redact_content_in_place, - events::{ - room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent}, - space::child::HierarchySpaceChildEvent, - AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, - AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType, - }, - serde::Raw, - state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, - OwnedUserId, RoomId, 
RoomVersionId, UInt, UserId, + events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedRoomId, OwnedUserId, UInt, }; use serde::{Deserialize, Serialize}; -use serde_json::{ - json, - value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}, -}; +use serde_json::value::RawValue as RawJsonValue; pub use self::{ builder::{Builder, Builder as PduBuilder}, count::PduCount, + id::*, }; -use crate::{err, is_true, warn, Error, Result}; - -#[derive(Deserialize)] -struct ExtractRedactedBecause { - redacted_because: Option, -} - -/// Content hashes of a PDU. -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct EventHash { - /// The SHA-256 hash. - pub sha256: String, -} +use crate::Result; +/// Persistent Data Unit (Event) #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: Arc, @@ -65,415 +49,37 @@ pub struct PduEvent { pub signatures: Option>, } +/// Content hashes of a PDU. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct EventHash { + /// The SHA-256 hash. 
+ pub sha256: String, +} + impl PduEvent { - #[tracing::instrument(skip(self), level = "debug")] - pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> Result<()> { - self.unsigned = None; - - let mut content = serde_json::from_str(self.content.get()) - .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; - - redact_content_in_place(&mut content, &room_version_id, self.kind.to_string()) - .map_err(|e| Error::Redaction(self.sender.server_name().to_owned(), e))?; - - self.unsigned = Some( - to_raw_value(&json!({ - "redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works") - })) - .expect("to string always works"), - ); - - self.content = to_raw_value(&content).expect("to string always works"); - - Ok(()) - } - - #[must_use] - pub fn is_redacted(&self) -> bool { - let Some(unsigned) = &self.unsigned else { - return false; - }; - - let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else { - return false; - }; - - unsigned.redacted_because.is_some() - } - - pub fn remove_transaction_id(&mut self) -> Result<()> { - let Some(unsigned) = &self.unsigned else { - return Ok(()); - }; - - let mut unsigned: BTreeMap> = - serde_json::from_str(unsigned.get()).map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - - unsigned.remove("transaction_id"); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); - - Ok(()) - } - - pub fn add_age(&mut self) -> Result<()> { - let mut unsigned: BTreeMap> = self - .unsigned - .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) - .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - - // deliberately allowing for the possibility of negative age - let now: i128 = MilliSecondsSinceUnixEpoch::now().get().into(); - let then: i128 = self.origin_server_ts.into(); - let this_age = now.saturating_sub(then); - - unsigned.insert("age".to_owned(), 
to_raw_value(&this_age).expect("age is valid")); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); - - Ok(()) - } - - /// Copies the `redacts` property of the event to the `content` dict and - /// vice-versa. - /// - /// This follows the specification's - /// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): - /// - /// > For backwards-compatibility with older clients, servers should add a - /// > redacts - /// > property to the top level of m.room.redaction events in when serving - /// > such events - /// > over the Client-Server API. - /// - /// > For improved compatibility with newer clients, servers should add a - /// > redacts property - /// > to the content of m.room.redaction events in older room versions when - /// > serving - /// > such events over the Client-Server API. - #[must_use] - pub fn copy_redacts(&self) -> (Option>, Box) { - if self.kind == TimelineEventType::RoomRedaction { - if let Ok(mut content) = serde_json::from_str::(self.content.get()) { - if let Some(redacts) = content.redacts { - return (Some(redacts.into()), self.content.clone()); - } else if let Some(redacts) = self.redacts.clone() { - content.redacts = Some(redacts.into()); - return ( - self.redacts.clone(), - to_raw_value(&content).expect("Must be valid, we only added redacts field"), - ); - } - } - } - - (self.redacts.clone(), self.content.clone()) - } - - #[must_use] - pub fn get_content_as_value(&self) -> JsonValue { - self.get_content() - .expect("pdu content must be a valid JSON value") - } - - pub fn get_content(&self) -> Result - where - T: for<'de> Deserialize<'de>, - { - serde_json::from_str(self.content.get()) - .map_err(|e| err!(Database("Failed to deserialize pdu content into type: {e}"))) - } - - pub fn contains_unsigned_property(&self, property: &str, is_type: F) -> bool - where - F: FnOnce(&JsonValue) -> bool, - { - self.get_unsigned_as_value() - 
.get(property) - .map(is_type) - .is_some_and(is_true!()) - } - - pub fn get_unsigned_property(&self, property: &str) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.get_unsigned_as_value() - .get_mut(property) - .map(JsonValue::take) - .map(serde_json::from_value) - .ok_or(err!(Request(NotFound("property not found in unsigned object"))))? - .map_err(|e| err!(Database("Failed to deserialize unsigned.{property} into type: {e}"))) - } - - #[must_use] - pub fn get_unsigned_as_value(&self) -> JsonValue { self.get_unsigned::().unwrap_or_default() } - - pub fn get_unsigned(&self) -> Result { - self.unsigned - .as_ref() - .map(|raw| raw.get()) - .map(serde_json::from_str) - .ok_or(err!(Request(NotFound("\"unsigned\" property not found in pdu"))))? - .map_err(|e| err!(Database("Failed to deserialize \"unsigned\" into value: {e}"))) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_sync_room_event(&self) -> Raw { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") - } - - /// This only works for events that are also AnyRoomEvents. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn to_any_event(&self) -> Raw { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_room_event(&self) -> Raw { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_message_like_event(&self) -> Raw { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value 
always works") - } - - #[must_use] - pub fn to_state_event_value(&self) -> JsonValue { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - "state_key": self.state_key, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - - json - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_state_event(&self) -> Raw { - serde_json::from_value(self.to_state_event_value()).expect("Raw::from_value always works") - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_sync_state_event(&self) -> Raw { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "state_key": self.state_key, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - - serde_json::from_value(json).expect("Raw::from_value always works") - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_stripped_state_event(&self) -> Raw { - let json = json!({ - "content": self.content, - "type": self.kind, - "sender": self.sender, - "state_key": self.state_key, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_stripped_spacechild_state_event(&self) -> Raw { - let json = json!({ - "content": self.content, - "type": self.kind, - "sender": self.sender, - "state_key": self.state_key, - "origin_server_ts": self.origin_server_ts, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn to_member_event(&self) -> Raw> { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - 
"origin_server_ts": self.origin_server_ts, - "redacts": self.redacts, - "room_id": self.room_id, - "state_key": self.state_key, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - - serde_json::from_value(json).expect("Raw::from_value always works") - } - pub fn from_id_val(event_id: &EventId, mut json: CanonicalJsonObject) -> Result { - json.insert("event_id".into(), CanonicalJsonValue::String(event_id.into())); - - let value = serde_json::to_value(json)?; - let pdu = serde_json::from_value(value)?; - - Ok(pdu) + let event_id = CanonicalJsonValue::String(event_id.into()); + json.insert("event_id".into(), event_id); + serde_json::to_value(json) + .and_then(serde_json::from_value) + .map_err(Into::into) } } -impl state_res::Event for PduEvent { - type Id = Arc; - - fn event_id(&self) -> &Self::Id { &self.event_id } - - fn room_id(&self) -> &RoomId { &self.room_id } - - fn sender(&self) -> &UserId { &self.sender } - - fn event_type(&self) -> &TimelineEventType { &self.kind } - - fn content(&self) -> &RawJsonValue { &self.content } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { MilliSecondsSinceUnixEpoch(self.origin_server_ts) } - - fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { self.prev_events.iter() } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { self.auth_events.iter() } - - fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } -} - -// These impl's allow us to dedup state snapshots when resolving state -// for incoming events (federation/send/{txn}). +/// Prevent derived equality which wouldn't limit itself to event_id impl Eq for PduEvent {} + +/// Equality determined by the Pdu's ID, not the memory representations. 
impl PartialEq for PduEvent { fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id } } + +/// Ordering determined by the Pdu's ID, not the memory representations. impl PartialOrd for PduEvent { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } + +/// Ordering determined by the Pdu's ID, not the memory representations. impl Ord for PduEvent { fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) } } - -/// Generates a correct eventId for the incoming pdu. -/// -/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. -pub fn gen_event_id_canonical_json( - pdu: &RawJsonValue, room_version_id: &RoomVersionId, -) -> Result<(OwnedEventId, CanonicalJsonObject)> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) - .map_err(|e| err!(BadServerResponse(warn!("Error parsing incoming event: {e:?}"))))?; - - let event_id = gen_event_id(&value, room_version_id)?; - - Ok((event_id, value)) -} - -/// Generates a correct eventId for the incoming pdu. 
-pub fn gen_event_id(value: &CanonicalJsonObject, room_version_id: &RoomVersionId) -> Result { - let reference_hash = ruma::signatures::reference_hash(value, room_version_id)?; - let event_id: OwnedEventId = format!("${reference_hash}").try_into()?; - - Ok(event_id) -} diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs new file mode 100644 index 00000000..647f54c0 --- /dev/null +++ b/src/core/pdu/redact.rs @@ -0,0 +1,93 @@ +use std::sync::Arc; + +use ruma::{ + canonical_json::redact_content_in_place, + events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, + EventId, RoomVersionId, +}; +use serde::Deserialize; +use serde_json::{ + json, + value::{to_raw_value, RawValue as RawJsonValue}, +}; + +use crate::{implement, warn, Error, Result}; + +#[derive(Deserialize)] +struct ExtractRedactedBecause { + redacted_because: Option, +} + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> Result<()> { + self.unsigned = None; + + let mut content = + serde_json::from_str(self.content.get()).map_err(|_| Error::bad_database("PDU in db has invalid content."))?; + + redact_content_in_place(&mut content, &room_version_id, self.kind.to_string()) + .map_err(|e| Error::Redaction(self.sender.server_name().to_owned(), e))?; + + self.unsigned = Some( + to_raw_value(&json!({ + "redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works") + })) + .expect("to string always works"), + ); + + self.content = to_raw_value(&content).expect("to string always works"); + + Ok(()) +} + +#[implement(super::PduEvent)] +#[must_use] +pub fn is_redacted(&self) -> bool { + let Some(unsigned) = &self.unsigned else { + return false; + }; + + let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else { + return false; + }; + + unsigned.redacted_because.is_some() +} + +/// Copies the `redacts` property of the event to the `content` 
dict and +/// vice-versa. +/// +/// This follows the specification's +/// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): +/// +/// > For backwards-compatibility with older clients, servers should add a +/// > redacts +/// > property to the top level of m.room.redaction events in when serving +/// > such events +/// > over the Client-Server API. +/// +/// > For improved compatibility with newer clients, servers should add a +/// > redacts property +/// > to the content of m.room.redaction events in older room versions when +/// > serving +/// > such events over the Client-Server API. +#[implement(super::PduEvent)] +#[must_use] +pub fn copy_redacts(&self) -> (Option>, Box) { + if self.kind == TimelineEventType::RoomRedaction { + if let Ok(mut content) = serde_json::from_str::(self.content.get()) { + if let Some(redacts) = content.redacts { + return (Some(redacts.into()), self.content.clone()); + } else if let Some(redacts) = self.redacts.clone() { + content.redacts = Some(redacts.into()); + return ( + self.redacts.clone(), + to_raw_value(&content).expect("Must be valid, we only added redacts field"), + ); + } + } + } + + (self.redacts.clone(), self.content.clone()) +} diff --git a/src/core/pdu/state_res.rs b/src/core/pdu/state_res.rs new file mode 100644 index 00000000..a27c9822 --- /dev/null +++ b/src/core/pdu/state_res.rs @@ -0,0 +1,30 @@ +use std::sync::Arc; + +use ruma::{events::TimelineEventType, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +use serde_json::value::RawValue as RawJsonValue; + +use super::PduEvent; + +impl state_res::Event for PduEvent { + type Id = Arc; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { &self.room_id } + + fn sender(&self) -> &UserId { &self.sender } + + fn event_type(&self) -> &TimelineEventType { &self.kind } + + fn content(&self) -> &RawJsonValue { &self.content } + + fn 
origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { MilliSecondsSinceUnixEpoch(self.origin_server_ts) } + + fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { self.prev_events.iter() } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { self.auth_events.iter() } + + fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } +} diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs new file mode 100644 index 00000000..8d20d982 --- /dev/null +++ b/src/core/pdu/strip.rs @@ -0,0 +1,208 @@ +use ruma::{ + events::{ + room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, + AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, + AnyTimelineEvent, StateEvent, + }, + serde::Raw, +}; +use serde_json::{json, value::Value as JsonValue}; + +use crate::{implement, warn}; + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_sync_room_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); + let mut json = json!({ + "content": content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") +} + +/// This only works for events that are also AnyRoomEvents. 
+#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_any_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); + let mut json = json!({ + "content": content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") +} + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_room_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); + let mut json = json!({ + "content": content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") +} + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_message_like_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); + let mut json = json!({ + "content": content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &redacts { + 
json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") +} + +#[implement(super::PduEvent)] +#[must_use] +pub fn to_state_event_value(&self) -> JsonValue { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + "state_key": self.state_key, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + + json +} + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_state_event(&self) -> Raw { + serde_json::from_value(self.to_state_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_sync_state_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "state_key": self.state_key, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Raw::from_value always works") +} + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_stripped_state_event(&self) -> Raw { + let json = json!({ + "content": self.content, + "type": self.kind, + "sender": self.sender, + "state_key": self.state_key, + }); + + serde_json::from_value(json).expect("Raw::from_value always works") +} + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_stripped_spacechild_state_event(&self) -> Raw { + let json = json!({ + "content": self.content, + "type": self.kind, + "sender": self.sender, + "state_key": self.state_key, + "origin_server_ts": self.origin_server_ts, + }); + + serde_json::from_value(json).expect("Raw::from_value always works") 
+} + +#[implement(super::PduEvent)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn to_member_event(&self) -> Raw> { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "redacts": self.redacts, + "room_id": self.room_id, + "state_key": self.state_key, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Raw::from_value always works") +} diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs new file mode 100644 index 00000000..1c47e826 --- /dev/null +++ b/src/core/pdu/unsigned.rs @@ -0,0 +1,83 @@ +use std::collections::BTreeMap; + +use ruma::MilliSecondsSinceUnixEpoch; +use serde::Deserialize; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}; + +use crate::{err, implement, is_true, Result}; + +#[implement(super::PduEvent)] +pub fn remove_transaction_id(&mut self) -> Result<()> { + let Some(unsigned) = &self.unsigned else { + return Ok(()); + }; + + let mut unsigned: BTreeMap> = + serde_json::from_str(unsigned.get()).map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; + + unsigned.remove("transaction_id"); + self.unsigned = to_raw_value(&unsigned) + .map(Some) + .expect("unsigned is valid"); + + Ok(()) +} + +#[implement(super::PduEvent)] +pub fn add_age(&mut self) -> Result<()> { + let mut unsigned: BTreeMap> = self + .unsigned + .as_ref() + .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; + + // deliberately allowing for the possibility of negative age + let now: i128 = MilliSecondsSinceUnixEpoch::now().get().into(); + let then: i128 = self.origin_server_ts.into(); + let this_age = now.saturating_sub(then); + + unsigned.insert("age".to_owned(), to_raw_value(&this_age).expect("age is valid")); + 
self.unsigned = to_raw_value(&unsigned) + .map(Some) + .expect("unsigned is valid"); + + Ok(()) +} + +#[implement(super::PduEvent)] +pub fn contains_unsigned_property(&self, property: &str, is_type: F) -> bool +where + F: FnOnce(&JsonValue) -> bool, +{ + self.get_unsigned_as_value() + .get(property) + .map(is_type) + .is_some_and(is_true!()) +} + +#[implement(super::PduEvent)] +pub fn get_unsigned_property(&self, property: &str) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.get_unsigned_as_value() + .get_mut(property) + .map(JsonValue::take) + .map(serde_json::from_value) + .ok_or(err!(Request(NotFound("property not found in unsigned object"))))? + .map_err(|e| err!(Database("Failed to deserialize unsigned.{property} into type: {e}"))) +} + +#[implement(super::PduEvent)] +#[must_use] +pub fn get_unsigned_as_value(&self) -> JsonValue { self.get_unsigned::().unwrap_or_default() } + +#[implement(super::PduEvent)] +pub fn get_unsigned(&self) -> Result { + self.unsigned + .as_ref() + .map(|raw| raw.get()) + .map(serde_json::from_str) + .ok_or(err!(Request(NotFound("\"unsigned\" property not found in pdu"))))? + .map_err(|e| err!(Database("Failed to deserialize \"unsigned\" into value: {e}"))) +} From cf59f738b9f687aa0902bf2bd011219567e4fea5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 25 Oct 2024 02:01:29 +0000 Subject: [PATCH 0127/1248] move macros incorrectly moved out of utils to top level Signed-off-by: Jason Volk --- src/core/mod.rs | 56 ----------------------------------- src/core/utils/mod.rs | 68 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 56 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 79052554..d201709b 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -36,59 +36,3 @@ pub mod mods { () => {}; } } - -/// Functor for falsy -#[macro_export] -macro_rules! is_false { - () => { - |x| !x - }; -} - -/// Functor for truthy -#[macro_export] -macro_rules! 
is_true { - () => { - |x| !!x - }; -} - -/// Functor for equality to zero -#[macro_export] -macro_rules! is_zero { - () => { - $crate::is_matching!(0) - }; -} - -/// Functor for equality i.e. .is_some_and(is_equal!(2)) -#[macro_export] -macro_rules! is_equal_to { - ($val:expr) => { - |x| x == $val - }; -} - -/// Functor for less i.e. .is_some_and(is_less_than!(2)) -#[macro_export] -macro_rules! is_less_than { - ($val:expr) => { - |x| x < $val - }; -} - -/// Functor for matches! i.e. .is_some_and(is_matching!('A'..='Z')) -#[macro_export] -macro_rules! is_matching { - ($val:expr) => { - |x| matches!(x, $val) - }; -} - -/// Functor for !is_empty() -#[macro_export] -macro_rules! is_not_empty { - () => { - |x| !x.is_empty() - }; -} diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 3943a8da..8e29c608 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -57,3 +57,71 @@ macro_rules! at { |t| t.$idx }; } + +/// Functor for equality i.e. .is_some_and(is_equal!(2)) +#[macro_export] +macro_rules! is_equal_to { + ($val:ident) => { + |x| x == $val + }; + + ($val:expr) => { + |x| x == $val + }; +} + +/// Functor for less i.e. .is_some_and(is_less_than!(2)) +#[macro_export] +macro_rules! is_less_than { + ($val:ident) => { + |x| x < $val + }; + + ($val:expr) => { + |x| x < $val + }; +} + +/// Functor for equality to zero +#[macro_export] +macro_rules! is_zero { + () => { + $crate::is_matching!(0) + }; +} + +/// Functor for matches! i.e. .is_some_and(is_matching!('A'..='Z')) +#[macro_export] +macro_rules! is_matching { + ($val:ident) => { + |x| matches!(x, $val) + }; + + ($val:expr) => { + |x| matches!(x, $val) + }; +} + +/// Functor for !is_empty() +#[macro_export] +macro_rules! is_not_empty { + () => { + |x| !x.is_empty() + }; +} + +/// Functor for truthy +#[macro_export] +macro_rules! is_true { + () => { + |x| !!x + }; +} + +/// Functor for falsy +#[macro_export] +macro_rules! 
is_false { + () => { + |x| !x + }; +} From b7369074d4d9e235c4bb9a7529e98c1aa5a662b1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 25 Oct 2024 02:56:24 +0000 Subject: [PATCH 0128/1248] add RoomEventFilter matcher for PduEvent Signed-off-by: Jason Volk --- src/core/pdu/filter.rs | 90 ++++++++++++++++++++++++++++++++++++++++++ src/core/pdu/mod.rs | 1 + 2 files changed, 91 insertions(+) create mode 100644 src/core/pdu/filter.rs diff --git a/src/core/pdu/filter.rs b/src/core/pdu/filter.rs new file mode 100644 index 00000000..bd232ebd --- /dev/null +++ b/src/core/pdu/filter.rs @@ -0,0 +1,90 @@ +use ruma::api::client::filter::{RoomEventFilter, UrlFilter}; +use serde_json::Value; + +use crate::{implement, is_equal_to}; + +#[implement(super::PduEvent)] +#[must_use] +pub fn matches(&self, filter: &RoomEventFilter) -> bool { + if !self.matches_sender(filter) { + return false; + } + + if !self.matches_room(filter) { + return false; + } + + if !self.matches_type(filter) { + return false; + } + + if !self.matches_url(filter) { + return false; + } + + true +} + +#[implement(super::PduEvent)] +fn matches_room(&self, filter: &RoomEventFilter) -> bool { + if filter.not_rooms.contains(&self.room_id) { + return false; + } + + if let Some(rooms) = filter.rooms.as_ref() { + if !rooms.contains(&self.room_id) { + return false; + } + } + + true +} + +#[implement(super::PduEvent)] +fn matches_sender(&self, filter: &RoomEventFilter) -> bool { + if filter.not_senders.contains(&self.sender) { + return false; + } + + if let Some(senders) = filter.senders.as_ref() { + if !senders.contains(&self.sender) { + return false; + } + } + + true +} + +#[implement(super::PduEvent)] +fn matches_type(&self, filter: &RoomEventFilter) -> bool { + let event_type = &self.kind.to_cow_str(); + if filter.not_types.iter().any(is_equal_to!(event_type)) { + return false; + } + + if let Some(types) = filter.types.as_ref() { + if !types.iter().any(is_equal_to!(event_type)) { + return false; + } + } + + true +} + 
+#[implement(super::PduEvent)] +fn matches_url(&self, filter: &RoomEventFilter) -> bool { + let Some(url_filter) = filter.url_filter.as_ref() else { + return true; + }; + + //TODO: might be better to use Ruma's Raw rather than serde here + let url = serde_json::from_str::(self.content.get()) + .expect("parsing content JSON failed") + .get("url") + .is_some_and(Value::is_string); + + match url_filter { + UrlFilter::EventsWithUrl => url, + UrlFilter::EventsWithoutUrl => !url, + } +} diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 9970c39e..ed11adbb 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -1,6 +1,7 @@ mod builder; mod content; mod count; +mod filter; mod id; mod redact; mod state_res; From 68086717516225af96fe6c7cff743836103188eb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 25 Oct 2024 05:22:50 +0000 Subject: [PATCH 0129/1248] merge search service w/ data Signed-off-by: Jason Volk --- src/service/rooms/search/data.rs | 113 ---------------------------- src/service/rooms/search/mod.rs | 123 ++++++++++++++++++++++++++----- 2 files changed, 103 insertions(+), 133 deletions(-) delete mode 100644 src/service/rooms/search/data.rs diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs deleted file mode 100644 index de98beee..00000000 --- a/src/service/rooms/search/data.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::sync::Arc; - -use conduit::utils::{set, stream::TryIgnore, IterStream, ReadyExt}; -use database::Map; -use futures::StreamExt; -use ruma::RoomId; - -use crate::{rooms, Dep}; - -pub(super) struct Data { - tokenids: Arc, - services: Services, -} - -struct Services { - short: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - tokenids: db["tokenids"].clone(), - services: Services { - short: args.depend::("rooms::short"), - }, - } - } - - pub(super) fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { - let batch = 
tokenize(message_body) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xFF); - key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here - (key, Vec::::new()) - }) - .collect::>(); - - self.tokenids.insert_batch(batch.iter()); - } - - pub(super) fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { - let batch = tokenize(message_body).map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xFF); - key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here - key - }); - - for token in batch { - self.tokenids.remove(&token); - } - } - - pub(super) async fn search_pdus( - &self, room_id: &RoomId, search_string: &str, - ) -> Option<(Vec>, Vec)> { - let prefix = self - .services - .short - .get_shortroomid(room_id) - .await - .ok()? - .to_be_bytes() - .to_vec(); - - let words: Vec<_> = tokenize(search_string).collect(); - - let bufs: Vec<_> = words - .clone() - .into_iter() - .stream() - .then(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xFF); - let prefix3 = prefix2.clone(); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .rev_raw_keys_from(&last_possible_id) // Newest pdus first - .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix2)) - .map(move |key| key[prefix3.len()..].to_vec()) - .collect::>() - }) - .collect() - .await; - - Some(( - set::intersection(bufs.iter().map(|buf| buf.iter())) - .cloned() - .collect(), - words, - )) - } -} - -/// Splits a string into tokens used as keys in the search inverted index -/// -/// This may be used to tokenize both message bodies (for indexing) or search -/// queries (for querying). 
-fn tokenize(body: &str) -> impl Iterator + Send + '_ { - body.split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) -} diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 80b58804..032ad55c 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,41 +1,124 @@ -mod data; - use std::sync::Arc; -use conduit::Result; -use data::Data; +use conduit::{ + implement, + utils::{set, stream::TryIgnore, IterStream, ReadyExt}, + Result, +}; +use database::Map; +use futures::StreamExt; use ruma::RoomId; +use crate::{rooms, Dep}; + pub struct Service { db: Data, + services: Services, +} + +struct Data { + tokenids: Arc, +} + +struct Services { + short: Dep, } impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data::new(&args), + db: Data { + tokenids: args.db["tokenids"].clone(), + }, + services: Services { + short: args.depend::("rooms::short"), + }, })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - #[inline] - #[tracing::instrument(skip(self), level = "debug")] - pub fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { - self.db.index_pdu(shortroomid, pdu_id, message_body); - } +#[implement(Service)] +pub fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { + let batch = tokenize(message_body) + .map(|word| { + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xFF); + key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here + (key, Vec::::new()) + }) + .collect::>(); - #[inline] - #[tracing::instrument(skip(self), level = "debug")] - pub fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { - self.db.deindex_pdu(shortroomid, pdu_id, message_body); - } + 
self.db.tokenids.insert_batch(batch.iter()); +} - #[inline] - #[tracing::instrument(skip(self), level = "debug")] - pub async fn search_pdus(&self, room_id: &RoomId, search_string: &str) -> Option<(Vec>, Vec)> { - self.db.search_pdus(room_id, search_string).await +#[implement(Service)] +pub fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { + let batch = tokenize(message_body).map(|word| { + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xFF); + key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here + key + }); + + for token in batch { + self.db.tokenids.remove(&token); } } + +#[implement(Service)] +pub async fn search_pdus(&self, room_id: &RoomId, search_string: &str) -> Option<(Vec>, Vec)> { + let prefix = self + .services + .short + .get_shortroomid(room_id) + .await + .ok()? + .to_be_bytes() + .to_vec(); + + let words: Vec<_> = tokenize(search_string).collect(); + + let bufs: Vec<_> = words + .clone() + .into_iter() + .stream() + .then(move |word| { + let mut prefix2 = prefix.clone(); + prefix2.extend_from_slice(word.as_bytes()); + prefix2.push(0xFF); + let prefix3 = prefix2.clone(); + + let mut last_possible_id = prefix2.clone(); + last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.db.tokenids + .rev_raw_keys_from(&last_possible_id) // Newest pdus first + .ignore_err() + .ready_take_while(move |key| key.starts_with(&prefix2)) + .map(move |key| key[prefix3.len()..].to_vec()) + .collect::>() + }) + .collect() + .await; + + let bufs = bufs.iter().map(|buf| buf.iter()); + + let results = set::intersection(bufs).cloned().collect(); + + Some((results, words)) +} + +/// Splits a string into tokens used as keys in the search inverted index +/// +/// This may be used to tokenize both message bodies (for indexing) or search +/// queries (for querying). 
+fn tokenize(body: &str) -> impl Iterator + Send + '_ { + body.split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .filter(|word| word.len() <= 50) + .map(str::to_lowercase) +} From 0426f92ac032f03b8a4c86acec00b53c093a82d5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 25 Oct 2024 18:25:06 +0000 Subject: [PATCH 0130/1248] unify database record separator constants Signed-off-by: Jason Volk --- src/database/de.rs | 3 +-- src/database/mod.rs | 2 +- src/database/ser.rs | 5 ++++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index e5fdf7cb..0e074fdb 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -41,8 +41,7 @@ pub struct Ignore; pub struct IgnoreAll; impl<'de> Deserializer<'de> { - /// Record separator; an intentionally invalid-utf8 byte. - const SEP: u8 = b'\xFF'; + const SEP: u8 = crate::ser::SEP; /// Determine if the input was fully consumed and error if bytes remaining. /// This is intended for debug assertions; not optimized for parsing logic. diff --git a/src/database/mod.rs b/src/database/mod.rs index 6d3b2079..dcd66a1e 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -29,7 +29,7 @@ pub use self::{ handle::Handle, keyval::{KeyVal, Slice}, map::Map, - ser::{serialize, serialize_to_array, serialize_to_vec, Interfix, Json, Separator}, + ser::{serialize, serialize_to_array, serialize_to_vec, Interfix, Json, Separator, SEP}, }; conduit::mod_ctor! {} diff --git a/src/database/ser.rs b/src/database/ser.rs index 742f1e34..0cc5c886 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -69,8 +69,11 @@ pub struct Interfix; #[derive(Debug, Serialize)] pub struct Separator; +/// Record separator; an intentionally invalid-utf8 byte. 
+pub const SEP: u8 = b'\xFF'; + impl Serializer<'_, W> { - const SEP: &'static [u8] = b"\xFF"; + const SEP: &'static [u8] = &[SEP]; fn tuple_start(&mut self) { debug_assert!(!self.sep, "Tuple start with separator set"); From 1e7207c23015f5bb8d9c22db30ccbb3669a9540a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 25 Oct 2024 19:53:08 +0000 Subject: [PATCH 0131/1248] start an ArrayVec extension trait Signed-off-by: Jason Volk --- src/core/utils/arrayvec.rs | 15 +++++++++++++++ src/core/utils/mod.rs | 2 ++ 2 files changed, 17 insertions(+) create mode 100644 src/core/utils/arrayvec.rs diff --git a/src/core/utils/arrayvec.rs b/src/core/utils/arrayvec.rs new file mode 100644 index 00000000..685aaf18 --- /dev/null +++ b/src/core/utils/arrayvec.rs @@ -0,0 +1,15 @@ +use ::arrayvec::ArrayVec; + +pub trait ArrayVecExt { + fn extend_from_slice(&mut self, other: &[T]) -> &mut Self; +} + +impl ArrayVecExt for ArrayVec { + #[inline] + fn extend_from_slice(&mut self, other: &[T]) -> &mut Self { + self.try_extend_from_slice(other) + .expect("Insufficient buffer capacity to extend from slice"); + + self + } +} diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 8e29c608..26b0484e 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -1,3 +1,4 @@ +pub mod arrayvec; pub mod bool; pub mod bytes; pub mod content_disposition; @@ -22,6 +23,7 @@ pub use ::conduit_macros::implement; pub use ::ctor::{ctor, dtor}; pub use self::{ + arrayvec::ArrayVecExt, bool::BoolExt, bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, debug::slice_truncated as debug_slice_truncated, From f245389c0223ed96542969dafc90f0aeab1da9f5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Oct 2024 22:20:16 +0000 Subject: [PATCH 0132/1248] add typedef for pdu_ids Signed-off-by: Jason Volk --- src/service/rooms/short/mod.rs | 4 ++++ src/service/rooms/timeline/mod.rs | 6 +++++- src/service/rooms/timeline/pduid.rs | 13 +++++++++++++ 3 files changed, 22 insertions(+), 1 
deletion(-) create mode 100644 src/service/rooms/timeline/pduid.rs diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 609c0e07..02c449cc 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -24,6 +24,10 @@ struct Services { globals: Dep, } +pub type ShortEventId = ShortId; +pub type ShortRoomId = ShortId; +pub type ShortId = u64; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 902e50ff..e45bf7e5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,4 +1,5 @@ mod data; +mod pduid; use std::{ cmp, @@ -38,7 +39,10 @@ use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use self::data::Data; -pub use self::data::PdusIterItem; +pub use self::{ + data::PdusIterItem, + pduid::{PduId, RawPduId}, +}; use crate::{ account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, rooms::state_compressor::CompressedStateEvent, sending, server_keys, users, Dep, diff --git a/src/service/rooms/timeline/pduid.rs b/src/service/rooms/timeline/pduid.rs new file mode 100644 index 00000000..b43c382c --- /dev/null +++ b/src/service/rooms/timeline/pduid.rs @@ -0,0 +1,13 @@ +use crate::rooms::short::{ShortEventId, ShortRoomId}; + +#[derive(Clone, Copy)] +pub struct PduId { + _room_id: ShortRoomId, + _event_id: ShortEventId, +} + +pub type RawPduId = [u8; PduId::LEN]; + +impl PduId { + pub const LEN: usize = size_of::() + size_of::(); +} From 21a67513f2480e6cb1cb0322e15016ba8d919dac Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Oct 2024 22:21:23 +0000 Subject: [PATCH 0133/1248] refactor search system Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/api/client/search.rs | 339 +++++++++++++++++--------------- src/service/Cargo.toml | 1 + src/service/rooms/search/mod.rs | 176 
+++++++++++++---- 4 files changed, 312 insertions(+), 205 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c64d3cc6..a8acce7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -786,6 +786,7 @@ dependencies = [ name = "conduit_service" version = "0.5.0" dependencies = [ + "arrayvec", "async-trait", "base64 0.22.1", "bytes", diff --git a/src/api/client/search.rs b/src/api/client/search.rs index b073640e..1e5384fe 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -2,25 +2,32 @@ use std::collections::BTreeMap; use axum::extract::State; use conduit::{ - debug, - utils::{IterStream, ReadyExt}, - Err, + at, is_true, + result::FlatOk, + utils::{stream::ReadyExt, IterStream}, + Err, PduEvent, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt}; use ruma::{ - api::client::{ - error::ErrorKind, - search::search_events::{ - self, - v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, - }, + api::client::search::search_events::{ + self, + v3::{Criteria, EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, }, events::AnyStateEvent, serde::Raw, - uint, OwnedRoomId, + OwnedRoomId, RoomId, UInt, UserId, }; +use search_events::v3::{Request, Response}; +use service::{rooms::search::RoomQuery, Services}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; + +type RoomStates = BTreeMap; +type RoomState = Vec>; + +const LIMIT_DEFAULT: usize = 10; +const LIMIT_MAX: usize = 100; +const BATCH_MAX: usize = 20; /// # `POST /_matrix/client/r0/search` /// @@ -28,173 +35,177 @@ use crate::{Error, Result, Ruma}; /// /// - Only works if the user is currently joined to the room (TODO: Respect /// history visibility) -pub(crate) async fn search_events_route( - State(services): State, body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); +pub(crate) async fn search_events_route(State(services): State, body: Ruma) -> 
Result { + let sender_user = body.sender_user(); + let next_batch = body.next_batch.as_deref(); + let room_events_result: OptionFuture<_> = body + .search_categories + .room_events + .as_ref() + .map(|criteria| category_room_events(&services, sender_user, next_batch, criteria)) + .into(); - let search_criteria = body.search_categories.room_events.as_ref().unwrap(); - let filter = &search_criteria.filter; - let include_state = &search_criteria.include_state; + Ok(Response { + search_categories: ResultCategories { + room_events: room_events_result + .await + .unwrap_or_else(|| Ok(ResultRoomEvents::default()))?, + }, + }) +} - let room_ids = if let Some(room_ids) = &filter.rooms { - room_ids.clone() - } else { - services - .rooms - .state_cache - .rooms_joined(sender_user) - .map(ToOwned::to_owned) - .collect() - .await - }; +#[allow(clippy::map_unwrap_or)] +async fn category_room_events( + services: &Services, sender_user: &UserId, next_batch: Option<&str>, criteria: &Criteria, +) -> Result { + let filter = &criteria.filter; - // Use limit or else 10, with maximum 100 let limit: usize = filter .limit - .unwrap_or_else(|| uint!(10)) - .try_into() - .unwrap_or(10) - .min(100); + .map(TryInto::try_into) + .flat_ok() + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); - let mut room_states: BTreeMap>> = BTreeMap::new(); + let next_batch: usize = next_batch + .map(str::parse) + .transpose()? + .unwrap_or(0) + .min(limit.saturating_mul(BATCH_MAX)); - if include_state.is_some_and(|include_state| include_state) { - for room_id in &room_ids { - if !services - .rooms - .state_cache - .is_joined(sender_user, room_id) - .await - { - return Err!(Request(Forbidden("You don't have permission to view this room."))); - } - - // check if sender_user can see state events - if services - .rooms - .state_accessor - .user_can_see_state_events(sender_user, room_id) - .await - { - let room_state: Vec<_> = services - .rooms - .state_accessor - .room_state_full(room_id) - .await? 
- .values() - .map(|pdu| pdu.to_state_event()) - .collect(); - - debug!("Room state: {:?}", room_state); - - room_states.insert(room_id.clone(), room_state); - } else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); - } - } - } - - let mut search_vecs = Vec::new(); - - for room_id in &room_ids { - if !services - .rooms - .state_cache - .is_joined(sender_user, room_id) - .await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); - } - - if let Some(search) = services - .rooms - .search - .search_pdus(room_id, &search_criteria.search_term) - .await - { - search_vecs.push(search.0); - } - } - - let mut searches: Vec<_> = search_vecs - .iter() - .map(|vec| vec.iter().peekable()) - .collect(); - - let skip: usize = match body.next_batch.as_ref().map(|s| s.parse()) { - Some(Ok(s)) => s, - Some(Err(_)) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid next_batch token.")), - None => 0, // Default to the start - }; - - let mut results = Vec::new(); - let next_batch = skip.saturating_add(limit); - - for _ in 0..next_batch { - if let Some(s) = searches - .iter_mut() - .map(|s| (s.peek().copied(), s)) - .max_by_key(|(peek, _)| *peek) - .and_then(|(_, i)| i.next()) - { - results.push(s); - } - } - - let results: Vec<_> = results - .into_iter() - .skip(skip) - .stream() - .filter_map(|id| services.rooms.timeline.get_pdu_from_id(id).map(Result::ok)) - .ready_filter(|pdu| !pdu.is_redacted()) - .filter_map(|pdu| async move { + let rooms = filter + .rooms + .clone() + .map(IntoIterator::into_iter) + .map(IterStream::stream) + .map(StreamExt::boxed) + .unwrap_or_else(|| { services .rooms - .state_accessor - .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) + .state_cache + .rooms_joined(sender_user) + .map(ToOwned::to_owned) + .boxed() + }); + + let results: Vec<_> = rooms + .filter_map(|room_id| async move { + 
check_room_visible(services, sender_user, &room_id, criteria) .await - .then_some(pdu) + .is_ok() + .then_some(room_id) }) - .take(limit) - .map(|pdu| pdu.to_room_event()) - .map(|result| SearchResult { - context: EventContextResult { - end: None, - events_after: Vec::new(), - events_before: Vec::new(), - profile_info: BTreeMap::new(), - start: None, - }, - rank: None, - result: Some(result), + .filter_map(|room_id| async move { + let query = RoomQuery { + room_id: &room_id, + user_id: Some(sender_user), + criteria, + skip: next_batch, + limit, + }; + + let (count, results) = services.rooms.search.search_pdus(&query).await.ok()?; + + results + .collect::>() + .map(|results| (room_id.clone(), count, results)) + .map(Some) + .await }) .collect() - .boxed() .await; - let more_unloaded_results = searches.iter_mut().any(|s| s.peek().is_some()); + let total: UInt = results + .iter() + .fold(0, |a: usize, (_, count, _)| a.saturating_add(*count)) + .try_into()?; - let next_batch = more_unloaded_results.then(|| next_batch.to_string()); + let state: RoomStates = results + .iter() + .stream() + .ready_filter(|_| criteria.include_state.is_some_and(is_true!())) + .filter_map(|(room_id, ..)| async move { + procure_room_state(services, room_id) + .map_ok(|state| (room_id.clone(), state)) + .await + .ok() + }) + .collect() + .await; - Ok(search_events::v3::Response::new(ResultCategories { - room_events: ResultRoomEvents { - count: Some(results.len().try_into().unwrap_or_else(|_| uint!(0))), - groups: BTreeMap::new(), // TODO - next_batch, - results, - state: room_states, - highlights: search_criteria - .search_term - .split_terminator(|c: char| !c.is_alphanumeric()) - .map(str::to_lowercase) - .collect(), - }, - })) + let results: Vec = results + .into_iter() + .map(at!(2)) + .flatten() + .stream() + .map(|pdu| pdu.to_room_event()) + .map(|result| SearchResult { + rank: None, + result: Some(result), + context: EventContextResult { + profile_info: BTreeMap::new(), //TODO + 
events_after: Vec::new(), //TODO + events_before: Vec::new(), //TODO + start: None, //TODO + end: None, //TODO + }, + }) + .collect() + .await; + + let highlights = criteria + .search_term + .split_terminator(|c: char| !c.is_alphanumeric()) + .map(str::to_lowercase) + .collect(); + + let next_batch = (results.len() >= limit) + .then_some(next_batch.saturating_add(results.len())) + .as_ref() + .map(ToString::to_string); + + Ok(ResultRoomEvents { + count: Some(total), + next_batch, + results, + state, + highlights, + groups: BTreeMap::new(), // TODO + }) +} + +async fn procure_room_state(services: &Services, room_id: &RoomId) -> Result { + let state_map = services + .rooms + .state_accessor + .room_state_full(room_id) + .await?; + + let state_events = state_map + .values() + .map(AsRef::as_ref) + .map(PduEvent::to_state_event) + .collect(); + + Ok(state_events) +} + +async fn check_room_visible(services: &Services, user_id: &UserId, room_id: &RoomId, search: &Criteria) -> Result { + let check_visible = search.filter.rooms.is_some(); + let check_state = check_visible && search.include_state.is_some_and(is_true!()); + + let is_joined = !check_visible || services.rooms.state_cache.is_joined(user_id, room_id).await; + + let state_visible = !check_state + || services + .rooms + .state_accessor + .user_can_see_state_events(user_id, room_id) + .await; + + if !is_joined || !state_visible { + return Err!(Request(Forbidden("You don't have permission to view {room_id:?}"))); + } + + Ok(()) } diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 737a7039..7578ef64 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -40,6 +40,7 @@ release_max_log_level = [ ] [dependencies] +arrayvec.workspace = true async-trait.workspace = true base64.workspace = true bytes.workspace = true diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 032ad55c..8882ec99 100644 --- a/src/service/rooms/search/mod.rs +++ 
b/src/service/rooms/search/mod.rs @@ -1,15 +1,23 @@ -use std::sync::Arc; +use std::{iter, sync::Arc}; +use arrayvec::ArrayVec; use conduit::{ implement, - utils::{set, stream::TryIgnore, IterStream, ReadyExt}, - Result, + utils::{set, stream::TryIgnore, ArrayVecExt, IterStream, ReadyExt}, + PduEvent, Result, }; -use database::Map; -use futures::StreamExt; -use ruma::RoomId; +use database::{keyval::Val, Map}; +use futures::{Stream, StreamExt}; +use ruma::{api::client::search::search_events::v3::Criteria, RoomId, UserId}; -use crate::{rooms, Dep}; +use crate::{ + rooms, + rooms::{ + short::ShortRoomId, + timeline::{PduId, RawPduId}, + }, + Dep, +}; pub struct Service { db: Data, @@ -22,8 +30,24 @@ struct Data { struct Services { short: Dep, + state_accessor: Dep, + timeline: Dep, } +#[derive(Clone, Debug)] +pub struct RoomQuery<'a> { + pub room_id: &'a RoomId, + pub user_id: Option<&'a UserId>, + pub criteria: &'a Criteria, + pub limit: usize, + pub skip: usize, +} + +type TokenId = ArrayVec; + +const TOKEN_ID_MAX_LEN: usize = size_of::() + WORD_MAX_LEN + 1 + size_of::(); +const WORD_MAX_LEN: usize = 50; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -32,6 +56,8 @@ impl crate::Service for Service { }, services: Services { short: args.depend::("rooms::short"), + state_accessor: args.depend::("rooms::state_accessor"), + timeline: args.depend::("rooms::timeline"), }, })) } @@ -70,46 +96,92 @@ pub fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { } #[implement(Service)] -pub async fn search_pdus(&self, room_id: &RoomId, search_string: &str) -> Option<(Vec>, Vec)> { - let prefix = self - .services - .short - .get_shortroomid(room_id) - .await - .ok()? 
- .to_be_bytes() - .to_vec(); +pub async fn search_pdus<'a>( + &'a self, query: &'a RoomQuery<'a>, +) -> Result<(usize, impl Stream + Send + 'a)> { + let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await; - let words: Vec<_> = tokenize(search_string).collect(); - - let bufs: Vec<_> = words - .clone() + let count = pdu_ids.len(); + let pdus = pdu_ids .into_iter() .stream() - .then(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xFF); - let prefix3 = prefix2.clone(); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.db.tokenids - .rev_raw_keys_from(&last_possible_id) // Newest pdus first - .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix2)) - .map(move |key| key[prefix3.len()..].to_vec()) - .collect::>() + .filter_map(move |result_pdu_id: RawPduId| async move { + self.services + .timeline + .get_pdu_from_id(&result_pdu_id) + .await + .ok() }) - .collect() - .await; + .ready_filter(|pdu| !pdu.is_redacted()) + .filter_map(move |pdu| async move { + self.services + .state_accessor + .user_can_see_event(query.user_id?, &pdu.room_id, &pdu.event_id) + .await + .then_some(pdu) + }) + .skip(query.skip) + .take(query.limit); - let bufs = bufs.iter().map(|buf| buf.iter()); + Ok((count, pdus)) +} - let results = set::intersection(bufs).cloned().collect(); +// result is modeled as a stream such that callers don't have to be refactored +// though an additional async/wrap still exists for now +#[implement(Service)] +pub async fn search_pdu_ids(&self, query: &RoomQuery<'_>) -> Result + Send + '_> { + let shortroomid = self.services.short.get_shortroomid(query.room_id).await?; - Some((results, words)) + let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await; + + let iters = pdu_ids.into_iter().map(IntoIterator::into_iter); + + Ok(set::intersection(iters).stream()) +} + +#[implement(Service)] +async 
fn search_pdu_ids_query_room(&self, query: &RoomQuery<'_>, shortroomid: ShortRoomId) -> Vec> { + tokenize(&query.criteria.search_term) + .stream() + .then(|word| async move { + self.search_pdu_ids_query_words(shortroomid, &word) + .collect::>() + .await + }) + .collect::>() + .await +} + +/// Iterate over PduId's containing a word +#[implement(Service)] +fn search_pdu_ids_query_words<'a>( + &'a self, shortroomid: ShortRoomId, word: &'a str, +) -> impl Stream + Send + '_ { + self.search_pdu_ids_query_word(shortroomid, word) + .ready_filter_map(move |key| { + key[prefix_len(word)..] + .chunks_exact(PduId::LEN) + .next() + .map(RawPduId::try_from) + .and_then(Result::ok) + }) +} + +/// Iterate over raw database results for a word +#[implement(Service)] +fn search_pdu_ids_query_word(&self, shortroomid: ShortRoomId, word: &str) -> impl Stream> + Send + '_ { + const PDUID_LEN: usize = PduId::LEN; + // rustc says const'ing this not yet stable + let end_id: ArrayVec = iter::repeat(u8::MAX).take(PduId::LEN).collect(); + + // Newest pdus first + let end = make_tokenid(shortroomid, word, end_id.as_slice()); + let prefix = make_prefix(shortroomid, word); + self.db + .tokenids + .rev_raw_keys_from(&end) + .ignore_err() + .ready_take_while(move |key| key.starts_with(&prefix)) } /// Splits a string into tokens used as keys in the search inverted index @@ -119,6 +191,28 @@ pub async fn search_pdus(&self, room_id: &RoomId, search_string: &str) -> Option fn tokenize(body: &str) -> impl Iterator + Send + '_ { body.split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) + .filter(|word| word.len() <= WORD_MAX_LEN) .map(str::to_lowercase) } + +fn make_tokenid(shortroomid: ShortRoomId, word: &str, pdu_id: &[u8]) -> TokenId { + debug_assert!(pdu_id.len() == PduId::LEN, "pdu_id size mismatch"); + + let mut key = make_prefix(shortroomid, word); + key.extend_from_slice(pdu_id); + key +} + +fn make_prefix(shortroomid: ShortRoomId, word: 
&str) -> TokenId { + let mut key = TokenId::new(); + key.extend_from_slice(&shortroomid.to_be_bytes()); + key.extend_from_slice(word.as_bytes()); + key.push(database::SEP); + key +} + +fn prefix_len(word: &str) -> usize { + size_of::() + .saturating_add(word.len()) + .saturating_add(1) +} From d281b8d3ae1818ea84be11ba38ac0325aaa84ffc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Oct 2024 22:22:10 +0000 Subject: [PATCH 0134/1248] implement filters for search (#596) closes #596 Signed-off-by: Jason Volk --- src/service/rooms/search/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 8882ec99..70daded1 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -113,6 +113,7 @@ pub async fn search_pdus<'a>( .ok() }) .ready_filter(|pdu| !pdu.is_redacted()) + .ready_filter(|pdu| pdu.matches(&query.criteria.filter)) .filter_map(move |pdu| async move { self.services .state_accessor From 5e6dbaa27f5e08556422ee6b756efdc318654fd7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 27 Oct 2024 01:48:57 +0000 Subject: [PATCH 0135/1248] apply room event filter to messages endpoint (#596) Signed-off-by: Jason Volk --- src/api/client/message.rs | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 578b675b..094daa30 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -9,13 +9,13 @@ use conduit::{ use futures::{FutureExt, StreamExt}; use ruma::{ api::client::{ - filter::{RoomEventFilter, UrlFilter}, + filter::RoomEventFilter, message::{get_message_events, send_message_event}, }, events::{MessageLikeEventType, StateEventType, TimelineEventType::*}, UserId, }; -use serde_json::{from_str, Value}; +use serde_json::from_str; use service::rooms::timeline::PdusIterItem; use crate::{ @@ -151,7 +151,7 @@ pub(crate) async fn get_message_events_route( .timeline 
.pdus_after(sender_user, room_id, from) .await? - .ready_filter_map(|item| contains_url_filter(item, filter)) + .ready_filter_map(|item| event_filter(item, filter)) .filter_map(|item| visibility_filter(&services, item, sender_user)) .ready_take_while(|(count, _)| Some(*count) != to) // Stop at `to` .take(limit) @@ -225,7 +225,7 @@ pub(crate) async fn get_message_events_route( .timeline .pdus_until(sender_user, room_id, from) .await? - .ready_filter_map(|item| contains_url_filter(item, filter)) + .ready_filter_map(|item| event_filter(item, filter)) .filter_map(|(count, pdu)| async move { // list of safe and common non-state events to ignore if matches!( @@ -329,19 +329,7 @@ async fn visibility_filter(services: &Services, item: PdusIterItem, user_id: &Us .then_some(item) } -fn contains_url_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { +fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { let (_, pdu) = &item; - - if filter.url_filter.is_none() { - return Some(item); - } - - let content: Value = from_str(pdu.content.get()).unwrap(); - let res = match filter.url_filter { - Some(UrlFilter::EventsWithoutUrl) => !content["url"].is_string(), - Some(UrlFilter::EventsWithUrl) => content["url"].is_string(), - None => true, - }; - - res.then_some(item) + pdu.matches(filter).then_some(item) } From 9787dfe77c7de1ef186c8bb934ba242f856ccc12 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 27 Oct 2024 00:30:30 +0000 Subject: [PATCH 0136/1248] fix clippy::ref_option fix needless borrow fix clippy::nonminimal_bool --- Cargo.toml | 1 + src/api/client/config.rs | 6 +++--- src/api/client/report.rs | 4 ++-- src/api/client/room.rs | 12 ++++++------ src/api/client/sync/v4.rs | 2 +- src/core/utils/future/try_ext_ext.rs | 1 + src/core/utils/stream/ready.rs | 1 + src/core/utils/stream/try_ready.rs | 1 + src/macros/config.rs | 18 ++++-------------- src/service/admin/mod.rs | 4 ++-- src/service/services.rs | 4 ++-- 11 files changed, 24 insertions(+), 
30 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 73f16daf..2f9f196b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -772,6 +772,7 @@ unused-qualifications = "warn" #unused-results = "warn" # TODO ## some sadness +elided_named_lifetimes = "allow" # TODO! let_underscore_drop = "allow" missing_docs = "allow" # cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g. diff --git a/src/api/client/config.rs b/src/api/client/config.rs index d06cc072..3cf71135 100644 --- a/src/api/client/config.rs +++ b/src/api/client/config.rs @@ -23,7 +23,7 @@ pub(crate) async fn set_global_account_data_route( set_account_data( &services, None, - &body.sender_user, + body.sender_user.as_ref(), &body.event_type.to_string(), body.data.json(), ) @@ -41,7 +41,7 @@ pub(crate) async fn set_room_account_data_route( set_account_data( &services, Some(&body.room_id), - &body.sender_user, + body.sender_user.as_ref(), &body.event_type.to_string(), body.data.json(), ) @@ -89,7 +89,7 @@ pub(crate) async fn get_room_account_data_route( } async fn set_account_data( - services: &Services, room_id: Option<&RoomId>, sender_user: &Option, event_type: &str, + services: &Services, room_id: Option<&RoomId>, sender_user: Option<&OwnedUserId>, event_type: &str, data: &RawJsonValue, ) -> Result<()> { let sender_user = sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client/report.rs b/src/api/client/report.rs index cf789246..143c13e5 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -101,7 +101,7 @@ pub(crate) async fn report_event_route( &pdu.event_id, &body.room_id, sender_user, - &body.reason, + body.reason.as_ref(), body.score, &pdu, ) @@ -134,7 +134,7 @@ pub(crate) async fn report_event_route( /// check if report reasoning is less than or equal to 750 characters /// check if reporting user is in the reporting room async fn is_event_report_valid( - services: &Services, event_id: &EventId, room_id: &RoomId, sender_user: 
&UserId, reason: &Option, + services: &Services, event_id: &EventId, room_id: &RoomId, sender_user: &UserId, reason: Option<&String>, score: Option, pdu: &std::sync::Arc, ) -> Result<()> { debug_info!("Checking if report from user {sender_user} for event {event_id} in room {room_id} is valid"); diff --git a/src/api/client/room.rs b/src/api/client/room.rs index daadb724..4224d3fa 100644 --- a/src/api/client/room.rs +++ b/src/api/client/room.rs @@ -126,8 +126,8 @@ pub(crate) async fn create_room_route( .await; let state_lock = services.rooms.state.mutex.lock(&room_id).await; - let alias: Option = if let Some(alias) = &body.room_alias_name { - Some(room_alias_check(&services, alias, &body.appservice_info).await?) + let alias: Option = if let Some(alias) = body.room_alias_name.as_ref() { + Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?) } else { None }; @@ -270,7 +270,7 @@ pub(crate) async fn create_room_route( } let power_levels_content = - default_power_levels_content(&body.power_level_content_override, &body.visibility, users)?; + default_power_levels_content(body.power_level_content_override.as_ref(), &body.visibility, users)?; services .rooms @@ -814,7 +814,7 @@ pub(crate) async fn upgrade_room_route( /// creates the power_levels_content for the PDU builder fn default_power_levels_content( - power_level_content_override: &Option>, visibility: &room::Visibility, + power_level_content_override: Option<&Raw>, visibility: &room::Visibility, users: BTreeMap, ) -> Result { let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent { @@ -864,7 +864,7 @@ fn default_power_levels_content( /// if a room is being created with a room alias, run our checks async fn room_alias_check( - services: &Services, room_alias_name: &str, appservice_info: &Option, + services: &Services, room_alias_name: &str, appservice_info: Option<&RegistrationInfo>, ) -> Result { // Basic checks on the room alias validity if 
room_alias_name.contains(':') { @@ -905,7 +905,7 @@ async fn room_alias_check( return Err(Error::BadRequest(ErrorKind::RoomInUse, "Room alias already exists.")); } - if let Some(ref info) = appservice_info { + if let Some(info) = appservice_info { if !info.aliases.is_match(full_room_alias.as_str()) { return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias is not in namespace.")); } diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 4f8323e6..f8ada81c 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -560,7 +560,7 @@ pub(crate) async fn sync_events_v4_route( for (_, pdu) in timeline_pdus { let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts); - if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) && !timestamp.is_some_and(|time| time > ts) { + if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) && timestamp.is_none_or(|time| time <= ts) { timestamp = Some(ts); } } diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index 7c0b36a2..f97ae885 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -1,4 +1,5 @@ //! Extended external extensions to futures::TryFutureExt +#![allow(clippy::type_complexity)] use futures::{ future::{MapOkOrElse, UnwrapOrElse}, diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index da5aec5a..c16d1246 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -1,4 +1,5 @@ //! Synchronous combinator extensions to futures::Stream +#![allow(clippy::type_complexity)] use futures::{ future::{ready, Ready}, diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index df356456..feb38067 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -1,4 +1,5 @@ //! 
Synchronous combinator extensions to futures::TryStream +#![allow(clippy::type_complexity)] use futures::{ future::{ready, Ready}, diff --git a/src/macros/config.rs b/src/macros/config.rs index f8616352..6ccdb73c 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -164,11 +164,11 @@ fn get_default(field: &Field) -> Option { continue; }; - if !path + if path .segments .iter() .next() - .is_some_and(|s| s.ident == "serde") + .is_none_or(|s| s.ident == "serde") { continue; } @@ -218,12 +218,7 @@ fn get_doc_default(field: &Field) -> Option { continue; }; - if !path - .segments - .iter() - .next() - .is_some_and(|s| s.ident == "doc") - { + if path.segments.iter().next().is_none_or(|s| s.ident == "doc") { continue; } @@ -266,12 +261,7 @@ fn get_doc_comment(field: &Field) -> Option { continue; }; - if !path - .segments - .iter() - .next() - .is_some_and(|s| s.ident == "doc") - { + if path.segments.iter().next().is_none_or(|s| s.ident == "doc") { continue; } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 58cc012c..2860bd1b 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -370,9 +370,9 @@ impl Service { /// Sets the self-reference to crate::Services which will provide context to /// the admin commands. 
- pub(super) fn set_services(&self, services: &Option>) { + pub(super) fn set_services(&self, services: Option<&Arc>) { let receiver = &mut *self.services.services.write().expect("locked for writing"); - let weak = services.as_ref().map(Arc::downgrade); + let weak = services.map(Arc::downgrade); *receiver = weak; } } diff --git a/src/service/services.rs b/src/service/services.rs index ea81f434..c0af4249 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -113,7 +113,7 @@ impl Services { pub async fn start(self: &Arc) -> Result> { debug_info!("Starting services..."); - self.admin.set_services(&Some(Arc::clone(self))); + self.admin.set_services(Some(Arc::clone(self)).as_ref()); globals::migrations::migrations(self).await?; self.manager .lock() @@ -151,7 +151,7 @@ impl Services { manager.stop().await; } - self.admin.set_services(&None); + self.admin.set_services(None); debug_info!("Services shutdown complete."); } From e7e606300f33410bfb6bfdf7c9671b210e37f287 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 27 Oct 2024 19:17:41 +0000 Subject: [PATCH 0137/1248] slightly simplify reqwest/hickory hooks Signed-off-by: Jason Volk --- src/service/resolver/dns.rs | 59 ++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 33 deletions(-) diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index b77bbb84..89129e03 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -1,15 +1,11 @@ -use std::{ - future, iter, - net::{IpAddr, SocketAddr}, - sync::Arc, - time::Duration, -}; +use std::{iter, net::SocketAddr, sync::Arc, time::Duration}; use conduit::{err, Result, Server}; +use futures::FutureExt; use hickory_resolver::TokioAsyncResolver; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; -use super::cache::Cache; +use super::cache::{Cache, CachedOverride}; pub struct Resolver { pub(crate) resolver: Arc, @@ -21,6 +17,8 @@ pub(crate) struct Hooked { cache: Arc, } +type ResolvingResult = Result>; + 
impl Resolver { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] pub(super) fn build(server: &Arc, cache: Arc) -> Result> { @@ -82,12 +80,12 @@ impl Resolver { } impl Resolve for Resolver { - fn resolve(&self, name: Name) -> Resolving { resolve_to_reqwest(self.resolver.clone(), name) } + fn resolve(&self, name: Name) -> Resolving { resolve_to_reqwest(self.resolver.clone(), name).boxed() } } impl Resolve for Hooked { fn resolve(&self, name: Name) -> Resolving { - let cached = self + let cached: Option = self .cache .overrides .read() @@ -95,35 +93,30 @@ impl Resolve for Hooked { .get(name.as_str()) .cloned(); - if let Some(cached) = cached { - cached_to_reqwest(&cached.ips, cached.port) - } else { - resolve_to_reqwest(self.resolver.clone(), name) - } + cached.map_or_else( + || resolve_to_reqwest(self.resolver.clone(), name).boxed(), + |cached| cached_to_reqwest(cached).boxed(), + ) } } -fn cached_to_reqwest(override_name: &[IpAddr], port: u16) -> Resolving { - override_name +async fn cached_to_reqwest(cached: CachedOverride) -> ResolvingResult { + let first_ip = cached + .ips .first() - .map(|first_name| -> Resolving { - let saddr = SocketAddr::new(*first_name, port); - let result: Box + Send> = Box::new(iter::once(saddr)); - Box::pin(future::ready(Ok(result))) - }) - .expect("must provide at least one override name") + .expect("must provide at least one override"); + + let saddr = SocketAddr::new(*first_ip, cached.port); + + Ok(Box::new(iter::once(saddr))) } -fn resolve_to_reqwest(resolver: Arc, name: Name) -> Resolving { - Box::pin(async move { - let results = resolver - .lookup_ip(name.as_str()) - .await? - .into_iter() - .map(|ip| SocketAddr::new(ip, 0)); +async fn resolve_to_reqwest(resolver: Arc, name: Name) -> ResolvingResult { + let results = resolver + .lookup_ip(name.as_str()) + .await? 
+ .into_iter() + .map(|ip| SocketAddr::new(ip, 0)); - let results: Addrs = Box::new(results); - - Ok(results) - }) + Ok(Box::new(results)) } From 6c9ecb031a62db0c589f383f7effe01ea30f38ce Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 27 Oct 2024 20:53:22 +0000 Subject: [PATCH 0138/1248] re-export ruma Event trait through core pdu Signed-off-by: Jason Volk --- src/core/mod.rs | 2 +- src/core/pdu/{state_res.rs => event.rs} | 5 +++-- src/core/pdu/mod.rs | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) rename src/core/pdu/{state_res.rs => event.rs} (85%) diff --git a/src/core/mod.rs b/src/core/mod.rs index d201709b..1b7b8fa1 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -17,7 +17,7 @@ pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{PduBuilder, PduCount, PduEvent}; +pub use pdu::{Event, PduBuilder, PduCount, PduEvent}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; diff --git a/src/core/pdu/state_res.rs b/src/core/pdu/event.rs similarity index 85% rename from src/core/pdu/state_res.rs rename to src/core/pdu/event.rs index a27c9822..15117f92 100644 --- a/src/core/pdu/state_res.rs +++ b/src/core/pdu/event.rs @@ -1,11 +1,12 @@ use std::sync::Arc; -use ruma::{events::TimelineEventType, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +pub use ruma::state_res::Event; +use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; use serde_json::value::RawValue as RawJsonValue; use super::PduEvent; -impl state_res::Event for PduEvent { +impl Event for PduEvent { type Id = Arc; fn event_id(&self) -> &Self::Id { &self.event_id } diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index ed11adbb..9c3aaf9b 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -1,10 +1,10 @@ mod builder; mod content; mod count; +mod event; mod filter; mod id; mod redact; -mod 
state_res; mod strip; mod unsigned; @@ -19,6 +19,7 @@ use serde_json::value::RawValue as RawJsonValue; pub use self::{ builder::{Builder, Builder as PduBuilder}, count::PduCount, + event::Event, id::*, }; use crate::Result; From 7a09ac81e039a5ac1dc6d7e215824599b00aed36 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 27 Oct 2024 20:13:10 +0000 Subject: [PATCH 0139/1248] split send from messages; refactor client/messages; add filters to client/context Signed-off-by: Jason Volk --- src/api/client/context.rs | 264 +++++++++------------- src/api/client/message.rs | 457 +++++++++++++++----------------------- src/api/client/mod.rs | 2 + src/api/client/send.rs | 92 ++++++++ src/api/router/args.rs | 26 ++- 5 files changed, 403 insertions(+), 438 deletions(-) create mode 100644 src/api/client/send.rs diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 9a5c4e82..9bf0c467 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,14 +1,25 @@ -use std::collections::HashSet; +use std::iter::once; use axum::extract::State; -use conduit::{err, error, Err}; -use futures::StreamExt; +use conduit::{ + err, error, + utils::{future::TryExtExt, stream::ReadyExt, IterStream}, + Err, Result, +}; +use futures::{future::try_join, StreamExt, TryFutureExt}; use ruma::{ api::client::{context::get_context, filter::LazyLoadOptions}, - events::{StateEventType, TimelineEventType::*}, + events::StateEventType, + UserId, }; -use crate::{Result, Ruma}; +use crate::{ + client::message::{event_filter, ignored_filter, update_lazy, visibility_filter, LazySet}, + Ruma, +}; + +const LIMIT_MAX: usize = 100; +const LIMIT_DEFAULT: usize = 10; /// # `GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}` /// @@ -19,33 +30,43 @@ use crate::{Result, Ruma}; pub(crate) async fn get_context_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = 
body.sender_device.as_ref().expect("user is authenticated"); + let filter = &body.filter; + let sender = body.sender(); + let (sender_user, _) = sender; + + // Use limit or else 10, with maximum 100 + let limit: usize = body + .limit + .try_into() + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); // some clients, at least element, seem to require knowledge of redundant // members for "inline" profiles on the timeline to work properly - let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options { - LazyLoadOptions::Enabled { - include_redundant_members, - } => (true, *include_redundant_members), - LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")), - }; + let lazy_load_enabled = matches!(filter.lazy_load_options, LazyLoadOptions::Enabled { .. }); - let mut lazy_loaded = HashSet::with_capacity(100); + let lazy_load_redundant = if let LazyLoadOptions::Enabled { + include_redundant_members, + } = filter.lazy_load_options + { + include_redundant_members + } else { + false + }; let base_token = services .rooms .timeline .get_pdu_count(&body.event_id) - .await - .map_err(|_| err!(Request(NotFound("Base event id not found."))))?; + .map_err(|_| err!(Request(NotFound("Event not found.")))); let base_event = services .rooms .timeline .get_pdu(&body.event_id) - .await - .map_err(|_| err!(Request(NotFound("Base event not found."))))?; + .map_err(|_| err!(Request(NotFound("Base event not found.")))); + + let (base_token, base_event) = try_join(base_token, base_event).await?; let room_id = &base_event.room_id; @@ -58,136 +79,50 @@ pub(crate) async fn get_context_route( return Err!(Request(Forbidden("You don't have permission to view this event."))); } - if !services - .rooms - .lazy_loading - .lazy_load_was_sent_before(sender_user, sender_device, room_id, &base_event.sender) - .await || lazy_load_send_redundant - { - lazy_loaded.insert(base_event.sender.as_str().to_owned()); - } - - // Use limit or else 10, with maximum 100 - let 
limit = usize::try_from(body.limit).unwrap_or(10).min(100); - - let base_event = base_event.to_room_event(); - let events_before: Vec<_> = services .rooms .timeline .pdus_until(sender_user, room_id, base_token) .await? + .ready_filter_map(|item| event_filter(item, filter)) + .filter_map(|item| ignored_filter(&services, item, sender_user)) + .filter_map(|item| visibility_filter(&services, item, sender_user)) .take(limit / 2) - .filter_map(|(count, pdu)| async move { - // list of safe and common non-state events to ignore - if matches!( - &pdu.kind, - RoomMessage - | Sticker | CallInvite - | CallNotify | RoomEncrypted - | Image | File | Audio - | Voice | Video | UnstablePollStart - | PollStart | KeyVerificationStart - | Reaction | Emote - | Location - ) && services - .users - .user_is_ignored(&pdu.sender, sender_user) - .await - { - return None; - } - - services - .rooms - .state_accessor - .user_can_see_event(sender_user, room_id, &pdu.event_id) - .await - .then_some((count, pdu)) - }) .collect() .await; - for (_, event) in &events_before { - if !services - .rooms - .lazy_loading - .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) - .await || lazy_load_send_redundant - { - lazy_loaded.insert(event.sender.as_str().to_owned()); - } - } - - let start_token = events_before - .last() - .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()); - let events_after: Vec<_> = services .rooms .timeline .pdus_after(sender_user, room_id, base_token) .await? 
+ .ready_filter_map(|item| event_filter(item, filter)) + .filter_map(|item| ignored_filter(&services, item, sender_user)) + .filter_map(|item| visibility_filter(&services, item, sender_user)) .take(limit / 2) - .filter_map(|(count, pdu)| async move { - // list of safe and common non-state events to ignore - if matches!( - &pdu.kind, - RoomMessage - | Sticker | CallInvite - | CallNotify | RoomEncrypted - | Image | File | Audio - | Voice | Video | UnstablePollStart - | PollStart | KeyVerificationStart - | Reaction | Emote - | Location - ) && services - .users - .user_is_ignored(&pdu.sender, sender_user) - .await - { - return None; - } - - services - .rooms - .state_accessor - .user_can_see_event(sender_user, room_id, &pdu.event_id) - .await - .then_some((count, pdu)) - }) .collect() .await; - for (_, event) in &events_after { - if !services - .rooms - .lazy_loading - .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) - .await || lazy_load_send_redundant - { - lazy_loaded.insert(event.sender.as_str().to_owned()); - } - } + let lazy = once(&(base_token, (*base_event).clone())) + .chain(events_before.iter()) + .chain(events_after.iter()) + .stream() + .fold(LazySet::new(), |lazy, item| { + update_lazy(&services, room_id, sender, lazy, item, lazy_load_redundant) + }) + .await; + + let state_id = events_after + .last() + .map_or(body.event_id.as_ref(), |(_, e)| e.event_id.as_ref()); let shortstatehash = services .rooms .state_accessor - .pdu_shortstatehash( - events_after - .last() - .map_or(&*body.event_id, |(_, e)| &*e.event_id), - ) + .pdu_shortstatehash(state_id) + .or_else(|_| services.rooms.state.get_room_shortstatehash(room_id)) .await - .map_or( - services - .rooms - .state - .get_room_shortstatehash(room_id) - .await - .expect("All rooms have state"), - |hash| hash, - ); + .map_err(|e| err!(Database("State hash not found: {e}")))?; let state_ids = services .rooms @@ -196,48 +131,61 @@ pub(crate) async fn get_context_route( .await 
.map_err(|e| err!(Database("State not found: {e}")))?; - let end_token = events_after - .last() - .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()); + let lazy = &lazy; + let state: Vec<_> = state_ids + .iter() + .stream() + .filter_map(|(shortstatekey, event_id)| { + services + .rooms + .short + .get_statekey_from_short(*shortstatekey) + .map_ok(move |(event_type, state_key)| (event_type, state_key, event_id)) + .ok() + }) + .filter_map(|(event_type, state_key, event_id)| async move { + if lazy_load_enabled && event_type == StateEventType::RoomMember { + let user_id: &UserId = state_key.as_str().try_into().ok()?; + if !lazy.contains(user_id) { + return None; + } + } - let mut state = Vec::with_capacity(state_ids.len()); - - for (shortstatekey, id) in state_ids { - let (event_type, state_key) = services - .rooms - .short - .get_statekey_from_short(shortstatekey) - .await?; - - if event_type != StateEventType::RoomMember { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state not found: {id}"); - continue; - }; - - state.push(pdu.to_state_event()); - } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state not found: {id}"); - continue; - }; - - state.push(pdu.to_state_event()); - } - } + services + .rooms + .timeline + .get_pdu(event_id) + .await + .inspect_err(|_| error!("Pdu in state not found: {event_id}")) + .map(|pdu| pdu.to_state_event()) + .ok() + }) + .collect() + .await; Ok(get_context::v3::Response { - start: Some(start_token), - end: Some(end_token), + event: Some(base_event.to_room_event()), + + start: events_before + .last() + .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()) + .into(), + + end: events_after + .last() + .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()) + .into(), + events_before: events_before - .iter() + .into_iter() .map(|(_, pdu)| 
pdu.to_room_event()) .collect(), - event: Some(base_event), + events_after: events_after - .iter() + .into_iter() .map(|(_, pdu)| pdu.to_room_event()) .collect(), + state, }) } diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 094daa30..4fc58d9f 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,111 +1,52 @@ -use std::collections::{BTreeMap, HashSet}; +use std::collections::HashSet; use axum::extract::State; use conduit::{ - err, - utils::{IterStream, ReadyExt}, - Err, PduCount, + at, is_equal_to, + utils::{ + result::{FlatOk, LogErr}, + IterStream, ReadyExt, + }, + Event, PduCount, Result, }; use futures::{FutureExt, StreamExt}; use ruma::{ - api::client::{ - filter::RoomEventFilter, - message::{get_message_events, send_message_event}, + api::{ + client::{filter::RoomEventFilter, message::get_message_events}, + Direction, }, - events::{MessageLikeEventType, StateEventType, TimelineEventType::*}, - UserId, + events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, + serde::Raw, + DeviceId, OwnedUserId, RoomId, UserId, }; -use serde_json::from_str; -use service::rooms::timeline::PdusIterItem; +use service::{rooms::timeline::PdusIterItem, Services}; -use crate::{ - service::{pdu::PduBuilder, Services}, - utils, Result, Ruma, -}; +use crate::Ruma; -/// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` -/// -/// Send a message event into the room. 
-/// -/// - Is a NOOP if the txn id was already used before and returns the same event -/// id again -/// - The only requirement for the content is that it has to be valid json -/// - Tries to send the event into the room, auth rules will determine if it is -/// allowed -pub(crate) async fn send_message_event_route( - State(services): State, body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_deref().expect("user is authenticated"); - let sender_device = body.sender_device.as_deref(); - let appservice_info = body.appservice_info.as_ref(); +pub(crate) type LazySet = HashSet; - // Forbid m.room.encrypted if encryption is disabled - if MessageLikeEventType::RoomEncrypted == body.event_type && !services.globals.allow_encryption() { - return Err!(Request(Forbidden("Encryption has been disabled"))); - } +/// list of safe and common non-state events to ignore +const IGNORED_MESSAGE_TYPES: &[TimelineEventType] = &[ + RoomMessage, + Sticker, + CallInvite, + CallNotify, + RoomEncrypted, + Image, + File, + Audio, + Voice, + Video, + UnstablePollStart, + PollStart, + KeyVerificationStart, + Reaction, + Emote, + Location, +]; - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - if body.event_type == MessageLikeEventType::CallInvite - && services.rooms.directory.is_public_room(&body.room_id).await - { - return Err!(Request(Forbidden("Room call invites are not allowed in public rooms"))); - } - - // Check if this is a new transaction id - if let Ok(response) = services - .transaction_ids - .existing_txnid(sender_user, sender_device, &body.txn_id) - .await - { - // The client might have sent a txnid of the /sendToDevice endpoint - // This txnid has no response associated with it - if response.is_empty() { - return Err!(Request(InvalidParam( - "Tried to use txn id already used for an incompatible endpoint." 
- ))); - } - - return Ok(send_message_event::v3::Response { - event_id: utils::string_from_bytes(&response) - .map(TryInto::try_into) - .map_err(|e| err!(Database("Invalid event_id in txnid data: {e:?}")))??, - }); - } - - let mut unsigned = BTreeMap::new(); - unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - - let content = - from_str(body.body.body.json().get()).map_err(|e| err!(Request(BadJson("Invalid JSON body: {e}"))))?; - - let event_id = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: body.event_type.clone().into(), - content, - unsigned: Some(unsigned), - timestamp: appservice_info.and(body.timestamp), - ..Default::default() - }, - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - services - .transaction_ids - .add_txnid(sender_user, sender_device, &body.txn_id, event_id.as_bytes()); - - drop(state_lock); - - Ok(send_message_event::v3::Response { - event_id: event_id.into(), - }) -} +const LIMIT_MAX: usize = 100; +const LIMIT_DEFAULT: usize = 10; /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` /// @@ -116,209 +57,171 @@ pub(crate) async fn send_message_event_route( pub(crate) async fn get_message_events_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - + let sender = body.sender(); + let (sender_user, sender_device) = sender; let room_id = &body.room_id; let filter = &body.filter; - let limit = usize::try_from(body.limit).unwrap_or(10).min(100); - let from = match body.from.as_ref() { - Some(from) => PduCount::try_from_string(from)?, - None => match body.dir { - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), - }, + let from_default = match body.dir { + Direction::Forward => PduCount::min(), + Direction::Backward => PduCount::max(), }; - let to = 
body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(t).ok()); + let from = body + .from + .as_deref() + .map(PduCount::try_from_string) + .transpose()? + .unwrap_or(from_default); + + let to = body.to.as_deref().map(PduCount::try_from_string).flat_ok(); + + let limit: usize = body + .limit + .try_into() + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); services .rooms .lazy_loading .lazy_load_confirm_delivery(sender_user, sender_device, room_id, from); - let mut resp = get_message_events::v3::Response::new(); - let mut lazy_loaded = HashSet::new(); - let next_token; - match body.dir { - ruma::api::Direction::Forward => { - let events_after: Vec = services - .rooms - .timeline - .pdus_after(sender_user, room_id, from) - .await? - .ready_filter_map(|item| event_filter(item, filter)) - .filter_map(|item| visibility_filter(&services, item, sender_user)) - .ready_take_while(|(count, _)| Some(*count) != to) // Stop at `to` - .take(limit) - .collect() - .boxed() - .await; - - for (_, event) in &events_after { - /* TODO: Remove the not "element_hacks" check when these are resolved: - * https://github.com/vector-im/element-android/issues/3417 - * https://github.com/vector-im/element-web/issues/21034 - */ - if !cfg!(feature = "element_hacks") - && !services - .rooms - .lazy_loading - .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) - .await - { - lazy_loaded.insert(event.sender.clone()); - } - - if cfg!(features = "element_hacks") { - lazy_loaded.insert(event.sender.clone()); - } - } - - next_token = events_after.last().map(|(count, _)| count).copied(); - - let events_after: Vec<_> = events_after - .into_iter() - .stream() - .filter_map(|(_, pdu)| async move { - // list of safe and common non-state events to ignore - if matches!( - &pdu.kind, - RoomMessage - | Sticker | CallInvite - | CallNotify | RoomEncrypted - | Image | File | Audio - | Voice | Video | UnstablePollStart - | PollStart | KeyVerificationStart - | Reaction | Emote | 
Location - ) && services - .users - .user_is_ignored(&pdu.sender, sender_user) - .await - { - return None; - } - - Some(pdu.to_room_event()) - }) - .collect() - .await; - - resp.start = from.stringify(); - resp.end = next_token.map(|count| count.stringify()); - resp.chunk = events_after; - }, - ruma::api::Direction::Backward => { - services - .rooms - .timeline - .backfill_if_required(room_id, from) - .boxed() - .await?; - - let events_before: Vec = services - .rooms - .timeline - .pdus_until(sender_user, room_id, from) - .await? - .ready_filter_map(|item| event_filter(item, filter)) - .filter_map(|(count, pdu)| async move { - // list of safe and common non-state events to ignore - if matches!( - &pdu.kind, - RoomMessage - | Sticker | CallInvite - | CallNotify | RoomEncrypted - | Image | File | Audio - | Voice | Video | UnstablePollStart - | PollStart | KeyVerificationStart - | Reaction | Emote | Location - ) && services - .users - .user_is_ignored(&pdu.sender, sender_user) - .await - { - return None; - } - - Some((count, pdu)) - }) - .filter_map(|item| visibility_filter(&services, item, sender_user)) - .ready_take_while(|(count, _)| Some(*count) != to) // Stop at `to` - .take(limit) - .collect() - .boxed() - .await; - - for (_, event) in &events_before { - /* TODO: Remove the not "element_hacks" check when these are resolved: - * https://github.com/vector-im/element-android/issues/3417 - * https://github.com/vector-im/element-web/issues/21034 - */ - if !cfg!(feature = "element_hacks") - && !services - .rooms - .lazy_loading - .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) - .await - { - lazy_loaded.insert(event.sender.clone()); - } - - if cfg!(features = "element_hacks") { - lazy_loaded.insert(event.sender.clone()); - } - } - - next_token = events_before.last().map(|(count, _)| count).copied(); - - let events_before: Vec<_> = events_before - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); - - resp.start = 
from.stringify(); - resp.end = next_token.map(|count| count.stringify()); - resp.chunk = events_before; - }, + if matches!(body.dir, Direction::Backward) { + services + .rooms + .timeline + .backfill_if_required(room_id, from) + .boxed() + .await + .log_err() + .ok(); } - resp.state = lazy_loaded - .iter() - .stream() - .filter_map(|ll_user_id| async move { - services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, ll_user_id.as_str()) - .await - .map(|member_event| member_event.to_state_event()) - .ok() - }) + let it = match body.dir { + Direction::Forward => services + .rooms + .timeline + .pdus_after(sender_user, room_id, from) + .await? + .boxed(), + + Direction::Backward => services + .rooms + .timeline + .pdus_until(sender_user, room_id, from) + .await? + .boxed(), + }; + + let events: Vec<_> = it + .ready_take_while(|(count, _)| Some(*count) != to) + .ready_filter_map(|item| event_filter(item, filter)) + .filter_map(|item| ignored_filter(&services, item, sender_user)) + .filter_map(|item| visibility_filter(&services, item, sender_user)) + .take(limit) .collect() .await; - // remove the feature check when we are sure clients like element can handle it + let lazy = events + .iter() + .stream() + .fold(LazySet::new(), |lazy, item| { + update_lazy(&services, room_id, sender, lazy, item, false) + }) + .await; + + let state = lazy + .iter() + .stream() + .filter_map(|user_id| get_member_event(&services, room_id, user_id)) + .collect() + .await; + + let next_token = events.last().map(|(count, _)| count).copied(); + if !cfg!(feature = "element_hacks") { if let Some(next_token) = next_token { - services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_token, - ); + services + .rooms + .lazy_loading + .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy, next_token); } } - Ok(resp) + let chunk = events + .into_iter() + .map(at!(1)) + .map(|pdu| pdu.to_room_event()) + 
.collect(); + + Ok(get_message_events::v3::Response { + start: from.stringify(), + end: next_token.as_ref().map(PduCount::stringify), + chunk, + state, + }) } -async fn visibility_filter(services: &Services, item: PdusIterItem, user_id: &UserId) -> Option { +async fn get_member_event(services: &Services, room_id: &RoomId, user_id: &UserId) -> Option> { + services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) + .await + .map(|member_event| member_event.to_state_event()) + .ok() +} + +pub(crate) async fn update_lazy( + services: &Services, room_id: &RoomId, sender: (&UserId, &DeviceId), mut lazy: LazySet, item: &PdusIterItem, + force: bool, +) -> LazySet { + let (_, event) = &item; + let (sender_user, sender_device) = sender; + + /* TODO: Remove the not "element_hacks" check when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 + */ + if force || cfg!(features = "element_hacks") { + lazy.insert(event.sender().into()); + return lazy; + } + + if !services + .rooms + .lazy_loading + .lazy_load_was_sent_before(sender_user, sender_device, room_id, event.sender()) + .await + { + lazy.insert(event.sender().into()); + } + + lazy +} + +pub(crate) async fn ignored_filter(services: &Services, item: PdusIterItem, user_id: &UserId) -> Option { + let (_, pdu) = &item; + + if pdu.kind.to_cow_str() == "org.matrix.dummy_event" { + return None; + } + + if !IGNORED_MESSAGE_TYPES.iter().any(is_equal_to!(&pdu.kind)) { + return Some(item); + } + + if !services.users.user_is_ignored(&pdu.sender, user_id).await { + return Some(item); + } + + None +} + +pub(crate) async fn visibility_filter( + services: &Services, item: PdusIterItem, user_id: &UserId, +) -> Option { let (_, pdu) = &item; services @@ -329,7 +232,7 @@ async fn visibility_filter(services: &Services, item: PdusIterItem, user_id: &Us .then_some(item) } -fn event_filter(item: 
PdusIterItem, filter: &RoomEventFilter) -> Option { +pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { let (_, pdu) = &item; pdu.matches(filter).then_some(item) } diff --git a/src/api/client/mod.rs b/src/api/client/mod.rs index 2928be87..9ee88bec 100644 --- a/src/api/client/mod.rs +++ b/src/api/client/mod.rs @@ -23,6 +23,7 @@ pub(super) mod relations; pub(super) mod report; pub(super) mod room; pub(super) mod search; +pub(super) mod send; pub(super) mod session; pub(super) mod space; pub(super) mod state; @@ -65,6 +66,7 @@ pub(super) use relations::*; pub(super) use report::*; pub(super) use room::*; pub(super) use search::*; +pub(super) use send::*; pub(super) use session::*; pub(super) use space::*; pub(super) use state::*; diff --git a/src/api/client/send.rs b/src/api/client/send.rs new file mode 100644 index 00000000..ff011efa --- /dev/null +++ b/src/api/client/send.rs @@ -0,0 +1,92 @@ +use std::collections::BTreeMap; + +use axum::extract::State; +use conduit::{err, Err}; +use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; +use serde_json::from_str; + +use crate::{service::pdu::PduBuilder, utils, Result, Ruma}; + +/// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` +/// +/// Send a message event into the room. 
+/// +/// - Is a NOOP if the txn id was already used before and returns the same event +/// id again +/// - The only requirement for the content is that it has to be valid json +/// - Tries to send the event into the room, auth rules will determine if it is +/// allowed +pub(crate) async fn send_message_event_route( + State(services): State, body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + let sender_device = body.sender_device.as_deref(); + let appservice_info = body.appservice_info.as_ref(); + + // Forbid m.room.encrypted if encryption is disabled + if MessageLikeEventType::RoomEncrypted == body.event_type && !services.globals.allow_encryption() { + return Err!(Request(Forbidden("Encryption has been disabled"))); + } + + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + if body.event_type == MessageLikeEventType::CallInvite + && services.rooms.directory.is_public_room(&body.room_id).await + { + return Err!(Request(Forbidden("Room call invites are not allowed in public rooms"))); + } + + // Check if this is a new transaction id + if let Ok(response) = services + .transaction_ids + .existing_txnid(sender_user, sender_device, &body.txn_id) + .await + { + // The client might have sent a txnid of the /sendToDevice endpoint + // This txnid has no response associated with it + if response.is_empty() { + return Err!(Request(InvalidParam( + "Tried to use txn id already used for an incompatible endpoint." 
+ ))); + } + + return Ok(send_message_event::v3::Response { + event_id: utils::string_from_bytes(&response) + .map(TryInto::try_into) + .map_err(|e| err!(Database("Invalid event_id in txnid data: {e:?}")))??, + }); + } + + let mut unsigned = BTreeMap::new(); + unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); + + let content = + from_str(body.body.body.json().get()).map_err(|e| err!(Request(BadJson("Invalid JSON body: {e}"))))?; + + let event_id = services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: body.event_type.clone().into(), + content, + unsigned: Some(unsigned), + timestamp: appservice_info.and(body.timestamp), + ..Default::default() + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + services + .transaction_ids + .add_txnid(sender_user, sender_device, &body.txn_id, event_id.as_bytes()); + + drop(state_lock); + + Ok(send_message_event::v3::Response { + event_id: event_id.into(), + }) +} diff --git a/src/api/router/args.rs b/src/api/router/args.rs index cefacac1..38236db3 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -3,7 +3,9 @@ use std::{mem, ops::Deref}; use axum::{async_trait, body::Body, extract::FromRequest}; use bytes::{BufMut, BytesMut}; use conduit::{debug, err, trace, utils::string::EMPTY, Error, Result}; -use ruma::{api::IncomingRequest, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, ServerName, UserId}; +use ruma::{ + api::IncomingRequest, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, OwnedUserId, ServerName, UserId, +}; use service::Services; use super::{auth, auth::Auth, request, request::Request}; @@ -40,10 +42,28 @@ where T: IncomingRequest + Send + Sync + 'static, { #[inline] - pub(crate) fn sender_user(&self) -> &UserId { self.sender_user.as_deref().expect("user is authenticated") } + pub(crate) fn sender(&self) -> (&UserId, &DeviceId) { (self.sender_user(), self.sender_device()) } #[inline] - pub(crate) fn 
origin(&self) -> &ServerName { self.origin.as_deref().expect("server is authenticated") } + pub(crate) fn sender_user(&self) -> &UserId { + self.sender_user + .as_deref() + .expect("user must be authenticated for this handler") + } + + #[inline] + pub(crate) fn sender_device(&self) -> &DeviceId { + self.sender_device + .as_deref() + .expect("user must be authenticated and device identified") + } + + #[inline] + pub(crate) fn origin(&self) -> &ServerName { + self.origin + .as_deref() + .expect("server must be authenticated for this handler") + } } #[async_trait] From 52e356d7805fd25c4e0b21757076f04d271d4241 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 28 Oct 2024 06:49:25 +0000 Subject: [PATCH 0140/1248] generate ActualDest https string on the fly Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 29 ++++++++++++++--------------- src/service/resolver/fed.rs | 19 ++++++++----------- src/service/sending/send.rs | 4 ++-- 3 files changed, 24 insertions(+), 28 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index ea4b1100..660498f7 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -18,34 +18,33 @@ use crate::resolver::{ pub(crate) struct ActualDest { pub(crate) dest: FedDest, pub(crate) host: String, - pub(crate) string: String, pub(crate) cached: bool, } +impl ActualDest { + #[inline] + pub(crate) fn string(&self) -> String { self.dest.https_string() } +} + impl super::Service { #[tracing::instrument(skip_all, name = "resolve")] pub(crate) async fn get_actual_dest(&self, server_name: &ServerName) -> Result { - let cached; - let cached_result = self.get_cached_destination(server_name); + let (result, cached) = if let Some(result) = self.get_cached_destination(server_name) { + (result, true) + } else { + self.validate_dest(server_name)?; + (self.resolve_actual_dest(server_name, true).await?, false) + }; let CachedDest { dest, host, .. 
- } = if let Some(result) = cached_result { - cached = true; - result - } else { - cached = false; - self.validate_dest(server_name)?; - self.resolve_actual_dest(server_name, true).await? - }; + } = result; - let string = dest.clone().into_https_string(); Ok(ActualDest { dest, host, - string, cached, }) } @@ -89,7 +88,7 @@ impl super::Service { debug!("Actual destination: {actual_dest:?} hostname: {host:?}"); Ok(CachedDest { dest: actual_dest, - host: host.into_uri_string(), + host: host.uri_string(), expire: CachedDest::default_expire(), }) } @@ -109,7 +108,7 @@ impl super::Service { async fn actual_dest_3(&self, host: &mut String, cache: bool, delegated: String) -> Result { debug!("3: A .well-known file is available"); - *host = add_port_to_hostname(&delegated).into_uri_string(); + *host = add_port_to_hostname(&delegated).uri_string(); match get_ip_with_port(&delegated) { Some(host_and_port) => Self::actual_dest_3_1(host_and_port), None => { diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index 10cbbbdd..79f71f13 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -1,4 +1,5 @@ use std::{ + borrow::Cow, fmt, net::{IpAddr, SocketAddr}, }; @@ -29,24 +30,25 @@ pub(crate) fn add_port_to_hostname(dest_str: &str) -> FedDest { } impl FedDest { - pub(crate) fn into_https_string(self) -> String { + pub(crate) fn https_string(&self) -> String { match self { Self::Literal(addr) => format!("https://{addr}"), Self::Named(host, port) => format!("https://{host}{port}"), } } - pub(crate) fn into_uri_string(self) -> String { + pub(crate) fn uri_string(&self) -> String { match self { Self::Literal(addr) => addr.to_string(), Self::Named(host, port) => format!("{host}{port}"), } } - pub(crate) fn hostname(&self) -> String { + #[inline] + pub(crate) fn hostname(&self) -> Cow<'_, str> { match &self { - Self::Literal(addr) => addr.ip().to_string(), - Self::Named(host, _) => host.clone(), + Self::Literal(addr) => 
addr.ip().to_string().into(), + Self::Named(host, _) => host.into(), } } @@ -61,10 +63,5 @@ impl FedDest { } impl fmt::Display for FedDest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Named(host, port) => write!(f, "{host}{port}"), - Self::Literal(addr) => write!(f, "{addr}"), - } - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.uri_string().as_str()) } } diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 62da59ef..2fbb3919 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -71,7 +71,7 @@ impl super::Service { trace!("Preparing request"); let mut http_request = req - .try_into_http_request::>(&actual.string, SATIR, &VERSIONS) + .try_into_http_request::>(actual.string().as_str(), SATIR, &VERSIONS) .map_err(|e| err!(BadServerResponse("Invalid destination: {e:?}")))?; self.sign_request::(dest, &mut http_request); @@ -107,7 +107,7 @@ where request_url = ?url, response_url = ?response.url(), "Received response from {}", - actual.string, + actual.string(), ); let mut http_response_builder = http::Response::builder() From d92f2c121f95f8d8beadc3a727b8a02376c46d3c Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 27 Oct 2024 12:19:45 -0400 Subject: [PATCH 0141/1248] document nginx needing request_uri Signed-off-by: strawberry --- docs/deploying/generic.md | 12 ++++++++---- src/api/router/auth.rs | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 1e44ab54..31dc1845 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -119,12 +119,16 @@ is the recommended reverse proxy for new users and is very trivial to use (handles TLS, reverse proxy headers, etc transparently with proper defaults). Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization -header, making federation non-functional. 
If using Apache, you need to use -`nocanon` in your `ProxyPass` directive to prevent this (note that Apache -isn't very good as a general reverse proxy). +header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. + +If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent this (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). + +If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: +- `proxy_pass http://127.0.0.1:6167$request_uri;` +- `proxy_pass http://127.0.0.1:6167;` Nginx users may need to set `proxy_buffering off;` if there are issues with -uploading media like images. +uploading media like images. This is due to Nginx storing the entire POST content in-memory (`/tmp`) and running out of memory if on low memory hardware. You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 6b1bb1a9..31e71f2f 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -247,8 +247,8 @@ async fn auth_server(services: &Services, request: &mut Request, body: Option<&C debug_error!("Failed to verify federation request from {origin}: {e}"); if request.parts.uri.to_string().contains('@') { warn!( - "Request uri contained '@' character. Make sure your reverse proxy gives Conduit the raw uri (apache: \ - use nocanon)" + "Request uri contained '@' character. 
Make sure your reverse proxy gives conduwuit the raw uri \ + (apache: use nocanon)" ); } From 065396f8f502e1b206c37b0d7dea92f79bfd8634 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 27 Oct 2024 12:37:44 -0400 Subject: [PATCH 0142/1248] better document allow_inbound_profile_lookup_federation_requests Signed-off-by: strawberry --- src/api/server/publicrooms.rs | 3 ++- src/api/server/query.rs | 6 +++++- src/core/config/mod.rs | 21 ++++++++++++++++----- src/service/globals/mod.rs | 4 ---- 4 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index af8a5846..f6c41859 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -20,7 +20,8 @@ pub(crate) async fn get_public_rooms_filtered_route( ) -> Result { if !services .globals - .allow_public_room_directory_over_federation() + .config + .allow_public_room_directory_over_federation { return Err(Error::BadRequest(ErrorKind::forbidden(), "Room directory is not public")); } diff --git a/src/api/server/query.rs b/src/api/server/query.rs index 348b8c6e..bf515b3c 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -63,7 +63,11 @@ pub(crate) async fn get_room_information_route( pub(crate) async fn get_profile_information_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_profile_lookup_federation_requests() { + if !services + .globals + .config + .allow_inbound_profile_lookup_federation_requests + { return Err(Error::BadRequest( ErrorKind::forbidden(), "Profile lookup over federation is not allowed on this homeserver.", diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 04e44fd7..7a5c6d08 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -458,11 +458,16 @@ pub struct Config { /// obtain the profiles of our local users from /// `/_matrix/federation/v1/query/profile` /// - /// This is inherently false if `allow_federation` is disabled + /// 
Increases privacy of your local user's such as display names, but some + /// remote users may get a false "this user does not exist" error when they + /// try to invite you to a DM or room. Also can protect against profile + /// spiders. /// - /// Defaults to true - #[serde(default = "true_fn")] - pub allow_profile_lookup_federation_requests: bool, + /// Defaults to true. + /// + /// This is inherently false if `allow_federation` is disabled + #[serde(default = "true_fn", alias = "allow_profile_lookup_federation_requests")] + pub allow_inbound_profile_lookup_federation_requests: bool, /// controls whether users are allowed to create rooms. /// appservices and admins are always allowed to create rooms @@ -1530,6 +1535,10 @@ impl fmt::Display for Config { line("Allow encryption", &self.allow_encryption.to_string()); line("Allow federation", &self.allow_federation.to_string()); line("Federation loopback", &self.federation_loopback.to_string()); + line( + "Require authentication for profile requests", + &self.require_auth_for_profile_requests.to_string(), + ); line( "Allow incoming federated presence requests (updates)", &self.allow_incoming_presence.to_string(), @@ -1577,7 +1586,9 @@ impl fmt::Display for Config { line("Allow device name federation", &self.allow_device_name_federation.to_string()); line( "Allow incoming profile lookup federation requests", - &self.allow_profile_lookup_federation_requests.to_string(), + &self + .allow_inbound_profile_lookup_federation_requests + .to_string(), ); line( "Auto deactivate banned room join attempts", diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 157c3944..0a7dda9f 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -212,10 +212,6 @@ impl Service { pub fn turn_username(&self) -> &String { &self.config.turn_username } - pub fn allow_profile_lookup_federation_requests(&self) -> bool { - self.config.allow_profile_lookup_federation_requests - } - pub fn 
notification_push_path(&self) -> &String { &self.config.notification_push_path } pub fn emergency_password(&self) -> &Option { &self.config.emergency_password } From 85890ed42502a4672d21218b07fc7366f7027ef3 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 27 Oct 2024 13:21:16 -0400 Subject: [PATCH 0143/1248] remove some unnecessary HTML from admin commands Signed-off-by: strawberry --- src/admin/debug/commands.rs | 2 +- src/admin/federation/commands.rs | 28 +++++----------------------- src/admin/room/directory.rs | 29 +++++------------------------ src/admin/server/commands.rs | 5 ++++- src/core/config/mod.rs | 2 +- 5 files changed, 16 insertions(+), 50 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 0fd3c91b..2aa6078f 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -106,7 +106,7 @@ pub(super) async fn get_pdu(&self, event_id: Box) -> Result) -> Result { @@ -108,33 +108,15 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: Box) -> Result< rooms.sort_by_key(|r| r.1); rooms.reverse(); - let output_plain = format!( - "Rooms {user_id} shares with us ({}):\n{}", + let output = format!( + "Rooms {user_id} shares with us ({}):\n```\n{}\n```", rooms.len(), rooms .iter() - .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) .collect::>() .join("\n") ); - let output_html = format!( - "\n\t\t\n{}
    Rooms {user_id} shares with us \ - ({})
    idmembersname
    ", - rooms.len(), - rooms - .iter() - .fold(String::new(), |mut output, (id, members, name)| { - writeln!( - output, - "{}\t{}\t{}", - id, - members, - escape_html(name) - ) - .expect("should be able to write to string buffer"); - output - }) - ); - Ok(RoomMessageEventContent::text_html(output_plain, output_html)) + Ok(RoomMessageEventContent::text_markdown(output)) } diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 1080356a..0bdaf56d 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,11 +1,9 @@ -use std::fmt::Write; - use clap::Subcommand; use conduit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId}; -use crate::{escape_html, get_room_info, Command, PAGE_SIZE}; +use crate::{get_room_info, Command, PAGE_SIZE}; #[derive(Debug, Subcommand)] pub(crate) enum RoomDirectoryCommand { @@ -68,32 +66,15 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_> return Ok(RoomMessageEventContent::text_plain("No more rooms.")); }; - let output_plain = format!( - "Rooms:\n{}", + let output = format!( + "Rooms (page {page}):\n```\n{}\n```", rooms .iter() - .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) .collect::>() .join("\n") ); - let output_html = format!( - "\n\t\t\n{}
    Room directory - page \ - {page}
    idmembersname
    ", - rooms - .iter() - .fold(String::new(), |mut output, (id, members, name)| { - writeln!( - output, - "{}\t{}\t{}", - escape_html(id.as_ref()), - members, - escape_html(name.as_ref()) - ) - .expect("should be able to write to string buffer"); - output - }) - ); - Ok(RoomMessageEventContent::text_html(output_plain, output_html)) + Ok(RoomMessageEventContent::text_markdown(output)) }, } } diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index de6ad98a..f5879b03 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -21,7 +21,10 @@ pub(super) async fn uptime(&self) -> Result { #[admin_command] pub(super) async fn show_config(&self) -> Result { // Construct and send the response - Ok(RoomMessageEventContent::text_plain(format!("{}", self.services.globals.config))) + Ok(RoomMessageEventContent::text_markdown(format!( + "```\n{}\n```", + self.services.globals.config + ))) } #[admin_command] diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 7a5c6d08..512cb48b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1441,7 +1441,7 @@ impl Config { impl fmt::Display for Config { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "Active config values:\n\n").expect("wrote line to formatter stream"); + writeln!(f, "Active config values:\n").expect("wrote line to formatter stream"); let mut line = |key: &str, val: &str| { writeln!(f, "{key}: {val}").expect("wrote line to formatter stream"); }; From 0a281241efdc536d950f24edd4805364a8bbfd97 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 28 Oct 2024 16:53:53 -0400 Subject: [PATCH 0144/1248] bump few dependencies, bump ruwuma Signed-off-by: strawberry --- Cargo.lock | 68 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 10 ++++---- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8acce7d..44856753 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -290,7 +290,7 @@ 
dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pemfile", "rustls-pki-types", "tokio", @@ -310,7 +310,7 @@ dependencies = [ "http", "http-body-util", "pin-project", - "rustls 0.23.15", + "rustls 0.23.16", "tokio", "tokio-rustls", "tokio-util", @@ -770,7 +770,7 @@ dependencies = [ "hyper-util", "log", "ruma", - "rustls 0.23.15", + "rustls 0.23.16", "sd-notify", "sentry", "sentry-tower", @@ -1202,9 +1202,9 @@ dependencies = [ [[package]] name = "fdeflate" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8090f921a24b04994d9929e204f50b498a33ea6ba559ffaa05e04f7ee7fb5ab" +checksum = "07c6f4c64c1d33a3111c4466f7365ebdcc37c5bd1ea0d62aae2e3d722aacbedb" dependencies = [ "simd-adler32", ] @@ -1708,7 +1708,7 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -2771,7 +2771,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "socket2", "thiserror", "tokio", @@ -2788,7 +2788,7 @@ dependencies = [ "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "slab", "thiserror", "tinyvec", @@ -2902,9 +2902,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "async-compression", "base64 0.22.1", @@ -2928,7 +2928,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-native-certs", "rustls-pemfile", "rustls-pki-types", @@ -2977,7 +2977,7 @@ dependencies = [ [[package]] name = "ruma" version = 
"0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "assign", "js_int", @@ -2999,7 +2999,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "js_int", "ruma-common", @@ -3011,7 +3011,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "as_variant", "assign", @@ -3034,7 +3034,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "as_variant", "base64 0.22.1", @@ -3064,7 +3064,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "as_variant", "indexmap 
2.6.0", @@ -3088,7 +3088,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "bytes", "http", @@ -3106,7 +3106,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "js_int", "thiserror", @@ -3115,7 +3115,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "js_int", "ruma-common", @@ -3125,7 +3125,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "cfg-if", "once_cell", @@ -3141,7 +3141,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "js_int", "ruma-common", @@ -3153,7 +3153,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "headers", "http", @@ -3166,7 +3166,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3182,7 +3182,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=39c1addd37a4eed612ac1135edc2cccd9d331d5e#39c1addd37a4eed612ac1135edc2cccd9d331d5e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" dependencies = [ "futures-util", "itertools 0.13.0", @@ -3258,9 +3258,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ "bitflags 2.6.0", "errno", @@ -3285,9 +3285,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "aws-lc-rs", "log", @@ -3563,18 +3563,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", @@ -4106,7 +4106,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] @@ -4472,7 +4472,7 @@ dependencies = [ "base64 0.22.1", "log", "once_cell", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "url", "webpki-roots", diff --git a/Cargo.toml b/Cargo.toml index 2f9f196b..e406c9e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -128,10 +128,10 @@ features = [ ] [workspace.dependencies.rustls] -version = "0.23.13" +version = "0.23.16" [workspace.dependencies.reqwest] -version = "0.12.8" +version = "0.12.9" default-features = false features = [ "rustls-tls-native-roots", @@ -141,7 +141,7 @@ features = [ ] [workspace.dependencies.serde] -version = "1.0.209" +version = "1.0.214" default-features = false features = ["rc"] @@ -257,7 +257,7 @@ features = [ ] [workspace.dependencies.hyper-util] -# 0.1.9 causes DNS issues +# 0.1.9 and above causes DNS issues version = "=0.1.8" default-features = 
false features = [ @@ -315,7 +315,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "39c1addd37a4eed612ac1135edc2cccd9d331d5e" +rev = "dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" features = [ "compat", "rand", From c71db93e225b44f15c652f4fbe0befaad508e48e Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 28 Oct 2024 18:28:56 -0400 Subject: [PATCH 0145/1248] implement admin command to force join list of local users Signed-off-by: strawberry --- src/admin/user/commands.rs | 139 ++++++++++++++++++++++++++++++++++++- src/admin/user/mod.rs | 15 ++++ 2 files changed, 153 insertions(+), 1 deletion(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index fb6d2bf1..d6946b4e 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -1,7 +1,11 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; -use conduit::{error, info, is_equal_to, utils, warn, PduBuilder, Result}; +use conduit::{ + debug_warn, error, info, is_equal_to, + utils::{self, ReadyExt}, + warn, PduBuilder, Result, +}; use conduit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ @@ -376,6 +380,139 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result Result { + const REASON: &str = "Bulk force joining this room as initiated by the server admin."; + + if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" + { + return Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + )); + } + + if !yes_i_want_to_do_this { + return Ok(RoomMessageEventContent::notice_markdown( + "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force bulk join all \ + specified local users.", + )); + } + + let Ok(admin_room) = self.services.admin.get_admin_room().await else { + return Ok(RoomMessageEventContent::notice_markdown( + "There is not an admin room to check for server admins.", + )); + }; + + let (room_id, servers) = self + .services + .rooms + .alias + .resolve_with_servers(&room_id, None) + .await?; + + if !self + .services + .rooms + .state_cache + .server_in_room(self.services.globals.server_name(), &room_id) + .await + { + return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + } + + let server_admins: Vec<_> = self + .services + .rooms + .state_cache + .active_local_users_in_room(&admin_room) + .map(ToOwned::to_owned) + .collect() + .await; + + if !self + .services + .rooms + .state_cache + .room_members(&room_id) + .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) + .await + { + return Ok(RoomMessageEventContent::notice_markdown( + "There is not a single server admin in the room.", + )); + } + + let usernames = self + .body + .to_vec() + .drain(1..self.body.len().saturating_sub(1)) + .collect::>(); + + let mut user_ids: Vec = Vec::with_capacity(usernames.len()); + + for username in usernames { + match parse_active_local_user_id(self.services, username).await { + Ok(user_id) => { + // don't make the server service account join + if user_id == self.services.globals.server_user { + self.services + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "{username} is the server service account, skipping over" + ))) + .await + .ok(); + continue; + } + + user_ids.push(user_id); + }, + Err(e) => { + self.services + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "{username} is not a valid username, skipping over: 
{e}" + ))) + .await + .ok(); + continue; + }, + } + } + + let mut failed_joins: usize = 0; + let mut successful_joins: usize = 0; + + for user_id in user_ids { + match join_room_by_id_helper( + self.services, + &user_id, + &room_id, + Some(String::from(REASON)), + &servers, + None, + &None, + ) + .await + { + Ok(_res) => { + successful_joins = successful_joins.saturating_add(1); + }, + Err(e) => { + debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); + failed_joins = failed_joins.saturating_add(1); + }, + }; + } + + Ok(RoomMessageEventContent::notice_markdown(format!( + "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins failed.", + ))) +} + #[admin_command] pub(super) async fn force_join_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index e7bb5c73..e1568269 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -124,4 +124,19 @@ pub(super) enum UserCommand { RedactEvent { event_id: Box, }, + + /// - Force joins a specified list of local users to join the specified + /// room. + /// + /// Specify a codeblock of usernames. + /// + /// At least 1 server admin must be in the room to prevent abuse. + /// + /// Requires the `--yes-i-want-to-do-this` flag. 
+ ForceJoinListOfLocalUsers { + room_id: OwnedRoomOrAliasId, + + #[arg(long)] + yes_i_want_to_do_this: bool, + }, } From 567a4cb4417726d4400f81e1bedea12e46fac439 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 28 Oct 2024 19:06:53 -0400 Subject: [PATCH 0146/1248] implement admin command to force join all local users to room Signed-off-by: strawberry --- src/admin/user/commands.rs | 100 +++++++++++++++++++++++++++++++++++-- src/admin/user/mod.rs | 14 +++++- 2 files changed, 109 insertions(+), 5 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index d6946b4e..531ce490 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -18,7 +18,7 @@ use ruma::{ tag::{TagEvent, TagEventContent, TagInfo}, RoomAccountDataEventType, StateEventType, }, - EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, + EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, }; use crate::{ @@ -27,6 +27,7 @@ use crate::{ }; const AUTO_GEN_PASSWORD_LENGTH: usize = 25; +const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin."; #[admin_command] pub(super) async fn list_users(&self) -> Result { @@ -384,8 +385,6 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result Result { - const REASON: &str = "Bulk force joining this room as initiated by the server admin."; - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { return Ok(RoomMessageEventContent::text_plain( @@ -491,7 +490,100 @@ pub(super) async fn force_join_list_of_local_users( self.services, &user_id, &room_id, - Some(String::from(REASON)), + Some(String::from(BULK_JOIN_REASON)), + &servers, + None, + &None, + ) + .await + { + Ok(_res) => { + successful_joins = successful_joins.saturating_add(1); + }, + Err(e) => { + debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); + failed_joins = 
failed_joins.saturating_add(1); + }, + }; + } + + Ok(RoomMessageEventContent::notice_markdown(format!( + "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins failed.", + ))) +} + +#[admin_command] +pub(super) async fn force_join_all_local_users( + &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, +) -> Result { + if !yes_i_want_to_do_this { + return Ok(RoomMessageEventContent::notice_markdown( + "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force bulk join all local \ + users.", + )); + } + + let Ok(admin_room) = self.services.admin.get_admin_room().await else { + return Ok(RoomMessageEventContent::notice_markdown( + "There is not an admin room to check for server admins.", + )); + }; + + let (room_id, servers) = self + .services + .rooms + .alias + .resolve_with_servers(&room_id, None) + .await?; + + if !self + .services + .rooms + .state_cache + .server_in_room(self.services.globals.server_name(), &room_id) + .await + { + return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + } + + let server_admins: Vec<_> = self + .services + .rooms + .state_cache + .active_local_users_in_room(&admin_room) + .map(ToOwned::to_owned) + .collect() + .await; + + if !self + .services + .rooms + .state_cache + .room_members(&room_id) + .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) + .await + { + return Ok(RoomMessageEventContent::notice_markdown( + "There is not a single server admin in the room.", + )); + } + + let mut failed_joins: usize = 0; + let mut successful_joins: usize = 0; + + for user_id in &self + .services + .users + .list_local_users() + .map(UserId::to_owned) + .collect::>() + .await + { + match join_room_by_id_helper( + self.services, + user_id, + &room_id, + Some(String::from(BULK_JOIN_REASON)), &servers, None, &None, diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index e1568269..649cdfb8 100644 --- a/src/admin/user/mod.rs 
+++ b/src/admin/user/mod.rs @@ -130,7 +130,7 @@ pub(super) enum UserCommand { /// /// Specify a codeblock of usernames. /// - /// At least 1 server admin must be in the room to prevent abuse. + /// At least 1 server admin must be in the room to reduce abuse. /// /// Requires the `--yes-i-want-to-do-this` flag. ForceJoinListOfLocalUsers { @@ -139,4 +139,16 @@ pub(super) enum UserCommand { #[arg(long)] yes_i_want_to_do_this: bool, }, + + /// - Force joins all local users to the specified room. + /// + /// At least 1 server admin must be in the room to reduce abuse. + /// + /// Requires the `--yes-i-want-to-do-this` flag. + ForceJoinAllLocalUsers { + room_id: OwnedRoomOrAliasId, + + #[arg(long)] + yes_i_want_to_do_this: bool, + }, } From 354dc9e703a16dfca96bfe59cba0ba83e65725cf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 29 Oct 2024 00:08:41 +0000 Subject: [PATCH 0147/1248] add map accessor to Database; move cork interface Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 2 +- src/database/cork.rs | 16 +++++++++++++++- src/database/database.rs | 26 ++++++++++---------------- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 2aa6078f..db102858 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -838,7 +838,7 @@ pub(super) async fn database_stats( let map_name = map.as_ref().map_or(EMPTY, String::as_str); let mut out = String::new(); - for (name, map) in self.services.db.iter_maps() { + for (name, map) in self.services.db.iter() { if !map_name.is_empty() && *map_name != *name { continue; } diff --git a/src/database/cork.rs b/src/database/cork.rs index 26c520a2..5fe5fd7a 100644 --- a/src/database/cork.rs +++ b/src/database/cork.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::Engine; +use crate::{Database, Engine}; pub struct Cork { db: Arc, @@ -8,6 +8,20 @@ pub struct Cork { sync: bool, } +impl Database { + #[inline] + #[must_use] + pub fn 
cork(&self) -> Cork { Cork::new(&self.db, false, false) } + + #[inline] + #[must_use] + pub fn cork_and_flush(&self) -> Cork { Cork::new(&self.db, true, false) } + + #[inline] + #[must_use] + pub fn cork_and_sync(&self) -> Cork { Cork::new(&self.db, true, true) } +} + impl Cork { #[inline] pub(super) fn new(db: &Arc, flush: bool, sync: bool) -> Self { diff --git a/src/database/database.rs b/src/database/database.rs index 4c29c840..099df87d 100644 --- a/src/database/database.rs +++ b/src/database/database.rs @@ -1,9 +1,8 @@ use std::{ops::Index, sync::Arc}; -use conduit::{Result, Server}; +use conduit::{err, Result, Server}; use crate::{ - cork::Cork, maps, maps::{Maps, MapsKey, MapsVal}, Engine, Map, @@ -11,7 +10,7 @@ use crate::{ pub struct Database { pub db: Arc, - map: Maps, + maps: Maps, } impl Database { @@ -20,24 +19,19 @@ impl Database { let db = Engine::open(server)?; Ok(Arc::new(Self { db: db.clone(), - map: maps::open(&db)?, + maps: maps::open(&db)?, })) } #[inline] - #[must_use] - pub fn cork(&self) -> Cork { Cork::new(&self.db, false, false) } + pub fn get(&self, name: &str) -> Result<&Arc> { + self.maps + .get(name) + .ok_or_else(|| err!(Request(NotFound("column not found")))) + } #[inline] - #[must_use] - pub fn cork_and_flush(&self) -> Cork { Cork::new(&self.db, true, false) } - - #[inline] - #[must_use] - pub fn cork_and_sync(&self) -> Cork { Cork::new(&self.db, true, true) } - - #[inline] - pub fn iter_maps(&self) -> impl Iterator + Send + '_ { self.map.iter() } + pub fn iter(&self) -> impl Iterator + Send + '_ { self.maps.iter() } #[inline] #[must_use] @@ -52,7 +46,7 @@ impl Index<&str> for Database { type Output = Arc; fn index(&self, name: &str) -> &Self::Output { - self.map + self.maps .get(name) .expect("column in database does not exist") } From 8ed9d49b73923c39524d442ed9c7878d99ff2189 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 31 Oct 2024 14:41:35 -0400 Subject: [PATCH 0148/1248] skip new flakey complement test Signed-off-by: 
strawberry --- bin/complement | 2 +- tests/test_results/complement/test_results.jsonl | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/complement b/bin/complement index 601edb5a..a1db4b32 100755 --- a/bin/complement +++ b/bin/complement @@ -18,7 +18,7 @@ RESULTS_FILE="$3" OCI_IMAGE="complement-conduwuit:main" # Complement tests that are skipped due to flakiness/reliability issues -SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*' +SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*|TestUnbanViaInvite.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index ff695bb7..575a22fe 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -225,7 +225,6 @@ {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/good_connectivity"} {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation/stopped_server"} -{"Action":"pass","Test":"TestUnbanViaInvite"} {"Action":"fail","Test":"TestUnknownEndpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} {"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} From 240c78e8101da122e35986c7c1414b4f2d655d31 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 1 Nov 2024 00:54:21 -0400 Subject: [PATCH 0149/1248] strong-type URL for URL previews to Url type Signed-off-by: strawberry --- src/api/client/media.rs | 25 ++++++++++++++++++------- src/api/client/media_legacy.rs | 25 ++++++++++++++++--------- src/service/media/preview.rs | 32 ++++++++++++-------------------- 3 files changed, 46 
insertions(+), 36 deletions(-) diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 12012711..71693618 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -11,6 +11,7 @@ use conduit_service::{ media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, MXC_LENGTH}, Services, }; +use reqwest::Url; use ruma::{ api::client::{ authenticated_media::{ @@ -165,23 +166,33 @@ pub(crate) async fn get_media_preview_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let url = &body.url; - if !services.media.url_preview_allowed(url) { + let url = Url::parse(&body.url).map_err(|e| { + err!(Request(InvalidParam( + debug_warn!(%sender_user, %url, "Requested URL is not valid: {e}") + ))) + })?; + + if !services.media.url_preview_allowed(&url) { return Err!(Request(Forbidden( debug_warn!(%sender_user, %url, "URL is not allowed to be previewed") ))); } - let preview = services.media.get_url_preview(url).await.map_err(|error| { - err!(Request(Unknown( - debug_error!(%sender_user, %url, ?error, "Failed to fetch URL preview.") - ))) - })?; + let preview = services + .media + .get_url_preview(&url) + .await + .map_err(|error| { + err!(Request(Unknown( + debug_error!(%sender_user, %url, "Failed to fetch URL preview: {error}") + ))) + })?; serde_json::value::to_raw_value(&preview) .map(get_media_preview::v1::Response::from_raw_value) .map_err(|error| { err!(Request(Unknown( - debug_error!(%sender_user, %url, ?error, "Failed to parse URL preview.") + debug_error!(%sender_user, %url, "Failed to parse URL preview: {error}") ))) }) } diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index e87b9a2b..f6837462 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -8,6 +8,7 @@ use conduit::{ Err, Result, }; use conduit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN}; +use reqwest::Url; use ruma::{ api::client::media::{ create_content, 
get_content, get_content_as_filename, get_content_thumbnail, get_media_config, @@ -55,25 +56,31 @@ pub(crate) async fn get_media_preview_legacy_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let url = &body.url; - if !services.media.url_preview_allowed(url) { + let url = Url::parse(&body.url).map_err(|e| { + err!(Request(InvalidParam( + debug_warn!(%sender_user, %url, "Requested URL is not valid: {e}") + ))) + })?; + + if !services.media.url_preview_allowed(&url) { return Err!(Request(Forbidden( debug_warn!(%sender_user, %url, "URL is not allowed to be previewed") ))); } - let preview = services.media.get_url_preview(url).await.map_err(|e| { + let preview = services.media.get_url_preview(&url).await.map_err(|e| { err!(Request(Unknown( debug_error!(%sender_user, %url, "Failed to fetch a URL preview: {e}") ))) })?; - let res = serde_json::value::to_raw_value(&preview).map_err(|e| { - err!(Request(Unknown( - debug_error!(%sender_user, %url, "Failed to parse a URL preview: {e}") - ))) - })?; - - Ok(get_media_preview::v3::Response::from_raw_value(res)) + serde_json::value::to_raw_value(&preview) + .map(get_media_preview::v3::Response::from_raw_value) + .map_err(|error| { + err!(Request(Unknown( + debug_error!(%sender_user, %url, "Failed to parse URL preview: {error}") + ))) + }) } /// # `GET /_matrix/media/v1/preview_url` diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index 6b147383..acc9d8ed 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -1,6 +1,6 @@ use std::{io::Cursor, time::SystemTime}; -use conduit::{debug, utils, warn, Err, Result}; +use conduit::{debug, utils, Err, Result}; use conduit_core::implement; use image::ImageReader as ImgReader; use ipaddress::IPAddress; @@ -70,30 +70,30 @@ pub async fn download_image(&self, url: &str) -> Result { } #[implement(Service)] -pub async fn get_url_preview(&self, url: &str) -> Result { - if let Ok(preview) = 
self.db.get_url_preview(url).await { +pub async fn get_url_preview(&self, url: &Url) -> Result { + if let Ok(preview) = self.db.get_url_preview(url.as_str()).await { return Ok(preview); } // ensure that only one request is made per URL - let _request_lock = self.url_preview_mutex.lock(url).await; + let _request_lock = self.url_preview_mutex.lock(url.as_str()).await; - match self.db.get_url_preview(url).await { + match self.db.get_url_preview(url.as_str()).await { Ok(preview) => Ok(preview), Err(_) => self.request_url_preview(url).await, } } #[implement(Service)] -async fn request_url_preview(&self, url: &str) -> Result { - if let Ok(ip) = IPAddress::parse(url) { +async fn request_url_preview(&self, url: &Url) -> Result { + if let Ok(ip) = IPAddress::parse(url.host_str().expect("URL previously validated")) { if !self.services.globals.valid_cidr_range(&ip) { return Err!(BadServerResponse("Requesting from this address is forbidden")); } } let client = &self.services.client.url_preview; - let response = client.head(url).send().await?; + let response = client.head(url.as_str()).send().await?; if let Some(remote_addr) = response.remote_addr() { if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { @@ -111,12 +111,12 @@ async fn request_url_preview(&self, url: &str) -> Result { return Err!(Request(Unknown("Unknown Content-Type"))); }; let data = match content_type { - html if html.starts_with("text/html") => self.download_html(url).await?, - img if img.starts_with("image/") => self.download_image(url).await?, + html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, + img if img.starts_with("image/") => self.download_image(url.as_str()).await?, _ => return Err!(Request(Unknown("Unsupported Content-Type"))), }; - self.set_url_preview(url, &data).await?; + self.set_url_preview(url.as_str(), &data).await?; Ok(data) } @@ -159,15 +159,7 @@ async fn download_html(&self, url: &str) -> Result { } #[implement(Service)] -pub fn 
url_preview_allowed(&self, url_str: &str) -> bool { - let url: Url = match Url::parse(url_str) { - Ok(u) => u, - Err(e) => { - warn!("Failed to parse URL from a str: {}", e); - return false; - }, - }; - +pub fn url_preview_allowed(&self, url: &Url) -> bool { if ["http", "https"] .iter() .all(|&scheme| scheme != url.scheme().to_lowercase()) From 6cbaef2d12b24765dc16c0478b58c2e76dd972cd Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 2 Nov 2024 13:12:14 -0400 Subject: [PATCH 0150/1248] always set RUST_BACKTRACE=full in OCI images Signed-off-by: strawberry --- nix/pkgs/complement/default.nix | 1 + nix/pkgs/oci-image/default.nix | 3 +++ 2 files changed, 4 insertions(+) diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index 80e9ce27..399c4449 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -96,6 +96,7 @@ dockerTools.buildImage { Env = [ "SSL_CERT_FILE=/complement/ca/ca.crt" "CONDUWUIT_CONFIG=${./config.toml}" + "RUST_BACKTRACE=full" ]; ExposedPorts = { diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 5078523b..9b641310 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -24,5 +24,8 @@ dockerTools.buildLayeredImage { Cmd = [ "${lib.getExe main}" ]; + Env = [ + "RUST_BACKTRACE=full" + ]; }; } From ee6af6c90e5f941584429b9e890bffa358b23720 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 2 Nov 2024 18:46:20 -0400 Subject: [PATCH 0151/1248] drop report delay response range to 2-5 secs Signed-off-by: strawberry --- src/api/client/report.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 143c13e5..32a254d8 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -180,7 +180,7 @@ async fn is_event_report_valid( /// random delay sending a response per spec suggestion regarding /// enumerating for potential events existing in our server. 
async fn delay_response() { - let time_to_wait = rand::thread_rng().gen_range(3..10); + let time_to_wait = rand::thread_rng().gen_range(2..5); debug_info!("Got successful /report request, waiting {time_to_wait} seconds before sending successful response."); sleep(Duration::from_secs(time_to_wait)).await; } From 9466aeb08876472f49da6ce4b2fb673ff3598c04 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 2 Nov 2024 18:52:25 -0400 Subject: [PATCH 0152/1248] remove some unnecessary debug prints on notices Signed-off-by: strawberry --- src/api/client/account.rs | 18 ++++++++++-------- src/api/client/report.rs | 25 ++++++++++++++----------- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 97d36839..87e73c5a 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -100,8 +100,8 @@ pub(crate) async fn register_route( if !services.globals.allow_registration() && body.appservice_info.is_none() { info!( "Registration disabled and request not from known appservice, rejecting registration attempt for username \ - {:?}", - body.username + \"{}\"", + body.username.as_deref().unwrap_or("") ); return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration has been disabled.")); } @@ -114,8 +114,8 @@ pub(crate) async fn register_route( { info!( "Guest registration disabled / registration enabled with token configured, rejecting guest registration \ - attempt, initial device name: {:?}", - body.initial_device_display_name + attempt, initial device name: \"{}\"", + body.initial_device_display_name.as_deref().unwrap_or("") ); return Err(Error::BadRequest( ErrorKind::GuestAccessForbidden, @@ -128,8 +128,8 @@ pub(crate) async fn register_route( if is_guest && services.users.count().await < 2 { warn!( "Guest account attempted to register before a real admin user has been registered, rejecting \ - registration. 
Guest's initial device name: {:?}", - body.initial_device_display_name + registration. Guest's initial device name: \"{}\"", + body.initial_device_display_name.as_deref().unwrap_or("") ); return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration temporarily disabled.")); } @@ -312,12 +312,14 @@ pub(crate) async fn register_route( debug_info!(%user_id, %device_id, "User account was created"); - let device_display_name = body.initial_device_display_name.clone().unwrap_or_default(); + let device_display_name = body.initial_device_display_name.as_deref().unwrap_or(""); // log in conduit admin channel if a non-guest user registered if body.appservice_info.is_none() && !is_guest { if !device_display_name.is_empty() { - info!("New user \"{user_id}\" registered on this server with device display name: {device_display_name}"); + info!( + "New user \"{user_id}\" registered on this server with device display name: \"{device_display_name}\"" + ); if services.globals.config.admin_room_notices { services diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 32a254d8..e20fa8c2 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -33,10 +33,18 @@ pub(crate) async fn report_room_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); info!( - "Received room report by user {sender_user} for room {} with reason: {:?}", - body.room_id, body.reason + "Received room report by user {sender_user} for room {} with reason: \"{}\"", + body.room_id, + body.reason.as_deref().unwrap_or("") ); + if body.reason.as_ref().is_some_and(|s| s.len() > 750) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Reason too long, should be 750 characters or fewer", + )); + }; + delay_response().await; if !services @@ -50,13 +58,6 @@ pub(crate) async fn report_room_route( ))); } - if body.reason.as_ref().is_some_and(|s| s.len() > 750) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Reason too long, should be 
750 characters or fewer", - )); - }; - // send admin room message that we received the report with an @room ping for // urgency services @@ -85,8 +86,10 @@ pub(crate) async fn report_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); info!( - "Received event report by user {sender_user} for room {} and event ID {}, with reason: {:?}", - body.room_id, body.event_id, body.reason + "Received event report by user {sender_user} for room {} and event ID {}, with reason: \"{}\"", + body.room_id, + body.event_id, + body.reason.as_deref().unwrap_or("") ); delay_response().await; From 6f37a251fb5945c9f431ffeb3f32fcb5d3bbc470 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 2 Nov 2024 20:55:40 -0400 Subject: [PATCH 0153/1248] allow taking room aliases for `auto_join_rooms` config option Signed-off-by: strawberry --- src/admin/user/commands.rs | 20 ++++++++++++++++---- src/api/client/account.rs | 13 +++++++++---- src/core/config/mod.rs | 11 ++++++----- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 531ce490..444a7f37 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -108,24 +108,29 @@ pub(super) async fn create_user(&self, username: String, password: Option { + self.services + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "Failed to automatically join room {room} for user {user_id}: {e}" + ))) + .await + .ok(); // don't return this error so we don't fail registrations error!("Failed to automatically join room {room} for user {user_id}: {e}"); }, diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 87e73c5a..c340f529 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -398,23 +398,28 @@ pub(crate) async fn register_route( && (services.globals.allow_guests_auto_join_rooms() || !is_guest) { for room in &services.globals.config.auto_join_rooms { + let Ok(room_id) = 
services.rooms.alias.resolve(room).await else { + error!("Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"); + continue; + }; + if !services .rooms .state_cache - .server_in_room(services.globals.server_name(), room) + .server_in_room(services.globals.server_name(), &room_id) .await { warn!("Skipping room {room} to automatically join as we have never joined before."); continue; } - if let Some(room_id_server_name) = room.server_name() { + if let Some(room_server_name) = room.server_name() { if let Err(e) = join_room_by_id_helper( &services, &user_id, - room, + &room_id, Some("Automatically joining this room upon registration".to_owned()), - &[room_id_server_name.to_owned(), services.globals.server_name().to_owned()], + &[services.globals.server_name().to_owned(), room_server_name.to_owned()], None, &body.appservice_info, ) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 512cb48b..a6216da2 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -18,7 +18,8 @@ pub use figment::{value::Value as FigmentValue, Figment}; use itertools::Itertools; use regex::RegexSet; use ruma::{ - api::client::discovery::discover_support::ContactRole, OwnedRoomId, OwnedServerName, OwnedUserId, RoomVersionId, + api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, + RoomVersionId, }; use serde::{de::IgnoredAny, Deserialize}; use url::Url; @@ -653,13 +654,13 @@ pub struct Config { #[serde(default = "default_turn_ttl")] pub turn_ttl: u64, - /// List/vector of room **IDs** that conduwuit will make newly registered - /// users join. The room IDs specified must be rooms that you have joined - /// at least once on the server, and must be public. + /// List/vector of room IDs or room aliases that conduwuit will make newly + /// registered users join. The rooms specified must be rooms that you + /// have joined at least once on the server, and must be public. /// /// No default. 
#[serde(default = "Vec::new")] - pub auto_join_rooms: Vec, + pub auto_join_rooms: Vec, /// Config option to automatically deactivate the account of any user who /// attempts to join a: From 038787106365cebb0c538af043cc90028e200cc3 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 2 Nov 2024 21:20:36 -0400 Subject: [PATCH 0154/1248] add workaround for matrix-appservice-irc using historical localparts see https://github.com/matrix-org/matrix-appservice-irc/issues/1780 Signed-off-by: strawberry --- src/api/client/account.rs | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index c340f529..5ed4b312 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -48,10 +48,19 @@ pub(crate) async fn get_register_available_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { + // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue + let is_matrix_appservice_irc = body.appservice_info.as_ref().is_some_and(|appservice| { + appservice.registration.id == "irc" + || appservice.registration.id.contains("matrix-appservice-irc") + || appservice.registration.id.contains("matrix_appservice_irc") + }); + // Validate user id let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), services.globals.server_name()) .ok() - .filter(|user_id| !user_id.is_historical() && services.globals.user_is_local(user_id)) + .filter(|user_id| { + (!user_id.is_historical() || is_matrix_appservice_irc) && services.globals.user_is_local(user_id) + }) .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; // Check if username is creative enough @@ -134,12 +143,22 @@ pub(crate) async fn register_route( return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration temporarily disabled.")); } + // workaround for 
https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue + let is_matrix_appservice_irc = body.appservice_info.as_ref().is_some_and(|appservice| { + appservice.registration.id == "irc" + || appservice.registration.id.contains("matrix-appservice-irc") + || appservice.registration.id.contains("matrix_appservice_irc") + }); + let user_id = match (&body.username, is_guest) { (Some(username), false) => { let proposed_user_id = UserId::parse_with_server_name(username.to_lowercase(), services.globals.server_name()) .ok() - .filter(|user_id| !user_id.is_historical() && services.globals.user_is_local(user_id)) + .filter(|user_id| { + (!user_id.is_historical() || is_matrix_appservice_irc) + && services.globals.user_is_local(user_id) + }) .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; if services.users.exists(&proposed_user_id).await { From 1fbfc983e9606752770286f42d5812a34a820e63 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 29 Oct 2024 10:53:56 +0000 Subject: [PATCH 0155/1248] optimize FedDest::Named port Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 39 +++++++++++++++++++++++++--------- src/service/resolver/fed.rs | 26 +++++++++++++++++------ src/service/resolver/tests.rs | 4 ++-- 3 files changed, 51 insertions(+), 18 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 660498f7..61eedca5 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -9,9 +9,9 @@ use hickory_resolver::{error::ResolveError, lookup::SrvLookup}; use ipaddress::IPAddress; use ruma::ServerName; -use crate::resolver::{ +use super::{ cache::{CachedDest, CachedOverride}, - fed::{add_port_to_hostname, get_ip_with_port, FedDest}, + fed::{add_port_to_hostname, get_ip_with_port, FedDest, PortString}, }; #[derive(Clone, Debug)] @@ -77,12 +77,12 @@ impl super::Service { let host = if let Ok(addr) = host.parse::() { FedDest::Literal(addr) } 
else if let Ok(addr) = host.parse::() { - FedDest::Named(addr.to_string(), ":8448".to_owned()) + FedDest::Named(addr.to_string(), FedDest::default_port()) } else if let Some(pos) = host.find(':') { let (host, port) = host.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) + FedDest::Named(host.to_owned(), port.try_into().unwrap_or_else(|_| FedDest::default_port())) } else { - FedDest::Named(host, ":8448".to_owned()) + FedDest::Named(host, FedDest::default_port()) }; debug!("Actual destination: {actual_dest:?} hostname: {host:?}"); @@ -103,7 +103,10 @@ impl super::Service { let (host, port) = dest.as_str().split_at(pos); self.conditional_query_and_cache_override(host, host, port.parse::().unwrap_or(8448), cache) .await?; - Ok(FedDest::Named(host.to_owned(), port.to_owned())) + Ok(FedDest::Named( + host.to_owned(), + port.try_into().unwrap_or_else(|_| FedDest::default_port()), + )) } async fn actual_dest_3(&self, host: &mut String, cache: bool, delegated: String) -> Result { @@ -136,7 +139,10 @@ impl super::Service { let (host, port) = delegated.split_at(pos); self.conditional_query_and_cache_override(host, host, port.parse::().unwrap_or(8448), cache) .await?; - Ok(FedDest::Named(host.to_owned(), port.to_owned())) + Ok(FedDest::Named( + host.to_owned(), + port.try_into().unwrap_or_else(|_| FedDest::default_port()), + )) } async fn actual_dest_3_3(&self, cache: bool, delegated: String, overrider: FedDest) -> Result { @@ -145,7 +151,13 @@ impl super::Service { self.conditional_query_and_cache_override(&delegated, &overrider.hostname(), force_port.unwrap_or(8448), cache) .await?; if let Some(port) = force_port { - Ok(FedDest::Named(delegated, format!(":{port}"))) + Ok(FedDest::Named( + delegated, + format!(":{port}") + .as_str() + .try_into() + .unwrap_or_else(|_| FedDest::default_port()), + )) } else { Ok(add_port_to_hostname(&delegated)) } @@ -164,7 +176,11 @@ impl super::Service { self.conditional_query_and_cache_override(host, &overrider.hostname(), 
force_port.unwrap_or(8448), cache) .await?; if let Some(port) = force_port { - Ok(FedDest::Named(host.to_owned(), format!(":{port}"))) + let port = format!(":{port}"); + Ok(FedDest::Named( + host.to_owned(), + PortString::from(port.as_str()).unwrap_or_else(|_| FedDest::default_port()), + )) } else { Ok(add_port_to_hostname(host)) } @@ -269,7 +285,10 @@ impl super::Service { srv.iter().next().map(|result| { FedDest::Named( result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()), + format!(":{}", result.port()) + .as_str() + .try_into() + .unwrap_or_else(|_| FedDest::default_port()), ) }) } diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index 79f71f13..9c348b47 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -4,12 +4,19 @@ use std::{ net::{IpAddr, SocketAddr}, }; +use arrayvec::ArrayString; + #[derive(Clone, Debug, PartialEq, Eq)] pub enum FedDest { Literal(SocketAddr), - Named(String, String), + Named(String, PortString), } +/// numeric or service-name +pub type PortString = ArrayString<16>; + +const DEFAULT_PORT: &str = ":8448"; + pub(crate) fn get_ip_with_port(dest_str: &str) -> Option { if let Ok(dest) = dest_str.parse::() { Some(FedDest::Literal(dest)) @@ -20,13 +27,16 @@ pub(crate) fn get_ip_with_port(dest_str: &str) -> Option { } } -pub(crate) fn add_port_to_hostname(dest_str: &str) -> FedDest { - let (host, port) = match dest_str.find(':') { - None => (dest_str, ":8448"), - Some(pos) => dest_str.split_at(pos), +pub(crate) fn add_port_to_hostname(dest: &str) -> FedDest { + let (host, port) = match dest.find(':') { + None => (dest, DEFAULT_PORT), + Some(pos) => dest.split_at(pos), }; - FedDest::Named(host.to_owned(), port.to_owned()) + FedDest::Named( + host.to_owned(), + PortString::from(port).unwrap_or_else(|_| FedDest::default_port()), + ) } impl FedDest { @@ -60,6 +70,10 @@ impl FedDest { Self::Named(_, port) => port[1..].parse().ok(), } } + + #[inline] + #[must_use] 
+ pub fn default_port() -> PortString { PortString::from(DEFAULT_PORT).expect("default port string") } } impl fmt::Display for FedDest { diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs index 55cf0345..870f5eab 100644 --- a/src/service/resolver/tests.rs +++ b/src/service/resolver/tests.rs @@ -30,7 +30,7 @@ fn ips_keep_custom_ports() { fn hostnames_get_default_ports() { assert_eq!( add_port_to_hostname("example.com"), - FedDest::Named(String::from("example.com"), String::from(":8448")) + FedDest::Named(String::from("example.com"), ":8448".try_into().unwrap()) ); } @@ -38,6 +38,6 @@ fn hostnames_get_default_ports() { fn hostnames_keep_custom_ports() { assert_eq!( add_port_to_hostname("example.com:1337"), - FedDest::Named(String::from("example.com"), String::from(":1337")) + FedDest::Named(String::from("example.com"), ":1337".try_into().unwrap()) ); } From ad117641b88330aff3d1ba7ced57939df7862659 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 29 Oct 2024 00:08:41 +0000 Subject: [PATCH 0156/1248] add tuple-apply macro with length argument for now Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 26b0484e..b8640f3a 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -53,6 +53,25 @@ macro_rules! extract_variant { }; } +#[macro_export] +macro_rules! apply { + (1, $($idx:tt)+) => { + |t| (($($idx)+)(t.0),) + }; + + (2, $($idx:tt)+) => { + |t| (($($idx)+)(t.0), ($($idx)+)(t.1),) + }; + + (3, $($idx:tt)+) => { + |t| (($($idx)+)(t.0), ($($idx)+)(t.1), ($($idx)+)(t.2),) + }; + + (4, $($idx:tt)+) => { + |t| (($($idx)+)(t.0), ($($idx)+)(t.1), ($($idx)+)(t.2), ($($idx)+4)(t.3)) + }; +} + #[macro_export] macro_rules! at { ($idx:tt) => { @@ -112,6 +131,14 @@ macro_rules! is_not_empty { }; } +/// Functor for equality i.e. (a, b).map(is_equal!()) +#[macro_export] +macro_rules! 
is_equal { + () => { + |a, b| a == b + }; +} + /// Functor for truthy #[macro_export] macro_rules! is_true { From ed76797b55c8f32e11fcb0a3b8d0d29a4b93b6b8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 29 Oct 2024 03:10:18 +0000 Subject: [PATCH 0157/1248] add raw_ overloads for prefix/from counting Signed-off-by: Jason Volk --- src/database/database.rs | 3 +++ src/database/map/count.rs | 24 ++++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/src/database/database.rs b/src/database/database.rs index 099df87d..bf8c8855 100644 --- a/src/database/database.rs +++ b/src/database/database.rs @@ -33,6 +33,9 @@ impl Database { #[inline] pub fn iter(&self) -> impl Iterator + Send + '_ { self.maps.iter() } + #[inline] + pub fn keys(&self) -> impl Iterator + Send + '_ { self.maps.keys() } + #[inline] #[must_use] pub fn is_read_only(&self) -> bool { self.db.secondary || self.db.read_only } diff --git a/src/database/map/count.rs b/src/database/map/count.rs index dab45b7a..3e92279c 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -21,6 +21,18 @@ where self.keys_from_raw(from).count() } +/// Count the number of entries in the map starting from a lower-bound. +/// +/// - From is a raw +#[implement(super::Map)] +#[inline] +pub fn raw_count_from<'a, P>(&'a self, from: &'a P) -> impl Future + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, +{ + self.raw_keys_from(from).count() +} + /// Count the number of entries in the map matching a prefix. /// /// - Prefix is structured key @@ -32,3 +44,15 @@ where { self.keys_prefix_raw(prefix).count() } + +/// Count the number of entries in the map matching a prefix. 
+/// +/// - Prefix is raw +#[implement(super::Map)] +#[inline] +pub fn raw_count_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Future + Send + 'a +where + P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, +{ + self.raw_keys_prefix(prefix).count() +} From a7cb1c59518e8398e8a6aaedd784b9b7a222a2fd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 29 Oct 2024 23:31:53 +0000 Subject: [PATCH 0158/1248] slightly optimize request signing/verifying Signed-off-by: Jason Volk --- src/api/router/auth.rs | 40 +++++++----- src/service/sending/send.rs | 117 ++++++++++++++++++------------------ 2 files changed, 86 insertions(+), 71 deletions(-) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 31e71f2f..2552dded 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -220,20 +220,32 @@ async fn auth_server(services: &Services, request: &mut Request, body: Option<&C .expect("all requests have a path") .to_string(); - let signature: [Member; 1] = [(x_matrix.key.to_string(), Value::String(x_matrix.sig.to_string()))]; - let signatures: [Member; 1] = [(origin.to_string(), Value::Object(signature.into()))]; - let authorization: [Member; 5] = [ - ("destination".into(), Value::String(destination.into())), - ("method".into(), Value::String(request.parts.method.to_string())), - ("origin".into(), Value::String(origin.to_string())), - ("signatures".into(), Value::Object(signatures.into())), - ("uri".into(), Value::String(signature_uri)), - ]; + let signature: [Member; 1] = [(x_matrix.key.as_str().into(), Value::String(x_matrix.sig.to_string()))]; - let mut authorization: Object = authorization.into(); - if let Some(body) = body { - authorization.insert("content".to_owned(), body.clone()); - } + let signatures: [Member; 1] = [(origin.as_str().into(), Value::Object(signature.into()))]; + + let authorization: Object = if let Some(body) = body.cloned() { + let authorization: [Member; 6] = [ + ("content".into(), body), + ("destination".into(), 
Value::String(destination.into())), + ("method".into(), Value::String(request.parts.method.as_str().into())), + ("origin".into(), Value::String(origin.as_str().into())), + ("signatures".into(), Value::Object(signatures.into())), + ("uri".into(), Value::String(signature_uri)), + ]; + + authorization.into() + } else { + let authorization: [Member; 5] = [ + ("destination".into(), Value::String(destination.into())), + ("method".into(), Value::String(request.parts.method.as_str().into())), + ("origin".into(), Value::String(origin.as_str().into())), + ("signatures".into(), Value::Object(signatures.into())), + ("uri".into(), Value::String(signature_uri)), + ]; + + authorization.into() + }; let key = services .server_keys @@ -242,7 +254,7 @@ async fn auth_server(services: &Services, request: &mut Request, body: Option<&C .map_err(|e| err!(Request(Forbidden(warn!("Failed to fetch signing keys: {e}")))))?; let keys: PubKeys = [(x_matrix.key.to_string(), key.key)].into(); - let keys: PubKeyMap = [(origin.to_string(), keys)].into(); + let keys: PubKeyMap = [(origin.as_str().into(), keys)].into(); if let Err(e) = ruma::signatures::verify_json(&keys, authorization) { debug_error!("Failed to verify federation request from {origin}: {e}"); if request.parts.uri.to_string().contains('@') { diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 2fbb3919..939d6e73 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -14,7 +14,7 @@ use ruma::{ }, serde::Base64, server_util::authorization::XMatrix, - ServerName, + CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, }; use crate::{ @@ -74,7 +74,7 @@ impl super::Service { .try_into_http_request::>(actual.string().as_str(), SATIR, &VERSIONS) .map_err(|e| err!(BadServerResponse("Invalid destination: {e:?}")))?; - self.sign_request::(dest, &mut http_request); + self.sign_request(&mut http_request, dest); let request = Request::try_from(http_request)?; 
self.validate_url(request.url())?; @@ -178,68 +178,71 @@ where } #[implement(super::Service)] -fn sign_request(&self, dest: &ServerName, http_request: &mut http::Request>) -where - T: OutgoingRequest + Debug + Send, -{ - let mut req_map = serde_json::Map::with_capacity(8); - if !http_request.body().is_empty() { - req_map.insert( - "content".to_owned(), - serde_json::from_slice(http_request.body()).expect("body is valid json, we just created it"), - ); +fn sign_request(&self, http_request: &mut http::Request>, dest: &ServerName) { + type Member = (String, Value); + type Value = CanonicalJsonValue; + type Object = CanonicalJsonObject; + + let origin = self.services.globals.server_name(); + let body = http_request.body(); + let uri = http_request + .uri() + .path_and_query() + .expect("http::Request missing path_and_query"); + + let mut req: Object = if !body.is_empty() { + let content: CanonicalJsonValue = serde_json::from_slice(body).expect("failed to serialize body"); + + let authorization: [Member; 5] = [ + ("content".into(), content), + ("destination".into(), dest.as_str().into()), + ("method".into(), http_request.method().as_str().into()), + ("origin".into(), origin.as_str().into()), + ("uri".into(), uri.to_string().into()), + ]; + + authorization.into() + } else { + let authorization: [Member; 4] = [ + ("destination".into(), dest.as_str().into()), + ("method".into(), http_request.method().as_str().into()), + ("origin".into(), origin.as_str().into()), + ("uri".into(), uri.to_string().into()), + ]; + + authorization.into() }; - req_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); - req_map.insert( - "uri".to_owned(), - http_request - .uri() - .path_and_query() - .expect("all requests have a path") - .to_string() - .into(), - ); - req_map.insert("origin".to_owned(), self.services.globals.server_name().to_string().into()); - req_map.insert("destination".to_owned(), dest.as_str().into()); - - let mut req_json = 
serde_json::from_value(req_map.into()).expect("valid JSON is valid BTreeMap"); self.services .server_keys - .sign_json(&mut req_json) - .expect("our request json is what ruma expects"); + .sign_json(&mut req) + .expect("request signing failed"); - let req_json: serde_json::Map = - serde_json::from_slice(&serde_json::to_vec(&req_json).unwrap()).unwrap(); - - let signatures = req_json["signatures"] + let signatures = req["signatures"] .as_object() - .expect("signatures object") + .and_then(|object| object[origin.as_str()].as_object()) + .expect("origin signatures object"); + + let key: &ServerSigningKeyId = signatures + .keys() + .next() + .map(|k| k.as_str().try_into()) + .expect("at least one signature from this origin") + .expect("keyid is json string"); + + let sig: Base64 = signatures .values() - .map(|v| { - v.as_object() - .expect("server signatures object") - .iter() - .map(|(k, v)| (k, v.as_str().expect("server signature string"))) - }); + .next() + .map(|s| s.as_str().map(Base64::parse)) + .expect("at least one signature from this origin") + .expect("signature is json string") + .expect("signature is valid base64"); - for signature_server in signatures { - for s in signature_server { - let key = - s.0.as_str() - .try_into() - .expect("valid homeserver signing key ID"); - let sig = Base64::parse(s.1).expect("valid base64"); + let x_matrix = XMatrix::new(origin.into(), dest.into(), key.into(), sig); + let authorization = HeaderValue::from(&x_matrix); + let authorization = http_request + .headers_mut() + .insert(AUTHORIZATION, authorization); - http_request.headers_mut().insert( - AUTHORIZATION, - HeaderValue::from(&XMatrix::new( - self.services.globals.server_name().to_owned(), - dest.to_owned(), - key, - sig, - )), - ); - } - } + debug_assert!(authorization.is_none(), "Authorization header already present"); } From 9775694423943135bc6015ebb102a21288ec05a1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 30 Oct 2024 05:08:04 +0000 Subject: [PATCH 
0159/1248] inline database stream interface functions lt 64B Signed-off-by: Jason Volk --- src/database/stream.rs | 3 +++ src/database/stream/items.rs | 2 ++ src/database/stream/items_rev.rs | 2 ++ src/database/stream/keys.rs | 3 +++ src/database/stream/keys_rev.rs | 3 +++ 5 files changed, 13 insertions(+) diff --git a/src/database/stream.rs b/src/database/stream.rs index d9b74215..a2a72e44 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -71,6 +71,7 @@ impl<'a> State<'a> { self } + #[inline] fn seek_fwd(&mut self) { if !exchange(&mut self.init, false) { self.inner.next(); @@ -79,6 +80,7 @@ impl<'a> State<'a> { } } + #[inline] fn seek_rev(&mut self) { if !exchange(&mut self.init, false) { self.inner.prev(); @@ -95,6 +97,7 @@ impl<'a> State<'a> { fn status(&self) -> Option { self.inner.status().map_err(map_err).err() } + #[inline] fn valid(&self) -> bool { self.inner.valid() } } diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index 31d5e9e8..54f8bc5c 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -28,6 +28,7 @@ impl<'a> Cursor<'a, KeyVal<'a>> for Items<'a> { fn fetch(&self) -> Option> { self.state.fetch().map(keyval_longevity) } + #[inline] fn seek(&mut self) { self.state.seek_fwd(); } } @@ -40,5 +41,6 @@ impl<'a> Stream for Items<'a> { } impl FusedStream for Items<'_> { + #[inline] fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } } diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index ab57a250..26492db8 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -28,6 +28,7 @@ impl<'a> Cursor<'a, KeyVal<'a>> for ItemsRev<'a> { fn fetch(&self) -> Option> { self.state.fetch().map(keyval_longevity) } + #[inline] fn seek(&mut self) { self.state.seek_rev(); } } @@ -40,5 +41,6 @@ impl<'a> Stream for ItemsRev<'a> { } impl FusedStream for ItemsRev<'_> { + #[inline] fn is_terminated(&self) -> bool { 
!self.state.init && !self.state.valid() } } diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index 1c5d12e3..91884c8d 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -26,8 +26,10 @@ impl<'a> Keys<'a> { impl<'a> Cursor<'a, Key<'a>> for Keys<'a> { fn state(&self) -> &State<'a> { &self.state } + #[inline] fn fetch(&self) -> Option> { self.state.fetch_key().map(slice_longevity) } + #[inline] fn seek(&mut self) { self.state.seek_fwd(); } } @@ -40,5 +42,6 @@ impl<'a> Stream for Keys<'a> { } impl FusedStream for Keys<'_> { + #[inline] fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } } diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index 26707483..59f66c2e 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -26,8 +26,10 @@ impl<'a> KeysRev<'a> { impl<'a> Cursor<'a, Key<'a>> for KeysRev<'a> { fn state(&self) -> &State<'a> { &self.state } + #[inline] fn fetch(&self) -> Option> { self.state.fetch_key().map(slice_longevity) } + #[inline] fn seek(&mut self) { self.state.seek_rev(); } } @@ -40,5 +42,6 @@ impl<'a> Stream for KeysRev<'a> { } impl FusedStream for KeysRev<'_> { + #[inline] fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } } From 0eb67cfea00c19e7d0cf1981acc805986dfd05d6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 30 Oct 2024 06:41:03 +0000 Subject: [PATCH 0160/1248] additional bool extensions for Result/Option conversion Signed-off-by: Jason Volk --- src/core/utils/bool.rs | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/src/core/utils/bool.rs b/src/core/utils/bool.rs index d5fa85aa..e9f399d4 100644 --- a/src/core/utils/bool.rs +++ b/src/core/utils/bool.rs @@ -2,6 +2,23 @@ /// Boolean extensions and chain.starters pub trait BoolExt { + #[must_use] + fn clone_or(self, err: T, t: &T) -> T; + + #[must_use] + fn copy_or(self, err: T, t: 
T) -> T; + + #[must_use] + fn expect(self, msg: &str) -> Self; + + #[must_use] + fn expect_false(self, msg: &str) -> Self; + + fn into_option(self) -> Option<()>; + + #[allow(clippy::result_unit_err)] + fn into_result(self) -> Result<(), ()>; + fn map T>(self, f: F) -> T where Self: Sized; @@ -22,6 +39,24 @@ pub trait BoolExt { } impl BoolExt for bool { + #[inline] + fn clone_or(self, err: T, t: &T) -> T { self.map_or(err, || t.clone()) } + + #[inline] + fn copy_or(self, err: T, t: T) -> T { self.map_or(err, || t) } + + #[inline] + fn expect(self, msg: &str) -> Self { self.then_some(true).expect(msg) } + + #[inline] + fn expect_false(self, msg: &str) -> Self { (!self).then_some(false).expect(msg) } + + #[inline] + fn into_option(self) -> Option<()> { self.then_some(()) } + + #[inline] + fn into_result(self) -> Result<(), ()> { self.ok_or(()) } + #[inline] fn map T>(self, f: F) -> T where @@ -40,10 +75,10 @@ impl BoolExt for bool { fn map_or_else T>(self, err: F, f: F) -> T { self.then(f).unwrap_or_else(err) } #[inline] - fn ok_or(self, err: E) -> Result<(), E> { self.then_some(()).ok_or(err) } + fn ok_or(self, err: E) -> Result<(), E> { self.into_option().ok_or(err) } #[inline] - fn ok_or_else E>(self, err: F) -> Result<(), E> { self.then_some(()).ok_or_else(err) } + fn ok_or_else E>(self, err: F) -> Result<(), E> { self.into_option().ok_or_else(err) } #[inline] fn or T>(self, f: F) -> Option { (!self).then(f) } From 7fcc6d11a4993a174d4b0998e276bdc150594a15 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 30 Oct 2024 11:04:23 +0000 Subject: [PATCH 0161/1248] de-wrap state_accessor.server_can_see_event Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/mod.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 561db18a..a2cc27e8 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ 
-160,20 +160,18 @@ impl Service { /// Whether a server is allowed to see an event through federation, based on /// the room's history_visibility at that event's state. #[tracing::instrument(skip(self, origin, room_id, event_id))] - pub async fn server_can_see_event( - &self, origin: &ServerName, room_id: &RoomId, event_id: &EventId, - ) -> Result { + pub async fn server_can_see_event(&self, origin: &ServerName, room_id: &RoomId, event_id: &EventId) -> bool { let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return Ok(true); + return true; }; if let Some(visibility) = self .server_visibility_cache .lock() - .unwrap() + .expect("locked") .get_mut(&(origin.to_owned(), shortstatehash)) { - return Ok(*visibility); + return *visibility; } let history_visibility = self @@ -211,10 +209,10 @@ impl Service { self.server_visibility_cache .lock() - .unwrap() + .expect("locked") .insert((origin.to_owned(), shortstatehash), visibility); - Ok(visibility) + visibility } /// Whether a user is allowed to see an event, based on @@ -228,7 +226,7 @@ impl Service { if let Some(visibility) = self .user_visibility_cache .lock() - .unwrap() + .expect("locked") .get_mut(&(user_id.to_owned(), shortstatehash)) { return *visibility; @@ -262,7 +260,7 @@ impl Service { self.user_visibility_cache .lock() - .unwrap() + .expect("locked") .insert((user_id.to_owned(), shortstatehash), visibility); visibility From e49aee61c1f276fd613b8c61cde158b778c40763 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 30 Oct 2024 07:01:50 +0000 Subject: [PATCH 0162/1248] consolidate and parallelize api/server access check prologues Signed-off-by: Jason Volk --- Cargo.toml | 2 +- src/api/server/backfill.rs | 29 +++++--------- src/api/server/event.rs | 39 ++++++------------ src/api/server/event_auth.rs | 25 ++++-------- src/api/server/get_missing_events.rs | 32 +++++---------- src/api/server/mod.rs | 3 ++ src/api/server/state.rs | 27 +++++-------- src/api/server/state_ids.rs | 29 
+++++--------- src/api/server/utils.rs | 60 ++++++++++++++++++++++++++++ 9 files changed, 123 insertions(+), 123 deletions(-) create mode 100644 src/api/server/utils.rs diff --git a/Cargo.toml b/Cargo.toml index e406c9e1..043790f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -213,7 +213,7 @@ features = [ [workspace.dependencies.futures] version = "0.3.30" default-features = false -features = ["std"] +features = ["std", "async-await"] [workspace.dependencies.tokio] version = "1.40.0" diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 088b891a..281bf2a2 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -2,13 +2,13 @@ use std::cmp; use axum::extract::State; use conduit::{ - is_equal_to, utils::{IterStream, ReadyExt}, - Err, PduCount, Result, + PduCount, Result, }; use futures::{FutureExt, StreamExt}; use ruma::{api::federation::backfill::get_backfill, uint, user_id, MilliSecondsSinceUnixEpoch}; +use super::AccessCheck; use crate::Ruma; /// # `GET /_matrix/federation/v1/backfill/` @@ -18,24 +18,14 @@ use crate::Ruma; pub(crate) async fn get_backfill_route( State(services): State, body: Ruma, ) -> Result { - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - if !services - .rooms - .state_accessor - .is_world_readable(&body.room_id) - .await && !services - .rooms - .state_cache - .server_in_room(body.origin(), &body.room_id) - .await - { - return Err!(Request(Forbidden("Server is not in room."))); + AccessCheck { + services: &services, + origin: body.origin(), + room_id: &body.room_id, + event_id: None, } + .check() + .await?; let until = body .v @@ -70,7 +60,6 @@ pub(crate) async fn get_backfill_route( .state_accessor .server_can_see_event(origin, &pdu.room_id, &pdu.event_id) .await - .is_ok_and(is_equal_to!(true)) { return None; } diff --git a/src/api/server/event.rs b/src/api/server/event.rs index 64ce3e40..29d5d870 100644 --- a/src/api/server/event.rs +++ 
b/src/api/server/event.rs @@ -1,7 +1,8 @@ use axum::extract::State; -use conduit::{err, Err, Result}; +use conduit::{err, Result}; use ruma::{api::federation::event::get_event, MilliSecondsSinceUnixEpoch, RoomId}; +use super::AccessCheck; use crate::Ruma; /// # `GET /_matrix/federation/v1/event/{eventId}` @@ -20,35 +21,21 @@ pub(crate) async fn get_event_route( .await .map_err(|_| err!(Request(NotFound("Event not found."))))?; - let room_id_str = event + let room_id: &RoomId = event .get("room_id") .and_then(|val| val.as_str()) - .ok_or_else(|| err!(Database("Invalid event in database.")))?; + .ok_or_else(|| err!(Database("Invalid event in database.")))? + .try_into() + .map_err(|_| err!(Database("Invalid room_id in event in database.")))?; - let room_id = - <&RoomId>::try_from(room_id_str).map_err(|_| err!(Database("Invalid room_id in event in database.")))?; - - if !services - .rooms - .state_accessor - .is_world_readable(room_id) - .await && !services - .rooms - .state_cache - .server_in_room(body.origin(), room_id) - .await - { - return Err!(Request(Forbidden("Server is not in room."))); - } - - if !services - .rooms - .state_accessor - .server_can_see_event(body.origin(), room_id, &body.event_id) - .await? 
- { - return Err!(Request(Forbidden("Server is not allowed to see event."))); + AccessCheck { + services: &services, + origin: body.origin(), + room_id, + event_id: Some(&body.event_id), } + .check() + .await?; Ok(get_event::v1::Response { origin: services.globals.server_name().to_owned(), diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 8fe96f81..faeb2b99 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -8,6 +8,7 @@ use ruma::{ RoomId, }; +use super::AccessCheck; use crate::Ruma; /// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` @@ -18,24 +19,14 @@ use crate::Ruma; pub(crate) async fn get_event_authorization_route( State(services): State, body: Ruma, ) -> Result { - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - if !services - .rooms - .state_accessor - .is_world_readable(&body.room_id) - .await && !services - .rooms - .state_cache - .server_in_room(body.origin(), &body.room_id) - .await - { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + AccessCheck { + services: &services, + origin: body.origin(), + room_id: &body.room_id, + event_id: None, } + .check() + .await?; let event = services .rooms diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index aee4fbe9..7dff44dc 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -5,6 +5,7 @@ use ruma::{ CanonicalJsonValue, EventId, RoomId, }; +use super::AccessCheck; use crate::Ruma; /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` @@ -13,29 +14,16 @@ use crate::Ruma; pub(crate) async fn get_missing_events_route( State(services): State, body: Ruma, ) -> Result { - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - if !services - .rooms - .state_accessor - .is_world_readable(&body.room_id) - .await && !services - .rooms - 
.state_cache - .server_in_room(body.origin(), &body.room_id) - .await - { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room")); + AccessCheck { + services: &services, + origin: body.origin(), + room_id: &body.room_id, + event_id: None, } + .check() + .await?; - let limit = body - .limit - .try_into() - .expect("UInt could not be converted to usize"); + let limit = body.limit.try_into()?; let mut queued_events = body.latest_events.clone(); // the vec will never have more entries the limit @@ -70,7 +58,7 @@ pub(crate) async fn get_missing_events_route( .rooms .state_accessor .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) - .await? + .await { i = i.saturating_add(1); continue; diff --git a/src/api/server/mod.rs b/src/api/server/mod.rs index 9a184f23..9b7d91cb 100644 --- a/src/api/server/mod.rs +++ b/src/api/server/mod.rs @@ -41,3 +41,6 @@ pub(super) use state_ids::*; pub(super) use user::*; pub(super) use version::*; pub(super) use well_known::*; + +mod utils; +use utils::AccessCheck; diff --git a/src/api/server/state.rs b/src/api/server/state.rs index 59bb6c7b..06a44a99 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,10 +1,11 @@ use std::borrow::Borrow; use axum::extract::State; -use conduit::{err, result::LogErr, utils::IterStream, Err, Result}; +use conduit::{err, result::LogErr, utils::IterStream, Result}; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::api::federation::event::get_room_state; +use super::AccessCheck; use crate::Ruma; /// # `GET /_matrix/federation/v1/state/{roomId}` @@ -13,24 +14,14 @@ use crate::Ruma; pub(crate) async fn get_room_state_route( State(services): State, body: Ruma, ) -> Result { - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - if !services - .rooms - .state_accessor - .is_world_readable(&body.room_id) - .await && !services - .rooms - .state_cache - .server_in_room(body.origin(), &body.room_id) - 
.await - { - return Err!(Request(Forbidden("Server is not in room."))); + AccessCheck { + services: &services, + origin: body.origin(), + room_id: &body.room_id, + event_id: None, } + .check() + .await?; let shortstatehash = services .rooms diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 957a2a86..52d8e7cc 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,11 +1,12 @@ use std::borrow::Borrow; use axum::extract::State; -use conduit::{err, Err}; +use conduit::{err, Result}; use futures::StreamExt; use ruma::api::federation::event::get_room_state_ids; -use crate::{Result, Ruma}; +use super::AccessCheck; +use crate::Ruma; /// # `GET /_matrix/federation/v1/state_ids/{roomId}` /// @@ -14,24 +15,14 @@ use crate::{Result, Ruma}; pub(crate) async fn get_room_state_ids_route( State(services): State, body: Ruma, ) -> Result { - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - if !services - .rooms - .state_accessor - .is_world_readable(&body.room_id) - .await && !services - .rooms - .state_cache - .server_in_room(body.origin(), &body.room_id) - .await - { - return Err!(Request(Forbidden("Server is not in room."))); + AccessCheck { + services: &services, + origin: body.origin(), + room_id: &body.room_id, + event_id: None, } + .check() + .await?; let shortstatehash = services .rooms diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs new file mode 100644 index 00000000..278465ca --- /dev/null +++ b/src/api/server/utils.rs @@ -0,0 +1,60 @@ +use conduit::{implement, is_false, Err, Result}; +use conduit_service::Services; +use futures::{future::OptionFuture, join, FutureExt}; +use ruma::{EventId, RoomId, ServerName}; + +pub(super) struct AccessCheck<'a> { + pub(super) services: &'a Services, + pub(super) origin: &'a ServerName, + pub(super) room_id: &'a RoomId, + pub(super) event_id: Option<&'a EventId>, +} + +#[implement(AccessCheck, params = "<'_>")] +pub(super) 
async fn check(&self) -> Result { + let acl_check = self + .services + .rooms + .event_handler + .acl_check(self.origin, self.room_id) + .map(|result| result.is_ok()); + + let world_readable = self + .services + .rooms + .state_accessor + .is_world_readable(self.room_id); + + let server_in_room = self + .services + .rooms + .state_cache + .server_in_room(self.origin, self.room_id); + + let server_can_see: OptionFuture<_> = self + .event_id + .map(|event_id| { + self.services + .rooms + .state_accessor + .server_can_see_event(self.origin, self.room_id, event_id) + }) + .into(); + + let (world_readable, server_in_room, server_can_see, acl_check) = + join!(world_readable, server_in_room, server_can_see, acl_check); + + if !acl_check { + return Err!(Request(Forbidden("Server access denied."))); + } + + if !world_readable && !server_in_room { + return Err!(Request(Forbidden("Server is not in room."))); + } + + if server_can_see.is_some_and(is_false!()) { + return Err!(Request(Forbidden("Server is not allowed to see event."))); + } + + Ok(()) +} From 6b0eb7608d06fbfce663e4775196cfd3c7bae643 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 31 Oct 2024 07:33:16 +0000 Subject: [PATCH 0163/1248] add Filter extension to Result Signed-off-by: Jason Volk --- src/core/utils/result.rs | 5 +++-- src/core/utils/result/filter.rs | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 src/core/utils/result/filter.rs diff --git a/src/core/utils/result.rs b/src/core/utils/result.rs index 9a60d19e..fb1b7b95 100644 --- a/src/core/utils/result.rs +++ b/src/core/utils/result.rs @@ -1,4 +1,5 @@ mod debug_inspect; +mod filter; mod flat_ok; mod into_is_ok; mod log_debug_err; @@ -8,8 +9,8 @@ mod not_found; mod unwrap_infallible; pub use self::{ - debug_inspect::DebugInspect, flat_ok::FlatOk, into_is_ok::IntoIsOk, log_debug_err::LogDebugErr, log_err::LogErr, - map_expect::MapExpect, not_found::NotFound, unwrap_infallible::UnwrapInfallible, + 
debug_inspect::DebugInspect, filter::Filter, flat_ok::FlatOk, into_is_ok::IntoIsOk, log_debug_err::LogDebugErr, + log_err::LogErr, map_expect::MapExpect, not_found::NotFound, unwrap_infallible::UnwrapInfallible, }; pub type Result = std::result::Result; diff --git a/src/core/utils/result/filter.rs b/src/core/utils/result/filter.rs new file mode 100644 index 00000000..f11d3632 --- /dev/null +++ b/src/core/utils/result/filter.rs @@ -0,0 +1,21 @@ +use super::Result; + +pub trait Filter { + /// Similar to Option::filter + #[must_use] + fn filter(self, predicate: P) -> Self + where + P: FnOnce(&T) -> Result<(), U>, + E: From; +} + +impl Filter for Result { + #[inline] + fn filter(self, predicate: P) -> Self + where + P: FnOnce(&T) -> Result<(), U>, + E: From, + { + self.and_then(move |t| predicate(&t).map(move |()| t).map_err(Into::into)) + } +} From 0bc6fdd5897c9a216ced0433532eb47a97f142ac Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 31 Oct 2024 08:19:37 +0000 Subject: [PATCH 0164/1248] Refactor ShortStateInfo et al to properly named structures Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 9 ++- src/api/client/membership.rs | 14 ++++- src/service/rooms/event_handler/mod.rs | 15 +++-- src/service/rooms/short/mod.rs | 1 + src/service/rooms/state/mod.rs | 8 +-- src/service/rooms/state_accessor/data.rs | 6 +- src/service/rooms/state_compressor/mod.rs | 77 +++++++++++++++-------- 7 files changed, 89 insertions(+), 41 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index db102858..754c9840 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -12,6 +12,7 @@ use ruma::{ events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName, }; +use service::rooms::state_compressor::HashSetCompressStateEvent; use tracing_subscriber::EnvFilter; use crate::admin_command; @@ -632,7 +633,11 @@ pub(super) async fn 
force_set_room_state_from_server( .await?; info!("Forcing new room state"); - let (short_state_hash, new, removed) = self + let HashSetCompressStateEvent { + shortstatehash: short_state_hash, + added, + removed, + } = self .services .rooms .state_compressor @@ -643,7 +648,7 @@ pub(super) async fn force_set_room_state_from_server( self.services .rooms .state - .force_state(room_id.clone().as_ref(), short_state_hash, new, removed, &state_lock) + .force_state(room_id.clone().as_ref(), short_state_hash, added, removed, &state_lock) .await?; info!( diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 27de60c6..c41e93fa 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -39,7 +39,11 @@ use ruma::{ state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; -use service::{appservice::RegistrationInfo, rooms::state::RoomMutexGuard, Services}; +use service::{ + appservice::RegistrationInfo, + rooms::{state::RoomMutexGuard, state_compressor::HashSetCompressStateEvent}, + Services, +}; use crate::{client::full_user_deactivate, Ruma}; @@ -941,7 +945,11 @@ async fn join_room_by_id_helper_remote( .await; debug!("Saving compressed state"); - let (statehash_before_join, new, removed) = services + let HashSetCompressStateEvent { + shortstatehash: statehash_before_join, + added, + removed, + } = services .rooms .state_compressor .save_state(room_id, Arc::new(compressed)) @@ -951,7 +959,7 @@ async fn join_room_by_id_helper_remote( services .rooms .state - .force_state(room_id, statehash_before_join, new, removed, &state_lock) + .force_state(room_id, statehash_before_join, added, removed, &state_lock) .await?; info!("Updating joined counts for new room"); diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index ec04e748..adebd332 100644 --- a/src/service/rooms/event_handler/mod.rs +++ 
b/src/service/rooms/event_handler/mod.rs @@ -33,8 +33,11 @@ use ruma::{ RoomId, RoomVersionId, ServerName, UserId, }; -use super::state_compressor::CompressedStateEvent; -use crate::{globals, rooms, sending, server_keys, Dep}; +use crate::{ + globals, rooms, + rooms::state_compressor::{CompressedStateEvent, HashSetCompressStateEvent}, + sending, server_keys, Dep, +}; pub struct Service { services: Services, @@ -692,7 +695,11 @@ impl Service { // Set the new room state to the resolved state debug!("Forcing new room state"); - let (sstatehash, new, removed) = self + let HashSetCompressStateEvent { + shortstatehash, + added, + removed, + } = self .services .state_compressor .save_state(room_id, new_room_state) @@ -700,7 +707,7 @@ impl Service { self.services .state - .force_state(room_id, sstatehash, new, removed, &state_lock) + .force_state(room_id, shortstatehash, added, removed, &state_lock) .await?; } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 02c449cc..62011605 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -24,6 +24,7 @@ struct Services { globals: Dep, } +pub type ShortStateKey = ShortId; pub type ShortEventId = ShortId; pub type ShortRoomId = ShortId; pub type ShortId = u64; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 6abaa198..34fab079 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -182,12 +182,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) + .difference(&parent_stateinfo.full_state) .copied() .collect(); let statediffremoved: HashSet<_> = parent_stateinfo - .1 + .full_state .difference(&state_ids_compressed) .copied() .collect(); @@ -259,7 +259,7 @@ impl Service { let replaces = states_parents .last() .map(|info| { - info.1 + info.full_state .iter() 
.find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) }) @@ -421,7 +421,7 @@ impl Service { })? .pop() .expect("there is always one layer") - .1; + .full_state; let mut ret = HashMap::new(); for compressed in full_state.iter() { diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index adc26f00..f77a6d80 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -45,7 +45,7 @@ impl Data { .map_err(|e| err!(Database("Missing state IDs: {e}")))? .pop() .expect("there is always one layer") - .1; + .full_state; let mut result = HashMap::new(); let mut i: u8 = 0; @@ -78,7 +78,7 @@ impl Data { .await? .pop() .expect("there is always one layer") - .1; + .full_state; let mut result = HashMap::new(); let mut i: u8 = 0; @@ -123,7 +123,7 @@ impl Data { .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? .pop() .expect("there is always one layer") - .1; + .full_state; let compressed = full_state .iter() diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index be66c597..1f351f40 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -10,7 +10,7 @@ use database::Map; use lru_cache::LruCache; use ruma::{EventId, RoomId}; -use crate::{rooms, Dep}; +use crate::{rooms, rooms::short::ShortId, Dep}; pub struct Service { pub stateinfo_cache: Mutex, @@ -27,24 +27,33 @@ struct Data { shortstatehash_statediff: Arc, } +#[derive(Clone)] struct StateDiff { parent: Option, added: Arc>, removed: Arc>, } +#[derive(Clone, Default)] +pub struct ShortStateInfo { + pub shortstatehash: ShortStateHash, + pub full_state: Arc>, + pub added: Arc>, + pub removed: Arc>, +} + +#[derive(Clone, Default)] +pub struct HashSetCompressStateEvent { + pub shortstatehash: ShortStateHash, + pub added: Arc>, + pub removed: Arc>, +} + +pub type ShortStateHash = ShortId; +pub(crate) type 
CompressedStateEvent = [u8; 2 * size_of::()]; type StateInfoLruCache = LruCache; type ShortStateInfoVec = Vec; type ParentStatesVec = Vec; -type ShortStateInfo = ( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed -); - -type HashSetCompressStateEvent = (u64, Arc>, Arc>); -pub type CompressedStateEvent = [u8; 2 * size_of::()]; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -95,14 +104,19 @@ impl Service { if let Some(parent) = parent { let mut response = Box::pin(self.load_shortstatehash_info(parent)).await?; - let mut state = (*response.last().expect("at least one response").1).clone(); + let mut state = (*response.last().expect("at least one response").full_state).clone(); state.extend(added.iter().copied()); let removed = (*removed).clone(); for r in &removed { state.remove(r); } - response.push((shortstatehash, Arc::new(state), added, Arc::new(removed))); + response.push(ShortStateInfo { + shortstatehash, + full_state: Arc::new(state), + added, + removed: Arc::new(removed), + }); self.stateinfo_cache .lock() @@ -111,7 +125,13 @@ impl Service { Ok(response) } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; + let response = vec![ShortStateInfo { + shortstatehash, + full_state: added.clone(), + added, + removed, + }]; + self.stateinfo_cache .lock() .expect("locked") @@ -185,8 +205,8 @@ impl Service { // To many layers, we have to go deeper let parent = parent_states.pop().expect("parent must have a state"); - let mut parent_new = (*parent.2).clone(); - let mut parent_removed = (*parent.3).clone(); + let mut parent_new = (*parent.added).clone(); + let mut parent_removed = (*parent.removed).clone(); for removed in statediffremoved.iter() { if !parent_new.remove(removed) { @@ -236,14 +256,14 @@ impl Service { // 2. 
We replace a layer above let parent = parent_states.pop().expect("parent must have a state"); - let parent_2_len = parent.2.len(); - let parent_3_len = parent.3.len(); - let parent_diff = checked!(parent_2_len + parent_3_len)?; + let parent_added_len = parent.added.len(); + let parent_removed_len = parent.removed.len(); + let parent_diff = checked!(parent_added_len + parent_removed_len)?; if checked!(diffsum * diffsum)? >= checked!(2 * diff_to_sibling * parent_diff)? { // Diff too big, we replace above layer(s) - let mut parent_new = (*parent.2).clone(); - let mut parent_removed = (*parent.3).clone(); + let mut parent_new = (*parent.added).clone(); + let mut parent_removed = (*parent.removed).clone(); for removed in statediffremoved.iter() { if !parent_new.remove(removed) { @@ -275,7 +295,7 @@ impl Service { self.save_statediff( shortstatehash, &StateDiff { - parent: Some(parent.0), + parent: Some(parent.shortstatehash), added: statediffnew, removed: statediffremoved, }, @@ -311,7 +331,10 @@ impl Service { .await; if Some(new_shortstatehash) == previous_shortstatehash { - return Ok((new_shortstatehash, Arc::new(HashSet::new()), Arc::new(HashSet::new()))); + return Ok(HashSetCompressStateEvent { + shortstatehash: new_shortstatehash, + ..Default::default() + }); } let states_parents = if let Some(p) = previous_shortstatehash { @@ -322,12 +345,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) + .difference(&parent_stateinfo.full_state) .copied() .collect(); let statediffremoved: HashSet<_> = parent_stateinfo - .1 + .full_state .difference(&new_state_ids_compressed) .copied() .collect(); @@ -347,7 +370,11 @@ impl Service { )?; }; - Ok((new_shortstatehash, statediffnew, statediffremoved)) + Ok(HashSetCompressStateEvent { + shortstatehash: new_shortstatehash, + added: statediffnew, + removed: statediffremoved, + }) } 
async fn get_statediff(&self, shortstatehash: u64) -> Result { From f746be82c158e0bae2a3311953d49c6c8be2c910 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 31 Oct 2024 08:41:43 +0000 Subject: [PATCH 0165/1248] typename some loose u64 ShortId's Signed-off-by: Jason Volk --- src/service/rooms/short/mod.rs | 33 ++++++++++++----------- src/service/rooms/state_accessor/data.rs | 12 ++++----- src/service/rooms/state_compressor/mod.rs | 25 +++++++++-------- 3 files changed, 37 insertions(+), 33 deletions(-) diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 62011605..a903ef22 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -24,6 +24,7 @@ struct Services { globals: Dep, } +pub type ShortStateHash = ShortId; pub type ShortStateKey = ShortId; pub type ShortEventId = ShortId; pub type ShortRoomId = ShortId; @@ -50,7 +51,7 @@ impl crate::Service for Service { } #[implement(Service)] -pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { +pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEventId { const BUFSIZE: usize = size_of::(); if let Ok(shorteventid) = self @@ -78,7 +79,7 @@ pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> u64 { } #[implement(Service)] -pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { +pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { self.db .eventid_shorteventid .get_batch_blocking(event_ids.iter()) @@ -106,7 +107,7 @@ pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> } #[implement(Service)] -pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { +pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { let key = (event_type, state_key); self.db .statekey_shortstatekey @@ -116,8 +117,8 @@ pub async fn get_shortstatekey(&self, 
event_type: &StateEventType, state_key: &s } #[implement(Service)] -pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> u64 { - const BUFSIZE: usize = size_of::(); +pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> ShortStateKey { + const BUFSIZE: usize = size_of::(); let key = (event_type, state_key); if let Ok(shortstatekey) = self @@ -145,8 +146,8 @@ pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, sta } #[implement(Service)] -pub async fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - const BUFSIZE: usize = size_of::(); +pub async fn get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result> { + const BUFSIZE: usize = size_of::(); self.db .shorteventid_eventid @@ -157,8 +158,8 @@ pub async fn get_eventid_from_short(&self, shorteventid: u64) -> Result Vec>> { - const BUFSIZE: usize = size_of::(); +pub async fn multi_get_eventid_from_short(&self, shorteventid: &[ShortEventId]) -> Vec>> { + const BUFSIZE: usize = size_of::(); let keys: Vec<[u8; BUFSIZE]> = shorteventid .iter() @@ -174,8 +175,8 @@ pub async fn multi_get_eventid_from_short(&self, shorteventid: &[u64]) -> Vec Result<(StateEventType, String)> { - const BUFSIZE: usize = size_of::(); +pub async fn get_statekey_from_short(&self, shortstatekey: ShortStateKey) -> Result<(StateEventType, String)> { + const BUFSIZE: usize = size_of::(); self.db .shortstatekey_statekey @@ -191,8 +192,8 @@ pub async fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(State /// Returns (shortstatehash, already_existed) #[implement(Service)] -pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, bool) { - const BUFSIZE: usize = size_of::(); +pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (ShortStateHash, bool) { + const BUFSIZE: usize = size_of::(); if let Ok(shortstatehash) = self .db @@ -215,19 +216,19 @@ pub async fn 
get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (u64, boo } #[implement(Service)] -pub async fn get_shortroomid(&self, room_id: &RoomId) -> Result { +pub async fn get_shortroomid(&self, room_id: &RoomId) -> Result { self.db.roomid_shortroomid.get(room_id).await.deserialized() } #[implement(Service)] -pub async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> u64 { +pub async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> ShortRoomId { self.db .roomid_shortroomid .get(room_id) .await .deserialized() .unwrap_or_else(|_| { - const BUFSIZE: usize = size_of::(); + const BUFSIZE: usize = size_of::(); let short = self.services.globals.next_count().unwrap(); debug_assert!(size_of_val(&short) == BUFSIZE, "buffer requirement changed"); diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index f77a6d80..9c96785f 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -5,7 +5,7 @@ use database::{Deserialized, Map}; use futures::TryFutureExt; use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{rooms, Dep}; +use crate::{rooms, rooms::short::ShortStateHash, Dep}; pub(super) struct Data { eventid_shorteventid: Arc, @@ -36,7 +36,7 @@ impl Data { } #[allow(unused_qualifications)] // async traits - pub(super) async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + pub(super) async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result>> { let full_state = self .services .state_compressor @@ -69,7 +69,7 @@ impl Data { #[allow(unused_qualifications)] // async traits pub(super) async fn state_full( - &self, shortstatehash: u64, + &self, shortstatehash: ShortStateHash, ) -> Result>> { let full_state = self .services @@ -107,7 +107,7 @@ impl Data { /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
#[allow(clippy::unused_self)] pub(super) async fn state_get_id( - &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result> { let shortstatekey = self .services @@ -147,7 +147,7 @@ impl Data { /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). pub(super) async fn state_get( - &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result> { self.state_get_id(shortstatehash, event_type, state_key) .and_then(|event_id| async move { self.services.timeline.get_pdu(&event_id).await }) @@ -155,7 +155,7 @@ impl Data { } /// Returns the state hash for this pdu. - pub(super) async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + pub(super) async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { self.eventid_shorteventid .get(event_id) .and_then(|shorteventid| self.shorteventid_shortstatehash.get(&shorteventid)) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 1f351f40..e213490b 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -10,7 +10,11 @@ use database::Map; use lru_cache::LruCache; use ruma::{EventId, RoomId}; -use crate::{rooms, rooms::short::ShortId, Dep}; +use crate::{ + rooms, + rooms::short::{ShortStateHash, ShortStateKey}, + Dep, +}; pub struct Service { pub stateinfo_cache: Mutex, @@ -49,9 +53,8 @@ pub struct HashSetCompressStateEvent { pub removed: Arc>, } -pub type ShortStateHash = ShortId; pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; -type StateInfoLruCache = LruCache; +type StateInfoLruCache = LruCache; type ShortStateInfoVec = Vec; type ParentStatesVec = Vec; @@ -86,7 +89,7 @@ impl crate::Service for Service { impl Service { /// Returns a stack with info on 
shortstatehash, full state, added diff and /// removed diff for the selected shortstatehash and each parent layer. - pub async fn load_shortstatehash_info(&self, shortstatehash: u64) -> Result { + pub async fn load_shortstatehash_info(&self, shortstatehash: ShortStateHash) -> Result { if let Some(r) = self .stateinfo_cache .lock() @@ -141,7 +144,7 @@ impl Service { } } - pub async fn compress_state_event(&self, shortstatekey: u64, event_id: &EventId) -> CompressedStateEvent { + pub async fn compress_state_event(&self, shortstatekey: ShortStateKey, event_id: &EventId) -> CompressedStateEvent { let mut v = shortstatekey.to_be_bytes().to_vec(); v.extend_from_slice( &self @@ -159,7 +162,7 @@ impl Service { #[inline] pub async fn parse_compressed_state_event( &self, compressed_event: &CompressedStateEvent, - ) -> Result<(u64, Arc)> { + ) -> Result<(ShortStateKey, Arc)> { use utils::u64_from_u8; let shortstatekey = u64_from_u8(&compressed_event[0..size_of::()]); @@ -192,7 +195,7 @@ impl Service { /// added diff and removed diff for each parent layer #[tracing::instrument(skip_all, level = "debug")] pub fn save_state_from_diff( - &self, shortstatehash: u64, statediffnew: Arc>, + &self, shortstatehash: ShortStateHash, statediffnew: Arc>, statediffremoved: Arc>, diff_to_sibling: usize, mut parent_states: ParentStatesVec, ) -> Result { @@ -377,9 +380,9 @@ impl Service { }) } - async fn get_statediff(&self, shortstatehash: u64) -> Result { - const BUFSIZE: usize = size_of::(); - const STRIDE: usize = size_of::(); + async fn get_statediff(&self, shortstatehash: ShortStateHash) -> Result { + const BUFSIZE: usize = size_of::(); + const STRIDE: usize = size_of::(); let value = self .db @@ -418,7 +421,7 @@ impl Service { }) } - fn save_statediff(&self, shortstatehash: u64, diff: &StateDiff) { + fn save_statediff(&self, shortstatehash: ShortStateHash, diff: &StateDiff) { let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); for new in diff.added.iter() { 
value.extend_from_slice(&new[..]); From 1f1e2d547cceda68da71855186abc38a3a1ba713 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 31 Oct 2024 11:49:00 +0000 Subject: [PATCH 0166/1248] optimize override ips; utilize all ips from cache Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 6 +++--- src/service/resolver/cache.rs | 10 +++++++--- src/service/resolver/dns.rs | 12 +++++------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 61eedca5..5dc03d14 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -10,7 +10,7 @@ use ipaddress::IPAddress; use ruma::ServerName; use super::{ - cache::{CachedDest, CachedOverride}, + cache::{CachedDest, CachedOverride, MAX_IPS}, fed::{add_port_to_hostname, get_ip_with_port, FedDest, PortString}, }; @@ -266,9 +266,9 @@ impl super::Service { } self.set_cached_override( - overname.to_owned(), + overname, CachedOverride { - ips: override_ip.iter().collect(), + ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), }, diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 465b5985..a13399dc 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -5,6 +5,7 @@ use std::{ time::SystemTime, }; +use arrayvec::ArrayVec; use conduit::{trace, utils::rand}; use ruma::{OwnedServerName, ServerName}; @@ -24,7 +25,7 @@ pub struct CachedDest { #[derive(Clone, Debug)] pub struct CachedOverride { - pub ips: Vec, + pub ips: IpAddrs, pub port: u16, pub expire: SystemTime, } @@ -32,6 +33,9 @@ pub struct CachedOverride { pub type WellKnownMap = HashMap; pub type TlsNameMap = HashMap; +pub type IpAddrs = ArrayVec; +pub(crate) const MAX_IPS: usize = 3; + impl Cache { pub(super) fn new() -> Arc { Arc::new(Self { @@ -61,13 +65,13 @@ impl super::Service { .cloned() } - pub fn set_cached_override(&self, name: String, over: 
CachedOverride) -> Option { + pub fn set_cached_override(&self, name: &str, over: CachedOverride) -> Option { trace!(?name, ?over, "set cached override"); self.cache .overrides .write() .expect("locked for writing") - .insert(name, over) + .insert(name.into(), over) } #[must_use] diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index 89129e03..d3e9f5c9 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -1,4 +1,4 @@ -use std::{iter, net::SocketAddr, sync::Arc, time::Duration}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; use conduit::{err, Result, Server}; use futures::FutureExt; @@ -101,14 +101,12 @@ impl Resolve for Hooked { } async fn cached_to_reqwest(cached: CachedOverride) -> ResolvingResult { - let first_ip = cached + let addrs = cached .ips - .first() - .expect("must provide at least one override"); + .into_iter() + .map(move |ip| SocketAddr::new(ip, cached.port)); - let saddr = SocketAddr::new(*first_ip, cached.port); - - Ok(Box::new(iter::once(saddr))) + Ok(Box::new(addrs)) } async fn resolve_to_reqwest(resolver: Arc, name: Name) -> ResolvingResult { From ba1c13468942dce17d78e394ffabe2796cfed577 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 1 Nov 2024 22:16:14 +0000 Subject: [PATCH 0167/1248] move migrations out of globals service Signed-off-by: Jason Volk --- src/service/globals/mod.rs | 1 - src/service/media/migrations.rs | 4 ++-- src/service/{globals => }/migrations.rs | 0 src/service/mod.rs | 1 + src/service/services.rs | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename src/service/{globals => }/migrations.rs (100%) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 0a7dda9f..bd956964 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,5 +1,4 @@ mod data; -pub(super) mod migrations; use std::{ collections::HashMap, diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 0e358d44..f1c6da7d 100644 
--- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -13,7 +13,7 @@ use conduit::{ warn, Config, Result, }; -use crate::{globals, Services}; +use crate::{migrations, Services}; /// Migrates a media directory from legacy base64 file names to sha2 file names. /// All errors are fatal. Upon success the database is keyed to not perform this @@ -50,7 +50,7 @@ pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { // Apply fix from when sha256_media was backward-incompat and bumped the schema // version from 13 to 14. For users satisfying these conditions we can go back. - if services.globals.db.database_version().await == 14 && globals::migrations::DATABASE_VERSION == 13 { + if services.globals.db.database_version().await == 14 && migrations::DATABASE_VERSION == 13 { services.globals.db.bump_database_version(13)?; } diff --git a/src/service/globals/migrations.rs b/src/service/migrations.rs similarity index 100% rename from src/service/globals/migrations.rs rename to src/service/migrations.rs diff --git a/src/service/mod.rs b/src/service/mod.rs index 604e3404..c7dcc0c6 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,6 +1,7 @@ #![allow(refining_impl_trait)] mod manager; +mod migrations; mod service; pub mod services; diff --git a/src/service/services.rs b/src/service/services.rs index c0af4249..b86e7a72 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -114,7 +114,7 @@ impl Services { debug_info!("Starting services..."); self.admin.set_services(Some(Arc::clone(self)).as_ref()); - globals::migrations::migrations(self).await?; + super::migrations::migrations(self).await?; self.manager .lock() .await From 87424370364fa391214d4a55feaf9d3606add9e0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 1 Nov 2024 22:43:26 +0000 Subject: [PATCH 0168/1248] wrap unimplemented ser/de branches with internal macro Signed-off-by: Jason Volk --- src/database/de.rs | 34 ++++++++++++++++--------------- 
src/database/mod.rs | 2 +- src/database/ser.rs | 48 ++++++++++++++++++++++---------------------- src/database/util.rs | 23 +++++++++++++++++++++ 4 files changed, 66 insertions(+), 41 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index 0e074fdb..d7dc1102 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -5,6 +5,8 @@ use serde::{ Deserialize, }; +use crate::util::unhandled; + /// Deserialize into T from buffer. pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result where @@ -192,7 +194,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { match name { "Ignore" => self.record_ignore(), "IgnoreAll" => self.record_ignore_all(), - _ => unimplemented!("Unrecognized deserialization Directive {name:?}"), + _ => unhandled!("Unrecognized deserialization Directive {name:?}"), }; visitor.visit_unit() @@ -214,27 +216,27 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { where V: Visitor<'de>, { - unimplemented!("deserialize Enum not implemented") + unhandled!("deserialize Enum not implemented") } fn deserialize_option>(self, _visitor: V) -> Result { - unimplemented!("deserialize Option not implemented") + unhandled!("deserialize Option not implemented") } fn deserialize_bool>(self, _visitor: V) -> Result { - unimplemented!("deserialize bool not implemented") + unhandled!("deserialize bool not implemented") } fn deserialize_i8>(self, _visitor: V) -> Result { - unimplemented!("deserialize i8 not implemented") + unhandled!("deserialize i8 not implemented") } fn deserialize_i16>(self, _visitor: V) -> Result { - unimplemented!("deserialize i16 not implemented") + unhandled!("deserialize i16 not implemented") } fn deserialize_i32>(self, _visitor: V) -> Result { - unimplemented!("deserialize i32 not implemented") + unhandled!("deserialize i32 not implemented") } fn deserialize_i64>(self, visitor: V) -> Result { @@ -244,15 +246,15 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn 
deserialize_u8>(self, _visitor: V) -> Result { - unimplemented!("deserialize u8 not implemented; try dereferencing the Handle for [u8] access instead") + unhandled!("deserialize u8 not implemented; try dereferencing the Handle for [u8] access instead") } fn deserialize_u16>(self, _visitor: V) -> Result { - unimplemented!("deserialize u16 not implemented") + unhandled!("deserialize u16 not implemented") } fn deserialize_u32>(self, _visitor: V) -> Result { - unimplemented!("deserialize u32 not implemented") + unhandled!("deserialize u32 not implemented") } fn deserialize_u64>(self, visitor: V) -> Result { @@ -262,15 +264,15 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn deserialize_f32>(self, _visitor: V) -> Result { - unimplemented!("deserialize f32 not implemented") + unhandled!("deserialize f32 not implemented") } fn deserialize_f64>(self, _visitor: V) -> Result { - unimplemented!("deserialize f64 not implemented") + unhandled!("deserialize f64 not implemented") } fn deserialize_char>(self, _visitor: V) -> Result { - unimplemented!("deserialize char not implemented") + unhandled!("deserialize char not implemented") } fn deserialize_str>(self, visitor: V) -> Result { @@ -291,11 +293,11 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn deserialize_byte_buf>(self, _visitor: V) -> Result { - unimplemented!("deserialize Byte Buf not implemented") + unhandled!("deserialize Byte Buf not implemented") } fn deserialize_unit>(self, _visitor: V) -> Result { - unimplemented!("deserialize Unit not implemented") + unhandled!("deserialize Unit not implemented") } // this only used for $serde_json::private::RawValue at this time; see MapAccess @@ -305,7 +307,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn deserialize_ignored_any>(self, _visitor: V) -> Result { - unimplemented!("deserialize Ignored Any not implemented") + unhandled!("deserialize Ignored Any not implemented") } fn 
deserialize_any>(self, visitor: V) -> Result { diff --git a/src/database/mod.rs b/src/database/mod.rs index dcd66a1e..f09c4a71 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -11,7 +11,7 @@ mod opts; mod ser; mod stream; mod tests; -mod util; +pub(crate) mod util; mod watchers; pub(crate) use self::{ diff --git a/src/database/ser.rs b/src/database/ser.rs index 0cc5c886..961d2700 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -4,6 +4,8 @@ use arrayvec::ArrayVec; use conduit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; use serde::{ser, Serialize}; +use crate::util::unhandled; + #[inline] pub fn serialize_to_array(val: T) -> Result> where @@ -146,17 +148,15 @@ impl ser::Serializer for &mut Serializer<'_, W> { fn serialize_tuple_variant( self, _name: &'static str, _idx: u32, _var: &'static str, _len: usize, ) -> Result { - unimplemented!("serialize Tuple Variant not implemented") + unhandled!("serialize Tuple Variant not implemented") } fn serialize_map(self, _len: Option) -> Result { - unimplemented!( - "serialize Map not implemented; did you mean to use database::Json() around your serde_json::Value?" - ) + unhandled!("serialize Map not implemented; did you mean to use database::Json() around your serde_json::Value?") } fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - unimplemented!( + unhandled!( "serialize Struct not implemented at this time; did you mean to use database::Json() around your struct?" 
) } @@ -164,7 +164,7 @@ impl ser::Serializer for &mut Serializer<'_, W> { fn serialize_struct_variant( self, _name: &'static str, _idx: u32, _var: &'static str, _len: usize, ) -> Result { - unimplemented!("serialize Struct Variant not implemented") + unhandled!("serialize Struct Variant not implemented") } #[allow(clippy::needless_borrows_for_generic_args)] // buggy @@ -179,14 +179,14 @@ impl ser::Serializer for &mut Serializer<'_, W> { match name { "Json" => serde_json::to_writer(&mut self.out, value).map_err(Into::into), - _ => unimplemented!("Unrecognized serialization Newtype {name:?}"), + _ => unhandled!("Unrecognized serialization Newtype {name:?}"), } } fn serialize_newtype_variant( self, _name: &'static str, _idx: u32, _var: &'static str, _value: &T, ) -> Result { - unimplemented!("serialize Newtype Variant not implemented") + unhandled!("serialize Newtype Variant not implemented") } fn serialize_unit_struct(self, name: &'static str) -> Result { @@ -197,14 +197,14 @@ impl ser::Serializer for &mut Serializer<'_, W> { "Separator" => { self.separator()?; }, - _ => unimplemented!("Unrecognized serialization directive: {name:?}"), + _ => unhandled!("Unrecognized serialization directive: {name:?}"), }; Ok(()) } fn serialize_unit_variant(self, _name: &'static str, _idx: u32, _var: &'static str) -> Result { - unimplemented!("serialize Unit Variant not implemented") + unhandled!("serialize Unit Variant not implemented") } fn serialize_some(self, val: &T) -> Result { val.serialize(self) } @@ -234,29 +234,29 @@ impl ser::Serializer for &mut Serializer<'_, W> { self.write(v) } - fn serialize_f64(self, _v: f64) -> Result { unimplemented!("serialize f64 not implemented") } + fn serialize_f64(self, _v: f64) -> Result { unhandled!("serialize f64 not implemented") } - fn serialize_f32(self, _v: f32) -> Result { unimplemented!("serialize f32 not implemented") } + fn serialize_f32(self, _v: f32) -> Result { unhandled!("serialize f32 not implemented") } fn serialize_i64(self, 
v: i64) -> Result { self.write(&v.to_be_bytes()) } fn serialize_i32(self, v: i32) -> Result { self.write(&v.to_be_bytes()) } - fn serialize_i16(self, _v: i16) -> Result { unimplemented!("serialize i16 not implemented") } + fn serialize_i16(self, _v: i16) -> Result { unhandled!("serialize i16 not implemented") } - fn serialize_i8(self, _v: i8) -> Result { unimplemented!("serialize i8 not implemented") } + fn serialize_i8(self, _v: i8) -> Result { unhandled!("serialize i8 not implemented") } fn serialize_u64(self, v: u64) -> Result { self.write(&v.to_be_bytes()) } fn serialize_u32(self, v: u32) -> Result { self.write(&v.to_be_bytes()) } - fn serialize_u16(self, _v: u16) -> Result { unimplemented!("serialize u16 not implemented") } + fn serialize_u16(self, _v: u16) -> Result { unhandled!("serialize u16 not implemented") } fn serialize_u8(self, v: u8) -> Result { self.write(&[v]) } - fn serialize_bool(self, _v: bool) -> Result { unimplemented!("serialize bool not implemented") } + fn serialize_bool(self, _v: bool) -> Result { unhandled!("serialize bool not implemented") } - fn serialize_unit(self) -> Result { unimplemented!("serialize unit not implemented") } + fn serialize_unit(self) -> Result { unhandled!("serialize unit not implemented") } } impl ser::SerializeSeq for &mut Serializer<'_, W> { @@ -309,14 +309,14 @@ impl ser::SerializeMap for &mut Serializer<'_, W> { type Ok = (); fn serialize_key(&mut self, _key: &T) -> Result { - unimplemented!("serialize Map Key not implemented") + unhandled!("serialize Map Key not implemented") } fn serialize_value(&mut self, _val: &T) -> Result { - unimplemented!("serialize Map Val not implemented") + unhandled!("serialize Map Val not implemented") } - fn end(self) -> Result { unimplemented!("serialize Map End not implemented") } + fn end(self) -> Result { unhandled!("serialize Map End not implemented") } } impl ser::SerializeStruct for &mut Serializer<'_, W> { @@ -324,10 +324,10 @@ impl ser::SerializeStruct for &mut 
Serializer<'_, W> { type Ok = (); fn serialize_field(&mut self, _key: &'static str, _val: &T) -> Result { - unimplemented!("serialize Struct Field not implemented") + unhandled!("serialize Struct Field not implemented") } - fn end(self) -> Result { unimplemented!("serialize Struct End not implemented") } + fn end(self) -> Result { unhandled!("serialize Struct End not implemented") } } impl ser::SerializeStructVariant for &mut Serializer<'_, W> { @@ -335,8 +335,8 @@ impl ser::SerializeStructVariant for &mut Serializer<'_, W> { type Ok = (); fn serialize_field(&mut self, _key: &'static str, _val: &T) -> Result { - unimplemented!("serialize Struct Variant Field not implemented") + unhandled!("serialize Struct Variant Field not implemented") } - fn end(self) -> Result { unimplemented!("serialize Struct Variant End not implemented") } + fn end(self) -> Result { unhandled!("serialize Struct Variant End not implemented") } } diff --git a/src/database/util.rs b/src/database/util.rs index d36e183f..ae076381 100644 --- a/src/database/util.rs +++ b/src/database/util.rs @@ -1,6 +1,29 @@ use conduit::{err, Result}; use rocksdb::{Direction, IteratorMode}; +//#[cfg(debug_assertions)] +macro_rules! unhandled { + ($msg:literal) => { + unimplemented!($msg) + }; +} + +// activate when stable; we're not ready for this yet +#[cfg(disable)] // #[cfg(not(debug_assertions))] +macro_rules! unhandled { + ($msg:literal) => { + // SAFETY: Eliminates branches for serializing and deserializing types never + // encountered in the codebase. This can promote optimization and reduce + // codegen. The developer must verify for every invoking callsite that the + // unhandled type is in no way involved and could not possibly be encountered. 
+ unsafe { + std::hint::unreachable_unchecked(); + } + }; +} + +pub(crate) use unhandled; + #[inline] pub(crate) fn _into_direction(mode: &IteratorMode<'_>) -> Direction { use Direction::{Forward, Reverse}; From f191b4bad4cc6d15584f6c33cac57925e7b67abf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 2 Nov 2024 04:54:28 +0000 Subject: [PATCH 0169/1248] add map_expect for stream Signed-off-by: Jason Volk --- src/core/utils/result/map_expect.rs | 8 ++++---- src/core/utils/stream/expect.rs | 11 ++++++++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/core/utils/result/map_expect.rs b/src/core/utils/result/map_expect.rs index 8ce9195f..9cd498f7 100644 --- a/src/core/utils/result/map_expect.rs +++ b/src/core/utils/result/map_expect.rs @@ -2,14 +2,14 @@ use std::fmt::Debug; use super::Result; -pub trait MapExpect { +pub trait MapExpect<'a, T> { /// Calls expect(msg) on the mapped Result value. This is similar to /// map(Result::unwrap) but composes an expect call and message without /// requiring a closure. 
- fn map_expect(self, msg: &str) -> Option; + fn map_expect(self, msg: &'a str) -> T; } -impl MapExpect for Option> { +impl<'a, T, E: Debug> MapExpect<'a, Option> for Option> { #[inline] - fn map_expect(self, msg: &str) -> Option { self.map(|result| result.expect(msg)) } + fn map_expect(self, msg: &'a str) -> Option { self.map(|result| result.expect(msg)) } } diff --git a/src/core/utils/stream/expect.rs b/src/core/utils/stream/expect.rs index 3ab7181a..68ac24ce 100644 --- a/src/core/utils/stream/expect.rs +++ b/src/core/utils/stream/expect.rs @@ -4,14 +4,19 @@ use crate::Result; pub trait TryExpect<'a, Item> { fn expect_ok(self) -> impl Stream + Send + 'a; + + fn map_expect(self, msg: &'a str) -> impl Stream + Send + 'a; } impl<'a, T, Item> TryExpect<'a, Item> for T where T: Stream> + TryStream + Send + 'a, + Item: 'a, { #[inline] - fn expect_ok(self: T) -> impl Stream + Send + 'a { - self.map(|res| res.expect("stream expectation failure")) - } + fn expect_ok(self: T) -> impl Stream + Send + 'a { self.map_expect("stream expectation failure") } + + //TODO: move to impl MapExpect + #[inline] + fn map_expect(self, msg: &'a str) -> impl Stream + Send + 'a { self.map(|res| res.expect(msg)) } } From 52f09fdb51f895940a1d0895cd05775ed9aeacbd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 2 Nov 2024 01:59:06 +0000 Subject: [PATCH 0170/1248] add database migration for missing referencedevents separator Signed-off-by: Jason Volk --- src/service/migrations.rs | 59 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/src/service/migrations.rs b/src/service/migrations.rs index c953e7b1..45323fa2 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -1,7 +1,12 @@ +use std::cmp; + use conduit::{ - debug_info, debug_warn, error, info, + debug, debug_info, debug_warn, error, info, result::NotFound, - utils::{stream::TryIgnore, IterStream, ReadyExt}, + utils::{ + stream::{TryExpect, TryIgnore}, + IterStream, 
ReadyExt, + }, warn, Err, Result, }; use futures::{FutureExt, StreamExt}; @@ -120,6 +125,14 @@ async fn migrate(services: &Services) -> Result<()> { retroactively_fix_bad_data_from_roomuserid_joined(services).await?; } + if db["global"] + .get(b"fix_referencedevents_missing_sep") + .await + .is_not_found() + { + fix_referencedevents_missing_sep(services).await?; + } + let version_match = services.globals.db.database_version().await == DATABASE_VERSION || services.globals.db.database_version().await == CONDUIT_DATABASE_VERSION; @@ -444,3 +457,45 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) info!("Finished fixing"); Ok(()) } + +async fn fix_referencedevents_missing_sep(services: &Services) -> Result { + warn!("Fixing missing record separator between room_id and event_id in referencedevents"); + + let db = &services.db; + let cork = db.cork_and_sync(); + + let referencedevents = db["referencedevents"].clone(); + + let totals: (usize, usize) = (0, 0); + let (total, fixed) = referencedevents + .raw_stream() + .expect_ok() + .enumerate() + .ready_fold(totals, |mut a, (i, (key, val))| { + debug_assert!(val.is_empty(), "expected no value"); + + let has_sep = key.contains(&database::SEP); + + if !has_sep { + let key_str = std::str::from_utf8(key).expect("key not utf-8"); + let room_id_len = key_str.find('$').expect("missing '$' in key"); + let (room_id, event_id) = key_str.split_at(room_id_len); + debug!(?a, "fixing {room_id}, {event_id}"); + + let new_key = (room_id, event_id); + referencedevents.put_raw(new_key, val); + referencedevents.remove(key); + } + + a.0 = cmp::max(i, a.0); + a.1 = a.1.saturating_add((!has_sep).into()); + a + }) + .await; + + drop(cork); + info!(?total, ?fixed, "Fixed missing record separators in 'referencedevents'."); + + db["global"].insert(b"fix_referencedevents_missing_sep", []); + db.db.cleanup() +} From 8d251003a25aa697de052f01515e5cc23ce999e4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 3 Nov 
2024 12:42:43 +0000 Subject: [PATCH 0171/1248] reduce Error-related codegen; add PoisonError Signed-off-by: Jason Volk --- src/core/config/mod.rs | 1 + src/core/error/err.rs | 1 + src/core/error/mod.rs | 10 +++++++++- src/core/error/panic.rs | 4 ++++ src/core/error/response.rs | 1 + 5 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a6216da2..43cca4b8 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2001,6 +2001,7 @@ fn default_rocksdb_stats_level() -> u8 { 1 } // I know, it's a great name #[must_use] +#[inline] pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V10 } fn default_ip_range_denylist() -> Vec { diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 82bb40b0..baeb992d 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -137,6 +137,7 @@ macro_rules! err_log { let visit = &mut |vs: ValueSet<'_>| { struct Visitor<'a>(&'a mut String); impl Visit for Visitor<'_> { + #[inline] fn record_debug(&mut self, field: &Field, val: &dyn fmt::Debug) { if field.name() == "message" { write!(self.0, "{:?}", val).expect("stream error"); diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 42250a0c..302d0f87 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -4,7 +4,7 @@ mod panic; mod response; mod serde; -use std::{any::Any, borrow::Cow, convert::Infallible, fmt}; +use std::{any::Any, borrow::Cow, convert::Infallible, fmt, sync::PoisonError}; pub use self::log::*; use crate::error; @@ -59,6 +59,8 @@ pub enum Error { JsTryFromInt(#[from] ruma::JsTryFromIntError), // js_int re-export #[error(transparent)] Path(#[from] axum::extract::rejection::PathRejection), + #[error("Mutex poisoned: {0}")] + Poison(Cow<'static, str>), #[error("Regex error: {0}")] Regex(#[from] regex::Error), #[error("Request error: {0}")] @@ -184,6 +186,12 @@ impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 
write!(f, "{}", self.message()) } } +impl From> for Error { + #[cold] + #[inline(never)] + fn from(e: PoisonError) -> Self { Self::Poison(e.to_string().into()) } +} + #[allow(clippy::fallible_impl_from)] impl From for Error { #[cold] diff --git a/src/core/error/panic.rs b/src/core/error/panic.rs index c070f786..bec25132 100644 --- a/src/core/error/panic.rs +++ b/src/core/error/panic.rs @@ -10,11 +10,14 @@ impl UnwindSafe for Error {} impl RefUnwindSafe for Error {} impl Error { + #[inline] pub fn panic(self) -> ! { panic_any(self.into_panic()) } #[must_use] + #[inline] pub fn from_panic(e: Box) -> Self { Self::Panic(debug::panic_str(&e), e) } + #[inline] pub fn into_panic(self) -> Box { match self { Self::Panic(_, e) | Self::PanicAny(e) => e, @@ -24,6 +27,7 @@ impl Error { } /// Get the panic message string. + #[inline] pub fn panic_str(self) -> Option<&'static str> { self.is_panic() .then_some(debug::panic_str(&self.into_panic())) diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 7568a1c0..21fbdcf2 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -26,6 +26,7 @@ impl axum::response::IntoResponse for Error { } impl From for UiaaResponse { + #[inline] fn from(error: Error) -> Self { if let Error::Uiaa(uiaainfo) = error { return Self::AuthResponse(uiaainfo); From 768e81741cbd2bb16e18edb0782b64d270a86dfd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 3 Nov 2024 11:22:38 +0000 Subject: [PATCH 0172/1248] use FnMut for ready_try_for_each extension Signed-off-by: Jason Volk --- src/core/utils/stream/try_ready.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index feb38067..3fbcbc45 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -24,7 +24,7 @@ where self, f: F, ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> where - F: Fn(S::Ok) -> Result<(), E>; + F: FnMut(S::Ok) -> Result<(), 
E>; } impl TryReadyExt for S @@ -42,10 +42,10 @@ where #[inline] fn ready_try_for_each( - self, f: F, + self, mut f: F, ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> where - F: Fn(S::Ok) -> Result<(), E>, + F: FnMut(S::Ok) -> Result<(), E>, { self.try_for_each(move |t| ready(f(t))) } From 4a94a4c945740b3a5ee605af61397f37060d91bc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 4 Nov 2024 18:20:32 +0000 Subject: [PATCH 0173/1248] rename pdu/id to pdu/event_id Signed-off-by: Jason Volk --- src/core/pdu/{id.rs => event_id.rs} | 0 src/core/pdu/mod.rs | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename src/core/pdu/{id.rs => event_id.rs} (100%) diff --git a/src/core/pdu/id.rs b/src/core/pdu/event_id.rs similarity index 100% rename from src/core/pdu/id.rs rename to src/core/pdu/event_id.rs diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 9c3aaf9b..53fcd0a9 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -2,8 +2,8 @@ mod builder; mod content; mod count; mod event; +mod event_id; mod filter; -mod id; mod redact; mod strip; mod unsigned; @@ -20,7 +20,7 @@ pub use self::{ builder::{Builder, Builder as PduBuilder}, count::PduCount, event::Event, - id::*, + event_id::*, }; use crate::Result; From 78aeb620bc4ecd1d2feadd72043a08a037615553 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 3 Nov 2024 18:23:35 +0000 Subject: [PATCH 0174/1248] add broad timeout on acquire_origins keys operation Signed-off-by: Jason Volk --- src/service/server_keys/acquire.rs | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 25b676b8..cdaf28b4 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -1,15 +1,17 @@ use std::{ borrow::Borrow, collections::{BTreeMap, BTreeSet}, + time::Duration, }; -use conduit::{debug, debug_warn, error, implement, result::FlatOk, warn}; +use conduit::{debug, 
debug_error, debug_warn, error, implement, result::FlatOk, trace, warn}; use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; +use tokio::time::{timeout_at, Instant}; use super::key_exists; @@ -136,8 +138,12 @@ async fn acquire_origins(&self, batch: I) -> Batch where I: Iterator)> + Send, { + let timeout = Instant::now() + .checked_add(Duration::from_secs(45)) + .expect("timeout overflows"); + let mut requests: FuturesUnordered<_> = batch - .map(|(origin, key_ids)| self.acquire_origin(origin, key_ids)) + .map(|(origin, key_ids)| self.acquire_origin(origin, key_ids, timeout)) .collect(); let mut missing = Batch::new(); @@ -152,11 +158,22 @@ where #[implement(super::Service)] async fn acquire_origin( - &self, origin: OwnedServerName, mut key_ids: Vec, + &self, origin: OwnedServerName, mut key_ids: Vec, timeout: Instant, ) -> (OwnedServerName, Vec) { - if let Ok(server_keys) = self.server_request(&origin).await { - self.add_signing_keys(server_keys.clone()).await; - key_ids.retain(|key_id| !key_exists(&server_keys, key_id)); + match timeout_at(timeout, self.server_request(&origin)).await { + Err(e) => debug_warn!(?origin, "timed out: {e}"), + Ok(Err(e)) => debug_error!(?origin, "{e}"), + Ok(Ok(server_keys)) => { + trace!( + %origin, + ?key_ids, + ?server_keys, + "received server_keys" + ); + + self.add_signing_keys(server_keys.clone()).await; + key_ids.retain(|key_id| !key_exists(&server_keys, key_id)); + }, } (origin, key_ids) From 2e4d9cb37cf7d47a9506ee3697775ddfadcb1d56 Mon Sep 17 00:00:00 2001 From: Kirill Hmelnitski Date: Thu, 31 Oct 2024 23:39:20 +0300 Subject: [PATCH 0175/1248] fix thread pagination refactor logic increase fetch limit for first relates apply other format Co-authored-by: Jason Volk Signed-off-by: Jason Volk --- 
src/api/client/relations.rs | 12 +- src/service/rooms/pdu_metadata/data.rs | 39 ++--- src/service/rooms/pdu_metadata/mod.rs | 197 +++++++++++++------------ 3 files changed, 125 insertions(+), 123 deletions(-) diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index d4384730..0456924c 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -20,8 +20,8 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route( &body.event_id, body.event_type.clone().into(), body.rel_type.clone().into(), - body.from.as_ref(), - body.to.as_ref(), + body.from.as_deref(), + body.to.as_deref(), body.limit, body.recurse, body.dir, @@ -51,8 +51,8 @@ pub(crate) async fn get_relating_events_with_rel_type_route( &body.event_id, None, body.rel_type.clone().into(), - body.from.as_ref(), - body.to.as_ref(), + body.from.as_deref(), + body.to.as_deref(), body.limit, body.recurse, body.dir, @@ -82,8 +82,8 @@ pub(crate) async fn get_relating_events_route( &body.event_id, None, None, - body.from.as_ref(), - body.to.as_ref(), + body.from.as_deref(), + body.to.as_deref(), body.limit, body.recurse, body.dir, diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 4d570e6d..51a43714 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -8,7 +8,7 @@ use conduit::{ }; use database::Map; use futures::{Stream, StreamExt}; -use ruma::{EventId, RoomId, UserId}; +use ruma::{api::Direction, EventId, RoomId, UserId}; use crate::{rooms, Dep}; @@ -45,9 +45,9 @@ impl Data { self.tofrom_relation.aput_raw::(key, []); } - pub(super) fn relations_until<'a>( - &'a self, user_id: &'a UserId, shortroomid: u64, target: u64, until: PduCount, - ) -> impl Stream + Send + 'a + '_ { + pub(super) fn get_relations<'a>( + &'a self, user_id: &'a UserId, shortroomid: u64, target: u64, until: PduCount, dir: Direction, + ) -> impl Stream + Send + '_ { let prefix = 
target.to_be_bytes().to_vec(); let mut current = prefix.clone(); let count_raw = match until { @@ -59,22 +59,23 @@ impl Data { }; current.extend_from_slice(&count_raw.to_be_bytes()); - self.tofrom_relation - .rev_raw_keys_from(¤t) - .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) - .map(|to_from| utils::u64_from_u8(&to_from[(size_of::())..])) - .filter_map(move |from| async move { - let mut pduid = shortroomid.to_be_bytes().to_vec(); - pduid.extend_from_slice(&from.to_be_bytes()); - let mut pdu = self.services.timeline.get_pdu_from_id(&pduid).await.ok()?; + match dir { + Direction::Forward => self.tofrom_relation.raw_keys_from(¤t).boxed(), + Direction::Backward => self.tofrom_relation.rev_raw_keys_from(¤t).boxed(), + } + .ignore_err() + .ready_take_while(move |key| key.starts_with(&prefix)) + .map(|to_from| utils::u64_from_u8(&to_from[(size_of::())..])) + .filter_map(move |from| async move { + let mut pduid = shortroomid.to_be_bytes().to_vec(); + pduid.extend_from_slice(&from.to_be_bytes()); + let mut pdu = self.services.timeline.get_pdu_from_id(&pduid).await.ok()?; + if pdu.sender != user_id { + pdu.remove_transaction_id().log_err().ok(); + } - if pdu.sender != user_id { - pdu.remove_transaction_id().log_err().ok(); - } - - Some((PduCount::Normal(from), pdu)) - }) + Some((PduCount::Normal(from), pdu)) + }) } pub(super) fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) { diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index fb85d031..b1cf2049 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,12 +1,16 @@ mod data; use std::sync::Arc; -use conduit::{utils::stream::IterStream, PduCount, Result}; -use futures::StreamExt; +use conduit::{ + at, + utils::{result::FlatOk, stream::ReadyExt, IterStream}, + PduCount, Result, +}; +use futures::{FutureExt, StreamExt}; use ruma::{ api::{client::relations::get_relating_events, Direction}, 
events::{relation::RelationType, TimelineEventType}, - uint, EventId, RoomId, UInt, UserId, + EventId, RoomId, UInt, UserId, }; use serde::Deserialize; @@ -63,24 +67,24 @@ impl Service { #[allow(clippy::too_many_arguments)] pub async fn paginate_relations_with_filter( &self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: Option, - filter_rel_type: Option, from: Option<&String>, to: Option<&String>, limit: Option, + filter_rel_type: Option, from: Option<&str>, to: Option<&str>, limit: Option, recurse: bool, dir: Direction, ) -> Result { - let from = match from { - Some(from) => PduCount::try_from_string(from)?, - None => match dir { + let from = from + .map(PduCount::try_from_string) + .transpose()? + .unwrap_or_else(|| match dir { Direction::Forward => PduCount::min(), Direction::Backward => PduCount::max(), - }, - }; + }); - let to = to.and_then(|t| PduCount::try_from_string(t).ok()); + let to = to.map(PduCount::try_from_string).flat_ok(); - // Use limit or else 10, with maximum 100 - let limit = limit - .unwrap_or_else(|| uint!(10)) - .try_into() - .unwrap_or(10) + // Use limit or else 30, with maximum 100 + let limit: usize = limit + .map(TryInto::try_into) + .flat_ok() + .unwrap_or(30) .min(100); // Spec (v1.10) recommends depth of at least 3 @@ -90,53 +94,96 @@ impl Service { 1 }; - let relations_until: Vec = self - .relations_until(sender_user, room_id, target, from, depth) - .await?; - - // TODO: should be relations_after - let events: Vec<_> = relations_until + let events: Vec = self + .get_relations(sender_user, room_id, target, from, limit, depth, dir) + .await .into_iter() - .filter(move |(_, pdu): &PdusIterItem| { - if !filter_event_type.as_ref().map_or(true, |t| pdu.kind == *t) { - return false; - } - - let Ok(content) = pdu.get_content::() else { - return false; - }; - - filter_rel_type + .filter(|(_, pdu)| { + filter_event_type .as_ref() - .map_or(true, |r| *r == content.relates_to.rel_type) + .is_none_or(|kind| *kind == 
pdu.kind) + }) + .filter(|(_, pdu)| { + filter_rel_type.as_ref().is_none_or(|rel_type| { + pdu.get_content() + .map(|c: ExtractRelatesToEventId| c.relates_to.rel_type) + .is_ok_and(|r| r == *rel_type) + }) }) - .take(limit) - .take_while(|(k, _)| Some(*k) != to) .stream() .filter_map(|item| self.visibility_filter(sender_user, item)) + .ready_take_while(|(count, _)| Some(*count) != to) + .take(limit) + .collect() + .boxed() + .await; + + let next_batch = match dir { + Direction::Backward => events.first(), + Direction::Forward => events.last(), + } + .map(at!(0)) + .map(|t| t.stringify()); + + Ok(get_relating_events::v1::Response { + next_batch, + prev_batch: Some(from.stringify()), + recursion_depth: recurse.then_some(depth.into()), + chunk: events + .into_iter() + .map(at!(1)) + .map(|pdu| pdu.to_message_like_event()) + .collect(), + }) + } + + #[allow(clippy::too_many_arguments)] + pub async fn get_relations( + &self, user_id: &UserId, room_id: &RoomId, target: &EventId, until: PduCount, limit: usize, max_depth: u8, + dir: Direction, + ) -> Vec { + let room_id = self.services.short.get_or_create_shortroomid(room_id).await; + + let target = match self.services.timeline.get_pdu_count(target).await { + Ok(PduCount::Normal(c)) => c, + // TODO: Support backfilled relations + _ => 0, // This will result in an empty iterator + }; + + let mut pdus: Vec<_> = self + .db + .get_relations(user_id, room_id, target, until, dir) .collect() .await; - let next_token = events.last().map(|(count, _)| count).copied(); + let mut stack: Vec<_> = pdus.iter().map(|pdu| (pdu.clone(), 1)).collect(); - let events_chunk: Vec<_> = match dir { - Direction::Forward => events - .into_iter() - .map(|(_, pdu)| pdu.to_message_like_event()) - .collect(), - Direction::Backward => events - .into_iter() - .rev() // relations are always most recent first - .map(|(_, pdu)| pdu.to_message_like_event()) - .collect(), - }; + 'limit: while let Some(stack_pdu) = stack.pop() { + let target = match stack_pdu.0 
.0 { + PduCount::Normal(c) => c, + // TODO: Support backfilled relations + PduCount::Backfilled(_) => 0, // This will result in an empty iterator + }; - Ok(get_relating_events::v1::Response { - chunk: events_chunk, - next_batch: next_token.map(|t| t.stringify()), - prev_batch: Some(from.stringify()), - recursion_depth: recurse.then_some(depth.into()), - }) + let relations: Vec<_> = self + .db + .get_relations(user_id, room_id, target, until, dir) + .collect() + .await; + + for relation in relations { + if stack_pdu.1 < max_depth { + stack.push((relation.clone(), stack_pdu.1.saturating_add(1))); + } + + pdus.push(relation); + if pdus.len() >= limit { + break 'limit; + } + } + } + + pdus } async fn visibility_filter(&self, sender_user: &UserId, item: PdusIterItem) -> Option { @@ -149,52 +196,6 @@ impl Service { .then_some(item) } - pub async fn relations_until( - &self, user_id: &UserId, room_id: &RoomId, target: &EventId, until: PduCount, max_depth: u8, - ) -> Result> { - let room_id = self.services.short.get_or_create_shortroomid(room_id).await; - - let target = match self.services.timeline.get_pdu_count(target).await { - Ok(PduCount::Normal(c)) => c, - // TODO: Support backfilled relations - _ => 0, // This will result in an empty iterator - }; - - let mut pdus: Vec = self - .db - .relations_until(user_id, room_id, target, until) - .collect() - .await; - - let mut stack: Vec<_> = pdus.clone().into_iter().map(|pdu| (pdu, 1)).collect(); - - while let Some(stack_pdu) = stack.pop() { - let target = match stack_pdu.0 .0 { - PduCount::Normal(c) => c, - // TODO: Support backfilled relations - PduCount::Backfilled(_) => 0, // This will result in an empty iterator - }; - - let relations: Vec = self - .db - .relations_until(user_id, room_id, target, until) - .collect() - .await; - - for relation in relations { - if stack_pdu.1 < max_depth { - stack.push((relation.clone(), stack_pdu.1.saturating_add(1))); - } - - pdus.push(relation); - } - } - - pdus.sort_by(|a, b| 
a.0.cmp(&b.0)); - - Ok(pdus) - } - #[inline] #[tracing::instrument(skip_all, level = "debug")] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) { From 9da523c004aba6e9d1d51c73de0524d5f6433bbd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 2 Nov 2024 06:12:54 +0000 Subject: [PATCH 0176/1248] refactor for stronger RawPduId type implement standard traits for PduCount enable serde for arrayvec typedef various shortid's pducount simplifications split parts of pdu_metadata service to core/pdu and api/relations remove some yields; improve var names/syntax tweak types for limit timeline limit arguments Signed-off-by: Jason Volk --- Cargo.lock | 3 + Cargo.toml | 1 + src/api/client/context.rs | 4 +- src/api/client/membership.rs | 7 +- src/api/client/message.rs | 20 +-- src/api/client/relations.rs | 199 +++++++++++++++------- src/api/client/sync/mod.rs | 11 +- src/api/client/sync/v3.rs | 65 +++---- src/api/client/sync/v4.rs | 24 ++- src/api/client/threads.rs | 40 ++--- src/api/server/send_join.rs | 4 +- src/api/server/send_leave.rs | 6 +- src/core/mod.rs | 2 +- src/core/pdu/count.rs | 142 ++++++++++++--- src/core/pdu/id.rs | 22 +++ src/core/pdu/mod.rs | 6 + src/core/pdu/raw_id.rs | 117 +++++++++++++ src/core/pdu/relation.rs | 22 +++ src/core/pdu/tests.rs | 19 +++ src/service/migrations.rs | 2 +- src/service/rooms/auth_chain/data.rs | 8 +- src/service/rooms/auth_chain/mod.rs | 18 +- src/service/rooms/event_handler/mod.rs | 13 +- src/service/rooms/pdu_metadata/data.rs | 49 +++--- src/service/rooms/pdu_metadata/mod.rs | 116 +------------ src/service/rooms/search/mod.rs | 36 ++-- src/service/rooms/short/mod.rs | 8 +- src/service/rooms/spaces/mod.rs | 6 +- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/state_accessor/data.rs | 6 +- src/service/rooms/state_accessor/mod.rs | 30 ++-- src/service/rooms/state_compressor/mod.rs | 42 +++-- src/service/rooms/threads/data.rs | 52 +++--- src/service/rooms/threads/mod.rs | 6 +- 
src/service/rooms/timeline/data.rs | 124 ++++++-------- src/service/rooms/timeline/mod.rs | 75 ++++---- src/service/rooms/timeline/pduid.rs | 13 -- src/service/rooms/user/mod.rs | 6 +- src/service/sending/data.rs | 12 +- src/service/sending/mod.rs | 23 +-- src/service/sending/sender.rs | 6 +- 41 files changed, 796 insertions(+), 573 deletions(-) create mode 100644 src/core/pdu/id.rs create mode 100644 src/core/pdu/raw_id.rs create mode 100644 src/core/pdu/relation.rs create mode 100644 src/core/pdu/tests.rs delete mode 100644 src/service/rooms/timeline/pduid.rs diff --git a/Cargo.lock b/Cargo.lock index 44856753..f729d3d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -76,6 +76,9 @@ name = "arrayvec" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] [[package]] name = "as_variant" diff --git a/Cargo.toml b/Cargo.toml index 043790f8..3ac1556c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,7 @@ name = "conduit" [workspace.dependencies.arrayvec] version = "0.7.4" +features = ["std", "serde"] [workspace.dependencies.const-str] version = "0.5.7" diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 9bf0c467..5b492cb1 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -168,12 +168,12 @@ pub(crate) async fn get_context_route( start: events_before .last() - .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()) + .map_or_else(|| base_token.to_string(), |(count, _)| count.to_string()) .into(), end: events_after .last() - .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()) + .map_or_else(|| base_token.to_string(), |(count, _)| count.to_string()) .into(), events_before: events_before diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index c41e93fa..fa71c0c8 100644 --- a/src/api/client/membership.rs +++ 
b/src/api/client/membership.rs @@ -1376,15 +1376,12 @@ pub(crate) async fn invite_helper( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id: Vec = services + let pdu_id = services .rooms .event_handler .handle_incoming_pdu(&origin, room_id, &event_id, value, true) .await? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + .ok_or_else(|| err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))))?; services.sending.send_pdu_room(room_id, &pdu_id).await?; return Ok(()); diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 4fc58d9f..cb261a7f 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -62,19 +62,17 @@ pub(crate) async fn get_message_events_route( let room_id = &body.room_id; let filter = &body.filter; - let from_default = match body.dir { - Direction::Forward => PduCount::min(), - Direction::Backward => PduCount::max(), - }; - - let from = body + let from: PduCount = body .from .as_deref() - .map(PduCount::try_from_string) + .map(str::parse) .transpose()? 
- .unwrap_or(from_default); + .unwrap_or_else(|| match body.dir { + Direction::Forward => PduCount::min(), + Direction::Backward => PduCount::max(), + }); - let to = body.to.as_deref().map(PduCount::try_from_string).flat_ok(); + let to: Option = body.to.as_deref().map(str::parse).flat_ok(); let limit: usize = body .limit @@ -156,8 +154,8 @@ pub(crate) async fn get_message_events_route( .collect(); Ok(get_message_events::v3::Response { - start: from.stringify(), - end: next_token.as_ref().map(PduCount::stringify), + start: from.to_string(), + end: next_token.as_ref().map(PduCount::to_string), chunk, state, }) diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 0456924c..ef7035e2 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,34 +1,43 @@ use axum::extract::State; -use ruma::api::client::relations::{ - get_relating_events, get_relating_events_with_rel_type, get_relating_events_with_rel_type_and_event_type, +use conduit::{ + at, + utils::{result::FlatOk, IterStream, ReadyExt}, + PduCount, Result, }; +use futures::{FutureExt, StreamExt}; +use ruma::{ + api::{ + client::relations::{ + get_relating_events, get_relating_events_with_rel_type, get_relating_events_with_rel_type_and_event_type, + }, + Direction, + }, + events::{relation::RelationType, TimelineEventType}, + EventId, RoomId, UInt, UserId, +}; +use service::{rooms::timeline::PdusIterItem, Services}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_deref().expect("user is authenticated"); - - let res = services - .rooms - .pdu_metadata - .paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - body.event_type.clone().into(), - body.rel_type.clone().into(), - body.from.as_deref(), - 
body.to.as_deref(), - body.limit, - body.recurse, - body.dir, - ) - .await?; - - Ok(get_relating_events_with_rel_type_and_event_type::v1::Response { + paginate_relations_with_filter( + &services, + body.sender_user(), + &body.room_id, + &body.event_id, + body.event_type.clone().into(), + body.rel_type.clone().into(), + body.from.as_deref(), + body.to.as_deref(), + body.limit, + body.recurse, + body.dir, + ) + .await + .map(|res| get_relating_events_with_rel_type_and_event_type::v1::Response { chunk: res.chunk, next_batch: res.next_batch, prev_batch: res.prev_batch, @@ -40,26 +49,21 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route( pub(crate) async fn get_relating_events_with_rel_type_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_deref().expect("user is authenticated"); - - let res = services - .rooms - .pdu_metadata - .paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - None, - body.rel_type.clone().into(), - body.from.as_deref(), - body.to.as_deref(), - body.limit, - body.recurse, - body.dir, - ) - .await?; - - Ok(get_relating_events_with_rel_type::v1::Response { + paginate_relations_with_filter( + &services, + body.sender_user(), + &body.room_id, + &body.event_id, + None, + body.rel_type.clone().into(), + body.from.as_deref(), + body.to.as_deref(), + body.limit, + body.recurse, + body.dir, + ) + .await + .map(|res| get_relating_events_with_rel_type::v1::Response { chunk: res.chunk, next_batch: res.next_batch, prev_batch: res.prev_batch, @@ -71,22 +75,103 @@ pub(crate) async fn get_relating_events_with_rel_type_route( pub(crate) async fn get_relating_events_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_deref().expect("user is authenticated"); + paginate_relations_with_filter( + &services, + body.sender_user(), + &body.room_id, + &body.event_id, + None, + None, + body.from.as_deref(), + 
body.to.as_deref(), + body.limit, + body.recurse, + body.dir, + ) + .await +} + +#[allow(clippy::too_many_arguments)] +async fn paginate_relations_with_filter( + services: &Services, sender_user: &UserId, room_id: &RoomId, target: &EventId, + filter_event_type: Option, filter_rel_type: Option, from: Option<&str>, + to: Option<&str>, limit: Option, recurse: bool, dir: Direction, +) -> Result { + let from: PduCount = from + .map(str::parse) + .transpose()? + .unwrap_or_else(|| match dir { + Direction::Forward => PduCount::min(), + Direction::Backward => PduCount::max(), + }); + + let to: Option = to.map(str::parse).flat_ok(); + + // Use limit or else 30, with maximum 100 + let limit: usize = limit + .map(TryInto::try_into) + .flat_ok() + .unwrap_or(30) + .min(100); + + // Spec (v1.10) recommends depth of at least 3 + let depth: u8 = if recurse { + 3 + } else { + 1 + }; + + let events: Vec = services + .rooms + .pdu_metadata + .get_relations(sender_user, room_id, target, from, limit, depth, dir) + .await + .into_iter() + .filter(|(_, pdu)| { + filter_event_type + .as_ref() + .is_none_or(|kind| *kind == pdu.kind) + }) + .filter(|(_, pdu)| { + filter_rel_type + .as_ref() + .is_none_or(|rel_type| pdu.relation_type_equal(rel_type)) + }) + .stream() + .filter_map(|item| visibility_filter(services, sender_user, item)) + .ready_take_while(|(count, _)| Some(*count) != to) + .take(limit) + .collect() + .boxed() + .await; + + let next_batch = match dir { + Direction::Backward => events.first(), + Direction::Forward => events.last(), + } + .map(at!(0)) + .as_ref() + .map(ToString::to_string); + + Ok(get_relating_events::v1::Response { + next_batch, + prev_batch: Some(from.to_string()), + recursion_depth: recurse.then_some(depth.into()), + chunk: events + .into_iter() + .map(at!(1)) + .map(|pdu| pdu.to_message_like_event()) + .collect(), + }) +} + +async fn visibility_filter(services: &Services, sender_user: &UserId, item: PdusIterItem) -> Option { + let (_, pdu) = &item; 
services .rooms - .pdu_metadata - .paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - None, - None, - body.from.as_deref(), - body.to.as_deref(), - body.limit, - body.recurse, - body.dir, - ) + .state_accessor + .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) .await + .then_some(item) } diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index ed22010c..7aec7186 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -1,10 +1,7 @@ mod v3; mod v4; -use conduit::{ - utils::{math::usize_from_u64_truncated, ReadyExt}, - PduCount, -}; +use conduit::{utils::ReadyExt, PduCount}; use futures::StreamExt; use ruma::{RoomId, UserId}; @@ -12,7 +9,7 @@ pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route}; use crate::{service::Services, Error, PduEvent, Result}; async fn load_timeline( - services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64, + services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: usize, ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { let last_timeline_count = services .rooms @@ -29,12 +26,12 @@ async fn load_timeline( .timeline .pdus_until(sender_user, room_id, PduCount::max()) .await? 
- .ready_take_while(|(pducount, _)| pducount > &roomsincecount); + .ready_take_while(|(pducount, _)| *pducount > roomsincecount); // Take the last events for the timeline let timeline_pdus: Vec<_> = non_timeline_pdus .by_ref() - .take(usize_from_u64_truncated(limit)) + .take(limit) .collect::>() .await .into_iter() diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index ccca1f85..08048902 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -432,28 +432,26 @@ async fn handle_left_room( left_state_ids.insert(leave_shortstatekey, left_event_id); - let mut i: u8 = 0; - for (key, id) in left_state_ids { - if full_state || since_state_ids.get(&key) != Some(&id) { - let (event_type, state_key) = services.rooms.short.get_statekey_from_short(key).await?; + for (shortstatekey, event_id) in left_state_ids { + if full_state || since_state_ids.get(&shortstatekey) != Some(&event_id) { + let (event_type, state_key) = services + .rooms + .short + .get_statekey_from_short(shortstatekey) + .await?; + // TODO: Delete "element_hacks" when this is resolved: https://github.com/vector-im/element-web/issues/22565 if !lazy_load_enabled - || event_type != StateEventType::RoomMember - || full_state - // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 - || (cfg!(feature = "element_hacks") && *sender_user == state_key) + || event_type != StateEventType::RoomMember + || full_state + || (cfg!(feature = "element_hacks") && *sender_user == state_key) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state not found: {}", id); + let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { + error!("Pdu in state not found: {event_id}"); continue; }; left_state_events.push(pdu.to_sync_state_event()); - - i = i.wrapping_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } } } } @@ -542,7 +540,7 @@ async fn load_joined_room( let insert_lock = 
services.rooms.timeline.mutex_insert.lock(room_id).await; drop(insert_lock); - let (timeline_pdus, limited) = load_timeline(services, sender_user, room_id, sincecount, 10).await?; + let (timeline_pdus, limited) = load_timeline(services, sender_user, room_id, sincecount, 10_usize).await?; let send_notification_counts = !timeline_pdus.is_empty() || services @@ -678,8 +676,7 @@ async fn load_joined_room( let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); - let mut i: u8 = 0; - for (shortstatekey, id) in current_state_ids { + for (shortstatekey, event_id) in current_state_ids { let (event_type, state_key) = services .rooms .short @@ -687,24 +684,22 @@ async fn load_joined_room( .await?; if event_type != StateEventType::RoomMember { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state not found: {id}"); + let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { + error!("Pdu in state not found: {event_id}"); continue; }; - state_events.push(pdu); - i = i.wrapping_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } else if !lazy_load_enabled - || full_state - || timeline_users.contains(&state_key) - // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 - || (cfg!(feature = "element_hacks") && *sender_user == state_key) + state_events.push(pdu); + continue; + } + + // TODO: Delete "element_hacks" when this is resolved: https://github.com/vector-im/element-web/issues/22565 + if !lazy_load_enabled + || full_state || timeline_users.contains(&state_key) + || (cfg!(feature = "element_hacks") && *sender_user == state_key) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state not found: {id}"); + let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { + error!("Pdu in state not found: {event_id}"); continue; }; @@ -712,12 +707,8 @@ async fn load_joined_room( if let Ok(uid) = 
UserId::parse(&state_key) { lazy_loaded.insert(uid); } - state_events.push(pdu); - i = i.wrapping_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } + state_events.push(pdu); } } diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index f8ada81c..11e3830c 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -8,7 +8,7 @@ use axum::extract::State; use conduit::{ debug, error, extract_variant, utils::{ - math::{ruma_from_usize, usize_from_ruma}, + math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, warn, Error, PduCount, Result, @@ -350,14 +350,16 @@ pub(crate) async fn sync_events_v4_route( new_known_rooms.extend(room_ids.iter().cloned()); for room_id in &room_ids { - let todo_room = todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0, u64::MAX)); + let todo_room = + todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); - let limit = list + let limit: usize = list .room_details .timeline_limit - .map_or(10, u64::from) + .map(u64::from) + .map_or(10, usize_from_u64_truncated) .min(100); todo_room @@ -406,8 +408,14 @@ pub(crate) async fn sync_events_v4_route( } let todo_room = todo_rooms .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0, u64::MAX)); - let limit = room.timeline_limit.map_or(10, u64::from).min(100); + .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); + + let limit: usize = room + .timeline_limit + .map(u64::from) + .map_or(10, usize_from_u64_truncated) + .min(100); + todo_room.0.extend(room.required_state.iter().cloned()); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 50f6cdfb..02cf7992 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,19 +1,14 @@ use axum::extract::State; -use conduit::PduEvent; +use conduit::{PduCount, PduEvent}; use 
futures::StreamExt; -use ruma::{ - api::client::{error::ErrorKind, threads::get_threads}, - uint, -}; +use ruma::{api::client::threads::get_threads, uint}; -use crate::{Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `GET /_matrix/client/r0/rooms/{roomId}/threads` pub(crate) async fn get_threads_route( - State(services): State, body: Ruma, + State(services): State, ref body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - // Use limit or else 10, with maximum 100 let limit = body .limit @@ -22,38 +17,39 @@ pub(crate) async fn get_threads_route( .unwrap_or(10) .min(100); - let from = if let Some(from) = &body.from { - from.parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))? - } else { - u64::MAX - }; + let from: PduCount = body + .from + .as_deref() + .map(str::parse) + .transpose()? + .unwrap_or_else(PduCount::max); - let room_id = &body.room_id; - let threads: Vec<(u64, PduEvent)> = services + let threads: Vec<(PduCount, PduEvent)> = services .rooms .threads - .threads_until(sender_user, &body.room_id, from, &body.include) + .threads_until(body.sender_user(), &body.room_id, from, &body.include) .await? 
.take(limit) .filter_map(|(count, pdu)| async move { services .rooms .state_accessor - .user_can_see_event(sender_user, room_id, &pdu.event_id) + .user_can_see_event(body.sender_user(), &body.room_id, &pdu.event_id) .await .then_some((count, pdu)) }) .collect() .await; - let next_batch = threads.last().map(|(count, _)| count.to_string()); - Ok(get_threads::v1::Response { + next_batch: threads + .last() + .map(|(count, _)| count) + .map(ToString::to_string), + chunk: threads .into_iter() .map(|(_, pdu)| pdu.to_room_event()) .collect(), - next_batch, }) } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index c3273baf..f2ede9d0 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -156,12 +156,12 @@ async fn create_join_event( .lock(room_id) .await; - let pdu_id: Vec = services + let pdu_id = services .rooms .event_handler .handle_incoming_pdu(&origin, room_id, &event_id, value.clone(), true) .await? - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Could not accept as timeline event."))?; + .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; drop(mutex_lock); diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index 7b4a8aee..448e5de3 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,7 +1,7 @@ #![allow(deprecated)] use axum::extract::State; -use conduit::{utils::ReadyExt, Error, Result}; +use conduit::{err, utils::ReadyExt, Error, Result}; use ruma::{ api::{client::error::ErrorKind, federation::membership::create_leave_event}, events::{ @@ -142,12 +142,12 @@ async fn create_leave_event( .lock(room_id) .await; - let pdu_id: Vec = services + let pdu_id = services .rooms .event_handler .handle_incoming_pdu(origin, room_id, &event_id, value, true) .await? 
- .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Could not accept as timeline event."))?; + .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; drop(mutex_lock); diff --git a/src/core/mod.rs b/src/core/mod.rs index 1b7b8fa1..4ab84730 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -17,7 +17,7 @@ pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{Event, PduBuilder, PduCount, PduEvent}; +pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; diff --git a/src/core/pdu/count.rs b/src/core/pdu/count.rs index 094988b6..90e552e8 100644 --- a/src/core/pdu/count.rs +++ b/src/core/pdu/count.rs @@ -1,38 +1,135 @@ -use std::cmp::Ordering; +#![allow(clippy::cast_possible_wrap, clippy::cast_sign_loss, clippy::as_conversions)] -use ruma::api::client::error::ErrorKind; +use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr}; -use crate::{Error, Result}; +use crate::{err, Error, Result}; #[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] pub enum PduCount { - Backfilled(u64), Normal(u64), + Backfilled(i64), } impl PduCount { + #[inline] #[must_use] - pub fn min() -> Self { Self::Backfilled(u64::MAX) } + pub fn from_unsigned(unsigned: u64) -> Self { Self::from_signed(unsigned as i64) } + #[inline] #[must_use] - pub fn max() -> Self { Self::Normal(u64::MAX) } - - pub fn try_from_string(token: &str) -> Result { - if let Some(stripped_token) = token.strip_prefix('-') { - stripped_token.parse().map(PduCount::Backfilled) - } else { - token.parse().map(PduCount::Normal) + pub fn from_signed(signed: i64) -> Self { + match signed { + i64::MIN..=0 => Self::Backfilled(signed), + _ => Self::Normal(signed as u64), } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid pagination token.")) } + #[inline] #[must_use] - pub fn 
stringify(&self) -> String { + pub fn into_unsigned(self) -> u64 { + self.debug_assert_valid(); match self { - Self::Backfilled(x) => format!("-{x}"), - Self::Normal(x) => x.to_string(), + Self::Normal(i) => i, + Self::Backfilled(i) => i as u64, } } + + #[inline] + #[must_use] + pub fn into_signed(self) -> i64 { + self.debug_assert_valid(); + match self { + Self::Normal(i) => i as i64, + Self::Backfilled(i) => i, + } + } + + #[inline] + #[must_use] + pub fn into_normal(self) -> Self { + self.debug_assert_valid(); + match self { + Self::Normal(i) => Self::Normal(i), + Self::Backfilled(_) => Self::Normal(0), + } + } + + #[inline] + pub fn checked_add(self, add: u64) -> Result { + Ok(match self { + Self::Normal(i) => Self::Normal( + i.checked_add(add) + .ok_or_else(|| err!(Arithmetic("PduCount::Normal overflow")))?, + ), + Self::Backfilled(i) => Self::Backfilled( + i.checked_add(add as i64) + .ok_or_else(|| err!(Arithmetic("PduCount::Backfilled overflow")))?, + ), + }) + } + + #[inline] + pub fn checked_sub(self, sub: u64) -> Result { + Ok(match self { + Self::Normal(i) => Self::Normal( + i.checked_sub(sub) + .ok_or_else(|| err!(Arithmetic("PduCount::Normal underflow")))?, + ), + Self::Backfilled(i) => Self::Backfilled( + i.checked_sub(sub as i64) + .ok_or_else(|| err!(Arithmetic("PduCount::Backfilled underflow")))?, + ), + }) + } + + #[inline] + #[must_use] + pub fn saturating_add(self, add: u64) -> Self { + match self { + Self::Normal(i) => Self::Normal(i.saturating_add(add)), + Self::Backfilled(i) => Self::Backfilled(i.saturating_add(add as i64)), + } + } + + #[inline] + #[must_use] + pub fn saturating_sub(self, sub: u64) -> Self { + match self { + Self::Normal(i) => Self::Normal(i.saturating_sub(sub)), + Self::Backfilled(i) => Self::Backfilled(i.saturating_sub(sub as i64)), + } + } + + #[inline] + #[must_use] + pub fn min() -> Self { Self::Backfilled(i64::MIN) } + + #[inline] + #[must_use] + pub fn max() -> Self { Self::Normal(i64::MAX as u64) } + + #[inline] + 
pub(crate) fn debug_assert_valid(&self) { + if let Self::Backfilled(i) = self { + debug_assert!(*i <= 0, "Backfilled sequence must be negative"); + } + } +} + +impl Display for PduCount { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + self.debug_assert_valid(); + match self { + Self::Normal(i) => write!(f, "{i}"), + Self::Backfilled(i) => write!(f, "{i}"), + } + } +} + +impl FromStr for PduCount { + type Err = Error; + + fn from_str(token: &str) -> Result { Ok(Self::from_signed(token.parse()?)) } } impl PartialOrd for PduCount { @@ -40,12 +137,9 @@ impl PartialOrd for PduCount { } impl Ord for PduCount { - fn cmp(&self, other: &Self) -> Ordering { - match (self, other) { - (Self::Normal(s), Self::Normal(o)) => s.cmp(o), - (Self::Backfilled(s), Self::Backfilled(o)) => o.cmp(s), - (Self::Normal(_), Self::Backfilled(_)) => Ordering::Greater, - (Self::Backfilled(_), Self::Normal(_)) => Ordering::Less, - } - } + fn cmp(&self, other: &Self) -> Ordering { self.into_signed().cmp(&other.into_signed()) } +} + +impl Default for PduCount { + fn default() -> Self { Self::Normal(0) } } diff --git a/src/core/pdu/id.rs b/src/core/pdu/id.rs new file mode 100644 index 00000000..05d11904 --- /dev/null +++ b/src/core/pdu/id.rs @@ -0,0 +1,22 @@ +use super::{PduCount, RawPduId}; +use crate::utils::u64_from_u8x8; + +pub type ShortRoomId = ShortId; +pub type ShortEventId = ShortId; +pub type ShortId = u64; + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct PduId { + pub shortroomid: ShortRoomId, + pub shorteventid: PduCount, +} + +impl From for PduId { + #[inline] + fn from(raw: RawPduId) -> Self { + Self { + shortroomid: u64_from_u8x8(raw.shortroomid()), + shorteventid: PduCount::from_unsigned(u64_from_u8x8(raw.shorteventid())), + } + } +} diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 53fcd0a9..c785c99e 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -4,8 +4,12 @@ mod count; mod event; mod event_id; mod filter; +mod id; 
+mod raw_id; mod redact; +mod relation; mod strip; +mod tests; mod unsigned; use std::{cmp::Ordering, sync::Arc}; @@ -21,6 +25,8 @@ pub use self::{ count::PduCount, event::Event, event_id::*, + id::*, + raw_id::*, }; use crate::Result; diff --git a/src/core/pdu/raw_id.rs b/src/core/pdu/raw_id.rs new file mode 100644 index 00000000..faba1cbf --- /dev/null +++ b/src/core/pdu/raw_id.rs @@ -0,0 +1,117 @@ +use arrayvec::ArrayVec; + +use super::{PduCount, PduId, ShortEventId, ShortId, ShortRoomId}; + +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +pub enum RawPduId { + Normal(RawPduIdNormal), + Backfilled(RawPduIdBackfilled), +} + +type RawPduIdNormal = [u8; RawPduId::NORMAL_LEN]; +type RawPduIdBackfilled = [u8; RawPduId::BACKFILLED_LEN]; + +const INT_LEN: usize = size_of::(); + +impl RawPduId { + const BACKFILLED_LEN: usize = size_of::() + INT_LEN + size_of::(); + const MAX_LEN: usize = Self::BACKFILLED_LEN; + const NORMAL_LEN: usize = size_of::() + size_of::(); + + #[inline] + #[must_use] + pub fn pdu_count(&self) -> PduCount { + let id: PduId = (*self).into(); + id.shorteventid + } + + #[inline] + #[must_use] + pub fn shortroomid(self) -> [u8; INT_LEN] { + match self { + Self::Normal(raw) => raw[0..INT_LEN] + .try_into() + .expect("normal raw shortroomid array from slice"), + Self::Backfilled(raw) => raw[0..INT_LEN] + .try_into() + .expect("backfilled raw shortroomid array from slice"), + } + } + + #[inline] + #[must_use] + pub fn shorteventid(self) -> [u8; INT_LEN] { + match self { + Self::Normal(raw) => raw[INT_LEN..INT_LEN * 2] + .try_into() + .expect("normal raw shorteventid array from slice"), + Self::Backfilled(raw) => raw[INT_LEN * 2..INT_LEN * 3] + .try_into() + .expect("backfilled raw shorteventid array from slice"), + } + } + + #[inline] + #[must_use] + pub fn as_bytes(&self) -> &[u8] { + match self { + Self::Normal(ref raw) => raw, + Self::Backfilled(ref raw) => raw, + } + } +} + +impl AsRef<[u8]> for RawPduId { + #[inline] + fn as_ref(&self) -> &[u8] 
{ self.as_bytes() } +} + +impl From<&[u8]> for RawPduId { + #[inline] + fn from(id: &[u8]) -> Self { + match id.len() { + Self::NORMAL_LEN => Self::Normal( + id[0..Self::NORMAL_LEN] + .try_into() + .expect("normal RawPduId from [u8]"), + ), + Self::BACKFILLED_LEN => Self::Backfilled( + id[0..Self::BACKFILLED_LEN] + .try_into() + .expect("backfilled RawPduId from [u8]"), + ), + _ => unimplemented!("unrecognized RawPduId length"), + } + } +} + +impl From for RawPduId { + #[inline] + fn from(id: PduId) -> Self { + const MAX_LEN: usize = RawPduId::MAX_LEN; + type RawVec = ArrayVec; + + let mut vec = RawVec::new(); + vec.extend(id.shortroomid.to_be_bytes()); + id.shorteventid.debug_assert_valid(); + match id.shorteventid { + PduCount::Normal(shorteventid) => { + vec.extend(shorteventid.to_be_bytes()); + Self::Normal( + vec.as_ref() + .try_into() + .expect("RawVec into RawPduId::Normal"), + ) + }, + PduCount::Backfilled(shorteventid) => { + vec.extend(0_u64.to_be_bytes()); + vec.extend(shorteventid.to_be_bytes()); + Self::Backfilled( + vec.as_ref() + .try_into() + .expect("RawVec into RawPduId::Backfilled"), + ) + }, + } + } +} diff --git a/src/core/pdu/relation.rs b/src/core/pdu/relation.rs new file mode 100644 index 00000000..ae156a3d --- /dev/null +++ b/src/core/pdu/relation.rs @@ -0,0 +1,22 @@ +use ruma::events::relation::RelationType; +use serde::Deserialize; + +use crate::implement; + +#[derive(Clone, Debug, Deserialize)] +struct ExtractRelType { + rel_type: RelationType, +} +#[derive(Clone, Debug, Deserialize)] +struct ExtractRelatesToEventId { + #[serde(rename = "m.relates_to")] + relates_to: ExtractRelType, +} + +#[implement(super::PduEvent)] +#[must_use] +pub fn relation_type_equal(&self, rel_type: &RelationType) -> bool { + self.get_content() + .map(|c: ExtractRelatesToEventId| c.relates_to.rel_type) + .is_ok_and(|r| r == *rel_type) +} diff --git a/src/core/pdu/tests.rs b/src/core/pdu/tests.rs new file mode 100644 index 00000000..30ec23ba --- /dev/null +++ 
b/src/core/pdu/tests.rs @@ -0,0 +1,19 @@ +#![cfg(test)] + +use super::PduCount; + +#[test] +fn backfilled_parse() { + let count: PduCount = "-987654".parse().expect("parse() failed"); + let backfilled = matches!(count, PduCount::Backfilled(_)); + + assert!(backfilled, "not backfilled variant"); +} + +#[test] +fn normal_parse() { + let count: PduCount = "987654".parse().expect("parse() failed"); + let backfilled = matches!(count, PduCount::Backfilled(_)); + + assert!(!backfilled, "backfilled variant"); +} diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 45323fa2..d6c342f8 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -71,7 +71,7 @@ async fn fresh(services: &Services) -> Result<()> { db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); // Create the admin room and server user on first run - crate::admin::create_admin_room(services).await?; + crate::admin::create_admin_room(services).boxed().await?; warn!( "Created new {} database with version {DATABASE_VERSION}", diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 5c9dbda8..3c36928a 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -7,9 +7,11 @@ use conduit::{err, utils, utils::math::usize_from_f64, Err, Result}; use database::Map; use lru_cache::LruCache; +use crate::rooms::short::ShortEventId; + pub(super) struct Data { shorteventid_authchain: Arc, - pub(super) auth_chain_cache: Mutex, Arc<[u64]>>>, + pub(super) auth_chain_cache: Mutex, Arc<[ShortEventId]>>>, } impl Data { @@ -24,7 +26,7 @@ impl Data { } } - pub(super) async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { + pub(super) async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { debug_assert!(!key.is_empty(), "auth_chain key must not be empty"); // Check RAM cache @@ -63,7 +65,7 @@ impl Data { Ok(chain) } - pub(super) fn cache_auth_chain(&self, key: Vec, 
auth_chain: Arc<[u64]>) { + pub(super) fn cache_auth_chain(&self, key: Vec, auth_chain: Arc<[ShortEventId]>) { debug_assert!(!key.is_empty(), "auth_chain key must not be empty"); // Only persist single events in db diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 1387bc7d..c22732c2 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -10,7 +10,7 @@ use futures::Stream; use ruma::{EventId, RoomId}; use self::data::Data; -use crate::{rooms, Dep}; +use crate::{rooms, rooms::short::ShortEventId, Dep}; pub struct Service { services: Services, @@ -64,7 +64,7 @@ impl Service { } #[tracing::instrument(skip_all, name = "auth_chain")] - pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result> { + pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result> { const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); @@ -97,7 +97,7 @@ impl Service { continue; } - let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); + let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { trace!("Found cache entry for whole chunk"); full_auth_chain.extend(cached.iter().copied()); @@ -156,7 +156,7 @@ impl Service { } #[tracing::instrument(skip(self, room_id))] - async fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + async fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); @@ -195,19 +195,19 @@ impl Service { } #[inline] - pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { + pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { 
self.db.get_cached_eventid_authchain(key).await } #[tracing::instrument(skip(self), level = "debug")] - pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { - let val = auth_chain.iter().copied().collect::>(); + pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { + let val = auth_chain.iter().copied().collect::>(); self.db.cache_auth_chain(key, val); } #[tracing::instrument(skip(self), level = "debug")] - pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &Vec) { - let val = auth_chain.iter().copied().collect::>(); + pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &Vec) { + let val = auth_chain.iter().copied().collect::>(); self.db.cache_auth_chain(key, val); } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index adebd332..f76f817d 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -35,7 +35,10 @@ use ruma::{ use crate::{ globals, rooms, - rooms::state_compressor::{CompressedStateEvent, HashSetCompressStateEvent}, + rooms::{ + state_compressor::{CompressedStateEvent, HashSetCompressStateEvent}, + timeline::RawPduId, + }, sending, server_keys, Dep, }; @@ -136,10 +139,10 @@ impl Service { pub async fn handle_incoming_pdu<'a>( &self, origin: &'a ServerName, room_id: &'a RoomId, event_id: &'a EventId, value: BTreeMap, is_timeline_event: bool, - ) -> Result>> { + ) -> Result> { // 1. 
Skip the PDU if we already have it as a timeline event if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await { - return Ok(Some(pdu_id.to_vec())); + return Ok(Some(pdu_id)); } // 1.1 Check the server is in the room @@ -488,7 +491,7 @@ impl Service { pub async fn upgrade_outlier_to_timeline_pdu( &self, incoming_pdu: Arc, val: BTreeMap, create_event: &PduEvent, origin: &ServerName, room_id: &RoomId, - ) -> Result>> { + ) -> Result> { // Skip the PDU if we already have it as a timeline event if let Ok(pduid) = self .services @@ -496,7 +499,7 @@ impl Service { .get_pdu_id(&incoming_pdu.event_id) .await { - return Ok(Some(pduid.to_vec())); + return Ok(Some(pduid)); } if self diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 51a43714..3fc06591 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -2,15 +2,21 @@ use std::{mem::size_of, sync::Arc}; use conduit::{ result::LogErr, - utils, - utils::{stream::TryIgnore, ReadyExt}, + utils::{stream::TryIgnore, u64_from_u8, ReadyExt}, PduCount, PduEvent, }; use database::Map; use futures::{Stream, StreamExt}; use ruma::{api::Direction, EventId, RoomId, UserId}; -use crate::{rooms, Dep}; +use crate::{ + rooms, + rooms::{ + short::{ShortEventId, ShortRoomId}, + timeline::{PduId, RawPduId}, + }, + Dep, +}; pub(super) struct Data { tofrom_relation: Arc, @@ -46,35 +52,36 @@ impl Data { } pub(super) fn get_relations<'a>( - &'a self, user_id: &'a UserId, shortroomid: u64, target: u64, until: PduCount, dir: Direction, + &'a self, user_id: &'a UserId, shortroomid: ShortRoomId, target: ShortEventId, from: PduCount, dir: Direction, ) -> impl Stream + Send + '_ { - let prefix = target.to_be_bytes().to_vec(); - let mut current = prefix.clone(); - let count_raw = match until { - PduCount::Normal(x) => x.saturating_sub(1), - PduCount::Backfilled(x) => { - current.extend_from_slice(&0_u64.to_be_bytes()); - 
u64::MAX.saturating_sub(x).saturating_sub(1) - }, - }; - current.extend_from_slice(&count_raw.to_be_bytes()); + let current: RawPduId = PduId { + shortroomid, + shorteventid: from, + } + .into(); match dir { Direction::Forward => self.tofrom_relation.raw_keys_from(¤t).boxed(), Direction::Backward => self.tofrom_relation.rev_raw_keys_from(¤t).boxed(), } .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) - .map(|to_from| utils::u64_from_u8(&to_from[(size_of::())..])) - .filter_map(move |from| async move { - let mut pduid = shortroomid.to_be_bytes().to_vec(); - pduid.extend_from_slice(&from.to_be_bytes()); - let mut pdu = self.services.timeline.get_pdu_from_id(&pduid).await.ok()?; + .ready_take_while(move |key| key.starts_with(&target.to_be_bytes())) + .map(|to_from| u64_from_u8(&to_from[8..16])) + .map(PduCount::from_unsigned) + .filter_map(move |shorteventid| async move { + let pdu_id: RawPduId = PduId { + shortroomid, + shorteventid, + } + .into(); + + let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; + if pdu.sender != user_id { pdu.remove_transaction_id().log_err().ok(); } - Some((PduCount::Normal(from), pdu)) + Some((shorteventid, pdu)) }) } diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index b1cf2049..82d2ee35 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,18 +1,9 @@ mod data; use std::sync::Arc; -use conduit::{ - at, - utils::{result::FlatOk, stream::ReadyExt, IterStream}, - PduCount, Result, -}; -use futures::{FutureExt, StreamExt}; -use ruma::{ - api::{client::relations::get_relating_events, Direction}, - events::{relation::RelationType, TimelineEventType}, - EventId, RoomId, UInt, UserId, -}; -use serde::Deserialize; +use conduit::{PduCount, Result}; +use futures::StreamExt; +use ruma::{api::Direction, EventId, RoomId, UserId}; use self::data::{Data, PdusIterItem}; use crate::{rooms, Dep}; @@ -24,26 +15,14 @@ pub 
struct Service { struct Services { short: Dep, - state_accessor: Dep, timeline: Dep, } -#[derive(Clone, Debug, Deserialize)] -struct ExtractRelType { - rel_type: RelationType, -} -#[derive(Clone, Debug, Deserialize)] -struct ExtractRelatesToEventId { - #[serde(rename = "m.relates_to")] - relates_to: ExtractRelType, -} - impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { services: Services { short: args.depend::("rooms::short"), - state_accessor: args.depend::("rooms::state_accessor"), timeline: args.depend::("rooms::timeline"), }, db: Data::new(&args), @@ -64,82 +43,9 @@ impl Service { } } - #[allow(clippy::too_many_arguments)] - pub async fn paginate_relations_with_filter( - &self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: Option, - filter_rel_type: Option, from: Option<&str>, to: Option<&str>, limit: Option, - recurse: bool, dir: Direction, - ) -> Result { - let from = from - .map(PduCount::try_from_string) - .transpose()? 
- .unwrap_or_else(|| match dir { - Direction::Forward => PduCount::min(), - Direction::Backward => PduCount::max(), - }); - - let to = to.map(PduCount::try_from_string).flat_ok(); - - // Use limit or else 30, with maximum 100 - let limit: usize = limit - .map(TryInto::try_into) - .flat_ok() - .unwrap_or(30) - .min(100); - - // Spec (v1.10) recommends depth of at least 3 - let depth: u8 = if recurse { - 3 - } else { - 1 - }; - - let events: Vec = self - .get_relations(sender_user, room_id, target, from, limit, depth, dir) - .await - .into_iter() - .filter(|(_, pdu)| { - filter_event_type - .as_ref() - .is_none_or(|kind| *kind == pdu.kind) - }) - .filter(|(_, pdu)| { - filter_rel_type.as_ref().is_none_or(|rel_type| { - pdu.get_content() - .map(|c: ExtractRelatesToEventId| c.relates_to.rel_type) - .is_ok_and(|r| r == *rel_type) - }) - }) - .stream() - .filter_map(|item| self.visibility_filter(sender_user, item)) - .ready_take_while(|(count, _)| Some(*count) != to) - .take(limit) - .collect() - .boxed() - .await; - - let next_batch = match dir { - Direction::Backward => events.first(), - Direction::Forward => events.last(), - } - .map(at!(0)) - .map(|t| t.stringify()); - - Ok(get_relating_events::v1::Response { - next_batch, - prev_batch: Some(from.stringify()), - recursion_depth: recurse.then_some(depth.into()), - chunk: events - .into_iter() - .map(at!(1)) - .map(|pdu| pdu.to_message_like_event()) - .collect(), - }) - } - #[allow(clippy::too_many_arguments)] pub async fn get_relations( - &self, user_id: &UserId, room_id: &RoomId, target: &EventId, until: PduCount, limit: usize, max_depth: u8, + &self, user_id: &UserId, room_id: &RoomId, target: &EventId, from: PduCount, limit: usize, max_depth: u8, dir: Direction, ) -> Vec { let room_id = self.services.short.get_or_create_shortroomid(room_id).await; @@ -152,7 +58,7 @@ impl Service { let mut pdus: Vec<_> = self .db - .get_relations(user_id, room_id, target, until, dir) + .get_relations(user_id, room_id, target, from, 
dir) .collect() .await; @@ -167,7 +73,7 @@ impl Service { let relations: Vec<_> = self .db - .get_relations(user_id, room_id, target, until, dir) + .get_relations(user_id, room_id, target, from, dir) .collect() .await; @@ -186,16 +92,6 @@ impl Service { pdus } - async fn visibility_filter(&self, sender_user: &UserId, item: PdusIterItem) -> Option { - let (_, pdu) = &item; - - self.services - .state_accessor - .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) - .await - .then_some(item) - } - #[inline] #[tracing::instrument(skip_all, level = "debug")] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) { diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 70daded1..1af37d9e 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,10 +1,10 @@ -use std::{iter, sync::Arc}; +use std::sync::Arc; use arrayvec::ArrayVec; use conduit::{ implement, utils::{set, stream::TryIgnore, ArrayVecExt, IterStream, ReadyExt}, - PduEvent, Result, + PduCount, PduEvent, Result, }; use database::{keyval::Val, Map}; use futures::{Stream, StreamExt}; @@ -66,13 +66,13 @@ impl crate::Service for Service { } #[implement(Service)] -pub fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { +pub fn index_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_body: &str) { let batch = tokenize(message_body) .map(|word| { let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(word.as_bytes()); key.push(0xFF); - key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here + key.extend_from_slice(pdu_id.as_ref()); // TODO: currently we save the room id a second time here (key, Vec::::new()) }) .collect::>(); @@ -81,12 +81,12 @@ pub fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { } #[implement(Service)] -pub fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) { +pub fn 
deindex_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_body: &str) { let batch = tokenize(message_body).map(|word| { let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(word.as_bytes()); key.push(0xFF); - key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here + key.extend_from_slice(pdu_id.as_ref()); // TODO: currently we save the room id a second time here key }); @@ -159,24 +159,24 @@ fn search_pdu_ids_query_words<'a>( &'a self, shortroomid: ShortRoomId, word: &'a str, ) -> impl Stream + Send + '_ { self.search_pdu_ids_query_word(shortroomid, word) - .ready_filter_map(move |key| { - key[prefix_len(word)..] - .chunks_exact(PduId::LEN) - .next() - .map(RawPduId::try_from) - .and_then(Result::ok) + .map(move |key| -> RawPduId { + let key = &key[prefix_len(word)..]; + key.into() }) } /// Iterate over raw database results for a word #[implement(Service)] fn search_pdu_ids_query_word(&self, shortroomid: ShortRoomId, word: &str) -> impl Stream> + Send + '_ { - const PDUID_LEN: usize = PduId::LEN; // rustc says const'ing this not yet stable - let end_id: ArrayVec = iter::repeat(u8::MAX).take(PduId::LEN).collect(); + let end_id: RawPduId = PduId { + shortroomid, + shorteventid: PduCount::max(), + } + .into(); // Newest pdus first - let end = make_tokenid(shortroomid, word, end_id.as_slice()); + let end = make_tokenid(shortroomid, word, &end_id); let prefix = make_prefix(shortroomid, word); self.db .tokenids @@ -196,11 +196,9 @@ fn tokenize(body: &str) -> impl Iterator + Send + '_ { .map(str::to_lowercase) } -fn make_tokenid(shortroomid: ShortRoomId, word: &str, pdu_id: &[u8]) -> TokenId { - debug_assert!(pdu_id.len() == PduId::LEN, "pdu_id size mismatch"); - +fn make_tokenid(shortroomid: ShortRoomId, word: &str, pdu_id: &RawPduId) -> TokenId { let mut key = make_prefix(shortroomid, word); - key.extend_from_slice(pdu_id); + key.extend_from_slice(pdu_id.as_ref()); key } diff --git 
a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index a903ef22..9fddf099 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,5 +1,6 @@ use std::{mem::size_of_val, sync::Arc}; +pub use conduit::pdu::{ShortEventId, ShortId, ShortRoomId}; use conduit::{err, implement, utils, Result}; use database::{Deserialized, Map}; use ruma::{events::StateEventType, EventId, RoomId}; @@ -26,9 +27,6 @@ struct Services { pub type ShortStateHash = ShortId; pub type ShortStateKey = ShortId; -pub type ShortEventId = ShortId; -pub type ShortRoomId = ShortId; -pub type ShortId = u64; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -52,7 +50,7 @@ impl crate::Service for Service { #[implement(Service)] pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEventId { - const BUFSIZE: usize = size_of::(); + const BUFSIZE: usize = size_of::(); if let Ok(shorteventid) = self .db @@ -88,7 +86,7 @@ pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> .map(|(i, result)| match result { Ok(ref short) => utils::u64_from_u8(short), Err(_) => { - const BUFSIZE: usize = size_of::(); + const BUFSIZE: usize = size_of::(); let short = self.services.globals.next_count().unwrap(); debug_assert!(size_of_val(&short) == BUFSIZE, "buffer requirement changed"); diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 5aea5f6a..37272dca 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -33,7 +33,7 @@ use ruma::{ }; use tokio::sync::Mutex; -use crate::{rooms, sending, Dep}; +use crate::{rooms, rooms::short::ShortRoomId, sending, Dep}; pub struct CachedSpaceHierarchySummary { summary: SpaceHierarchyParentSummary, @@ -49,7 +49,7 @@ pub enum SummaryAccessibility { pub struct PaginationToken { /// Path down the hierarchy of the room to start the response at, /// excluding the root space. 
- pub short_room_ids: Vec, + pub short_room_ids: Vec, pub limit: UInt, pub max_depth: UInt, pub suggested_only: bool, @@ -448,7 +448,7 @@ impl Service { } pub async fn get_client_hierarchy( - &self, sender_user: &UserId, room_id: &RoomId, limit: usize, short_room_ids: Vec, max_depth: u64, + &self, sender_user: &UserId, room_id: &RoomId, limit: usize, short_room_ids: Vec, max_depth: u64, suggested_only: bool, ) -> Result { let mut parents = VecDeque::new(); diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 34fab079..71a3900c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -95,7 +95,7 @@ impl Service { let event_ids = statediffnew.iter().stream().filter_map(|new| { self.services .state_compressor - .parse_compressed_state_event(new) + .parse_compressed_state_event(*new) .map_ok_or_else(|_| None, |(_, event_id)| Some(event_id)) }); @@ -428,7 +428,7 @@ impl Service { let Ok((shortstatekey, event_id)) = self .services .state_compressor - .parse_compressed_state_event(compressed) + .parse_compressed_state_event(*compressed) .await else { continue; diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 9c96785f..06cd648c 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -53,7 +53,7 @@ impl Data { let parsed = self .services .state_compressor - .parse_compressed_state_event(compressed) + .parse_compressed_state_event(*compressed) .await?; result.insert(parsed.0, parsed.1); @@ -86,7 +86,7 @@ impl Data { let (_, eventid) = self .services .state_compressor - .parse_compressed_state_event(compressed) + .parse_compressed_state_event(*compressed) .await?; if let Ok(pdu) = self.services.timeline.get_pdu(&eventid).await { @@ -132,7 +132,7 @@ impl Data { self.services .state_compressor - .parse_compressed_state_event(compressed) + .parse_compressed_state_event(*compressed) .map_ok(|(_, id)| id) .map_err(|e| { 
err!(Database(error!( diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index a2cc27e8..d51da8af 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -39,13 +39,17 @@ use ruma::{ use serde::Deserialize; use self::data::Data; -use crate::{rooms, rooms::state::RoomMutexGuard, Dep}; +use crate::{ + rooms, + rooms::{short::ShortStateHash, state::RoomMutexGuard}, + Dep, +}; pub struct Service { services: Services, db: Data, - pub server_visibility_cache: Mutex>, - pub user_visibility_cache: Mutex>, + pub server_visibility_cache: Mutex>, + pub user_visibility_cache: Mutex>, } struct Services { @@ -94,11 +98,13 @@ impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + pub async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result>> { self.db.state_full_ids(shortstatehash).await } - pub async fn state_full(&self, shortstatehash: u64) -> Result>> { + pub async fn state_full( + &self, shortstatehash: ShortStateHash, + ) -> Result>> { self.db.state_full(shortstatehash).await } @@ -106,7 +112,7 @@ impl Service { /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] pub async fn state_get_id( - &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result> { self.db .state_get_id(shortstatehash, event_type, state_key) @@ -117,7 +123,7 @@ impl Service { /// `state_key`). 
#[inline] pub async fn state_get( - &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result> { self.db .state_get(shortstatehash, event_type, state_key) @@ -126,7 +132,7 @@ impl Service { /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). pub async fn state_get_content( - &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result where T: for<'de> Deserialize<'de> + Send, @@ -137,7 +143,7 @@ impl Service { } /// Get membership for given user in state - async fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> MembershipState { + async fn user_membership(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> MembershipState { self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) .await .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) @@ -145,14 +151,14 @@ impl Service { /// The user was a joined member at this state (potentially in the past) #[inline] - async fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { + async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { self.user_membership(shortstatehash, user_id).await == MembershipState::Join } /// The user was an invited or joined room member at this state (potentially /// in the past) #[inline] - async fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { + async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { let s = self.user_membership(shortstatehash, user_id).await; s == MembershipState::Join || s == MembershipState::Invite } @@ -285,7 +291,7 @@ impl Service { } /// Returns the state hash for this pdu. 
- pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { self.db.pdu_shortstatehash(event_id).await } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index e213490b..bf90d5c4 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -34,25 +34,26 @@ struct Data { #[derive(Clone)] struct StateDiff { parent: Option, - added: Arc>, - removed: Arc>, + added: Arc, + removed: Arc, } #[derive(Clone, Default)] pub struct ShortStateInfo { pub shortstatehash: ShortStateHash, - pub full_state: Arc>, - pub added: Arc>, - pub removed: Arc>, + pub full_state: Arc, + pub added: Arc, + pub removed: Arc, } #[derive(Clone, Default)] pub struct HashSetCompressStateEvent { pub shortstatehash: ShortStateHash, - pub added: Arc>, - pub removed: Arc>, + pub added: Arc, + pub removed: Arc, } +pub(crate) type CompressedState = HashSet; pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; type StateInfoLruCache = LruCache; type ShortStateInfoVec = Vec; @@ -105,7 +106,7 @@ impl Service { removed, } = self.get_statediff(shortstatehash).await?; - if let Some(parent) = parent { + let response = if let Some(parent) = parent { let mut response = Box::pin(self.load_shortstatehash_info(parent)).await?; let mut state = (*response.last().expect("at least one response").full_state).clone(); state.extend(added.iter().copied()); @@ -121,27 +122,22 @@ impl Service { removed: Arc::new(removed), }); - self.stateinfo_cache - .lock() - .expect("locked") - .insert(shortstatehash, response.clone()); - - Ok(response) + response } else { - let response = vec![ShortStateInfo { + vec![ShortStateInfo { shortstatehash, full_state: added.clone(), added, removed, - }]; + }] + }; - self.stateinfo_cache - .lock() - .expect("locked") - .insert(shortstatehash, response.clone()); + self.stateinfo_cache + .lock() + 
.expect("locked") + .insert(shortstatehash, response.clone()); - Ok(response) - } + Ok(response) } pub async fn compress_state_event(&self, shortstatekey: ShortStateKey, event_id: &EventId) -> CompressedStateEvent { @@ -161,7 +157,7 @@ impl Service { /// Returns shortstatekey, event id #[inline] pub async fn parse_compressed_state_event( - &self, compressed_event: &CompressedStateEvent, + &self, compressed_event: CompressedStateEvent, ) -> Result<(ShortStateKey, Arc)> { use utils::u64_from_u8; diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs index f50b812c..c26dabb4 100644 --- a/src/service/rooms/threads/data.rs +++ b/src/service/rooms/threads/data.rs @@ -1,17 +1,22 @@ -use std::{mem::size_of, sync::Arc}; +use std::sync::Arc; use conduit::{ - checked, result::LogErr, - utils, utils::{stream::TryIgnore, ReadyExt}, - PduEvent, Result, + PduCount, PduEvent, Result, }; use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; -use crate::{rooms, Dep}; +use crate::{ + rooms, + rooms::{ + short::ShortRoomId, + timeline::{PduId, RawPduId}, + }, + Dep, +}; pub(super) struct Data { threadid_userids: Arc, @@ -35,40 +40,39 @@ impl Data { } } + #[inline] pub(super) async fn threads_until<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: u64, _include: &'a IncludeThreads, - ) -> Result + Send + 'a> { - let prefix = self - .services - .short - .get_shortroomid(room_id) - .await? 
- .to_be_bytes() - .to_vec(); + &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, _include: &'a IncludeThreads, + ) -> Result + Send + 'a> { + let shortroomid: ShortRoomId = self.services.short.get_shortroomid(room_id).await?; - let mut current = prefix.clone(); - current.extend_from_slice(&(checked!(until - 1)?).to_be_bytes()); + let current: RawPduId = PduId { + shortroomid, + shorteventid: until.saturating_sub(1), + } + .into(); let stream = self .threadid_userids .rev_raw_keys_from(¤t) .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) - .map(|pduid| (utils::u64_from_u8(&pduid[(size_of::())..]), pduid)) - .filter_map(move |(count, pduid)| async move { - let mut pdu = self.services.timeline.get_pdu_from_id(pduid).await.ok()?; + .map(RawPduId::from) + .ready_take_while(move |pdu_id| pdu_id.shortroomid() == shortroomid.to_be_bytes()) + .filter_map(move |pdu_id| async move { + let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; + let pdu_id: PduId = pdu_id.into(); if pdu.sender != user_id { pdu.remove_transaction_id().log_err().ok(); } - Some((count, pdu)) + Some((pdu_id.shorteventid, pdu)) }); Ok(stream) } - pub(super) fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()> { + pub(super) fn update_participants(&self, root_id: &RawPduId, participants: &[OwnedUserId]) -> Result { let users = participants .iter() .map(|user| user.as_bytes()) @@ -80,7 +84,7 @@ impl Data { Ok(()) } - pub(super) async fn get_participants(&self, root_id: &[u8]) -> Result> { - self.threadid_userids.qry(root_id).await.deserialized() + pub(super) async fn get_participants(&self, root_id: &RawPduId) -> Result> { + self.threadid_userids.get(root_id).await.deserialized() } } diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index 2eafe5d5..02503030 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -2,7 +2,7 @@ mod data; use 
std::{collections::BTreeMap, sync::Arc}; -use conduit::{err, PduEvent, Result}; +use conduit::{err, PduCount, PduEvent, Result}; use data::Data; use futures::Stream; use ruma::{ @@ -37,8 +37,8 @@ impl crate::Service for Service { impl Service { pub async fn threads_until<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: u64, include: &'a IncludeThreads, - ) -> Result + Send + 'a> { + &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, include: &'a IncludeThreads, + ) -> Result + Send + 'a> { self.db .threads_until(user_id, room_id, until, include) .await diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 5428a3b9..19dc5325 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,14 +1,13 @@ use std::{ collections::{hash_map, HashMap}, - mem::size_of, sync::Arc, }; use conduit::{ - err, expected, + at, err, result::{LogErr, NotFound}, utils, - utils::{future::TryExtExt, stream::TryIgnore, u64_from_u8, ReadyExt}, + utils::{future::TryExtExt, stream::TryIgnore, ReadyExt}, Err, PduCount, PduEvent, Result, }; use database::{Database, Deserialized, Json, KeyVal, Map}; @@ -16,7 +15,8 @@ use futures::{Stream, StreamExt}; use ruma::{CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use tokio::sync::Mutex; -use crate::{rooms, Dep}; +use super::{PduId, RawPduId}; +use crate::{rooms, rooms::short::ShortRoomId, Dep}; pub(super) struct Data { eventid_outlierpdu: Arc, @@ -58,30 +58,25 @@ impl Data { .lasttimelinecount_cache .lock() .await - .entry(room_id.to_owned()) + .entry(room_id.into()) { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(sender_user, room_id, PduCount::max()) - .await? 
- .next() - .await - { - Ok(*v.insert(last_count.0)) - } else { - Ok(PduCount::Normal(0)) - } - }, hash_map::Entry::Occupied(o) => Ok(*o.get()), + hash_map::Entry::Vacant(v) => Ok(self + .pdus_until(sender_user, room_id, PduCount::max()) + .await? + .next() + .await + .map(at!(0)) + .filter(|&count| matches!(count, PduCount::Normal(_))) + .map_or_else(PduCount::max, |count| *v.insert(count))), } } /// Returns the `count` of this pdu's id. pub(super) async fn get_pdu_count(&self, event_id: &EventId) -> Result { - self.eventid_pduid - .get(event_id) + self.get_pdu_id(event_id) .await - .map(|pdu_id| pdu_count(&pdu_id)) + .map(|pdu_id| pdu_id.pdu_count()) } /// Returns the json of a pdu. @@ -102,8 +97,11 @@ impl Data { /// Returns the pdu's id. #[inline] - pub(super) async fn get_pdu_id(&self, event_id: &EventId) -> Result> { - self.eventid_pduid.get(event_id).await + pub(super) async fn get_pdu_id(&self, event_id: &EventId) -> Result { + self.eventid_pduid + .get(event_id) + .await + .map(|handle| RawPduId::from(&*handle)) } /// Returns the pdu directly from `eventid_pduid` only. @@ -154,34 +152,40 @@ impl Data { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub(super) async fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result { + pub(super) async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result { self.pduid_pdu.get(pdu_id).await.deserialized() } /// Returns the pdu as a `BTreeMap`. 
- pub(super) async fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result { + pub(super) async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result { self.pduid_pdu.get(pdu_id).await.deserialized() } - pub(super) async fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) { + pub(super) async fn append_pdu( + &self, pdu_id: &RawPduId, pdu: &PduEvent, json: &CanonicalJsonObject, count: PduCount, + ) { + debug_assert!(matches!(count, PduCount::Normal(_)), "PduCount not Normal"); + self.pduid_pdu.raw_put(pdu_id, Json(json)); self.lasttimelinecount_cache .lock() .await - .insert(pdu.room_id.clone(), PduCount::Normal(count)); + .insert(pdu.room_id.clone(), count); self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id); self.eventid_outlierpdu.remove(pdu.event_id.as_bytes()); } - pub(super) fn prepend_backfill_pdu(&self, pdu_id: &[u8], event_id: &EventId, json: &CanonicalJsonObject) { + pub(super) fn prepend_backfill_pdu(&self, pdu_id: &RawPduId, event_id: &EventId, json: &CanonicalJsonObject) { self.pduid_pdu.raw_put(pdu_id, Json(json)); self.eventid_pduid.insert(event_id, pdu_id); self.eventid_outlierpdu.remove(event_id); } /// Removes a pdu and creates a new one with the same id. 
- pub(super) async fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, _pdu: &PduEvent) -> Result { + pub(super) async fn replace_pdu( + &self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject, _pdu: &PduEvent, + ) -> Result { if self.pduid_pdu.get(pdu_id).await.is_not_found() { return Err!(Request(NotFound("PDU does not exist."))); } @@ -197,13 +201,14 @@ impl Data { pub(super) async fn pdus_until<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, ) -> Result + Send + 'a> { - let (prefix, current) = self.count_to_id(room_id, until, 1, true).await?; + let current = self.count_to_id(room_id, until, true).await?; + let prefix = current.shortroomid(); let stream = self .pduid_pdu .rev_raw_stream_from(¤t) .ignore_err() .ready_take_while(move |(key, _)| key.starts_with(&prefix)) - .map(move |item| Self::each_pdu(item, user_id)); + .map(|item| Self::each_pdu(item, user_id)); Ok(stream) } @@ -211,7 +216,8 @@ impl Data { pub(super) async fn pdus_after<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, from: PduCount, ) -> Result + Send + 'a> { - let (prefix, current) = self.count_to_id(room_id, from, 1, false).await?; + let current = self.count_to_id(room_id, from, false).await?; + let prefix = current.shortroomid(); let stream = self .pduid_pdu .raw_stream_from(¤t) @@ -223,6 +229,8 @@ impl Data { } fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: &UserId) -> PdusIterItem { + let pdu_id: RawPduId = pdu_id.into(); + let mut pdu = serde_json::from_slice::(pdu).expect("PduEvent in pduid_pdu database column is invalid JSON"); @@ -231,9 +239,8 @@ impl Data { } pdu.add_age().log_err().ok(); - let count = pdu_count(pdu_id); - (count, pdu) + (pdu_id.pdu_count(), pdu) } pub(super) fn increment_notification_counts( @@ -256,56 +263,25 @@ impl Data { } } - pub(super) async fn count_to_id( - &self, room_id: &RoomId, count: PduCount, offset: u64, subtract: bool, - ) -> Result<(Vec, Vec)> { - let prefix = self + async fn count_to_id(&self, 
room_id: &RoomId, count: PduCount, subtract: bool) -> Result { + let shortroomid: ShortRoomId = self .services .short .get_shortroomid(room_id) .await - .map_err(|e| err!(Request(NotFound("Room {room_id:?} not found: {e:?}"))))? - .to_be_bytes() - .to_vec(); + .map_err(|e| err!(Request(NotFound("Room {room_id:?} not found: {e:?}"))))?; - let mut pdu_id = prefix.clone(); // +1 so we don't send the base event - let count_raw = match count { - PduCount::Normal(x) => { - if subtract { - x.saturating_sub(offset) - } else { - x.saturating_add(offset) - } - }, - PduCount::Backfilled(x) => { - pdu_id.extend_from_slice(&0_u64.to_be_bytes()); - let num = u64::MAX.saturating_sub(x); - if subtract { - num.saturating_sub(offset) - } else { - num.saturating_add(offset) - } + let pdu_id = PduId { + shortroomid, + shorteventid: if subtract { + count.checked_sub(1)? + } else { + count.checked_add(1)? }, }; - pdu_id.extend_from_slice(&count_raw.to_be_bytes()); - Ok((prefix, pdu_id)) - } -} - -/// Returns the `count` of this pdu's id. 
-pub(super) fn pdu_count(pdu_id: &[u8]) -> PduCount { - const STRIDE: usize = size_of::(); - - let pdu_id_len = pdu_id.len(); - let last_u64 = u64_from_u8(&pdu_id[expected!(pdu_id_len - STRIDE)..]); - let second_last_u64 = u64_from_u8(&pdu_id[expected!(pdu_id_len - 2 * STRIDE)..expected!(pdu_id_len - STRIDE)]); - - if second_last_u64 == 0 { - PduCount::Backfilled(u64::MAX.saturating_sub(last_u64)) - } else { - PduCount::Normal(last_u64) + Ok(pdu_id.into()) } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index e45bf7e5..86a47919 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,5 +1,4 @@ mod data; -mod pduid; use std::{ cmp, @@ -15,6 +14,7 @@ use conduit::{ utils::{stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, validated, warn, Err, Error, Result, Server, }; +pub use conduit::{PduId, RawPduId}; use futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryStreamExt}; use ruma::{ api::federation, @@ -39,13 +39,13 @@ use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use self::data::Data; -pub use self::{ - data::PdusIterItem, - pduid::{PduId, RawPduId}, -}; +pub use self::data::PdusIterItem; use crate::{ - account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, - rooms::state_compressor::CompressedStateEvent, sending, server_keys, users, Dep, + account_data, admin, appservice, + appservice::NamespaceRegex, + globals, pusher, rooms, + rooms::{short::ShortRoomId, state_compressor::CompressedStateEvent}, + sending, server_keys, users, Dep, }; // Update Relationships @@ -229,9 +229,7 @@ impl Service { /// Returns the pdu's id. #[inline] - pub async fn get_pdu_id(&self, event_id: &EventId) -> Result> { - self.db.get_pdu_id(event_id).await - } + pub async fn get_pdu_id(&self, event_id: &EventId) -> Result { self.db.get_pdu_id(event_id).await } /// Returns the pdu. 
/// @@ -256,16 +254,16 @@ impl Service { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub async fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result { self.db.get_pdu_from_id(pdu_id).await } + pub async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result { self.db.get_pdu_from_id(pdu_id).await } /// Returns the pdu as a `BTreeMap`. - pub async fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result { + pub async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result { self.db.get_pdu_json_from_id(pdu_id).await } /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self), level = "debug")] - pub async fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()> { + pub async fn replace_pdu(&self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()> { self.db.replace_pdu(pdu_id, pdu_json, pdu).await } @@ -282,7 +280,7 @@ impl Service { mut pdu_json: CanonicalJsonObject, leaves: Vec, state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) -> Result> { + ) -> Result { // Coalesce database writes for the remainder of this scope. 
let _cork = self.db.db.cork_and_flush(); @@ -359,9 +357,12 @@ impl Service { .user .reset_notification_counts(&pdu.sender, &pdu.room_id); - let count2 = self.services.globals.next_count().unwrap(); - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); + let count2 = PduCount::Normal(self.services.globals.next_count().unwrap()); + let pdu_id: RawPduId = PduId { + shortroomid, + shorteventid: count2, + } + .into(); // Insert pdu self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2).await; @@ -544,7 +545,7 @@ impl Service { if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await { self.services .pdu_metadata - .add_relation(PduCount::Normal(count2), related_pducount); + .add_relation(count2, related_pducount); } } @@ -558,7 +559,7 @@ impl Service { if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await { self.services .pdu_metadata - .add_relation(PduCount::Normal(count2), related_pducount); + .add_relation(count2, related_pducount); } }, Relation::Thread(thread) => { @@ -580,7 +581,7 @@ impl Service { { self.services .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; continue; } @@ -596,7 +597,7 @@ impl Service { if state_key_uid == appservice_uid { self.services .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; continue; } } @@ -623,7 +624,7 @@ impl Service { { self.services .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; } } @@ -935,7 +936,7 @@ impl Service { state_ids_compressed: Arc>, soft_fail: bool, state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex - ) -> Result>> { + ) -> Result> { // We append to state 
before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. @@ -993,7 +994,7 @@ impl Service { /// Replace a PDU with the redacted form. #[tracing::instrument(skip(self, reason))] - pub async fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent, shortroomid: u64) -> Result<()> { + pub async fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent, shortroomid: ShortRoomId) -> Result { // TODO: Don't reserialize, keep original json let Ok(pdu_id) = self.get_pdu_id(event_id).await else { // If event does not exist, just noop @@ -1133,7 +1134,6 @@ impl Service { // Skip the PDU if we already have it as a timeline event if let Ok(pdu_id) = self.get_pdu_id(&event_id).await { - let pdu_id = pdu_id.to_vec(); debug!("We already know {event_id} at {pdu_id:?}"); return Ok(()); } @@ -1158,11 +1158,13 @@ impl Service { let insert_lock = self.mutex_insert.lock(&room_id).await; - let max = u64::MAX; - let count = self.services.globals.next_count().unwrap(); - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&0_u64.to_be_bytes()); - pdu_id.extend_from_slice(&(validated!(max - count)).to_be_bytes()); + let count: i64 = self.services.globals.next_count().unwrap().try_into()?; + + let pdu_id: RawPduId = PduId { + shortroomid, + shorteventid: PduCount::Backfilled(validated!(0 - count)), + } + .into(); // Insert pdu self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value); @@ -1246,16 +1248,3 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res Ok(()) } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn comparisons() { - assert!(PduCount::Normal(1) < PduCount::Normal(2)); - assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1)); - assert!(PduCount::Normal(1) > PduCount::Backfilled(1)); - assert!(PduCount::Backfilled(1) < PduCount::Normal(1)); - } -} diff --git a/src/service/rooms/timeline/pduid.rs 
b/src/service/rooms/timeline/pduid.rs deleted file mode 100644 index b43c382c..00000000 --- a/src/service/rooms/timeline/pduid.rs +++ /dev/null @@ -1,13 +0,0 @@ -use crate::rooms::short::{ShortEventId, ShortRoomId}; - -#[derive(Clone, Copy)] -pub struct PduId { - _room_id: ShortRoomId, - _event_id: ShortEventId, -} - -pub type RawPduId = [u8; PduId::LEN]; - -impl PduId { - pub const LEN: usize = size_of::() + size_of::(); -} diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index e484203d..99587134 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -5,7 +5,7 @@ use database::{Deserialized, Map}; use futures::{pin_mut, Stream, StreamExt}; use ruma::{RoomId, UserId}; -use crate::{globals, rooms, Dep}; +use crate::{globals, rooms, rooms::short::ShortStateHash, Dep}; pub struct Service { db: Data, @@ -93,7 +93,7 @@ pub async fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) - } #[implement(Service)] -pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) { +pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: ShortStateHash) { let shortroomid = self .services .short @@ -108,7 +108,7 @@ pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, } #[implement(Service)] -pub async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { +pub async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { let shortroomid = self.services.short.get_shortroomid(room_id).await?; let key: &[u64] = &[shortroomid, token]; diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index f75a212c..cd25776a 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -115,10 +115,10 @@ impl Data { let mut keys = Vec::new(); for (event, destination) in requests { let mut key = destination.get_prefix(); - if let 
SendingEvent::Pdu(value) = &event { - key.extend_from_slice(value); + if let SendingEvent::Pdu(value) = event { + key.extend(value.as_ref()); } else { - key.extend_from_slice(&self.services.globals.next_count().unwrap().to_be_bytes()); + key.extend(&self.services.globals.next_count().unwrap().to_be_bytes()); } let value = if let SendingEvent::Edu(value) = &event { &**value @@ -175,7 +175,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se ( Destination::Appservice(server), if value.is_empty() { - SendingEvent::Pdu(event.to_vec()) + SendingEvent::Pdu(event.into()) } else { SendingEvent::Edu(value.to_vec()) }, @@ -202,7 +202,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se ( Destination::Push(user_id, pushkey_string), if value.is_empty() { - SendingEvent::Pdu(event.to_vec()) + SendingEvent::Pdu(event.into()) } else { // I'm pretty sure this should never be called SendingEvent::Edu(value.to_vec()) @@ -225,7 +225,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se .map_err(|_| Error::bad_database("Invalid server string in server_currenttransaction"))?, ), if value.is_empty() { - SendingEvent::Pdu(event.to_vec()) + SendingEvent::Pdu(event.into()) } else { SendingEvent::Edu(value.to_vec()) }, diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index ea266883..77997f69 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -24,7 +24,10 @@ pub use self::{ dest::Destination, sender::{EDU_LIMIT, PDU_LIMIT}, }; -use crate::{account_data, client, globals, presence, pusher, resolver, rooms, server_keys, users, Dep}; +use crate::{ + account_data, client, globals, presence, pusher, resolver, rooms, rooms::timeline::RawPduId, server_keys, users, + Dep, +}; pub struct Service { server: Arc, @@ -61,9 +64,9 @@ struct Msg { #[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum SendingEvent { - Pdu(Vec), 
// pduid - Edu(Vec), // pdu json - Flush, // none + Pdu(RawPduId), // pduid + Edu(Vec), // pdu json + Flush, // none } #[async_trait] @@ -110,9 +113,9 @@ impl crate::Service for Service { impl Service { #[tracing::instrument(skip(self, pdu_id, user, pushkey), level = "debug")] - pub fn send_pdu_push(&self, pdu_id: &[u8], user: &UserId, pushkey: String) -> Result<()> { + pub fn send_pdu_push(&self, pdu_id: &RawPduId, user: &UserId, pushkey: String) -> Result { let dest = Destination::Push(user.to_owned(), pushkey); - let event = SendingEvent::Pdu(pdu_id.to_owned()); + let event = SendingEvent::Pdu(*pdu_id); let _cork = self.db.db.cork(); let keys = self.db.queue_requests(&[(&event, &dest)]); self.dispatch(Msg { @@ -123,7 +126,7 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: Vec) -> Result<()> { + pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: RawPduId) -> Result { let dest = Destination::Appservice(appservice_id); let event = SendingEvent::Pdu(pdu_id); let _cork = self.db.db.cork(); @@ -136,7 +139,7 @@ impl Service { } #[tracing::instrument(skip(self, room_id, pdu_id), level = "debug")] - pub async fn send_pdu_room(&self, room_id: &RoomId, pdu_id: &[u8]) -> Result<()> { + pub async fn send_pdu_room(&self, room_id: &RoomId, pdu_id: &RawPduId) -> Result { let servers = self .services .state_cache @@ -147,13 +150,13 @@ impl Service { } #[tracing::instrument(skip(self, servers, pdu_id), level = "debug")] - pub async fn send_pdu_servers<'a, S>(&self, servers: S, pdu_id: &[u8]) -> Result<()> + pub async fn send_pdu_servers<'a, S>(&self, servers: S, pdu_id: &RawPduId) -> Result where S: Stream + Send + 'a, { let _cork = self.db.db.cork(); let requests = servers - .map(|server| (Destination::Normal(server.into()), SendingEvent::Pdu(pdu_id.into()))) + .map(|server| (Destination::Normal(server.into()), SendingEvent::Pdu(pdu_id.to_owned()))) .collect::>() .await; diff 
--git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index d9087d44..464d186b 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -536,7 +536,8 @@ impl Service { &events .iter() .map(|e| match e { - SendingEvent::Edu(b) | SendingEvent::Pdu(b) => &**b, + SendingEvent::Edu(b) => &**b, + SendingEvent::Pdu(b) => b.as_ref(), SendingEvent::Flush => &[], }) .collect::>(), @@ -660,7 +661,8 @@ impl Service { &events .iter() .map(|e| match e { - SendingEvent::Edu(b) | SendingEvent::Pdu(b) => &**b, + SendingEvent::Edu(b) => &**b, + SendingEvent::Pdu(b) => b.as_ref(), SendingEvent::Flush => &[], }) .collect::>(), From 137e3008ea04d36f9562eeadc61b276032fd2ddf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 6 Nov 2024 21:02:23 +0000 Subject: [PATCH 0177/1248] merge rooms threads data and service Signed-off-by: Jason Volk --- src/api/client/relations.rs | 12 ++-- src/api/client/threads.rs | 10 ++- src/service/rooms/pdu_metadata/data.rs | 15 ++--- src/service/rooms/threads/data.rs | 90 -------------------------- src/service/rooms/threads/mod.rs | 88 +++++++++++++++++++------ 5 files changed, 91 insertions(+), 124 deletions(-) delete mode 100644 src/service/rooms/threads/data.rs diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index ef7035e2..b5d1485b 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -97,7 +97,7 @@ async fn paginate_relations_with_filter( filter_event_type: Option, filter_rel_type: Option, from: Option<&str>, to: Option<&str>, limit: Option, recurse: bool, dir: Direction, ) -> Result { - let from: PduCount = from + let start: PduCount = from .map(str::parse) .transpose()? 
.unwrap_or_else(|| match dir { @@ -124,7 +124,7 @@ async fn paginate_relations_with_filter( let events: Vec = services .rooms .pdu_metadata - .get_relations(sender_user, room_id, target, from, limit, depth, dir) + .get_relations(sender_user, room_id, target, start, limit, depth, dir) .await .into_iter() .filter(|(_, pdu)| { @@ -146,16 +146,20 @@ async fn paginate_relations_with_filter( .await; let next_batch = match dir { - Direction::Backward => events.first(), Direction::Forward => events.last(), + Direction::Backward => events.first(), } .map(at!(0)) + .map(|count| match dir { + Direction::Forward => count.saturating_add(1), + Direction::Backward => count.saturating_sub(1), + }) .as_ref() .map(ToString::to_string); Ok(get_relating_events::v1::Response { next_batch, - prev_batch: Some(from.to_string()), + prev_batch: from.map(Into::into), recursion_depth: recurse.then_some(depth.into()), chunk: events .into_iter() diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 02cf7992..8d4e399b 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{PduCount, PduEvent}; +use conduit::{at, PduCount, PduEvent}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; @@ -44,12 +44,16 @@ pub(crate) async fn get_threads_route( Ok(get_threads::v1::Response { next_batch: threads .last() - .map(|(count, _)| count) + .filter(|_| threads.len() >= limit) + .map(at!(0)) + .map(|count| count.saturating_sub(1)) + .as_ref() .map(ToString::to_string), chunk: threads .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) + .map(at!(1)) + .map(|pdu| pdu.to_room_event()) .collect(), }) } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 3fc06591..f3e1ced8 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,5 +1,6 @@ use std::{mem::size_of, sync::Arc}; +use arrayvec::ArrayVec; use 
conduit::{ result::LogErr, utils::{stream::TryIgnore, u64_from_u8, ReadyExt}, @@ -54,15 +55,13 @@ impl Data { pub(super) fn get_relations<'a>( &'a self, user_id: &'a UserId, shortroomid: ShortRoomId, target: ShortEventId, from: PduCount, dir: Direction, ) -> impl Stream + Send + '_ { - let current: RawPduId = PduId { - shortroomid, - shorteventid: from, - } - .into(); - + let mut current = ArrayVec::::new(); + current.extend(target.to_be_bytes()); + current.extend(from.into_unsigned().to_be_bytes()); + let current = current.as_slice(); match dir { - Direction::Forward => self.tofrom_relation.raw_keys_from(¤t).boxed(), - Direction::Backward => self.tofrom_relation.rev_raw_keys_from(¤t).boxed(), + Direction::Forward => self.tofrom_relation.raw_keys_from(current).boxed(), + Direction::Backward => self.tofrom_relation.rev_raw_keys_from(current).boxed(), } .ignore_err() .ready_take_while(move |key| key.starts_with(&target.to_be_bytes())) diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs deleted file mode 100644 index c26dabb4..00000000 --- a/src/service/rooms/threads/data.rs +++ /dev/null @@ -1,90 +0,0 @@ -use std::sync::Arc; - -use conduit::{ - result::LogErr, - utils::{stream::TryIgnore, ReadyExt}, - PduCount, PduEvent, Result, -}; -use database::{Deserialized, Map}; -use futures::{Stream, StreamExt}; -use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; - -use crate::{ - rooms, - rooms::{ - short::ShortRoomId, - timeline::{PduId, RawPduId}, - }, - Dep, -}; - -pub(super) struct Data { - threadid_userids: Arc, - services: Services, -} - -struct Services { - short: Dep, - timeline: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - threadid_userids: db["threadid_userids"].clone(), - services: Services { - short: args.depend::("rooms::short"), - timeline: args.depend::("rooms::timeline"), - }, - } - } - - #[inline] - pub(super) async fn 
threads_until<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, _include: &'a IncludeThreads, - ) -> Result + Send + 'a> { - let shortroomid: ShortRoomId = self.services.short.get_shortroomid(room_id).await?; - - let current: RawPduId = PduId { - shortroomid, - shorteventid: until.saturating_sub(1), - } - .into(); - - let stream = self - .threadid_userids - .rev_raw_keys_from(¤t) - .ignore_err() - .map(RawPduId::from) - .ready_take_while(move |pdu_id| pdu_id.shortroomid() == shortroomid.to_be_bytes()) - .filter_map(move |pdu_id| async move { - let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; - let pdu_id: PduId = pdu_id.into(); - - if pdu.sender != user_id { - pdu.remove_transaction_id().log_err().ok(); - } - - Some((pdu_id.shorteventid, pdu)) - }); - - Ok(stream) - } - - pub(super) fn update_participants(&self, root_id: &RawPduId, participants: &[OwnedUserId]) -> Result { - let users = participants - .iter() - .map(|user| user.as_bytes()) - .collect::>() - .join(&[0xFF][..]); - - self.threadid_userids.insert(root_id, &users); - - Ok(()) - } - - pub(super) async fn get_participants(&self, root_id: &RawPduId) -> Result> { - self.threadid_userids.get(root_id).await.deserialized() - } -} diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index 02503030..fcc629e1 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,34 +1,44 @@ -mod data; - use std::{collections::BTreeMap, sync::Arc}; -use conduit::{err, PduCount, PduEvent, Result}; -use data::Data; -use futures::Stream; +use conduit::{ + err, + utils::{stream::TryIgnore, ReadyExt}, + PduCount, PduEvent, PduId, RawPduId, Result, +}; +use database::{Deserialized, Map}; +use futures::{Stream, StreamExt}; use ruma::{ api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, CanonicalJsonValue, - EventId, RoomId, UserId, + EventId, OwnedUserId, RoomId, UserId, }; use 
serde_json::json; -use crate::{rooms, Dep}; +use crate::{rooms, rooms::short::ShortRoomId, Dep}; pub struct Service { - services: Services, db: Data, + services: Services, } struct Services { + short: Dep, timeline: Dep, } +pub(super) struct Data { + threadid_userids: Arc, +} + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { + db: Data { + threadid_userids: args.db["threadid_userids"].clone(), + }, services: Services { + short: args.depend::("rooms::short"), timeline: args.depend::("rooms::timeline"), }, - db: Data::new(&args), })) } @@ -36,14 +46,6 @@ impl crate::Service for Service { } impl Service { - pub async fn threads_until<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, include: &'a IncludeThreads, - ) -> Result + Send + 'a> { - self.db - .threads_until(user_id, room_id, until, include) - .await - } - pub async fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { let root_id = self .services @@ -113,13 +115,61 @@ impl Service { } let mut users = Vec::new(); - if let Ok(userids) = self.db.get_participants(&root_id).await { + if let Ok(userids) = self.get_participants(&root_id).await { users.extend_from_slice(&userids); } else { users.push(root_pdu.sender); } users.push(pdu.sender.clone()); - self.db.update_participants(&root_id, &users) + self.update_participants(&root_id, &users) + } + + pub async fn threads_until<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, shorteventid: PduCount, _inc: &'a IncludeThreads, + ) -> Result + Send + 'a> { + let shortroomid: ShortRoomId = self.services.short.get_shortroomid(room_id).await?; + + let current: RawPduId = PduId { + shortroomid, + shorteventid, + } + .into(); + + let stream = self + .db + .threadid_userids + .rev_raw_keys_from(¤t) + .ignore_err() + .map(RawPduId::from) + .ready_take_while(move |pdu_id| pdu_id.shortroomid() == shortroomid.to_be_bytes()) + .filter_map(move |pdu_id| async move { + let 
mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; + let pdu_id: PduId = pdu_id.into(); + + if pdu.sender != user_id { + pdu.remove_transaction_id().ok(); + } + + Some((pdu_id.shorteventid, pdu)) + }); + + Ok(stream) + } + + pub(super) fn update_participants(&self, root_id: &RawPduId, participants: &[OwnedUserId]) -> Result { + let users = participants + .iter() + .map(|user| user.as_bytes()) + .collect::>() + .join(&[0xFF][..]); + + self.db.threadid_userids.insert(root_id, &users); + + Ok(()) + } + + pub(super) async fn get_participants(&self, root_id: &RawPduId) -> Result> { + self.db.threadid_userids.get(root_id).await.deserialized() } } From 26c890d5ac18adf98109b4663c9eecdc289badef Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 4 Nov 2024 22:38:12 +0000 Subject: [PATCH 0178/1248] skip redundant receipts on syncs Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 08048902..2ac0bfea 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -28,7 +28,7 @@ use ruma::{ events::{ presence::PresenceEvent, room::member::{MembershipState, RoomMemberEventContent}, - AnyRawAccountDataEvent, StateEventType, + AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, }, serde::Raw, @@ -983,20 +983,22 @@ async fn load_joined_room( .collect() .await; - let mut edus: Vec<_> = services + let edus: HashMap> = services .rooms .read_receipt .readreceipts_since(room_id, since) - .filter_map(|(read_user, _, v)| async move { - (!services + .filter_map(|(read_user, _, edu)| async move { + services .users .user_is_ignored(&read_user, sender_user) - .await) - .then_some(v) + .await + .or_some((read_user, edu)) }) .collect() .await; + let mut edus: Vec> = edus.into_values().collect(); + if services.rooms.typing.last_typing_update(room_id).await? 
> since { edus.push( serde_json::from_str( From 3ed2c17f980497a3ea7bdf2d438b5da7984572fd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 6 Nov 2024 01:24:44 +0000 Subject: [PATCH 0179/1248] move sync watcher from globals service to sync service Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 2 +- src/api/client/sync/v4.rs | 2 +- src/service/globals/data.rs | 140 +----------------------------------- src/service/globals/mod.rs | 6 +- src/service/sync/mod.rs | 51 ++++++++++++- src/service/sync/watch.rs | 117 ++++++++++++++++++++++++++++++ 6 files changed, 170 insertions(+), 148 deletions(-) create mode 100644 src/service/sync/watch.rs diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 2ac0bfea..00976c78 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -93,7 +93,7 @@ pub(crate) async fn sync_events_route( } // Setup watchers, so if there's no response, we can wait for them - let watcher = services.globals.watch(&sender_user, &sender_device); + let watcher = services.sync.watch(&sender_user, &sender_device); let next_batch = services.globals.current_count()?; let next_batchcount = PduCount::Normal(next_batch); diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 11e3830c..91abd24e 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -51,7 +51,7 @@ pub(crate) async fn sync_events_v4_route( let sender_device = body.sender_device.expect("user is authenticated"); let mut body = body.body; // Setup watchers, so if there's no response, we can wait for them - let watcher = services.globals.watch(sender_user, &sender_device); + let watcher = services.sync.watch(sender_user, &sender_device); let next_batch = services.globals.next_count()?; diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index eea7597a..bcfe101e 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,35 +1,12 @@ use std::sync::{Arc, RwLock}; -use conduit::{trace, 
utils, Result, Server}; +use conduit::{utils, Result}; use database::{Database, Deserialized, Map}; -use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; -use ruma::{DeviceId, UserId}; - -use crate::{rooms, Dep}; pub struct Data { global: Arc, - todeviceid_events: Arc, - userroomid_joined: Arc, - userroomid_invitestate: Arc, - userroomid_leftstate: Arc, - userroomid_notificationcount: Arc, - userroomid_highlightcount: Arc, - pduid_pdu: Arc, - keychangeid_userid: Arc, - roomusertype_roomuserdataid: Arc, - readreceiptid_readreceipt: Arc, - userid_lastonetimekeyupdate: Arc, counter: RwLock, pub(super) db: Arc, - services: Services, -} - -struct Services { - server: Arc, - short: Dep, - state_cache: Dep, - typing: Dep, } const COUNTER: &[u8] = b"c"; @@ -39,25 +16,8 @@ impl Data { let db = &args.db; Self { global: db["global"].clone(), - todeviceid_events: db["todeviceid_events"].clone(), - userroomid_joined: db["userroomid_joined"].clone(), - userroomid_invitestate: db["userroomid_invitestate"].clone(), - userroomid_leftstate: db["userroomid_leftstate"].clone(), - userroomid_notificationcount: db["userroomid_notificationcount"].clone(), - userroomid_highlightcount: db["userroomid_highlightcount"].clone(), - pduid_pdu: db["pduid_pdu"].clone(), - keychangeid_userid: db["keychangeid_userid"].clone(), - roomusertype_roomuserdataid: db["roomusertype_roomuserdataid"].clone(), - readreceiptid_readreceipt: db["readreceiptid_readreceipt"].clone(), - userid_lastonetimekeyupdate: db["userid_lastonetimekeyupdate"].clone(), counter: RwLock::new(Self::stored_count(&db["global"]).expect("initialized global counter")), db: args.db.clone(), - services: Services { - server: args.server.clone(), - short: args.depend::("rooms::short"), - state_cache: args.depend::("rooms::state_cache"), - typing: args.depend::("rooms::typing"), - }, } } @@ -98,104 +58,6 @@ impl Data { .map_or(Ok(0_u64), utils::u64_from_bytes) } - #[tracing::instrument(skip(self), level = "debug")] - pub 
async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - let userid_bytes = user_id.as_bytes().to_vec(); - let mut userid_prefix = userid_bytes.clone(); - userid_prefix.push(0xFF); - - let mut userdeviceid_prefix = userid_prefix.clone(); - userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); - userdeviceid_prefix.push(0xFF); - - let mut futures = FuturesUnordered::new(); - - // Return when *any* user changed their key - // TODO: only send for user they share a room with - futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix)); - - futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix)); - futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); - futures.push( - self.userroomid_notificationcount - .watch_prefix(&userid_prefix), - ); - futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix)); - - // Events for rooms we are in - let rooms_joined = self.services.state_cache.rooms_joined(user_id); - - pin_mut!(rooms_joined); - while let Some(room_id) = rooms_joined.next().await { - let Ok(short_roomid) = self.services.short.get_shortroomid(room_id).await else { - continue; - }; - - let roomid_bytes = room_id.as_bytes().to_vec(); - let mut roomid_prefix = roomid_bytes.clone(); - roomid_prefix.push(0xFF); - - // Key changes - futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); - - // Room account data - let mut roomuser_prefix = roomid_prefix.clone(); - roomuser_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.roomusertype_roomuserdataid - .watch_prefix(&roomuser_prefix), - ); - - // PDUs - let short_roomid = short_roomid.to_be_bytes().to_vec(); - futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); - - // EDUs - let typing_room_id = room_id.to_owned(); - let typing_wait_for_update = async move { - self.services.typing.wait_for_update(&typing_room_id).await; - }; - - 
futures.push(typing_wait_for_update.boxed()); - futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); - } - - let mut globaluserdata_prefix = vec![0xFF]; - globaluserdata_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.roomusertype_roomuserdataid - .watch_prefix(&globaluserdata_prefix), - ); - - // More key changes (used when user is not joined to any rooms) - futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); - - // One time keys - futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes)); - - // Server shutdown - let server_shutdown = async move { - while self.services.server.running() { - self.services.server.signal.subscribe().recv().await.ok(); - } - }; - - futures.push(server_shutdown.boxed()); - if !self.services.server.running() { - return Ok(()); - } - - // Wait until one of them finds something - trace!(futures = futures.len(), "watch started"); - futures.next().await; - trace!(futures = futures.len(), "watch finished"); - - Ok(()) - } - pub async fn database_version(&self) -> u64 { self.global .get(b"version") diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index bd956964..55dd10aa 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -12,7 +12,7 @@ use data::Data; use ipaddress::IPAddress; use regex::RegexSet; use ruma::{ - api::client::discovery::discover_support::ContactRole, DeviceId, OwnedEventId, OwnedRoomAliasId, OwnedServerName, + api::client::discovery::discover_support::ContactRole, OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, RoomVersionId, ServerName, UserId, }; use tokio::sync::Mutex; @@ -163,10 +163,6 @@ impl Service { #[inline] pub fn current_count(&self) -> Result { Ok(self.db.current_count()) } - pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - self.db.watch(user_id, device_id).await - } - #[inline] pub fn server_name(&self) -> &ServerName { 
self.config.server_name.as_ref() } diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index 1bf4610f..f1a6ae75 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -1,9 +1,12 @@ +mod watch; + use std::{ collections::{BTreeMap, BTreeSet}, sync::{Arc, Mutex, Mutex as StdMutex}, }; -use conduit::Result; +use conduit::{Result, Server}; +use database::Map; use ruma::{ api::client::sync::sync_events::{ self, @@ -12,10 +15,35 @@ use ruma::{ OwnedDeviceId, OwnedRoomId, OwnedUserId, }; +use crate::{rooms, Dep}; + pub struct Service { + db: Data, + services: Services, connections: DbConnections, } +pub struct Data { + todeviceid_events: Arc, + userroomid_joined: Arc, + userroomid_invitestate: Arc, + userroomid_leftstate: Arc, + userroomid_notificationcount: Arc, + userroomid_highlightcount: Arc, + pduid_pdu: Arc, + keychangeid_userid: Arc, + roomusertype_roomuserdataid: Arc, + readreceiptid_readreceipt: Arc, + userid_lastonetimekeyupdate: Arc, +} + +struct Services { + server: Arc, + short: Dep, + state_cache: Dep, + typing: Dep, +} + struct SlidingSyncCache { lists: BTreeMap, subscriptions: BTreeMap, @@ -28,8 +56,27 @@ type DbConnectionsKey = (OwnedUserId, OwnedDeviceId, String); type DbConnectionsVal = Arc>; impl crate::Service for Service { - fn build(_args: crate::Args<'_>) -> Result> { + fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { + db: Data { + todeviceid_events: args.db["todeviceid_events"].clone(), + userroomid_joined: args.db["userroomid_joined"].clone(), + userroomid_invitestate: args.db["userroomid_invitestate"].clone(), + userroomid_leftstate: args.db["userroomid_leftstate"].clone(), + userroomid_notificationcount: args.db["userroomid_notificationcount"].clone(), + userroomid_highlightcount: args.db["userroomid_highlightcount"].clone(), + pduid_pdu: args.db["pduid_pdu"].clone(), + keychangeid_userid: args.db["keychangeid_userid"].clone(), + roomusertype_roomuserdataid: 
args.db["roomusertype_roomuserdataid"].clone(), + readreceiptid_readreceipt: args.db["readreceiptid_readreceipt"].clone(), + userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(), + }, + services: Services { + server: args.server.clone(), + short: args.depend::("rooms::short"), + state_cache: args.depend::("rooms::state_cache"), + typing: args.depend::("rooms::typing"), + }, connections: StdMutex::new(BTreeMap::new()), })) } diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs new file mode 100644 index 00000000..3eb663c1 --- /dev/null +++ b/src/service/sync/watch.rs @@ -0,0 +1,117 @@ +use conduit::{implement, trace, Result}; +use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; +use ruma::{DeviceId, UserId}; + +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result { + let userid_bytes = user_id.as_bytes().to_vec(); + let mut userid_prefix = userid_bytes.clone(); + userid_prefix.push(0xFF); + + let mut userdeviceid_prefix = userid_prefix.clone(); + userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); + userdeviceid_prefix.push(0xFF); + + let mut futures = FuturesUnordered::new(); + + // Return when *any* user changed their key + // TODO: only send for user they share a room with + futures.push(self.db.todeviceid_events.watch_prefix(&userdeviceid_prefix)); + + futures.push(self.db.userroomid_joined.watch_prefix(&userid_prefix)); + futures.push(self.db.userroomid_invitestate.watch_prefix(&userid_prefix)); + futures.push(self.db.userroomid_leftstate.watch_prefix(&userid_prefix)); + futures.push( + self.db + .userroomid_notificationcount + .watch_prefix(&userid_prefix), + ); + futures.push( + self.db + .userroomid_highlightcount + .watch_prefix(&userid_prefix), + ); + + // Events for rooms we are in + let rooms_joined = self.services.state_cache.rooms_joined(user_id); + + pin_mut!(rooms_joined); + while 
let Some(room_id) = rooms_joined.next().await { + let Ok(short_roomid) = self.services.short.get_shortroomid(room_id).await else { + continue; + }; + + let roomid_bytes = room_id.as_bytes().to_vec(); + let mut roomid_prefix = roomid_bytes.clone(); + roomid_prefix.push(0xFF); + + // Key changes + futures.push(self.db.keychangeid_userid.watch_prefix(&roomid_prefix)); + + // Room account data + let mut roomuser_prefix = roomid_prefix.clone(); + roomuser_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.db + .roomusertype_roomuserdataid + .watch_prefix(&roomuser_prefix), + ); + + // PDUs + let short_roomid = short_roomid.to_be_bytes().to_vec(); + futures.push(self.db.pduid_pdu.watch_prefix(&short_roomid)); + + // EDUs + let typing_room_id = room_id.to_owned(); + let typing_wait_for_update = async move { + self.services.typing.wait_for_update(&typing_room_id).await; + }; + + futures.push(typing_wait_for_update.boxed()); + futures.push( + self.db + .readreceiptid_readreceipt + .watch_prefix(&roomid_prefix), + ); + } + + let mut globaluserdata_prefix = vec![0xFF]; + globaluserdata_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.db + .roomusertype_roomuserdataid + .watch_prefix(&globaluserdata_prefix), + ); + + // More key changes (used when user is not joined to any rooms) + futures.push(self.db.keychangeid_userid.watch_prefix(&userid_prefix)); + + // One time keys + futures.push( + self.db + .userid_lastonetimekeyupdate + .watch_prefix(&userid_bytes), + ); + + // Server shutdown + let server_shutdown = async move { + while self.services.server.running() { + self.services.server.signal.subscribe().recv().await.ok(); + } + }; + + futures.push(server_shutdown.boxed()); + if !self.services.server.running() { + return Ok(()); + } + + // Wait until one of them finds something + trace!(futures = futures.len(), "watch started"); + futures.next().await; + trace!(futures = futures.len(), "watch finished"); + + Ok(()) +} From 
7450c654ae37e8caa9af80f40dc674d9a65893b7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 6 Nov 2024 06:20:39 +0000 Subject: [PATCH 0180/1248] add get_pdu_owned sans Arc; improve client/room/event handler Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 3 +- src/api/client/membership.rs | 4 ++- src/api/client/room.rs | 48 +++++++++++++----------------- src/api/mod.rs | 2 ++ src/api/server/send_join.rs | 8 +++-- src/service/rooms/timeline/data.rs | 15 ++++++---- src/service/rooms/timeline/mod.rs | 8 ++++- 7 files changed, 50 insertions(+), 38 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 754c9840..f9d4a521 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,7 @@ use std::{ }; use conduit::{debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result}; -use futures::StreamExt; +use futures::{FutureExt, StreamExt}; use ruma::{ api::{client::error::ErrorKind, federation::event::get_room_state}, events::room::message::RoomMessageEventContent, @@ -246,6 +246,7 @@ pub(super) async fn get_remote_pdu( .rooms .timeline .backfill_pdu(&server, response.pdu) + .boxed() .await?; let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index fa71c0c8..bf8e5c33 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -374,7 +374,9 @@ pub(crate) async fn invite_user_route( return Ok(invite_user::v3::Response {}); } - invite_helper(&services, sender_user, user_id, &body.room_id, body.reason.clone(), false).await?; + invite_helper(&services, sender_user, user_id, &body.room_id, body.reason.clone(), false) + .boxed() + .await?; Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) diff --git a/src/api/client/room.rs b/src/api/client/room.rs index 4224d3fa..b6683ef4 100644 --- 
a/src/api/client/room.rs +++ b/src/api/client/room.rs @@ -2,7 +2,7 @@ use std::{cmp::max, collections::BTreeMap}; use axum::extract::State; use conduit::{debug_info, debug_warn, err, Err}; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::client::{ error::ErrorKind, @@ -486,34 +486,28 @@ pub(crate) async fn create_room_route( /// - You have to currently be joined to the room (TODO: Respect history /// visibility) pub(crate) async fn get_room_event_route( - State(services): State, body: Ruma, + State(services): State, ref body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let event = services - .rooms - .timeline - .get_pdu(&body.event_id) - .await - .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id))))?; - - if !services - .rooms - .state_accessor - .user_can_see_event(sender_user, &event.room_id, &body.event_id) - .await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this event.", - )); - } - - let mut event = (*event).clone(); - event.add_age()?; - Ok(get_room_event::v3::Response { - event: event.to_room_event(), + event: services + .rooms + .timeline + .get_pdu_owned(&body.event_id) + .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))) + .and_then(|event| async move { + services + .rooms + .state_accessor + .user_can_see_event(body.sender_user(), &event.room_id, &body.event_id) + .await + .then_some(event) + .ok_or_else(|| err!(Request(Forbidden("You don't have permission to view this event.")))) + }) + .map_ok(|mut event| { + event.add_age().ok(); + event.to_room_event() + }) + .await?, }) } diff --git a/src/api/mod.rs b/src/api/mod.rs index ed8aacf2..fc68af5b 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,3 +1,5 @@ +#![allow(clippy::toplevel_ref_arg)] + pub mod client; pub mod router; pub mod server; diff --git a/src/api/server/send_join.rs 
b/src/api/server/send_join.rs index f2ede9d0..60ec8c1f 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -253,7 +253,9 @@ pub(crate) async fn create_join_event_v1_route( } } - let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu).await?; + let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu) + .boxed() + .await?; Ok(create_join_event::v1::Response { room_state, @@ -296,7 +298,9 @@ pub(crate) async fn create_join_event_v2_route( auth_chain, state, event, - } = create_join_event(&services, body.origin(), &body.room_id, &body.pdu).await?; + } = create_join_event(&services, body.origin(), &body.room_id, &body.pdu) + .boxed() + .await?; let room_state = create_join_event::v2::RoomState { members_omitted: false, auth_chain, diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 19dc5325..f062e7e4 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -123,15 +123,18 @@ impl Data { /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub(super) async fn get_pdu(&self, event_id: &EventId) -> Result> { + self.get_pdu_owned(event_id).await.map(Arc::new) + } + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
+ pub(super) async fn get_pdu_owned(&self, event_id: &EventId) -> Result { if let Ok(pdu) = self.get_non_outlier_pdu(event_id).await { - return Ok(Arc::new(pdu)); + return Ok(pdu); } - self.eventid_outlierpdu - .get(event_id) - .await - .deserialized() - .map(Arc::new) + self.eventid_outlierpdu.get(event_id).await.deserialized() } /// Like get_non_outlier_pdu(), but without the expense of fetching and diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 86a47919..8255be7d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -244,6 +244,11 @@ impl Service { /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub async fn get_pdu(&self, event_id: &EventId) -> Result> { self.db.get_pdu(event_id).await } + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + pub async fn get_pdu_owned(&self, event_id: &EventId) -> Result { self.db.get_pdu_owned(event_id).await } + /// Checks if pdu exists /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
@@ -885,6 +890,7 @@ impl Service { vec![(*pdu.event_id).to_owned()], state_lock, ) + .boxed() .await?; // We set the room state after inserting the pdu, so that we never have a moment @@ -1104,7 +1110,7 @@ impl Service { match response { Ok(response) => { for pdu in response.pdus { - if let Err(e) = self.backfill_pdu(backfill_server, pdu).await { + if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await { warn!("Failed to add backfilled pdu in room {room_id}: {e}"); } } From f36757027eacc27f47f6415d998be6cf61cc4f0a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 6 Nov 2024 18:27:40 +0000 Subject: [PATCH 0181/1248] split api/client/room Signed-off-by: Jason Volk --- src/api/client/room/aliases.rs | 40 +++ src/api/client/{room.rs => room/create.rs} | 359 +-------------------- src/api/client/room/event.rs | 38 +++ src/api/client/room/mod.rs | 9 + src/api/client/room/upgrade.rs | 294 +++++++++++++++++ 5 files changed, 388 insertions(+), 352 deletions(-) create mode 100644 src/api/client/room/aliases.rs rename src/api/client/{room.rs => room/create.rs} (65%) create mode 100644 src/api/client/room/event.rs create mode 100644 src/api/client/room/mod.rs create mode 100644 src/api/client/room/upgrade.rs diff --git a/src/api/client/room/aliases.rs b/src/api/client/room/aliases.rs new file mode 100644 index 00000000..e530b260 --- /dev/null +++ b/src/api/client/room/aliases.rs @@ -0,0 +1,40 @@ +use axum::extract::State; +use conduit::{Error, Result}; +use futures::StreamExt; +use ruma::api::client::{error::ErrorKind, room::aliases}; + +use crate::Ruma; + +/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` +/// +/// Lists all aliases of the room. 
+/// +/// - Only users joined to the room are allowed to call this, or if +/// `history_visibility` is world readable in the room +pub(crate) async fn get_room_aliases_route( + State(services): State, body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id) + .await + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this room.", + )); + } + + Ok(aliases::v3::Response { + aliases: services + .rooms + .alias + .local_aliases_for_room(&body.room_id) + .map(ToOwned::to_owned) + .collect() + .await, + }) +} diff --git a/src/api/client/room.rs b/src/api/client/room/create.rs similarity index 65% rename from src/api/client/room.rs rename to src/api/client/room/create.rs index b6683ef4..2ccb1c87 100644 --- a/src/api/client/room.rs +++ b/src/api/client/room/create.rs @@ -1,12 +1,12 @@ -use std::{cmp::max, collections::BTreeMap}; +use std::collections::BTreeMap; use axum::extract::State; -use conduit::{debug_info, debug_warn, err, Err}; -use futures::{FutureExt, StreamExt, TryFutureExt}; +use conduit::{debug_info, debug_warn, error, info, pdu::PduBuilder, warn, Err, Error, Result}; +use futures::FutureExt; use ruma::{ api::client::{ error::ErrorKind, - room::{self, aliases, create_room, get_room_event, upgrade_room}, + room::{self, create_room}, }, events::{ room::{ @@ -18,36 +18,18 @@ use ruma::{ member::{MembershipState, RoomMemberEventContent}, name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent, - tombstone::RoomTombstoneEventContent, topic::RoomTopicEventContent, }, - StateEventType, TimelineEventType, + TimelineEventType, }, int, serde::{JsonObject, Raw}, CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use tracing::{error, info, warn}; +use 
service::{appservice::RegistrationInfo, Services}; -use super::invite_helper; -use crate::{ - service::{appservice::RegistrationInfo, pdu::PduBuilder, Services}, - Error, Result, Ruma, -}; - -/// Recommended transferable state events list from the spec -const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 9] = &[ - StateEventType::RoomServerAcl, - StateEventType::RoomEncryption, - StateEventType::RoomName, - StateEventType::RoomAvatar, - StateEventType::RoomTopic, - StateEventType::RoomGuestAccess, - StateEventType::RoomHistoryVisibility, - StateEventType::RoomJoinRules, - StateEventType::RoomPowerLevels, -]; +use crate::{client::invite_helper, Ruma}; /// # `POST /_matrix/client/v3/createRoom` /// @@ -479,333 +461,6 @@ pub(crate) async fn create_room_route( Ok(create_room::v3::Response::new(room_id)) } -/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` -/// -/// Gets a single event. -/// -/// - You have to currently be joined to the room (TODO: Respect history -/// visibility) -pub(crate) async fn get_room_event_route( - State(services): State, ref body: Ruma, -) -> Result { - Ok(get_room_event::v3::Response { - event: services - .rooms - .timeline - .get_pdu_owned(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))) - .and_then(|event| async move { - services - .rooms - .state_accessor - .user_can_see_event(body.sender_user(), &event.room_id, &body.event_id) - .await - .then_some(event) - .ok_or_else(|| err!(Request(Forbidden("You don't have permission to view this event.")))) - }) - .map_ok(|mut event| { - event.add_age().ok(); - event.to_room_event() - }) - .await?, - }) -} - -/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` -/// -/// Lists all aliases of the room. 
-/// -/// - Only users joined to the room are allowed to call this, or if -/// `history_visibility` is world readable in the room -pub(crate) async fn get_room_aliases_route( - State(services): State, body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if !services - .rooms - .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) - .await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); - } - - Ok(aliases::v3::Response { - aliases: services - .rooms - .alias - .local_aliases_for_room(&body.room_id) - .map(ToOwned::to_owned) - .collect() - .await, - }) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` -/// -/// Upgrades the room. -/// -/// - Creates a replacement room -/// - Sends a tombstone event into the current room -/// - Sender user joins the room -/// - Transfers some state events -/// - Moves local aliases -/// - Modifies old room power levels to prevent users from speaking -pub(crate) async fn upgrade_room_route( - State(services): State, body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if !services - .globals - .supported_room_versions() - .contains(&body.new_version) - { - return Err(Error::BadRequest( - ErrorKind::UnsupportedRoomVersion, - "This server does not support that room version.", - )); - } - - // Create a replacement room - let replacement_room = RoomId::new(services.globals.server_name()); - - let _short_id = services - .rooms - .short - .get_or_create_shortroomid(&replacement_room) - .await; - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - // Send a m.room.tombstone event to the old room to indicate that it is not - // intended to be used any further Fail if the sender does not have the required - // permissions - let tombstone_event_id = services - .rooms - .timeline - .build_and_append_pdu( - 
PduBuilder::state( - String::new(), - &RoomTombstoneEventContent { - body: "This room has been replaced".to_owned(), - replacement_room: replacement_room.clone(), - }, - ), - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - // Change lock to replacement room - drop(state_lock); - let state_lock = services.rooms.state.mutex.lock(&replacement_room).await; - - // Get the old room creation event - let mut create_event_content: CanonicalJsonObject = services - .rooms - .state_accessor - .room_state_get_content(&body.room_id, &StateEventType::RoomCreate, "") - .await - .map_err(|_| err!(Database("Found room without m.room.create event.")))?; - - // Use the m.room.tombstone event as the predecessor - let predecessor = Some(ruma::events::room::create::PreviousRoom::new( - body.room_id.clone(), - (*tombstone_event_id).to_owned(), - )); - - // Send a m.room.create event containing a predecessor field and the applicable - // room_version - { - use RoomVersionId::*; - match body.new_version { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - create_event_content.insert( - "creator".into(), - json!(&sender_user).try_into().map_err(|e| { - info!("Error forming creation event: {e}"); - Error::BadRequest(ErrorKind::BadJson, "Error forming creation event") - })?, - ); - }, - _ => { - // "creator" key no longer exists in V11+ rooms - create_event_content.remove("creator"); - }, - } - } - - create_event_content.insert( - "room_version".into(), - json!(&body.new_version) - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, - ); - create_event_content.insert( - "predecessor".into(), - json!(predecessor) - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, - ); - - // Validate creation event content - if serde_json::from_str::( - to_raw_value(&create_event_content) - .expect("Error forming creation event") - .get(), - ) - .is_err() - { - return 
Err(Error::BadRequest(ErrorKind::BadJson, "Error forming creation event")); - } - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&create_event_content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(String::new()), - redacts: None, - timestamp: None, - }, - sender_user, - &replacement_room, - &state_lock, - ) - .await?; - - // Join the new room - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - is_direct: None, - third_party_invite: None, - blurhash: services.users.blurhash(sender_user).await.ok(), - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - timestamp: None, - }, - sender_user, - &replacement_room, - &state_lock, - ) - .await?; - - // Replicate transferable state events to the new room - for event_type in TRANSFERABLE_STATE_EVENTS { - let event_content = match services - .rooms - .state_accessor - .room_state_get(&body.room_id, event_type, "") - .await - { - Ok(v) => v.content.clone(), - Err(_) => continue, // Skipping missing events. 
- }; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: event_type.to_string().into(), - content: event_content, - state_key: Some(String::new()), - ..Default::default() - }, - sender_user, - &replacement_room, - &state_lock, - ) - .await?; - } - - // Moves any local aliases to the new room - let mut local_aliases = services - .rooms - .alias - .local_aliases_for_room(&body.room_id) - .boxed(); - - while let Some(alias) = local_aliases.next().await { - services - .rooms - .alias - .remove_alias(alias, sender_user) - .await?; - - services - .rooms - .alias - .set_alias(alias, &replacement_room, sender_user)?; - } - - // Get the old room power levels - let power_levels_event_content: RoomPowerLevelsEventContent = services - .rooms - .state_accessor - .room_state_get_content(&body.room_id, &StateEventType::RoomPowerLevels, "") - .await - .map_err(|_| err!(Database("Found room without m.room.power_levels event.")))?; - - // Setting events_default and invite to the greater of 50 and users_default + 1 - let new_level = max( - int!(50), - power_levels_event_content - .users_default - .checked_add(int!(1)) - .ok_or_else(|| err!(Request(BadJson("users_default power levels event content is not valid"))))?, - ); - - // Modify the power levels in the old room to prevent sending of events and - // inviting new users - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomPowerLevelsEventContent { - events_default: new_level, - invite: new_level, - ..power_levels_event_content - }, - ), - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - // Return the replacement room id - Ok(upgrade_room::v3::Response { - replacement_room, - }) -} - /// creates the power_levels_content for the PDU builder fn default_power_levels_content( power_level_content_override: Option<&Raw>, visibility: &room::Visibility, diff --git a/src/api/client/room/event.rs 
b/src/api/client/room/event.rs new file mode 100644 index 00000000..0f44f25d --- /dev/null +++ b/src/api/client/room/event.rs @@ -0,0 +1,38 @@ +use axum::extract::State; +use conduit::{err, Result}; +use futures::TryFutureExt; +use ruma::api::client::room::get_room_event; + +use crate::Ruma; + +/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` +/// +/// Gets a single event. +/// +/// - You have to currently be joined to the room (TODO: Respect history +/// visibility) +pub(crate) async fn get_room_event_route( + State(services): State, ref body: Ruma, +) -> Result { + Ok(get_room_event::v3::Response { + event: services + .rooms + .timeline + .get_pdu_owned(&body.event_id) + .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))) + .and_then(|event| async move { + services + .rooms + .state_accessor + .user_can_see_event(body.sender_user(), &event.room_id, &body.event_id) + .await + .then_some(event) + .ok_or_else(|| err!(Request(Forbidden("You don't have permission to view this event.")))) + }) + .map_ok(|mut event| { + event.add_age().ok(); + event.to_room_event() + }) + .await?, + }) +} diff --git a/src/api/client/room/mod.rs b/src/api/client/room/mod.rs new file mode 100644 index 00000000..fa2d168f --- /dev/null +++ b/src/api/client/room/mod.rs @@ -0,0 +1,9 @@ +mod aliases; +mod create; +mod event; +mod upgrade; + +pub(crate) use self::{ + aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route, + upgrade::upgrade_room_route, +}; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs new file mode 100644 index 00000000..ad5c356e --- /dev/null +++ b/src/api/client/room/upgrade.rs @@ -0,0 +1,294 @@ +use std::cmp::max; + +use axum::extract::State; +use conduit::{err, info, pdu::PduBuilder, Error, Result}; +use futures::StreamExt; +use ruma::{ + api::client::{error::ErrorKind, room::upgrade_room}, + events::{ + room::{ + member::{MembershipState, RoomMemberEventContent}, + 
power_levels::RoomPowerLevelsEventContent, + tombstone::RoomTombstoneEventContent, + }, + StateEventType, TimelineEventType, + }, + int, CanonicalJsonObject, RoomId, RoomVersionId, +}; +use serde_json::{json, value::to_raw_value}; + +use crate::Ruma; + +/// Recommended transferable state events list from the spec +const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 9] = &[ + StateEventType::RoomServerAcl, + StateEventType::RoomEncryption, + StateEventType::RoomName, + StateEventType::RoomAvatar, + StateEventType::RoomTopic, + StateEventType::RoomGuestAccess, + StateEventType::RoomHistoryVisibility, + StateEventType::RoomJoinRules, + StateEventType::RoomPowerLevels, +]; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` +/// +/// Upgrades the room. +/// +/// - Creates a replacement room +/// - Sends a tombstone event into the current room +/// - Sender user joins the room +/// - Transfers some state events +/// - Moves local aliases +/// - Modifies old room power levels to prevent users from speaking +pub(crate) async fn upgrade_room_route( + State(services): State, body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services + .globals + .supported_room_versions() + .contains(&body.new_version) + { + return Err(Error::BadRequest( + ErrorKind::UnsupportedRoomVersion, + "This server does not support that room version.", + )); + } + + // Create a replacement room + let replacement_room = RoomId::new(services.globals.server_name()); + + let _short_id = services + .rooms + .short + .get_or_create_shortroomid(&replacement_room) + .await; + + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + // Send a m.room.tombstone event to the old room to indicate that it is not + // intended to be used any further Fail if the sender does not have the required + // permissions + let tombstone_event_id = services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state( + 
String::new(), + &RoomTombstoneEventContent { + body: "This room has been replaced".to_owned(), + replacement_room: replacement_room.clone(), + }, + ), + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + // Change lock to replacement room + drop(state_lock); + let state_lock = services.rooms.state.mutex.lock(&replacement_room).await; + + // Get the old room creation event + let mut create_event_content: CanonicalJsonObject = services + .rooms + .state_accessor + .room_state_get_content(&body.room_id, &StateEventType::RoomCreate, "") + .await + .map_err(|_| err!(Database("Found room without m.room.create event.")))?; + + // Use the m.room.tombstone event as the predecessor + let predecessor = Some(ruma::events::room::create::PreviousRoom::new( + body.room_id.clone(), + (*tombstone_event_id).to_owned(), + )); + + // Send a m.room.create event containing a predecessor field and the applicable + // room_version + { + use RoomVersionId::*; + match body.new_version { + V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + create_event_content.insert( + "creator".into(), + json!(&sender_user).try_into().map_err(|e| { + info!("Error forming creation event: {e}"); + Error::BadRequest(ErrorKind::BadJson, "Error forming creation event") + })?, + ); + }, + _ => { + // "creator" key no longer exists in V11+ rooms + create_event_content.remove("creator"); + }, + } + } + + create_event_content.insert( + "room_version".into(), + json!(&body.new_version) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + create_event_content.insert( + "predecessor".into(), + json!(predecessor) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + + // Validate creation event content + if serde_json::from_str::( + to_raw_value(&create_event_content) + .expect("Error forming creation event") + .get(), + ) + .is_err() + { + return Err(Error::BadRequest(ErrorKind::BadJson, 
"Error forming creation event")); + } + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCreate, + content: to_raw_value(&create_event_content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + timestamp: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; + + // Join the new room + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + is_direct: None, + third_party_invite: None, + blurhash: services.users.blurhash(sender_user).await.ok(), + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + timestamp: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; + + // Replicate transferable state events to the new room + for event_type in TRANSFERABLE_STATE_EVENTS { + let event_content = match services + .rooms + .state_accessor + .room_state_get(&body.room_id, event_type, "") + .await + { + Ok(v) => v.content.clone(), + Err(_) => continue, // Skipping missing events. 
+ }; + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: event_type.to_string().into(), + content: event_content, + state_key: Some(String::new()), + ..Default::default() + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; + } + + // Moves any local aliases to the new room + let mut local_aliases = services + .rooms + .alias + .local_aliases_for_room(&body.room_id) + .boxed(); + + while let Some(alias) = local_aliases.next().await { + services + .rooms + .alias + .remove_alias(alias, sender_user) + .await?; + + services + .rooms + .alias + .set_alias(alias, &replacement_room, sender_user)?; + } + + // Get the old room power levels + let power_levels_event_content: RoomPowerLevelsEventContent = services + .rooms + .state_accessor + .room_state_get_content(&body.room_id, &StateEventType::RoomPowerLevels, "") + .await + .map_err(|_| err!(Database("Found room without m.room.power_levels event.")))?; + + // Setting events_default and invite to the greater of 50 and users_default + 1 + let new_level = max( + int!(50), + power_levels_event_content + .users_default + .checked_add(int!(1)) + .ok_or_else(|| err!(Request(BadJson("users_default power levels event content is not valid"))))?, + ); + + // Modify the power levels in the old room to prevent sending of events and + // inviting new users + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::new(), + &RoomPowerLevelsEventContent { + events_default: new_level, + invite: new_level, + ..power_levels_event_content + }, + ), + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + // Return the replacement room id + Ok(upgrade_room::v3::Response { + replacement_room, + }) +} From e507c3130673099692143a59adc30a414ef6ca54 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 6 Nov 2024 22:21:51 +0000 Subject: [PATCH 0182/1248] make pdu batch tokens zeroith-indexed Signed-off-by: Jason Volk --- 
src/api/client/context.rs | 24 +++++++++++++++--------- src/api/client/message.rs | 15 ++++++++++----- src/api/client/relations.rs | 5 +---- src/api/client/sync/mod.rs | 2 +- src/api/client/sync/v3.rs | 15 +++++---------- src/api/server/backfill.rs | 2 +- src/core/pdu/count.rs | 19 +++++++++++++++++++ src/service/rooms/timeline/data.rs | 18 +++++++----------- src/service/rooms/timeline/mod.rs | 19 ++++++++----------- 9 files changed, 67 insertions(+), 52 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 5b492cb1..d07f6ac1 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -2,7 +2,7 @@ use std::iter::once; use axum::extract::State; use conduit::{ - err, error, + at, err, error, utils::{future::TryExtExt, stream::ReadyExt, IterStream}, Err, Result, }; @@ -82,7 +82,7 @@ pub(crate) async fn get_context_route( let events_before: Vec<_> = services .rooms .timeline - .pdus_until(sender_user, room_id, base_token) + .pdus_rev(sender_user, room_id, base_token.saturating_sub(1)) .await? .ready_filter_map(|item| event_filter(item, filter)) .filter_map(|item| ignored_filter(&services, item, sender_user)) @@ -94,7 +94,7 @@ pub(crate) async fn get_context_route( let events_after: Vec<_> = services .rooms .timeline - .pdus_after(sender_user, room_id, base_token) + .pdus(sender_user, room_id, base_token.saturating_add(1)) .await? 
.ready_filter_map(|item| event_filter(item, filter)) .filter_map(|item| ignored_filter(&services, item, sender_user)) @@ -168,22 +168,28 @@ pub(crate) async fn get_context_route( start: events_before .last() - .map_or_else(|| base_token.to_string(), |(count, _)| count.to_string()) - .into(), + .map(at!(0)) + .map(|count| count.saturating_sub(1)) + .as_ref() + .map(ToString::to_string), end: events_after .last() - .map_or_else(|| base_token.to_string(), |(count, _)| count.to_string()) - .into(), + .map(at!(0)) + .map(|count| count.saturating_add(1)) + .as_ref() + .map(ToString::to_string), events_before: events_before .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) + .map(at!(1)) + .map(|pdu| pdu.to_room_event()) .collect(), events_after: events_after .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) + .map(at!(1)) + .map(|pdu| pdu.to_room_event()) .collect(), state, diff --git a/src/api/client/message.rs b/src/api/client/message.rs index cb261a7f..e76325aa 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -100,14 +100,14 @@ pub(crate) async fn get_message_events_route( Direction::Forward => services .rooms .timeline - .pdus_after(sender_user, room_id, from) + .pdus(sender_user, room_id, from) .await? .boxed(), Direction::Backward => services .rooms .timeline - .pdus_until(sender_user, room_id, from) + .pdus_rev(sender_user, room_id, from) .await? 
.boxed(), }; @@ -136,7 +136,12 @@ pub(crate) async fn get_message_events_route( .collect() .await; - let next_token = events.last().map(|(count, _)| count).copied(); + let start_token = events.first().map(at!(0)).unwrap_or(from); + + let next_token = events + .last() + .map(at!(0)) + .map(|count| count.saturating_inc(body.dir)); if !cfg!(feature = "element_hacks") { if let Some(next_token) = next_token { @@ -154,8 +159,8 @@ pub(crate) async fn get_message_events_route( .collect(); Ok(get_message_events::v3::Response { - start: from.to_string(), - end: next_token.as_ref().map(PduCount::to_string), + start: start_token.to_string(), + end: next_token.as_ref().map(ToString::to_string), chunk, state, }) diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index b5d1485b..ee62dbfc 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -150,10 +150,7 @@ async fn paginate_relations_with_filter( Direction::Backward => events.first(), } .map(at!(0)) - .map(|count| match dir { - Direction::Forward => count.saturating_add(1), - Direction::Backward => count.saturating_sub(1), - }) + .map(|count| count.saturating_inc(dir)) .as_ref() .map(ToString::to_string); diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 7aec7186..f047d176 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -24,7 +24,7 @@ async fn load_timeline( let mut non_timeline_pdus = services .rooms .timeline - .pdus_until(sender_user, room_id, PduCount::max()) + .pdus_rev(sender_user, room_id, PduCount::max()) .await? 
.ready_take_while(|(pducount, _)| *pducount > roomsincecount); diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 00976c78..ea487d8e 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduit::{ - err, error, extract_variant, is_equal_to, + at, err, error, extract_variant, is_equal_to, result::FlatOk, utils::{math::ruma_from_u64, BoolExt, IterStream, ReadyExt, TryFutureExtExt}, PduCount, @@ -945,15 +945,10 @@ async fn load_joined_room( let prev_batch = timeline_pdus .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { - error!("timeline in backfill state?!"); - "0".to_owned() - }, - PduCount::Normal(c) => c.to_string(), - })) - })?; + .map(at!(0)) + .map(|count| count.saturating_sub(1)) + .as_ref() + .map(ToString::to_string); let room_events: Vec<_> = timeline_pdus .iter() diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 281bf2a2..47f02841 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -51,7 +51,7 @@ pub(crate) async fn get_backfill_route( let pdus = services .rooms .timeline - .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until) + .pdus_rev(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until) .await? 
.take(limit) .filter_map(|(_, pdu)| async move { diff --git a/src/core/pdu/count.rs b/src/core/pdu/count.rs index 90e552e8..aceec1e8 100644 --- a/src/core/pdu/count.rs +++ b/src/core/pdu/count.rs @@ -2,6 +2,8 @@ use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr}; +use ruma::api::Direction; + use crate::{err, Error, Result}; #[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] @@ -54,6 +56,14 @@ impl PduCount { } } + #[inline] + pub fn checked_inc(self, dir: Direction) -> Result { + match dir { + Direction::Forward => self.checked_add(1), + Direction::Backward => self.checked_sub(1), + } + } + #[inline] pub fn checked_add(self, add: u64) -> Result { Ok(match self { @@ -82,6 +92,15 @@ impl PduCount { }) } + #[inline] + #[must_use] + pub fn saturating_inc(self, dir: Direction) -> Self { + match dir { + Direction::Forward => self.saturating_add(1), + Direction::Backward => self.saturating_sub(1), + } + } + #[inline] #[must_use] pub fn saturating_add(self, add: u64) -> Self { diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index f062e7e4..f320e6a0 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -62,7 +62,7 @@ impl Data { { hash_map::Entry::Occupied(o) => Ok(*o.get()), hash_map::Entry::Vacant(v) => Ok(self - .pdus_until(sender_user, room_id, PduCount::max()) + .pdus_rev(sender_user, room_id, PduCount::max()) .await? .next() .await @@ -201,10 +201,10 @@ impl Data { /// Returns an iterator over all events and their tokens in a room that /// happened before the event with id `until` in reverse-chronological /// order. 
- pub(super) async fn pdus_until<'a>( + pub(super) async fn pdus_rev<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, ) -> Result + Send + 'a> { - let current = self.count_to_id(room_id, until, true).await?; + let current = self.count_to_id(room_id, until).await?; let prefix = current.shortroomid(); let stream = self .pduid_pdu @@ -216,10 +216,10 @@ impl Data { Ok(stream) } - pub(super) async fn pdus_after<'a>( + pub(super) async fn pdus<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, from: PduCount, ) -> Result + Send + 'a> { - let current = self.count_to_id(room_id, from, false).await?; + let current = self.count_to_id(room_id, from).await?; let prefix = current.shortroomid(); let stream = self .pduid_pdu @@ -266,7 +266,7 @@ impl Data { } } - async fn count_to_id(&self, room_id: &RoomId, count: PduCount, subtract: bool) -> Result { + async fn count_to_id(&self, room_id: &RoomId, shorteventid: PduCount) -> Result { let shortroomid: ShortRoomId = self .services .short @@ -277,11 +277,7 @@ impl Data { // +1 so we don't send the base event let pdu_id = PduId { shortroomid, - shorteventid: if subtract { - count.checked_sub(1)? - } else { - count.checked_add(1)? - }, + shorteventid, }; Ok(pdu_id.into()) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 8255be7d..81d372d7 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -177,7 +177,7 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result> { - self.pdus_until(user_id!("@placeholder:conduwuit.placeholder"), room_id, PduCount::max()) + self.pdus_rev(user_id!("@placeholder:conduwuit.placeholder"), room_id, PduCount::max()) .await? 
.next() .await @@ -976,26 +976,23 @@ impl Service { pub async fn all_pdus<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, ) -> Result + Send + 'a> { - self.pdus_after(user_id, room_id, PduCount::min()).await + self.pdus(user_id, room_id, PduCount::min()).await } - /// Returns an iterator over all events and their tokens in a room that - /// happened before the event with id `until` in reverse-chronological - /// order. + /// Reverse iteration starting at from. #[tracing::instrument(skip(self), level = "debug")] - pub async fn pdus_until<'a>( + pub async fn pdus_rev<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, ) -> Result + Send + 'a> { - self.db.pdus_until(user_id, room_id, until).await + self.db.pdus_rev(user_id, room_id, until).await } - /// Returns an iterator over all events and their token in a room that - /// happened after the event with id `from` in chronological order. + /// Forward iteration starting at from. #[tracing::instrument(skip(self), level = "debug")] - pub async fn pdus_after<'a>( + pub async fn pdus<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, from: PduCount, ) -> Result + Send + 'a> { - self.db.pdus_after(user_id, room_id, from).await + self.db.pdus(user_id, room_id, from).await } /// Replace a PDU with the redacted form. 
From 79c6b518605da12a65a7c0ae3a769931c6eed93b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 7 Nov 2024 03:30:47 +0000 Subject: [PATCH 0183/1248] renames for core pdu Signed-off-by: Jason Volk --- src/core/pdu/content.rs | 4 ++-- src/core/pdu/count.rs | 36 +++++++++++++++++++------------ src/core/pdu/event.rs | 4 ++-- src/core/pdu/filter.rs | 10 ++++----- src/core/pdu/id.rs | 12 +++++------ src/core/pdu/mod.rs | 15 +++++++------ src/core/pdu/raw_id.rs | 46 ++++++++++++++++++---------------------- src/core/pdu/redact.rs | 10 ++++----- src/core/pdu/relation.rs | 2 +- src/core/pdu/strip.rs | 20 ++++++++--------- src/core/pdu/tests.rs | 10 ++++----- src/core/pdu/unsigned.rs | 43 ++++++++++++++++++++++++++++++------- 12 files changed, 123 insertions(+), 89 deletions(-) diff --git a/src/core/pdu/content.rs b/src/core/pdu/content.rs index a6d86554..fa724cb2 100644 --- a/src/core/pdu/content.rs +++ b/src/core/pdu/content.rs @@ -4,13 +4,13 @@ use serde_json::value::Value as JsonValue; use crate::{err, implement, Result}; #[must_use] -#[implement(super::PduEvent)] +#[implement(super::Pdu)] pub fn get_content_as_value(&self) -> JsonValue { self.get_content() .expect("pdu content must be a valid JSON value") } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] pub fn get_content(&self) -> Result where T: for<'de> Deserialize<'de>, diff --git a/src/core/pdu/count.rs b/src/core/pdu/count.rs index aceec1e8..85222382 100644 --- a/src/core/pdu/count.rs +++ b/src/core/pdu/count.rs @@ -7,12 +7,12 @@ use ruma::api::Direction; use crate::{err, Error, Result}; #[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] -pub enum PduCount { +pub enum Count { Normal(u64), Backfilled(i64), } -impl PduCount { +impl Count { #[inline] #[must_use] pub fn from_unsigned(unsigned: u64) -> Self { Self::from_signed(unsigned as i64) } @@ -69,11 +69,11 @@ impl PduCount { Ok(match self { Self::Normal(i) => Self::Normal( i.checked_add(add) - .ok_or_else(|| err!(Arithmetic("PduCount::Normal 
overflow")))?, + .ok_or_else(|| err!(Arithmetic("Count::Normal overflow")))?, ), Self::Backfilled(i) => Self::Backfilled( i.checked_add(add as i64) - .ok_or_else(|| err!(Arithmetic("PduCount::Backfilled overflow")))?, + .ok_or_else(|| err!(Arithmetic("Count::Backfilled overflow")))?, ), }) } @@ -83,11 +83,11 @@ impl PduCount { Ok(match self { Self::Normal(i) => Self::Normal( i.checked_sub(sub) - .ok_or_else(|| err!(Arithmetic("PduCount::Normal underflow")))?, + .ok_or_else(|| err!(Arithmetic("Count::Normal underflow")))?, ), Self::Backfilled(i) => Self::Backfilled( i.checked_sub(sub as i64) - .ok_or_else(|| err!(Arithmetic("PduCount::Backfilled underflow")))?, + .ok_or_else(|| err!(Arithmetic("Count::Backfilled underflow")))?, ), }) } @@ -121,11 +121,11 @@ impl PduCount { #[inline] #[must_use] - pub fn min() -> Self { Self::Backfilled(i64::MIN) } + pub const fn min() -> Self { Self::Backfilled(i64::MIN) } #[inline] #[must_use] - pub fn max() -> Self { Self::Normal(i64::MAX as u64) } + pub const fn max() -> Self { Self::Normal(i64::MAX as u64) } #[inline] pub(crate) fn debug_assert_valid(&self) { @@ -135,7 +135,7 @@ impl PduCount { } } -impl Display for PduCount { +impl Display for Count { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.debug_assert_valid(); match self { @@ -145,20 +145,30 @@ impl Display for PduCount { } } -impl FromStr for PduCount { +impl From for Count { + #[inline] + fn from(signed: i64) -> Self { Self::from_signed(signed) } +} + +impl From for Count { + #[inline] + fn from(unsigned: u64) -> Self { Self::from_unsigned(unsigned) } +} + +impl FromStr for Count { type Err = Error; fn from_str(token: &str) -> Result { Ok(Self::from_signed(token.parse()?)) } } -impl PartialOrd for PduCount { +impl PartialOrd for Count { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for PduCount { +impl Ord for Count { fn cmp(&self, other: &Self) -> Ordering { 
self.into_signed().cmp(&other.into_signed()) } } -impl Default for PduCount { +impl Default for Count { fn default() -> Self { Self::Normal(0) } } diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index 15117f92..96a1e4ba 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -4,9 +4,9 @@ pub use ruma::state_res::Event; use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; use serde_json::value::RawValue as RawJsonValue; -use super::PduEvent; +use super::Pdu; -impl Event for PduEvent { +impl Event for Pdu { type Id = Arc; fn event_id(&self) -> &Self::Id { &self.event_id } diff --git a/src/core/pdu/filter.rs b/src/core/pdu/filter.rs index bd232ebd..c7c7316d 100644 --- a/src/core/pdu/filter.rs +++ b/src/core/pdu/filter.rs @@ -3,7 +3,7 @@ use serde_json::Value; use crate::{implement, is_equal_to}; -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[must_use] pub fn matches(&self, filter: &RoomEventFilter) -> bool { if !self.matches_sender(filter) { @@ -25,7 +25,7 @@ pub fn matches(&self, filter: &RoomEventFilter) -> bool { true } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] fn matches_room(&self, filter: &RoomEventFilter) -> bool { if filter.not_rooms.contains(&self.room_id) { return false; @@ -40,7 +40,7 @@ fn matches_room(&self, filter: &RoomEventFilter) -> bool { true } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] fn matches_sender(&self, filter: &RoomEventFilter) -> bool { if filter.not_senders.contains(&self.sender) { return false; @@ -55,7 +55,7 @@ fn matches_sender(&self, filter: &RoomEventFilter) -> bool { true } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] fn matches_type(&self, filter: &RoomEventFilter) -> bool { let event_type = &self.kind.to_cow_str(); if filter.not_types.iter().any(is_equal_to!(event_type)) { @@ -71,7 +71,7 @@ fn matches_type(&self, filter: &RoomEventFilter) -> bool { true } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] fn 
matches_url(&self, filter: &RoomEventFilter) -> bool { let Some(url_filter) = filter.url_filter.as_ref() else { return true; diff --git a/src/core/pdu/id.rs b/src/core/pdu/id.rs index 05d11904..0b23a29f 100644 --- a/src/core/pdu/id.rs +++ b/src/core/pdu/id.rs @@ -1,4 +1,4 @@ -use super::{PduCount, RawPduId}; +use super::{Count, RawId}; use crate::utils::u64_from_u8x8; pub type ShortRoomId = ShortId; @@ -6,17 +6,17 @@ pub type ShortEventId = ShortId; pub type ShortId = u64; #[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct PduId { +pub struct Id { pub shortroomid: ShortRoomId, - pub shorteventid: PduCount, + pub shorteventid: Count, } -impl From for PduId { +impl From for Id { #[inline] - fn from(raw: RawPduId) -> Self { + fn from(raw: RawId) -> Self { Self { shortroomid: u64_from_u8x8(raw.shortroomid()), - shorteventid: PduCount::from_unsigned(u64_from_u8x8(raw.shorteventid())), + shorteventid: Count::from_unsigned(u64_from_u8x8(raw.shorteventid())), } } } diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index c785c99e..2aa60ed1 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -22,17 +22,18 @@ use serde_json::value::RawValue as RawJsonValue; pub use self::{ builder::{Builder, Builder as PduBuilder}, - count::PduCount, + count::Count, event::Event, event_id::*, id::*, raw_id::*, + Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, }; use crate::Result; /// Persistent Data Unit (Event) #[derive(Clone, Deserialize, Serialize, Debug)] -pub struct PduEvent { +pub struct Pdu { pub event_id: Arc, pub room_id: OwnedRoomId, pub sender: OwnedUserId, @@ -64,7 +65,7 @@ pub struct EventHash { pub sha256: String, } -impl PduEvent { +impl Pdu { pub fn from_id_val(event_id: &EventId, mut json: CanonicalJsonObject) -> Result { let event_id = CanonicalJsonValue::String(event_id.into()); json.insert("event_id".into(), event_id); @@ -75,19 +76,19 @@ impl PduEvent { } /// Prevent derived equality which wouldn't limit itself to event_id -impl 
Eq for PduEvent {} +impl Eq for Pdu {} /// Equality determined by the Pdu's ID, not the memory representations. -impl PartialEq for PduEvent { +impl PartialEq for Pdu { fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id } } /// Ordering determined by the Pdu's ID, not the memory representations. -impl PartialOrd for PduEvent { +impl PartialOrd for Pdu { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } /// Ordering determined by the Pdu's ID, not the memory representations. -impl Ord for PduEvent { +impl Ord for Pdu { fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) } } diff --git a/src/core/pdu/raw_id.rs b/src/core/pdu/raw_id.rs index faba1cbf..ef8502f6 100644 --- a/src/core/pdu/raw_id.rs +++ b/src/core/pdu/raw_id.rs @@ -1,27 +1,27 @@ use arrayvec::ArrayVec; -use super::{PduCount, PduId, ShortEventId, ShortId, ShortRoomId}; +use super::{Count, Id, ShortEventId, ShortId, ShortRoomId}; #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -pub enum RawPduId { - Normal(RawPduIdNormal), - Backfilled(RawPduIdBackfilled), +pub enum RawId { + Normal(RawIdNormal), + Backfilled(RawIdBackfilled), } -type RawPduIdNormal = [u8; RawPduId::NORMAL_LEN]; -type RawPduIdBackfilled = [u8; RawPduId::BACKFILLED_LEN]; +type RawIdNormal = [u8; RawId::NORMAL_LEN]; +type RawIdBackfilled = [u8; RawId::BACKFILLED_LEN]; const INT_LEN: usize = size_of::(); -impl RawPduId { +impl RawId { const BACKFILLED_LEN: usize = size_of::() + INT_LEN + size_of::(); const MAX_LEN: usize = Self::BACKFILLED_LEN; const NORMAL_LEN: usize = size_of::() + size_of::(); #[inline] #[must_use] - pub fn pdu_count(&self) -> PduCount { - let id: PduId = (*self).into(); + pub fn pdu_count(&self) -> Count { + let id: Id = (*self).into(); id.shorteventid } @@ -61,55 +61,51 @@ impl RawPduId { } } -impl AsRef<[u8]> for RawPduId { +impl AsRef<[u8]> for RawId { #[inline] fn as_ref(&self) -> &[u8] { self.as_bytes() } } -impl From<&[u8]> for RawPduId { +impl 
From<&[u8]> for RawId { #[inline] fn from(id: &[u8]) -> Self { match id.len() { Self::NORMAL_LEN => Self::Normal( id[0..Self::NORMAL_LEN] .try_into() - .expect("normal RawPduId from [u8]"), + .expect("normal RawId from [u8]"), ), Self::BACKFILLED_LEN => Self::Backfilled( id[0..Self::BACKFILLED_LEN] .try_into() - .expect("backfilled RawPduId from [u8]"), + .expect("backfilled RawId from [u8]"), ), - _ => unimplemented!("unrecognized RawPduId length"), + _ => unimplemented!("unrecognized RawId length"), } } } -impl From for RawPduId { +impl From for RawId { #[inline] - fn from(id: PduId) -> Self { - const MAX_LEN: usize = RawPduId::MAX_LEN; + fn from(id: Id) -> Self { + const MAX_LEN: usize = RawId::MAX_LEN; type RawVec = ArrayVec; let mut vec = RawVec::new(); vec.extend(id.shortroomid.to_be_bytes()); id.shorteventid.debug_assert_valid(); match id.shorteventid { - PduCount::Normal(shorteventid) => { + Count::Normal(shorteventid) => { vec.extend(shorteventid.to_be_bytes()); - Self::Normal( - vec.as_ref() - .try_into() - .expect("RawVec into RawPduId::Normal"), - ) + Self::Normal(vec.as_ref().try_into().expect("RawVec into RawId::Normal")) }, - PduCount::Backfilled(shorteventid) => { + Count::Backfilled(shorteventid) => { vec.extend(0_u64.to_be_bytes()); vec.extend(shorteventid.to_be_bytes()); Self::Backfilled( vec.as_ref() .try_into() - .expect("RawVec into RawPduId::Backfilled"), + .expect("RawVec into RawId::Backfilled"), ) }, } diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs index 647f54c0..e116e563 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -18,9 +18,9 @@ struct ExtractRedactedBecause { redacted_because: Option, } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] -pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> Result<()> { +pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> Result { self.unsigned = None; let mut 
content = @@ -31,7 +31,7 @@ pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> Resul self.unsigned = Some( to_raw_value(&json!({ - "redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works") + "redacted_because": serde_json::to_value(reason).expect("to_value(Pdu) always works") })) .expect("to string always works"), ); @@ -41,7 +41,7 @@ pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> Resul Ok(()) } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[must_use] pub fn is_redacted(&self) -> bool { let Some(unsigned) = &self.unsigned else { @@ -72,7 +72,7 @@ pub fn is_redacted(&self) -> bool { /// > to the content of m.room.redaction events in older room versions when /// > serving /// > such events over the Client-Server API. -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[must_use] pub fn copy_redacts(&self) -> (Option>, Box) { if self.kind == TimelineEventType::RoomRedaction { diff --git a/src/core/pdu/relation.rs b/src/core/pdu/relation.rs index ae156a3d..2968171e 100644 --- a/src/core/pdu/relation.rs +++ b/src/core/pdu/relation.rs @@ -13,7 +13,7 @@ struct ExtractRelatesToEventId { relates_to: ExtractRelType, } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[must_use] pub fn relation_type_equal(&self, rel_type: &RelationType) -> bool { self.get_content() diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 8d20d982..30fee863 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -10,7 +10,7 @@ use serde_json::{json, value::Value as JsonValue}; use crate::{implement, warn}; -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] pub fn to_sync_room_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); @@ -36,7 +36,7 @@ pub fn to_sync_room_event(&self) -> Raw { } /// This only works for events that are also AnyRoomEvents. 
-#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] pub fn to_any_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); @@ -62,7 +62,7 @@ pub fn to_any_event(&self) -> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] pub fn to_room_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); @@ -88,7 +88,7 @@ pub fn to_room_event(&self) -> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] pub fn to_message_like_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); @@ -114,7 +114,7 @@ pub fn to_message_like_event(&self) -> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[must_use] pub fn to_state_event_value(&self) -> JsonValue { let mut json = json!({ @@ -134,13 +134,13 @@ pub fn to_state_event_value(&self) -> JsonValue { json } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] pub fn to_state_event(&self) -> Raw { serde_json::from_value(self.to_state_event_value()).expect("Raw::from_value always works") } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] pub fn to_sync_state_event(&self) -> Raw { let mut json = json!({ @@ -159,7 +159,7 @@ pub fn to_sync_state_event(&self) -> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] pub fn to_stripped_state_event(&self) -> Raw { let json = json!({ @@ -172,7 +172,7 @@ pub fn to_stripped_state_event(&self) -> Raw { 
serde_json::from_value(json).expect("Raw::from_value always works") } -#[implement(super::PduEvent)] +#[implement(super::Pdu)] #[tracing::instrument(skip(self), level = "debug")] pub fn to_stripped_spacechild_state_event(&self) -> Raw { let json = json!({ @@ -186,7 +186,7 @@ pub fn to_stripped_spacechild_state_event(&self) -> Raw Raw> { let mut json = json!({ diff --git a/src/core/pdu/tests.rs b/src/core/pdu/tests.rs index 30ec23ba..ae3b1dd6 100644 --- a/src/core/pdu/tests.rs +++ b/src/core/pdu/tests.rs @@ -1,19 +1,19 @@ #![cfg(test)] -use super::PduCount; +use super::Count; #[test] fn backfilled_parse() { - let count: PduCount = "-987654".parse().expect("parse() failed"); - let backfilled = matches!(count, PduCount::Backfilled(_)); + let count: Count = "-987654".parse().expect("parse() failed"); + let backfilled = matches!(count, Count::Backfilled(_)); assert!(backfilled, "not backfilled variant"); } #[test] fn normal_parse() { - let count: PduCount = "987654".parse().expect("parse() failed"); - let backfilled = matches!(count, PduCount::Backfilled(_)); + let count: Count = "987654".parse().expect("parse() failed"); + let backfilled = matches!(count, Count::Backfilled(_)); assert!(!backfilled, "backfilled variant"); } diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index 1c47e826..6f3e4401 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -4,10 +4,11 @@ use ruma::MilliSecondsSinceUnixEpoch; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}; +use super::Pdu; use crate::{err, implement, is_true, Result}; -#[implement(super::PduEvent)] -pub fn remove_transaction_id(&mut self) -> Result<()> { +#[implement(Pdu)] +pub fn remove_transaction_id(&mut self) -> Result { let Some(unsigned) = &self.unsigned else { return Ok(()); }; @@ -23,8 +24,8 @@ pub fn remove_transaction_id(&mut self) -> Result<()> { Ok(()) } -#[implement(super::PduEvent)] -pub fn add_age(&mut self) -> 
Result<()> { +#[implement(Pdu)] +pub fn add_age(&mut self) -> Result { let mut unsigned: BTreeMap> = self .unsigned .as_ref() @@ -44,7 +45,33 @@ pub fn add_age(&mut self) -> Result<()> { Ok(()) } -#[implement(super::PduEvent)] +#[implement(Pdu)] +pub fn add_relation(&mut self, name: &str, pdu: &Pdu) -> Result { + let mut unsigned: BTreeMap = self + .unsigned + .as_ref() + .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; + + let relations: &mut JsonValue = unsigned.entry("m.relations".into()).or_default(); + if relations.as_object_mut().is_none() { + let mut object = serde_json::Map::::new(); + _ = relations.as_object_mut().insert(&mut object); + } + + relations + .as_object_mut() + .expect("we just created it") + .insert(name.to_owned(), serde_json::to_value(pdu)?); + + self.unsigned = to_raw_value(&unsigned) + .map(Some) + .expect("unsigned is valid"); + + Ok(()) +} + +#[implement(Pdu)] pub fn contains_unsigned_property(&self, property: &str, is_type: F) -> bool where F: FnOnce(&JsonValue) -> bool, @@ -55,7 +82,7 @@ where .is_some_and(is_true!()) } -#[implement(super::PduEvent)] +#[implement(Pdu)] pub fn get_unsigned_property(&self, property: &str) -> Result where T: for<'de> Deserialize<'de>, @@ -68,11 +95,11 @@ where .map_err(|e| err!(Database("Failed to deserialize unsigned.{property} into type: {e}"))) } -#[implement(super::PduEvent)] +#[implement(Pdu)] #[must_use] pub fn get_unsigned_as_value(&self) -> JsonValue { self.get_unsigned::().unwrap_or_default() } -#[implement(super::PduEvent)] +#[implement(Pdu)] pub fn get_unsigned(&self) -> Result { self.unsigned .as_ref() From 27966221f106ef2c4c4e88cc9381d9f1e2d0468e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 5 Nov 2024 04:37:08 +0000 Subject: [PATCH 0184/1248] add ready_try_fold to utils Signed-off-by: Jason Volk --- src/core/utils/result/inspect_log.rs | 2 ++ src/core/utils/stream/ready.rs | 15 ++++++++++++ 
src/core/utils/stream/try_ready.rs | 36 +++++++++++++++++++++++++++- 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/src/core/utils/result/inspect_log.rs b/src/core/utils/result/inspect_log.rs index 577761c5..e9f32663 100644 --- a/src/core/utils/result/inspect_log.rs +++ b/src/core/utils/result/inspect_log.rs @@ -11,6 +11,7 @@ where { fn log_err(self, level: Level) -> Self; + #[inline] fn err_log(self) -> Self where Self: Sized, @@ -25,6 +26,7 @@ where { fn log_err_debug(self, level: Level) -> Self; + #[inline] fn err_debug_log(self) -> Self where Self: Sized, diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index c16d1246..f4eec7d1 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -32,6 +32,11 @@ where where F: Fn(T, Item) -> T; + fn ready_fold_default(self, f: F) -> Fold, T, impl FnMut(T, Item) -> Ready> + where + F: Fn(T, Item) -> T, + T: Default; + fn ready_for_each(self, f: F) -> ForEach, impl FnMut(Item) -> Ready<()>> where F: FnMut(Item); @@ -93,6 +98,15 @@ where self.fold(init, move |a, t| ready(f(a, t))) } + #[inline] + fn ready_fold_default(self, f: F) -> Fold, T, impl FnMut(T, Item) -> Ready> + where + F: Fn(T, Item) -> T, + T: Default, + { + self.ready_fold(T::default(), f) + } + #[inline] #[allow(clippy::unit_arg)] fn ready_for_each(self, mut f: F) -> ForEach, impl FnMut(Item) -> Ready<()>> @@ -120,6 +134,7 @@ where self.scan(init, move |s, t| ready(f(s, t))) } + #[inline] fn ready_scan_each( self, init: T, f: F, ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 3fbcbc45..0daed26e 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -3,7 +3,7 @@ use futures::{ future::{ready, Ready}, - stream::{AndThen, TryForEach, TryStream, TryStreamExt}, + stream::{AndThen, TryFold, TryForEach, TryStream, TryStreamExt}, }; use crate::Result; @@ -25,6 +25,19 @@ where 
) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> where F: FnMut(S::Ok) -> Result<(), E>; + + fn ready_try_fold( + self, init: U, f: F, + ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> + where + F: Fn(U, S::Ok) -> Result; + + fn ready_try_fold_default( + self, f: F, + ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> + where + F: Fn(U, S::Ok) -> Result, + U: Default; } impl TryReadyExt for S @@ -49,4 +62,25 @@ where { self.try_for_each(move |t| ready(f(t))) } + + #[inline] + fn ready_try_fold( + self, init: U, f: F, + ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> + where + F: Fn(U, S::Ok) -> Result, + { + self.try_fold(init, move |a, t| ready(f(a, t))) + } + + #[inline] + fn ready_try_fold_default( + self, f: F, + ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> + where + F: Fn(U, S::Ok) -> Result, + U: Default, + { + self.ready_try_fold(U::default(), f) + } } From 13ef6dcbcf17e04f28ad8beaab64920e63c2aa31 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 7 Nov 2024 03:59:08 +0000 Subject: [PATCH 0185/1248] add standalone getters for shortid service Signed-off-by: Jason Volk --- src/service/rooms/short/mod.rs | 35 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 9fddf099..e8b00d9b 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -52,13 +52,7 @@ impl crate::Service for Service { pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEventId { const BUFSIZE: usize = size_of::(); - if let Ok(shorteventid) = self - .db - .eventid_shorteventid - .get(event_id) - .await - .deserialized() - { + if let Ok(shorteventid) = self.get_shorteventid(event_id).await { return shorteventid; } @@ -105,11 +99,10 @@ pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> } #[implement(Service)] -pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> 
Result { - let key = (event_type, state_key); +pub async fn get_shorteventid(&self, event_id: &EventId) -> Result { self.db - .statekey_shortstatekey - .qry(&key) + .eventid_shorteventid + .get(event_id) .await .deserialized() } @@ -118,17 +111,11 @@ pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &s pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> ShortStateKey { const BUFSIZE: usize = size_of::(); - let key = (event_type, state_key); - if let Ok(shortstatekey) = self - .db - .statekey_shortstatekey - .qry(&key) - .await - .deserialized() - { + if let Ok(shortstatekey) = self.get_shortstatekey(event_type, state_key).await { return shortstatekey; } + let key = (event_type, state_key); let shortstatekey = self.services.globals.next_count().unwrap(); debug_assert!(size_of_val(&shortstatekey) == BUFSIZE, "buffer requirement changed"); @@ -143,6 +130,16 @@ pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, sta shortstatekey } +#[implement(Service)] +pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { + let key = (event_type, state_key); + self.db + .statekey_shortstatekey + .qry(&key) + .await + .deserialized() +} + #[implement(Service)] pub async fn get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result> { const BUFSIZE: usize = size_of::(); From 1f2e939fd56319b85426457a9eb469228e287406 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 7 Nov 2024 04:49:01 +0000 Subject: [PATCH 0186/1248] optional arguments for timeline pdus iterations Signed-off-by: Jason Volk --- src/api/client/context.rs | 4 ++-- src/api/client/message.rs | 4 ++-- src/api/client/sync/mod.rs | 4 ++-- src/api/server/backfill.rs | 4 ++-- src/service/rooms/timeline/data.rs | 13 +++++++------ src/service/rooms/timeline/mod.rs | 18 +++++++++++------- 6 files changed, 26 insertions(+), 21 deletions(-) diff --git a/src/api/client/context.rs 
b/src/api/client/context.rs index d07f6ac1..f5f981ba 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -82,7 +82,7 @@ pub(crate) async fn get_context_route( let events_before: Vec<_> = services .rooms .timeline - .pdus_rev(sender_user, room_id, base_token.saturating_sub(1)) + .pdus_rev(Some(sender_user), room_id, Some(base_token.saturating_sub(1))) .await? .ready_filter_map(|item| event_filter(item, filter)) .filter_map(|item| ignored_filter(&services, item, sender_user)) @@ -94,7 +94,7 @@ pub(crate) async fn get_context_route( let events_after: Vec<_> = services .rooms .timeline - .pdus(sender_user, room_id, base_token.saturating_add(1)) + .pdus(Some(sender_user), room_id, Some(base_token.saturating_add(1))) .await? .ready_filter_map(|item| event_filter(item, filter)) .filter_map(|item| ignored_filter(&services, item, sender_user)) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index e76325aa..e8306de9 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -100,14 +100,14 @@ pub(crate) async fn get_message_events_route( Direction::Forward => services .rooms .timeline - .pdus(sender_user, room_id, from) + .pdus(Some(sender_user), room_id, Some(from)) .await? .boxed(), Direction::Backward => services .rooms .timeline - .pdus_rev(sender_user, room_id, from) + .pdus_rev(Some(sender_user), room_id, Some(from)) .await? 
.boxed(), }; diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index f047d176..3201b827 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -14,7 +14,7 @@ async fn load_timeline( let last_timeline_count = services .rooms .timeline - .last_timeline_count(sender_user, room_id) + .last_timeline_count(Some(sender_user), room_id) .await?; if last_timeline_count <= roomsincecount { @@ -24,7 +24,7 @@ async fn load_timeline( let mut non_timeline_pdus = services .rooms .timeline - .pdus_rev(sender_user, room_id, PduCount::max()) + .pdus_rev(Some(sender_user), room_id, None) .await? .ready_take_while(|(pducount, _)| *pducount > roomsincecount); diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 47f02841..be770ee8 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -6,7 +6,7 @@ use conduit::{ PduCount, Result, }; use futures::{FutureExt, StreamExt}; -use ruma::{api::federation::backfill::get_backfill, uint, user_id, MilliSecondsSinceUnixEpoch}; +use ruma::{api::federation::backfill::get_backfill, uint, MilliSecondsSinceUnixEpoch}; use super::AccessCheck; use crate::Ruma; @@ -51,7 +51,7 @@ pub(crate) async fn get_backfill_route( let pdus = services .rooms .timeline - .pdus_rev(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until) + .pdus_rev(None, &body.room_id, Some(until)) .await? 
.take(limit) .filter_map(|(_, pdu)| async move { diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index f320e6a0..7f1873ab 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,4 +1,5 @@ use std::{ + borrow::Borrow, collections::{hash_map, HashMap}, sync::Arc, }; @@ -53,7 +54,7 @@ impl Data { } } - pub(super) async fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + pub(super) async fn last_timeline_count(&self, sender_user: Option<&UserId>, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache .lock() @@ -202,7 +203,7 @@ impl Data { /// happened before the event with id `until` in reverse-chronological /// order. pub(super) async fn pdus_rev<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, + &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, until: PduCount, ) -> Result + Send + 'a> { let current = self.count_to_id(room_id, until).await?; let prefix = current.shortroomid(); @@ -211,13 +212,13 @@ impl Data { .rev_raw_stream_from(¤t) .ignore_err() .ready_take_while(move |(key, _)| key.starts_with(&prefix)) - .map(|item| Self::each_pdu(item, user_id)); + .map(move |item| Self::each_pdu(item, user_id)); Ok(stream) } pub(super) async fn pdus<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, from: PduCount, + &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, from: PduCount, ) -> Result + Send + 'a> { let current = self.count_to_id(room_id, from).await?; let prefix = current.shortroomid(); @@ -231,13 +232,13 @@ impl Data { Ok(stream) } - fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: &UserId) -> PdusIterItem { + fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: Option<&UserId>) -> PdusIterItem { let pdu_id: RawPduId = pdu_id.into(); let mut pdu = serde_json::from_slice::(pdu).expect("PduEvent in pduid_pdu database column is invalid JSON"); - if pdu.sender != user_id { + if Some(pdu.sender.borrow()) 
!= user_id { pdu.remove_transaction_id().log_err().ok(); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 81d372d7..281879d2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -177,7 +177,7 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result> { - self.pdus_rev(user_id!("@placeholder:conduwuit.placeholder"), room_id, PduCount::max()) + self.pdus_rev(None, room_id, None) .await? .next() .await @@ -186,7 +186,7 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub async fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + pub async fn last_timeline_count(&self, sender_user: Option<&UserId>, room_id: &RoomId) -> Result { self.db.last_timeline_count(sender_user, room_id).await } @@ -976,23 +976,27 @@ impl Service { pub async fn all_pdus<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, ) -> Result + Send + 'a> { - self.pdus(user_id, room_id, PduCount::min()).await + self.pdus(Some(user_id), room_id, None).await } /// Reverse iteration starting at from. #[tracing::instrument(skip(self), level = "debug")] pub async fn pdus_rev<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount, + &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, until: Option, ) -> Result + Send + 'a> { - self.db.pdus_rev(user_id, room_id, until).await + self.db + .pdus_rev(user_id, room_id, until.unwrap_or_else(PduCount::max)) + .await } /// Forward iteration starting at from. 
#[tracing::instrument(skip(self), level = "debug")] pub async fn pdus<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, from: PduCount, + &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, from: Option, ) -> Result + Send + 'a> { - self.db.pdus(user_id, room_id, from).await + self.db + .pdus(user_id, room_id, from.unwrap_or_else(PduCount::min)) + .await } /// Replace a PDU with the redacted form. From f59e8af73474aad18dd68300b245fd0ce2b8ab92 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 8 Nov 2024 05:49:28 +0000 Subject: [PATCH 0187/1248] slight cleanup/simplifications to backfil Signed-off-by: Jason Volk --- src/api/server/backfill.rs | 76 +++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index be770ee8..2858d9fd 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -16,7 +16,7 @@ use crate::Ruma; /// Retrieves events from before the sender joined the room, if the room's /// history visibility allows. pub(crate) async fn get_backfill_route( - State(services): State, body: Ruma, + State(services): State, ref body: Ruma, ) -> Result { AccessCheck { services: &services, @@ -27,7 +27,13 @@ pub(crate) async fn get_backfill_route( .check() .await?; - let until = body + let limit = body + .limit + .min(uint!(100)) + .try_into() + .expect("UInt could not be converted to usize"); + + let from = body .v .iter() .stream() @@ -38,46 +44,38 @@ pub(crate) async fn get_backfill_route( .get_pdu_count(event_id) .map(Result::ok) }) - .ready_fold(PduCount::Backfilled(0), cmp::max) - .await; - - let limit = body - .limit - .min(uint!(100)) - .try_into() - .expect("UInt could not be converted to usize"); - - let origin = body.origin(); - let pdus = services - .rooms - .timeline - .pdus_rev(None, &body.room_id, Some(until)) - .await? 
- .take(limit) - .filter_map(|(_, pdu)| async move { - if !services - .rooms - .state_accessor - .server_can_see_event(origin, &pdu.room_id, &pdu.event_id) - .await - { - return None; - } - - services - .rooms - .timeline - .get_pdu_json(&pdu.event_id) - .await - .ok() - }) - .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) - .collect() + .ready_fold(PduCount::min(), cmp::max) .await; Ok(get_backfill::v1::Response { - origin: services.globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdus, + + origin: services.globals.server_name().to_owned(), + + pdus: services + .rooms + .timeline + .pdus_rev(None, &body.room_id, Some(from)) + .await? + .take(limit) + .filter_map(|(_, pdu)| async move { + services + .rooms + .state_accessor + .server_can_see_event(body.origin(), &pdu.room_id, &pdu.event_id) + .await + .then_some(pdu) + }) + .filter_map(|pdu| async move { + services + .rooms + .timeline + .get_pdu_json(&pdu.event_id) + .await + .ok() + }) + .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) + .collect() + .await, }) } From 6eba36d7883439539b8ca0b65f04d2935e41ad05 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 8 Nov 2024 08:21:19 +0000 Subject: [PATCH 0188/1248] split make_body template Signed-off-by: Jason Volk --- src/api/router/args.rs | 47 ++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 38236db3..4c0aff4c 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -1,8 +1,8 @@ use std::{mem, ops::Deref}; use axum::{async_trait, body::Body, extract::FromRequest}; -use bytes::{BufMut, BytesMut}; -use conduit::{debug, err, trace, utils::string::EMPTY, Error, Result}; +use bytes::{BufMut, Bytes, BytesMut}; +use conduit::{debug, err, utils::string::EMPTY, Error, Result}; use ruma::{ api::IncomingRequest, CanonicalJsonValue, DeviceId, OwnedDeviceId, 
OwnedServerName, OwnedUserId, ServerName, UserId, }; @@ -103,7 +103,32 @@ fn make_body( where T: IncomingRequest + Send + Sync + 'static, { - let body = if let Some(CanonicalJsonValue::Object(json_body)) = json_body { + let body = take_body(services, request, json_body, auth); + let http_request = into_http_request(request, body); + T::try_from_http_request(http_request, &request.path).map_err(|e| err!(Request(BadJson(debug_warn!("{e}"))))) +} + +fn into_http_request(request: &Request, body: Bytes) -> hyper::Request { + let mut http_request = hyper::Request::builder() + .uri(request.parts.uri.clone()) + .method(request.parts.method.clone()); + + *http_request.headers_mut().expect("mutable http headers") = request.parts.headers.clone(); + + let http_request = http_request.body(body).expect("http request body"); + + let headers = http_request.headers(); + let method = http_request.method(); + let uri = http_request.uri(); + debug!("{method:?} {uri:?} {headers:?}"); + + http_request +} + +fn take_body( + services: &Services, request: &mut Request, json_body: &mut Option, auth: &Auth, +) -> Bytes { + if let Some(CanonicalJsonValue::Object(json_body)) = json_body { let user_id = auth.sender_user.clone().unwrap_or_else(|| { let server_name = services.globals.server_name(); UserId::parse_with_server_name(EMPTY, server_name).expect("valid user_id") @@ -131,19 +156,5 @@ where buf.into_inner().freeze() } else { mem::take(&mut request.body) - }; - - let mut http_request = hyper::Request::builder() - .uri(request.parts.uri.clone()) - .method(request.parts.method.clone()); - *http_request.headers_mut().expect("mutable http headers") = request.parts.headers.clone(); - let http_request = http_request.body(body).expect("http request body"); - - let headers = http_request.headers(); - let method = http_request.method(); - let uri = http_request.uri(); - debug!("{method:?} {uri:?} {headers:?}"); - trace!("{method:?} {uri:?} {json_body:?}"); - - T::try_from_http_request(http_request, 
&request.path).map_err(|e| err!(Request(BadJson(debug_warn!("{e}"))))) + } } From 1ce3db727fdd298ba94dd472d017c6fe7e8a92c2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 8 Nov 2024 07:30:52 +0000 Subject: [PATCH 0189/1248] split event_handler service Signed-off-by: Jason Volk --- src/service/rooms/event_handler/acl_check.rs | 35 + .../fetch_and_handle_outliers.rs | 181 +++ src/service/rooms/event_handler/fetch_prev.rs | 104 ++ .../rooms/event_handler/fetch_state.rs | 84 ++ .../event_handler/handle_incoming_pdu.rs | 172 +++ .../rooms/event_handler/handle_outlier_pdu.rs | 164 ++ .../rooms/event_handler/handle_prev_pdu.rs | 82 + src/service/rooms/event_handler/mod.rs | 1328 +---------------- .../rooms/event_handler/parse_incoming_pdu.rs | 41 +- .../rooms/event_handler/resolve_state.rs | 101 ++ .../rooms/event_handler/state_at_incoming.rs | 178 +++ .../event_handler/upgrade_outlier_pdu.rs | 298 ++++ 12 files changed, 1437 insertions(+), 1331 deletions(-) create mode 100644 src/service/rooms/event_handler/acl_check.rs create mode 100644 src/service/rooms/event_handler/fetch_and_handle_outliers.rs create mode 100644 src/service/rooms/event_handler/fetch_prev.rs create mode 100644 src/service/rooms/event_handler/fetch_state.rs create mode 100644 src/service/rooms/event_handler/handle_incoming_pdu.rs create mode 100644 src/service/rooms/event_handler/handle_outlier_pdu.rs create mode 100644 src/service/rooms/event_handler/handle_prev_pdu.rs create mode 100644 src/service/rooms/event_handler/resolve_state.rs create mode 100644 src/service/rooms/event_handler/state_at_incoming.rs create mode 100644 src/service/rooms/event_handler/upgrade_outlier_pdu.rs diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs new file mode 100644 index 00000000..f2ff1b00 --- /dev/null +++ b/src/service/rooms/event_handler/acl_check.rs @@ -0,0 +1,35 @@ +use conduit::{debug, implement, trace, warn, Err, Result}; +use ruma::{ + 
events::{room::server_acl::RoomServerAclEventContent, StateEventType}, + RoomId, ServerName, +}; + +/// Returns Ok if the acl allows the server +#[implement(super::Service)] +#[tracing::instrument(skip_all)] +pub async fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result { + let Ok(acl_event_content) = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomServerAcl, "") + .await + .map(|c: RoomServerAclEventContent| c) + .inspect(|acl| trace!("ACL content found: {acl:?}")) + .inspect_err(|e| trace!("No ACL content found: {e:?}")) + else { + return Ok(()); + }; + + if acl_event_content.allow.is_empty() { + warn!("Ignoring broken ACL event (allow key is empty)"); + return Ok(()); + } + + if acl_event_content.is_allowed(server_name) { + trace!("server {server_name} is allowed by ACL"); + Ok(()) + } else { + debug!("Server {server_name} was denied by room ACL in {room_id}"); + Err!(Request(Forbidden("Server was denied by room ACL"))) + } +} diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs new file mode 100644 index 00000000..677b78f2 --- /dev/null +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -0,0 +1,181 @@ +use std::{ + collections::{hash_map, BTreeMap, HashSet}, + sync::Arc, + time::Instant, +}; + +use conduit::{ + debug, debug_error, implement, info, pdu, trace, utils::math::continue_exponential_backoff_secs, warn, PduEvent, +}; +use ruma::{api::federation::event::get_event, CanonicalJsonValue, EventId, RoomId, RoomVersionId, ServerName}; + +/// Find the event and auth it. Once the event is validated (steps 1 - 8) +/// it is appended to the outliers Tree. +/// +/// Returns pdu and if we fetched it over federation the raw json. +/// +/// a. Look in the main timeline (pduid_pdu tree) +/// b. Look at outlier pdu tree +/// c. Ask origin server over federation +/// d. 
TODO: Ask other servers over federation? +#[implement(super::Service)] +pub(super) async fn fetch_and_handle_outliers<'a>( + &self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, + room_version_id: &'a RoomVersionId, +) -> Vec<(Arc, Option>)> { + let back_off = |id| match self + .services + .globals + .bad_event_ratelimiter + .write() + .expect("locked") + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)), + }; + + let mut events_with_auth_events = Vec::with_capacity(events.len()); + for id in events { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { + trace!("Found {id} in db"); + events_with_auth_events.push((id, Some(local_pdu), vec![])); + continue; + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. 
+ let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); + let mut events_all = HashSet::with_capacity(todo_auth_events.len()); + while let Some(next_id) = todo_auth_events.pop() { + if let Some((time, tries)) = self + .services + .globals + .bad_event_ratelimiter + .read() + .expect("locked") + .get(&*next_id) + { + // Exponential backoff + const MIN_DURATION: u64 = 5 * 60; + const MAX_DURATION: u64 = 60 * 60 * 24; + if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { + info!("Backing off from {next_id}"); + continue; + } + } + + if events_all.contains(&next_id) { + continue; + } + + if self.services.timeline.pdu_exists(&next_id).await { + trace!("Found {next_id} in db"); + continue; + } + + debug!("Fetching {next_id} over federation."); + match self + .services + .sending + .send_federation_request( + origin, + get_event::v1::Request { + event_id: (*next_id).to_owned(), + include_unredacted_content: None, + }, + ) + .await + { + Ok(res) => { + debug!("Got {next_id} over federation"); + let Ok((calculated_event_id, value)) = pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) + else { + back_off((*next_id).to_owned()); + continue; + }; + + if calculated_event_id != *next_id { + warn!( + "Server didn't return event id we requested: requested: {next_id}, we got \ + {calculated_event_id}. 
Event: {:?}", + &res.pdu + ); + } + + if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { + for auth_event in auth_events { + if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + }, + Err(e) => { + debug_error!("Failed to fetch event {next_id}: {e}"); + back_off((*next_id).to_owned()); + }, + } + } + events_with_auth_events.push((id, None, events_in_reverse_order)); + } + + let mut pdus = Vec::with_capacity(events_with_auth_events.len()); + for (id, local_pdu, events_in_reverse_order) in events_with_auth_events { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Some(local_pdu) = local_pdu { + trace!("Found {id} in db"); + pdus.push((local_pdu.clone(), None)); + } + + for (next_id, value) in events_in_reverse_order.into_iter().rev() { + if let Some((time, tries)) = self + .services + .globals + .bad_event_ratelimiter + .read() + .expect("locked") + .get(&*next_id) + { + // Exponential backoff + const MIN_DURATION: u64 = 5 * 60; + const MAX_DURATION: u64 = 60 * 60 * 24; + if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { + debug!("Backing off from {next_id}"); + continue; + } + } + + match Box::pin(self.handle_outlier_pdu(origin, create_event, &next_id, room_id, value.clone(), true)).await + { + Ok((pdu, json)) => { + if next_id == *id { + pdus.push((pdu, Some(json))); + } + }, + Err(e) => { + warn!("Authentication of event {next_id} failed: {e:?}"); + back_off(next_id.into()); + }, + } + } + } + pdus +} diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs new file mode 100644 index 
00000000..4acdba1d --- /dev/null +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -0,0 +1,104 @@ +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + sync::Arc, +}; + +use conduit::{debug_warn, err, implement, PduEvent, Result}; +use futures::{future, FutureExt}; +use ruma::{ + int, + state_res::{self}, + uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, +}; + +use super::check_room_id; + +#[implement(super::Service)] +#[allow(clippy::type_complexity)] +#[tracing::instrument(skip_all)] +pub(super) async fn fetch_prev( + &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, + initial_set: Vec>, +) -> Result<( + Vec>, + HashMap, (Arc, BTreeMap)>, +)> { + let mut graph: HashMap, _> = HashMap::with_capacity(initial_set.len()); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = initial_set; + + let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; + + let mut amount = 0; + + while let Some(prev_event_id) = todo_outlier_stack.pop() { + self.services.server.check_running()?; + + if let Some((pdu, mut json_opt)) = self + .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id, room_version_id) + .boxed() + .await + .pop() + { + check_room_id(room_id, &pdu)?; + + let limit = self.services.server.config.max_fetch_prev_events; + if amount > limit { + debug_warn!("Max prev event limit reached! 
Limit: {limit}"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + } + + if json_opt.is_none() { + json_opt = self + .services + .outlier + .get_outlier_pdu_json(&prev_event_id) + .await + .ok(); + } + + if let Some(json) = json_opt { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount = amount.saturating_add(1); + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(prev_prev.clone()); + } + } + + graph.insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } + + let event_fetch = |event_id| { + let origin_server_ts = eventid_info + .get(&event_id) + .cloned() + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts); + + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. 
+ future::ok((int!(0), MilliSecondsSinceUnixEpoch(origin_server_ts))) + }; + + let sorted = state_res::lexicographical_topological_sort(&graph, &event_fetch) + .await + .map_err(|e| err!(Database(error!("Error sorting prev events: {e}"))))?; + + Ok((sorted, eventid_info)) +} diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs new file mode 100644 index 00000000..74b0bb32 --- /dev/null +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -0,0 +1,84 @@ +use std::{ + collections::{hash_map, HashMap}, + sync::Arc, +}; + +use conduit::{debug, implement, warn, Err, Error, PduEvent, Result}; +use futures::FutureExt; +use ruma::{ + api::federation::event::get_room_state_ids, events::StateEventType, EventId, RoomId, RoomVersionId, ServerName, +}; + +/// Call /state_ids to find out what the state at this pdu is. We trust the +/// server's response to some extend (sic), but we still do a lot of checks +/// on the events +#[implement(super::Service)] +#[tracing::instrument(skip(self, create_event, room_version_id))] +pub(super) async fn fetch_state( + &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, + event_id: &EventId, +) -> Result>>> { + debug!("Fetching state ids"); + let res = self + .services + .sending + .send_synapse_request( + origin, + get_room_state_ids::v1::Request { + room_id: room_id.to_owned(), + event_id: (*event_id).to_owned(), + }, + ) + .await + .inspect_err(|e| warn!("Fetching state for event failed: {e}"))?; + + debug!("Fetching state events"); + let collect = res + .pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(); + + let state_vec = self + .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id) + .boxed() + .await; + + let mut state: HashMap<_, Arc> = HashMap::with_capacity(state_vec.len()); + for (pdu, _) in state_vec { + let state_key = pdu + .state_key + .clone() + .ok_or_else(|| 
Error::bad_database("Found non-state pdu in state events."))?; + + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) + .await; + + match state.entry(shortstatekey) { + hash_map::Entry::Vacant(v) => { + v.insert(Arc::from(&*pdu.event_id)); + }, + hash_map::Entry::Occupied(_) => { + return Err(Error::bad_database( + "State event's type and state_key combination exists multiple times.", + )) + }, + } + } + + // The original create event must still be in the state + let create_shortstatekey = self + .services + .short + .get_shortstatekey(&StateEventType::RoomCreate, "") + .await?; + + if state.get(&create_shortstatekey).map(AsRef::as_ref) != Some(&create_event.event_id) { + return Err!(Database("Incoming event refers to wrong create event.")); + } + + Ok(Some(state)) +} diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs new file mode 100644 index 00000000..4d2d75d5 --- /dev/null +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -0,0 +1,172 @@ +use std::{ + collections::{hash_map, BTreeMap}, + time::Instant, +}; + +use conduit::{debug, err, implement, warn, Error, Result}; +use futures::FutureExt; +use ruma::{ + api::client::error::ErrorKind, events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId, +}; + +use super::{check_room_id, get_room_version_id}; +use crate::rooms::timeline::RawPduId; + +/// When receiving an event one needs to: +/// 0. Check the server is in the room +/// 1. Skip the PDU if we already know about it +/// 1.1. Remove unsigned field +/// 2. Check signatures, otherwise drop +/// 3. Check content hash, redact if doesn't match +/// 4. Fetch any missing auth events doing all checks listed here starting at 1. +/// These are not timeline events +/// 5. 
Reject "due to auth events" if can't get all the auth events or some of +/// the auth events are also rejected "due to auth events" +/// 6. Reject "due to auth events" if the event doesn't pass auth based on the +/// auth events +/// 7. Persist this event as an outlier +/// 8. If not timeline event: stop +/// 9. Fetch any missing prev events doing all checks listed here starting at 1. +/// These are timeline events +/// 10. Fetch missing state and auth chain events by calling `/state_ids` at +/// backwards extremities doing all the checks in this list starting at +/// 1. These are not timeline events +/// 11. Check the auth of the event passes based on the state of the event +/// 12. Ensure that the state is derived from the previous current state (i.e. +/// we calculated by doing state res where one of the inputs was a +/// previously trusted set of state, don't just trust a set of state we got +/// from a remote) +/// 13. Use state resolution to find new room state +/// 14. Check if the event passes auth based on the "current state" of the room, +/// if not soft fail it +#[implement(super::Service)] +#[tracing::instrument(skip(self, origin, value, is_timeline_event), name = "pdu")] +pub async fn handle_incoming_pdu<'a>( + &self, origin: &'a ServerName, room_id: &'a RoomId, event_id: &'a EventId, + value: BTreeMap, is_timeline_event: bool, +) -> Result> { + // 1. 
Skip the PDU if we already have it as a timeline event + if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await { + return Ok(Some(pdu_id)); + } + + // 1.1 Check the server is in the room + if !self.services.metadata.exists(room_id).await { + return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); + } + + // 1.2 Check if the room is disabled + if self.services.metadata.is_disabled(room_id).await { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Federation of this room is currently disabled on this server.", + )); + } + + // 1.3.1 Check room ACL on origin field/server + self.acl_check(origin, room_id).await?; + + // 1.3.2 Check room ACL on sender's server name + let sender: &UserId = value + .get("sender") + .try_into() + .map_err(|e| err!(Request(InvalidParam("PDU does not have a valid sender key: {e}"))))?; + + self.acl_check(sender.server_name(), room_id).await?; + + // Fetch create event + let create_event = self + .services + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await?; + + // Procure the room version + let room_version_id = get_room_version_id(&create_event)?; + + let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; + + let (incoming_pdu, val) = self + .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) + .boxed() + .await?; + + check_room_id(room_id, &incoming_pdu)?; + + // 8. if not timeline event: stop + if !is_timeline_event { + return Ok(None); + } + // Skip old events + if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + return Ok(None); + } + + // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
+ // These are timeline events + let (sorted_prev_events, mut eventid_info) = self + .fetch_prev( + origin, + &create_event, + room_id, + &room_version_id, + incoming_pdu.prev_events.clone(), + ) + .await?; + + debug!(events = ?sorted_prev_events, "Got previous events"); + for prev_id in sorted_prev_events { + self.services.server.check_running()?; + if let Err(e) = self + .handle_prev_pdu( + origin, + event_id, + room_id, + &mut eventid_info, + &create_event, + &first_pdu_in_room, + &prev_id, + ) + .await + { + use hash_map::Entry; + + let now = Instant::now(); + warn!("Prev event {prev_id} failed: {e}"); + + match self + .services + .globals + .bad_event_ratelimiter + .write() + .expect("locked") + .entry(prev_id.into()) + { + Entry::Vacant(e) => { + e.insert((now, 1)); + }, + Entry::Occupied(mut e) => { + *e.get_mut() = (now, e.get().1.saturating_add(1)); + }, + }; + } + } + + // Done with prev events, now handling the incoming event + let start_time = Instant::now(); + self.federation_handletime + .write() + .expect("locked") + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + + let r = self + .upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id) + .await; + + self.federation_handletime + .write() + .expect("locked") + .remove(&room_id.to_owned()); + + r +} diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs new file mode 100644 index 00000000..2d95ff63 --- /dev/null +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -0,0 +1,164 @@ +use std::{ + collections::{hash_map, BTreeMap, HashMap}, + sync::Arc, +}; + +use conduit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; +use futures::future::ready; +use ruma::{ + api::client::error::ErrorKind, + events::StateEventType, + state_res::{self, EventTypeExt}, + CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, +}; + +use 
super::{check_room_id, get_room_version_id, to_room_version}; + +#[implement(super::Service)] +#[allow(clippy::too_many_arguments)] +pub(super) async fn handle_outlier_pdu<'a>( + &self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, + mut value: CanonicalJsonObject, auth_events_known: bool, +) -> Result<(Arc, BTreeMap)> { + // 1. Remove unsigned field + value.remove("unsigned"); + + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // 2. Check signatures, otherwise drop + // 3. check content hash, redact if doesn't match + let room_version_id = get_room_version_id(create_event)?; + let mut val = match self + .services + .server_keys + .verify_event(&value, Some(&room_version_id)) + .await + { + Ok(ruma::signatures::Verified::All) => value, + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + debug_info!("Calculated hash does not match (redaction): {event_id}"); + let Ok(obj) = ruma::canonical_json::redact(value, &room_version_id, None) else { + return Err!(Request(InvalidParam("Redaction failed"))); + }; + + // Skip the PDU if it is redacted and we already have it as an outlier event + if self.services.timeline.pdu_exists(event_id).await { + return Err!(Request(InvalidParam("Event was redacted and we already knew about it"))); + } + + obj + }, + Err(e) => { + return Err!(Request(InvalidParam(debug_error!( + "Signature verification failed for {event_id}: {e}" + )))) + }, + }; + + // Now that we have checked the signature and hashes we can add the eventID and + // convert to our PduEvent type + val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + let incoming_pdu = + serde_json::from_value::(serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue")) + .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; + + check_room_id(room_id, &incoming_pdu)?; + + 
if !auth_events_known { + // 4. fetch any missing auth events doing all checks listed here starting at 1. + // These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of + // the auth events are also rejected "due to auth events" + // NOTE: Step 5 is not applied anymore because it failed too often + debug!("Fetching auth events"); + Box::pin( + self.fetch_and_handle_outliers( + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>>(), + create_event, + room_id, + &room_version_id, + ), + ) + .await; + } + + // 6. Reject "due to auth events" if the event doesn't pass auth based on the + // auth events + debug!("Checking based on auth events"); + // Build map of auth events + let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); + for id in &incoming_pdu.auth_events { + let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { + warn!("Could not find auth event {id}"); + continue; + }; + + check_room_id(room_id, &auth_event)?; + + match auth_events.entry(( + auth_event.kind.to_string().into(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + hash_map::Entry::Vacant(v) => { + v.insert(auth_event); + }, + hash_map::Entry::Occupied(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth event's type and state_key combination exists multiple times.", + )); + }, + } + } + + // The original create event must be in the auth events + if !matches!( + auth_events + .get(&(StateEventType::RoomCreate, String::new())) + .map(AsRef::as_ref), + Some(_) | None + ) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Incoming event refers to wrong create event.", + )); + } + + let state_fetch = |ty: &'static StateEventType, sk: &str| { + let key = ty.with_state_key(sk); + ready(auth_events.get(&key)) + }; + + let auth_check = state_res::event_auth::auth_check( + 
&to_room_version(&room_version_id), + &incoming_pdu, + None, // TODO: third party invite + state_fetch, + ) + .await + .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; + + if !auth_check { + return Err!(Request(Forbidden("Auth check failed"))); + } + + trace!("Validation successful."); + + // 7. Persist the event as an outlier. + self.services + .outlier + .add_pdu_outlier(&incoming_pdu.event_id, &val); + + trace!("Added pdu as outlier."); + + Ok((Arc::new(incoming_pdu), val)) +} diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs new file mode 100644 index 00000000..90ff7f06 --- /dev/null +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -0,0 +1,82 @@ +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, + time::Instant, +}; + +use conduit::{debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result}; +use ruma::{api::client::error::ErrorKind, CanonicalJsonValue, EventId, RoomId, ServerName}; + +#[implement(super::Service)] +#[allow(clippy::type_complexity)] +#[allow(clippy::too_many_arguments)] +#[tracing::instrument( + skip(self, origin, event_id, room_id, eventid_info, create_event, first_pdu_in_room), + name = "prev" +)] +pub(super) async fn handle_prev_pdu<'a>( + &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, + eventid_info: &mut HashMap, (Arc, BTreeMap)>, + create_event: &Arc, first_pdu_in_room: &Arc, prev_id: &EventId, +) -> Result { + // Check for disabled again because it might have changed + if self.services.metadata.is_disabled(room_id).await { + debug!( + "Federaton of room {room_id} is currently disabled on this server. 
Request by origin {origin} and event \ + ID {event_id}" + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Federation of this room is currently disabled on this server.", + )); + } + + if let Some((time, tries)) = self + .services + .globals + .bad_event_ratelimiter + .read() + .expect("locked") + .get(prev_id) + { + // Exponential backoff + const MIN_DURATION: u64 = 5 * 60; + const MAX_DURATION: u64 = 60 * 60 * 24; + if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { + debug!( + ?tries, + duration = ?time.elapsed(), + "Backing off from prev_event" + ); + return Ok(()); + } + } + + if let Some((pdu, json)) = eventid_info.remove(prev_id) { + // Skip old events + if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + return Ok(()); + } + + let start_time = Instant::now(); + self.federation_handletime + .write() + .expect("locked") + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + + self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) + .await?; + + self.federation_handletime + .write() + .expect("locked") + .remove(&room_id.to_owned()); + + debug!( + elapsed = ?start_time.elapsed(), + "Handled prev_event", + ); + } + + Ok(()) +} diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index f76f817d..f6440fe9 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,51 +1,34 @@ +mod acl_check; +mod fetch_and_handle_outliers; +mod fetch_prev; +mod fetch_state; +mod handle_incoming_pdu; +mod handle_outlier_pdu; +mod handle_prev_pdu; mod parse_incoming_pdu; +mod resolve_state; +mod state_at_incoming; +mod upgrade_outlier_pdu; use std::{ - borrow::Borrow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, + collections::HashMap, fmt::Write, sync::{Arc, RwLock as StdRwLock}, time::Instant, }; -use conduit::{ - debug, debug_error, debug_info, debug_warn, err, info, pdu, - 
result::LogErr, - trace, - utils::{math::continue_exponential_backoff_secs, IterStream, MutexMap}, - warn, Err, Error, PduEvent, Result, Server, -}; -use futures::{future, future::ready, FutureExt, StreamExt, TryFutureExt}; +use conduit::{utils::MutexMap, Err, PduEvent, Result, Server}; use ruma::{ - api::{ - client::error::ErrorKind, - federation::event::{get_event, get_room_state_ids}, - }, - events::{ - room::{ - create::RoomCreateEventContent, redaction::RoomRedactionEventContent, server_acl::RoomServerAclEventContent, - }, - StateEventType, TimelineEventType, - }, - int, - state_res::{self, EventTypeExt, RoomVersion, StateMap}, - uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, - RoomId, RoomVersionId, ServerName, UserId, + events::room::create::RoomCreateEventContent, state_res::RoomVersion, EventId, OwnedEventId, OwnedRoomId, RoomId, + RoomVersionId, }; -use crate::{ - globals, rooms, - rooms::{ - state_compressor::{CompressedStateEvent, HashSetCompressStateEvent}, - timeline::RawPduId, - }, - sending, server_keys, Dep, -}; +use crate::{globals, rooms, sending, server_keys, Dep}; pub struct Service { - services: Services, - pub federation_handletime: StdRwLock, pub mutex_federation: RoomMutexMap, + pub federation_handletime: StdRwLock, + services: Services, } struct Services { @@ -70,6 +53,8 @@ type HandleTimeMap = HashMap; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { + mutex_federation: RoomMutexMap::new(), + federation_handletime: HandleTimeMap::new().into(), services: Services { globals: args.depend::("globals"), sending: args.depend::("sending"), @@ -85,8 +70,6 @@ impl crate::Service for Service { timeline: args.depend::("rooms::timeline"), server: args.server.clone(), }, - federation_handletime: HandleTimeMap::new().into(), - mutex_federation: RoomMutexMap::new(), })) } @@ -108,1279 +91,6 @@ impl crate::Service for Service { } impl Service 
{ - /// When receiving an event one needs to: - /// 0. Check the server is in the room - /// 1. Skip the PDU if we already know about it - /// 1.1. Remove unsigned field - /// 2. Check signatures, otherwise drop - /// 3. Check content hash, redact if doesn't match - /// 4. Fetch any missing auth events doing all checks listed here starting - /// at 1. These are not timeline events - /// 5. Reject "due to auth events" if can't get all the auth events or some - /// of the auth events are also rejected "due to auth events" - /// 6. Reject "due to auth events" if the event doesn't pass auth based on - /// the auth events - /// 7. Persist this event as an outlier - /// 8. If not timeline event: stop - /// 9. Fetch any missing prev events doing all checks listed here starting - /// at 1. These are timeline events - /// 10. Fetch missing state and auth chain events by calling `/state_ids` at - /// backwards extremities doing all the checks in this list starting at - /// 1. These are not timeline events - /// 11. Check the auth of the event passes based on the state of the event - /// 12. Ensure that the state is derived from the previous current state - /// (i.e. we calculated by doing state res where one of the inputs was a - /// previously trusted set of state, don't just trust a set of state we - /// got from a remote) - /// 13. Use state resolution to find new room state - /// 14. Check if the event passes auth based on the "current state" of the - /// room, if not soft fail it - #[tracing::instrument(skip(self, origin, value, is_timeline_event), name = "pdu")] - pub async fn handle_incoming_pdu<'a>( - &self, origin: &'a ServerName, room_id: &'a RoomId, event_id: &'a EventId, - value: BTreeMap, is_timeline_event: bool, - ) -> Result> { - // 1. 
Skip the PDU if we already have it as a timeline event - if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await { - return Ok(Some(pdu_id)); - } - - // 1.1 Check the server is in the room - if !self.services.metadata.exists(room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); - } - - // 1.2 Check if the room is disabled - if self.services.metadata.is_disabled(room_id).await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Federation of this room is currently disabled on this server.", - )); - } - - // 1.3.1 Check room ACL on origin field/server - self.acl_check(origin, room_id).await?; - - // 1.3.2 Check room ACL on sender's server name - let sender: &UserId = value - .get("sender") - .try_into() - .map_err(|e| err!(Request(InvalidParam("PDU does not have a valid sender key: {e}"))))?; - - self.acl_check(sender.server_name(), room_id).await?; - - // Fetch create event - let create_event = self - .services - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await?; - - // Procure the room version - let room_version_id = get_room_version_id(&create_event)?; - - let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; - - let (incoming_pdu, val) = self - .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) - .boxed() - .await?; - - check_room_id(room_id, &incoming_pdu)?; - - // 8. if not timeline event: stop - if !is_timeline_event { - return Ok(None); - } - // Skip old events - if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - return Ok(None); - } - - // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
- // These are timeline events - let (sorted_prev_events, mut eventid_info) = self - .fetch_prev( - origin, - &create_event, - room_id, - &room_version_id, - incoming_pdu.prev_events.clone(), - ) - .await?; - - debug!(events = ?sorted_prev_events, "Got previous events"); - for prev_id in sorted_prev_events { - self.services.server.check_running()?; - if let Err(e) = self - .handle_prev_pdu( - origin, - event_id, - room_id, - &mut eventid_info, - &create_event, - &first_pdu_in_room, - &prev_id, - ) - .await - { - use hash_map::Entry; - - let now = Instant::now(); - warn!("Prev event {prev_id} failed: {e}"); - - match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(prev_id.into()) - { - Entry::Vacant(e) => { - e.insert((now, 1)); - }, - Entry::Occupied(mut e) => { - *e.get_mut() = (now, e.get().1.saturating_add(1)); - }, - }; - } - } - - // Done with prev events, now handling the incoming event - let start_time = Instant::now(); - self.federation_handletime - .write() - .expect("locked") - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - - let r = self - .upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id) - .await; - - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - r - } - - #[allow(clippy::type_complexity)] - #[allow(clippy::too_many_arguments)] - #[tracing::instrument( - skip(self, origin, event_id, room_id, eventid_info, create_event, first_pdu_in_room), - name = "prev" - )] - pub async fn handle_prev_pdu<'a>( - &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - eventid_info: &mut HashMap, (Arc, BTreeMap)>, - create_event: &Arc, first_pdu_in_room: &Arc, prev_id: &EventId, - ) -> Result<()> { - // Check for disabled again because it might have changed - if self.services.metadata.is_disabled(room_id).await { - debug!( - "Federaton of room {room_id} is currently disabled on this server. 
Request by origin {origin} and \ - event ID {event_id}" - ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Federation of this room is currently disabled on this server.", - )); - } - - if let Some((time, tries)) = self - .services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(prev_id) - { - // Exponential backoff - const MIN_DURATION: u64 = 5 * 60; - const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { - debug!( - ?tries, - duration = ?time.elapsed(), - "Backing off from prev_event" - ); - return Ok(()); - } - } - - if let Some((pdu, json)) = eventid_info.remove(prev_id) { - // Skip old events - if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - return Ok(()); - } - - let start_time = Instant::now(); - self.federation_handletime - .write() - .expect("locked") - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - - self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) - .await?; - - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - debug!( - elapsed = ?start_time.elapsed(), - "Handled prev_event", - ); - } - - Ok(()) - } - - #[allow(clippy::too_many_arguments)] - async fn handle_outlier_pdu<'a>( - &self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, - mut value: CanonicalJsonObject, auth_events_known: bool, - ) -> Result<(Arc, BTreeMap)> { - // 1. Remove unsigned field - value.remove("unsigned"); - - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // 2. Check signatures, otherwise drop - // 3. 
check content hash, redact if doesn't match - let room_version_id = get_room_version_id(create_event)?; - let mut val = match self - .services - .server_keys - .verify_event(&value, Some(&room_version_id)) - .await - { - Ok(ruma::signatures::Verified::All) => value, - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - debug_info!("Calculated hash does not match (redaction): {event_id}"); - let Ok(obj) = ruma::canonical_json::redact(value, &room_version_id, None) else { - return Err!(Request(InvalidParam("Redaction failed"))); - }; - - // Skip the PDU if it is redacted and we already have it as an outlier event - if self.services.timeline.pdu_exists(event_id).await { - return Err!(Request(InvalidParam("Event was redacted and we already knew about it"))); - } - - obj - }, - Err(e) => { - return Err!(Request(InvalidParam(debug_error!( - "Signature verification failed for {event_id}: {e}" - )))) - }, - }; - - // Now that we have checked the signature and hashes we can add the eventID and - // convert to our PduEvent type - val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; - - check_room_id(room_id, &incoming_pdu)?; - - if !auth_events_known { - // 4. fetch any missing auth events doing all checks listed here starting at 1. - // These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of - // the auth events are also rejected "due to auth events" - // NOTE: Step 5 is not applied anymore because it failed too often - debug!("Fetching auth events"); - Box::pin( - self.fetch_and_handle_outliers( - origin, - &incoming_pdu - .auth_events - .iter() - .map(|x| Arc::from(&**x)) - .collect::>>(), - create_event, - room_id, - &room_version_id, - ), - ) - .await; - } - - // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the - // auth events - debug!("Checking based on auth events"); - // Build map of auth events - let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); - for id in &incoming_pdu.auth_events { - let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { - warn!("Could not find auth event {id}"); - continue; - }; - - check_room_id(room_id, &auth_event)?; - - match auth_events.entry(( - auth_event.kind.to_string().into(), - auth_event - .state_key - .clone() - .expect("all auth events have state keys"), - )) { - hash_map::Entry::Vacant(v) => { - v.insert(auth_event); - }, - hash_map::Entry::Occupied(_) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Auth event's type and state_key combination exists multiple times.", - )); - }, - } - } - - // The original create event must be in the auth events - if !matches!( - auth_events - .get(&(StateEventType::RoomCreate, String::new())) - .map(AsRef::as_ref), - Some(_) | None - ) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Incoming event refers to wrong create event.", - )); - } - - let state_fetch = |ty: &'static StateEventType, sk: &str| { - let key = ty.with_state_key(sk); - ready(auth_events.get(&key)) - }; - - let auth_check = state_res::event_auth::auth_check( - &to_room_version(&room_version_id), - &incoming_pdu, - None, // TODO: third party invite - state_fetch, - ) - .await - .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; - - if !auth_check { - return Err!(Request(Forbidden("Auth check failed"))); - } - - trace!("Validation successful."); - - // 7. Persist the event as an outlier. 
- self.services - .outlier - .add_pdu_outlier(&incoming_pdu.event_id, &val); - - trace!("Added pdu as outlier."); - - Ok((Arc::new(incoming_pdu), val)) - } - - pub async fn upgrade_outlier_to_timeline_pdu( - &self, incoming_pdu: Arc, val: BTreeMap, create_event: &PduEvent, - origin: &ServerName, room_id: &RoomId, - ) -> Result> { - // Skip the PDU if we already have it as a timeline event - if let Ok(pduid) = self - .services - .timeline - .get_pdu_id(&incoming_pdu.event_id) - .await - { - return Ok(Some(pduid)); - } - - if self - .services - .pdu_metadata - .is_event_soft_failed(&incoming_pdu.event_id) - .await - { - return Err!(Request(InvalidParam("Event has been soft failed"))); - } - - debug!("Upgrading to timeline pdu"); - let timer = Instant::now(); - let room_version_id = get_room_version_id(create_event)?; - - // 10. Fetch missing state and auth chain events by calling /state_ids at - // backwards extremities doing all the checks in this list starting at 1. - // These are not timeline events. - - debug!("Resolving state at event"); - let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 { - self.state_at_incoming_degree_one(&incoming_pdu).await? - } else { - self.state_at_incoming_resolved(&incoming_pdu, room_id, &room_version_id) - .await? - }; - - if state_at_incoming_event.is_none() { - state_at_incoming_event = self - .fetch_state(origin, create_event, room_id, &room_version_id, &incoming_pdu.event_id) - .await?; - } - - let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); - let room_version = to_room_version(&room_version_id); - - debug!("Performing auth check"); - // 11. 
Check the auth of the event passes based on the state of the event - let state_fetch_state = &state_at_incoming_event; - let state_fetch = |k: &'static StateEventType, s: String| async move { - let shortstatekey = self.services.short.get_shortstatekey(k, &s).await.ok()?; - - let event_id = state_fetch_state.get(&shortstatekey)?; - self.services.timeline.get_pdu(event_id).await.ok() - }; - - let auth_check = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), - ) - .await - .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; - - if !auth_check { - return Err!(Request(Forbidden("Event has failed auth check with state at the event."))); - } - - debug!("Gathering auth events"); - let auth_events = self - .services - .state - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .await?; - - let state_fetch = |k: &'static StateEventType, s: &str| { - let key = k.with_state_key(s); - ready(auth_events.get(&key).cloned()) - }; - - let auth_check = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None, // third-party invite - state_fetch, - ) - .await - .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; - - // Soft fail check before doing state res - debug!("Performing soft-fail check"); - let soft_fail = { - use RoomVersionId::*; - - !auth_check - || incoming_pdu.kind == TimelineEventType::RoomRedaction - && match room_version_id { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = &incoming_pdu.redacts { - !self - .services - .state_accessor - .user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) - .await? 
- } else { - false - } - }, - _ => { - let content: RoomRedactionEventContent = incoming_pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - !self - .services - .state_accessor - .user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) - .await? - } else { - false - } - }, - } - }; - - // 13. Use state resolution to find new room state - - // We start looking at current room state now, so lets lock the room - trace!("Locking the room"); - let state_lock = self.services.state.mutex.lock(room_id).await; - - // Now we calculate the set of extremities this room has after the incoming - // event has been applied. We start with the previous extremities (aka leaves) - trace!("Calculating extremities"); - let mut extremities: HashSet<_> = self - .services - .state - .get_forward_extremities(room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - // Remove any forward extremities that are referenced by this incoming event's - // prev_events - trace!( - "Calculated {} extremities; checking against {} prev_events", - extremities.len(), - incoming_pdu.prev_events.len() - ); - for prev_event in &incoming_pdu.prev_events { - extremities.remove(&(**prev_event)); - } - - // Only keep those extremities were not referenced yet - let mut retained = HashSet::new(); - for id in &extremities { - if !self - .services - .pdu_metadata - .is_event_referenced(room_id, id) - .await - { - retained.insert(id.clone()); - } - } - - extremities.retain(|id| retained.contains(id)); - debug!("Retained {} extremities. Compressing state", extremities.len()); - - let mut state_ids_compressed = HashSet::new(); - for (shortstatekey, id) in &state_at_incoming_event { - state_ids_compressed.insert( - self.services - .state_compressor - .compress_state_event(*shortstatekey, id) - .await, - ); - } - - let state_ids_compressed = Arc::new(state_ids_compressed); - - if incoming_pdu.state_key.is_some() { - debug!("Event is a state-event. 
Deriving new room state"); - - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) - .await; - - let event_id = &incoming_pdu.event_id; - state_after.insert(shortstatekey, event_id.clone()); - } - - let new_room_state = self - .resolve_state(room_id, &room_version_id, state_after) - .await?; - - // Set the new room state to the resolved state - debug!("Forcing new room state"); - let HashSetCompressStateEvent { - shortstatehash, - added, - removed, - } = self - .services - .state_compressor - .save_state(room_id, new_room_state) - .await?; - - self.services - .state - .force_state(room_id, shortstatehash, added, removed, &state_lock) - .await?; - } - - // 14. Check if the event passes auth based on the "current state" of the room, - // if not soft fail it - if soft_fail { - debug!("Soft failing event"); - self.services - .timeline - .append_incoming_pdu( - &incoming_pdu, - val, - extremities.iter().map(|e| (**e).to_owned()).collect(), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .await?; - - // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {incoming_pdu:?}"); - self.services - .pdu_metadata - .mark_event_soft_failed(&incoming_pdu.event_id); - - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); - } - - trace!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone().into()); - - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. 
- let pdu_id = self - .services - .timeline - .append_incoming_pdu( - &incoming_pdu, - val, - extremities.into_iter().collect(), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .await?; - - // Event has passed all auth/stateres checks - drop(state_lock); - debug_info!( - elapsed = ?timer.elapsed(), - "Accepted", - ); - - Ok(pdu_id) - } - - #[tracing::instrument(skip_all, name = "resolve")] - pub async fn resolve_state( - &self, room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap>, - ) -> Result>> { - debug!("Loading current room state ids"); - let current_sstatehash = self - .services - .state - .get_room_shortstatehash(room_id) - .await - .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}"))))?; - - let current_state_ids = self - .services - .state_accessor - .state_full_ids(current_sstatehash) - .await?; - - let fork_states = [current_state_ids, incoming_state]; - let mut auth_chain_sets = Vec::with_capacity(fork_states.len()); - for state in &fork_states { - let starting_events: Vec<&EventId> = state.values().map(Borrow::borrow).collect(); - - let auth_chain: HashSet> = self - .services - .auth_chain - .get_event_ids(room_id, &starting_events) - .await? 
- .into_iter() - .collect(); - - auth_chain_sets.push(auth_chain); - } - - debug!("Loading fork states"); - let fork_states: Vec>> = fork_states - .into_iter() - .stream() - .then(|fork_state| { - fork_state - .into_iter() - .stream() - .filter_map(|(k, id)| { - self.services - .short - .get_statekey_from_short(k) - .map_ok_or_else(|_| None, move |(ty, st_key)| Some(((ty, st_key), id))) - }) - .collect() - }) - .collect() - .boxed() - .await; - - debug!("Resolving state"); - let lock = self.services.globals.stateres_mutex.lock(); - - let event_fetch = |event_id| self.event_fetch(event_id); - let event_exists = |event_id| self.event_exists(event_id); - let state = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) - .await - .map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?; - - drop(lock); - - debug!("State resolution done. Compressing state"); - let mut new_room_state = HashSet::new(); - for ((event_type, state_key), event_id) in state { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key) - .await; - - let compressed = self - .services - .state_compressor - .compress_state_event(shortstatekey, &event_id) - .await; - - new_room_state.insert(compressed); - } - - Ok(Arc::new(new_room_state)) - } - - // TODO: if we know the prev_events of the incoming event we can avoid the - // request and build the state from a known point and resolve if > 1 prev_event - #[tracing::instrument(skip_all, name = "state")] - pub async fn state_at_incoming_degree_one( - &self, incoming_pdu: &Arc, - ) -> Result>>> { - let prev_event = &*incoming_pdu.prev_events[0]; - let Ok(prev_event_sstatehash) = self - .services - .state_accessor - .pdu_shortstatehash(prev_event) - .await - else { - return Ok(None); - }; - - let Ok(mut state) = self - .services - .state_accessor - .state_full_ids(prev_event_sstatehash) - .await - .log_err() - else { - return 
Ok(None); - }; - - debug!("Using cached state"); - let prev_pdu = self - .services - .timeline - .get_pdu(prev_event) - .await - .map_err(|e| err!(Database("Could not find prev event, but we know the state: {e:?}")))?; - - if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) - .await; - - state.insert(shortstatekey, Arc::from(prev_event)); - // Now it's the state after the pdu - } - - debug_assert!(!state.is_empty(), "should be returning None for empty HashMap result"); - - Ok(Some(state)) - } - - #[tracing::instrument(skip_all, name = "state")] - pub async fn state_at_incoming_resolved( - &self, incoming_pdu: &Arc, room_id: &RoomId, room_version_id: &RoomVersionId, - ) -> Result>>> { - debug!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len()); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let Ok(prev_event) = self.services.timeline.get_pdu(prev_eventid).await else { - okay = false; - break; - }; - - let Ok(sstatehash) = self - .services - .state_accessor - .pdu_shortstatehash(prev_eventid) - .await - else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if !okay { - return Ok(None); - } - - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - for (sstatehash, prev_event) in extremity_sstatehashes { - let Ok(mut leaf_state) = self - .services - .state_accessor - .state_full_ids(sstatehash) - .await - else { - continue; - }; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) - .await; - - let event_id = &prev_event.event_id; - leaf_state.insert(shortstatekey, 
event_id.clone()); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); - for (k, id) in &leaf_state { - if let Ok((ty, st_key)) = self - .services - .short - .get_statekey_from_short(*k) - .await - .log_err() - { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } - - starting_events.push(id.borrow()); - } - - let auth_chain: HashSet> = self - .services - .auth_chain - .get_event_ids(room_id, &starting_events) - .await? - .into_iter() - .collect(); - - auth_chain_sets.push(auth_chain); - fork_states.push(state); - } - - let lock = self.services.globals.stateres_mutex.lock(); - - let event_fetch = |event_id| self.event_fetch(event_id); - let event_exists = |event_id| self.event_exists(event_id); - let result = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) - .await - .map_err(|e| err!(Database(warn!(?e, "State resolution on prev events failed.")))); - - drop(lock); - - let Ok(new_state) = result else { - return Ok(None); - }; - - new_state - .iter() - .stream() - .then(|((event_type, state_key), event_id)| { - self.services - .short - .get_or_create_shortstatekey(event_type, state_key) - .map(move |shortstatekey| (shortstatekey, event_id.clone())) - }) - .collect() - .map(Some) - .map(Ok) - .await - } - - /// Call /state_ids to find out what the state at this pdu is. 
We trust the - /// server's response to some extend (sic), but we still do a lot of checks - /// on the events - #[tracing::instrument(skip(self, create_event, room_version_id))] - async fn fetch_state( - &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, - event_id: &EventId, - ) -> Result>>> { - debug!("Fetching state ids"); - let res = self - .services - .sending - .send_synapse_request( - origin, - get_room_state_ids::v1::Request { - room_id: room_id.to_owned(), - event_id: (*event_id).to_owned(), - }, - ) - .await - .inspect_err(|e| warn!("Fetching state for event failed: {e}"))?; - - debug!("Fetching state events"); - let collect = res - .pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(); - - let state_vec = self - .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id) - .boxed() - .await; - - let mut state: HashMap<_, Arc> = HashMap::with_capacity(state_vec.len()); - for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; - - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) - .await; - - match state.entry(shortstatekey) { - hash_map::Entry::Vacant(v) => { - v.insert(Arc::from(&*pdu.event_id)); - }, - hash_map::Entry::Occupied(_) => { - return Err(Error::bad_database( - "State event's type and state_key combination exists multiple times.", - )) - }, - } - } - - // The original create event must still be in the state - let create_shortstatekey = self - .services - .short - .get_shortstatekey(&StateEventType::RoomCreate, "") - .await?; - - if state.get(&create_shortstatekey).map(AsRef::as_ref) != Some(&create_event.event_id) { - return Err!(Database("Incoming event refers to wrong create event.")); - } - - Ok(Some(state)) - } - - /// Find the event and auth it. 
Once the event is validated (steps 1 - 8) - /// it is appended to the outliers Tree. - /// - /// Returns pdu and if we fetched it over federation the raw json. - /// - /// a. Look in the main timeline (pduid_pdu tree) - /// b. Look at outlier pdu tree - /// c. Ask origin server over federation - /// d. TODO: Ask other servers over federation? - pub async fn fetch_and_handle_outliers<'a>( - &self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, - room_version_id: &'a RoomVersionId, - ) -> Vec<(Arc, Option>)> { - let back_off = |id| match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - }, - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)), - }; - - let mut events_with_auth_events = Vec::with_capacity(events.len()); - for id in events { - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { - trace!("Found {id} in db"); - events_with_auth_events.push((id, Some(local_pdu), vec![])); - continue; - } - - // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. 
- let mut todo_auth_events = vec![Arc::clone(id)]; - let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); - let mut events_all = HashSet::with_capacity(todo_auth_events.len()); - while let Some(next_id) = todo_auth_events.pop() { - if let Some((time, tries)) = self - .services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(&*next_id) - { - // Exponential backoff - const MIN_DURATION: u64 = 5 * 60; - const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { - info!("Backing off from {next_id}"); - continue; - } - } - - if events_all.contains(&next_id) { - continue; - } - - if self.services.timeline.pdu_exists(&next_id).await { - trace!("Found {next_id} in db"); - continue; - } - - debug!("Fetching {next_id} over federation."); - match self - .services - .sending - .send_federation_request( - origin, - get_event::v1::Request { - event_id: (*next_id).to_owned(), - include_unredacted_content: None, - }, - ) - .await - { - Ok(res) => { - debug!("Got {next_id} over federation"); - let Ok((calculated_event_id, value)) = - pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) - else { - back_off((*next_id).to_owned()); - continue; - }; - - if calculated_event_id != *next_id { - warn!( - "Server didn't return event id we requested: requested: {next_id}, we got \ - {calculated_event_id}. 
Event: {:?}", - &res.pdu - ); - } - - if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { - for auth_event in auth_events { - if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); - } - } - } else { - warn!("Auth event list invalid"); - } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - }, - Err(e) => { - debug_error!("Failed to fetch event {next_id}: {e}"); - back_off((*next_id).to_owned()); - }, - } - } - events_with_auth_events.push((id, None, events_in_reverse_order)); - } - - let mut pdus = Vec::with_capacity(events_with_auth_events.len()); - for (id, local_pdu, events_in_reverse_order) in events_with_auth_events { - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Some(local_pdu) = local_pdu { - trace!("Found {id} in db"); - pdus.push((local_pdu.clone(), None)); - } - - for (next_id, value) in events_in_reverse_order.into_iter().rev() { - if let Some((time, tries)) = self - .services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(&*next_id) - { - // Exponential backoff - const MIN_DURATION: u64 = 5 * 60; - const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { - debug!("Backing off from {next_id}"); - continue; - } - } - - match Box::pin(self.handle_outlier_pdu(origin, create_event, &next_id, room_id, value.clone(), true)) - .await - { - Ok((pdu, json)) => { - if next_id == *id { - pdus.push((pdu, Some(json))); - } - }, - Err(e) => { - warn!("Authentication of event {next_id} failed: {e:?}"); - back_off(next_id.into()); - }, - } - } - } - pdus - } - - #[allow(clippy::type_complexity)] - #[tracing::instrument(skip_all)] - async fn fetch_prev( - &self, origin: &ServerName, create_event: 
&PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, - initial_set: Vec>, - ) -> Result<( - Vec>, - HashMap, (Arc, BTreeMap)>, - )> { - let mut graph: HashMap, _> = HashMap::with_capacity(initial_set.len()); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = initial_set; - - let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; - - let mut amount = 0; - - while let Some(prev_event_id) = todo_outlier_stack.pop() { - self.services.server.check_running()?; - - if let Some((pdu, mut json_opt)) = self - .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id, room_version_id) - .boxed() - .await - .pop() - { - check_room_id(room_id, &pdu)?; - - let limit = self.services.server.config.max_fetch_prev_events; - if amount > limit { - debug_warn!("Max prev event limit reached! Limit: {limit}"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if json_opt.is_none() { - json_opt = self - .services - .outlier - .get_outlier_pdu_json(&prev_event_id) - .await - .ok(); - } - - if let Some(json) = json_opt { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount = amount.saturating_add(1); - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(prev_prev.clone()); - } - } - - graph.insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); - } else { - // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed, so this was not fetched over federation - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } - - let event_fetch = |event_id| { - let origin_server_ts = eventid_info - .get(&event_id) - .cloned() - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts); 
- - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. - future::ok((int!(0), MilliSecondsSinceUnixEpoch(origin_server_ts))) - }; - - let sorted = state_res::lexicographical_topological_sort(&graph, &event_fetch) - .await - .map_err(|e| err!(Database(error!("Error sorting prev events: {e}"))))?; - - Ok((sorted, eventid_info)) - } - - /// Returns Ok if the acl allows the server - #[tracing::instrument(skip_all)] - pub async fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { - let Ok(acl_event_content) = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomServerAcl, "") - .await - .map(|c: RoomServerAclEventContent| c) - .inspect(|acl| trace!("ACL content found: {acl:?}")) - .inspect_err(|e| trace!("No ACL content found: {e:?}")) - else { - return Ok(()); - }; - - if acl_event_content.allow.is_empty() { - warn!("Ignoring broken ACL event (allow key is empty)"); - return Ok(()); - } - - if acl_event_content.is_allowed(server_name) { - trace!("server {server_name} is allowed by ACL"); - Ok(()) - } else { - debug!("Server {server_name} was denied by room ACL in {room_id}"); - Err!(Request(Forbidden("Server was denied by room ACL"))) - } - } - async fn event_exists(&self, event_id: Arc) -> bool { self.services.timeline.pdu_exists(&event_id).await } async fn event_fetch(&self, event_id: Arc) -> Option> { diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 39920219..42f44dee 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,30 +1,27 @@ -use conduit::{err, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; +use conduit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; use ruma::{CanonicalJsonObject, CanonicalJsonValue, 
OwnedEventId, OwnedRoomId, RoomId}; use serde_json::value::RawValue as RawJsonValue; -impl super::Service { - pub async fn parse_incoming_pdu( - &self, pdu: &RawJsonValue, - ) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { - let value = serde_json::from_str::(pdu.get()) - .map_err(|e| err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))))?; +#[implement(super::Service)] +pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { + let value = serde_json::from_str::(pdu.get()) + .map_err(|e| err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))))?; - let room_id: OwnedRoomId = value - .get("room_id") - .and_then(CanonicalJsonValue::as_str) - .map(RoomId::parse) - .flat_ok_or(err!(Request(InvalidParam("Invalid room_id in pdu"))))?; + let room_id: OwnedRoomId = value + .get("room_id") + .and_then(CanonicalJsonValue::as_str) + .map(RoomId::parse) + .flat_ok_or(err!(Request(InvalidParam("Invalid room_id in pdu"))))?; - let room_version_id = self - .services - .state - .get_room_version(&room_id) - .await - .map_err(|_| err!("Server is not in room {room_id}"))?; + let room_version_id = self + .services + .state + .get_room_version(&room_id) + .await + .map_err(|_| err!("Server is not in room {room_id}"))?; - let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id) - .map_err(|e| err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))))?; + let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id) + .map_err(|e| err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))))?; - Ok((event_id, value, room_id)) - } + Ok((event_id, value, room_id)) } diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs new file mode 100644 index 00000000..0c9525dd --- /dev/null +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -0,0 +1,101 
@@ +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use conduit::{debug, err, implement, utils::IterStream, Result}; +use futures::{FutureExt, StreamExt, TryFutureExt}; +use ruma::{ + state_res::{self, StateMap}, + EventId, RoomId, RoomVersionId, +}; + +use crate::rooms::state_compressor::CompressedStateEvent; + +#[implement(super::Service)] +#[tracing::instrument(skip_all, name = "resolve")] +pub async fn resolve_state( + &self, room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap>, +) -> Result>> { + debug!("Loading current room state ids"); + let current_sstatehash = self + .services + .state + .get_room_shortstatehash(room_id) + .await + .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}"))))?; + + let current_state_ids = self + .services + .state_accessor + .state_full_ids(current_sstatehash) + .await?; + + let fork_states = [current_state_ids, incoming_state]; + let mut auth_chain_sets = Vec::with_capacity(fork_states.len()); + for state in &fork_states { + let starting_events: Vec<&EventId> = state.values().map(Borrow::borrow).collect(); + + let auth_chain: HashSet> = self + .services + .auth_chain + .get_event_ids(room_id, &starting_events) + .await? 
+ .into_iter() + .collect(); + + auth_chain_sets.push(auth_chain); + } + + debug!("Loading fork states"); + let fork_states: Vec>> = fork_states + .into_iter() + .stream() + .then(|fork_state| { + fork_state + .into_iter() + .stream() + .filter_map(|(k, id)| { + self.services + .short + .get_statekey_from_short(k) + .map_ok_or_else(|_| None, move |(ty, st_key)| Some(((ty, st_key), id))) + }) + .collect() + }) + .collect() + .boxed() + .await; + + debug!("Resolving state"); + let lock = self.services.globals.stateres_mutex.lock(); + + let event_fetch = |event_id| self.event_fetch(event_id); + let event_exists = |event_id| self.event_exists(event_id); + let state = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) + .await + .map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?; + + drop(lock); + + debug!("State resolution done. Compressing state"); + let mut new_room_state = HashSet::new(); + for ((event_type, state_key), event_id) in state { + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key) + .await; + + let compressed = self + .services + .state_compressor + .compress_state_event(shortstatekey, &event_id) + .await; + + new_room_state.insert(compressed); + } + + Ok(Arc::new(new_room_state)) +} diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs new file mode 100644 index 00000000..a200ab56 --- /dev/null +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -0,0 +1,178 @@ +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use conduit::{debug, err, implement, result::LogErr, utils::IterStream, PduEvent, Result}; +use futures::{FutureExt, StreamExt}; +use ruma::{ + state_res::{self, StateMap}, + EventId, RoomId, RoomVersionId, +}; + +// TODO: if we know the prev_events of the incoming event we can avoid the 
+#[implement(super::Service)] +// request and build the state from a known point and resolve if > 1 prev_event +#[tracing::instrument(skip_all, name = "state")] +pub(super) async fn state_at_incoming_degree_one( + &self, incoming_pdu: &Arc, +) -> Result>>> { + let prev_event = &*incoming_pdu.prev_events[0]; + let Ok(prev_event_sstatehash) = self + .services + .state_accessor + .pdu_shortstatehash(prev_event) + .await + else { + return Ok(None); + }; + + let Ok(mut state) = self + .services + .state_accessor + .state_full_ids(prev_event_sstatehash) + .await + .log_err() + else { + return Ok(None); + }; + + debug!("Using cached state"); + let prev_pdu = self + .services + .timeline + .get_pdu(prev_event) + .await + .map_err(|e| err!(Database("Could not find prev event, but we know the state: {e:?}")))?; + + if let Some(state_key) = &prev_pdu.state_key { + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) + .await; + + state.insert(shortstatekey, Arc::from(prev_event)); + // Now it's the state after the pdu + } + + debug_assert!(!state.is_empty(), "should be returning None for empty HashMap result"); + + Ok(Some(state)) +} + +#[implement(super::Service)] +#[tracing::instrument(skip_all, name = "state")] +pub(super) async fn state_at_incoming_resolved( + &self, incoming_pdu: &Arc, room_id: &RoomId, room_version_id: &RoomVersionId, +) -> Result>>> { + debug!("Calculating state at event using state res"); + let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len()); + + let mut okay = true; + for prev_eventid in &incoming_pdu.prev_events { + let Ok(prev_event) = self.services.timeline.get_pdu(prev_eventid).await else { + okay = false; + break; + }; + + let Ok(sstatehash) = self + .services + .state_accessor + .pdu_shortstatehash(prev_eventid) + .await + else { + okay = false; + break; + }; + + extremity_sstatehashes.insert(sstatehash, prev_event); + } + + if !okay { + 
return Ok(None); + } + + let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + for (sstatehash, prev_event) in extremity_sstatehashes { + let Ok(mut leaf_state) = self + .services + .state_accessor + .state_full_ids(sstatehash) + .await + else { + continue; + }; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) + .await; + + let event_id = &prev_event.event_id; + leaf_state.insert(shortstatekey, event_id.clone()); + // Now it's the state after the pdu + } + + let mut state = StateMap::with_capacity(leaf_state.len()); + let mut starting_events = Vec::with_capacity(leaf_state.len()); + for (k, id) in &leaf_state { + if let Ok((ty, st_key)) = self + .services + .short + .get_statekey_from_short(*k) + .await + .log_err() + { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); + } + + starting_events.push(id.borrow()); + } + + let auth_chain: HashSet> = self + .services + .auth_chain + .get_event_ids(room_id, &starting_events) + .await? 
+ .into_iter() + .collect(); + + auth_chain_sets.push(auth_chain); + fork_states.push(state); + } + + let lock = self.services.globals.stateres_mutex.lock(); + + let event_fetch = |event_id| self.event_fetch(event_id); + let event_exists = |event_id| self.event_exists(event_id); + let result = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) + .await + .map_err(|e| err!(Database(warn!(?e, "State resolution on prev events failed.")))); + + drop(lock); + + let Ok(new_state) = result else { + return Ok(None); + }; + + new_state + .iter() + .stream() + .then(|((event_type, state_key), event_id)| { + self.services + .short + .get_or_create_shortstatekey(event_type, state_key) + .map(move |shortstatekey| (shortstatekey, event_id.clone())) + }) + .collect() + .map(Some) + .map(Ok) + .await +} diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs new file mode 100644 index 00000000..2a1e4662 --- /dev/null +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -0,0 +1,298 @@ +use std::{ + collections::{BTreeMap, HashSet}, + sync::Arc, + time::Instant, +}; + +use conduit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; +use futures::{future::ready, StreamExt}; +use ruma::{ + api::client::error::ErrorKind, + events::{room::redaction::RoomRedactionEventContent, StateEventType, TimelineEventType}, + state_res::{self, EventTypeExt}, + CanonicalJsonValue, RoomId, RoomVersionId, ServerName, +}; + +use super::{get_room_version_id, to_room_version}; +use crate::rooms::{state_compressor::HashSetCompressStateEvent, timeline::RawPduId}; + +#[implement(super::Service)] +pub(super) async fn upgrade_outlier_to_timeline_pdu( + &self, incoming_pdu: Arc, val: BTreeMap, create_event: &PduEvent, + origin: &ServerName, room_id: &RoomId, +) -> Result> { + // Skip the PDU if we already have it as a timeline event + if let Ok(pduid) = self 
+ .services + .timeline + .get_pdu_id(&incoming_pdu.event_id) + .await + { + return Ok(Some(pduid)); + } + + if self + .services + .pdu_metadata + .is_event_soft_failed(&incoming_pdu.event_id) + .await + { + return Err!(Request(InvalidParam("Event has been soft failed"))); + } + + debug!("Upgrading to timeline pdu"); + let timer = Instant::now(); + let room_version_id = get_room_version_id(create_event)?; + + // 10. Fetch missing state and auth chain events by calling /state_ids at + // backwards extremities doing all the checks in this list starting at 1. + // These are not timeline events. + + debug!("Resolving state at event"); + let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 { + self.state_at_incoming_degree_one(&incoming_pdu).await? + } else { + self.state_at_incoming_resolved(&incoming_pdu, room_id, &room_version_id) + .await? + }; + + if state_at_incoming_event.is_none() { + state_at_incoming_event = self + .fetch_state(origin, create_event, room_id, &room_version_id, &incoming_pdu.event_id) + .await?; + } + + let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); + let room_version = to_room_version(&room_version_id); + + debug!("Performing auth check"); + // 11. 
Check the auth of the event passes based on the state of the event + let state_fetch_state = &state_at_incoming_event; + let state_fetch = |k: &'static StateEventType, s: String| async move { + let shortstatekey = self.services.short.get_shortstatekey(k, &s).await.ok()?; + + let event_id = state_fetch_state.get(&shortstatekey)?; + self.services.timeline.get_pdu(event_id).await.ok() + }; + + let auth_check = state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None, // TODO: third party invite + |k, s| state_fetch(k, s.to_owned()), + ) + .await + .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; + + if !auth_check { + return Err!(Request(Forbidden("Event has failed auth check with state at the event."))); + } + + debug!("Gathering auth events"); + let auth_events = self + .services + .state + .get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .await?; + + let state_fetch = |k: &'static StateEventType, s: &str| { + let key = k.with_state_key(s); + ready(auth_events.get(&key).cloned()) + }; + + let auth_check = state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None, // third-party invite + state_fetch, + ) + .await + .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; + + // Soft fail check before doing state res + debug!("Performing soft-fail check"); + let soft_fail = { + use RoomVersionId::*; + + !auth_check + || incoming_pdu.kind == TimelineEventType::RoomRedaction + && match room_version_id { + V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + if let Some(redact_id) = &incoming_pdu.redacts { + !self + .services + .state_accessor + .user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .await? 
+ } else { + false + } + }, + _ => { + let content: RoomRedactionEventContent = incoming_pdu.get_content()?; + if let Some(redact_id) = &content.redacts { + !self + .services + .state_accessor + .user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .await? + } else { + false + } + }, + } + }; + + // 13. Use state resolution to find new room state + + // We start looking at current room state now, so lets lock the room + trace!("Locking the room"); + let state_lock = self.services.state.mutex.lock(room_id).await; + + // Now we calculate the set of extremities this room has after the incoming + // event has been applied. We start with the previous extremities (aka leaves) + trace!("Calculating extremities"); + let mut extremities: HashSet<_> = self + .services + .state + .get_forward_extremities(room_id) + .map(ToOwned::to_owned) + .collect() + .await; + + // Remove any forward extremities that are referenced by this incoming event's + // prev_events + trace!( + "Calculated {} extremities; checking against {} prev_events", + extremities.len(), + incoming_pdu.prev_events.len() + ); + for prev_event in &incoming_pdu.prev_events { + extremities.remove(&(**prev_event)); + } + + // Only keep those extremities were not referenced yet + let mut retained = HashSet::new(); + for id in &extremities { + if !self + .services + .pdu_metadata + .is_event_referenced(room_id, id) + .await + { + retained.insert(id.clone()); + } + } + + extremities.retain(|id| retained.contains(id)); + debug!("Retained {} extremities. Compressing state", extremities.len()); + + let mut state_ids_compressed = HashSet::new(); + for (shortstatekey, id) in &state_at_incoming_event { + state_ids_compressed.insert( + self.services + .state_compressor + .compress_state_event(*shortstatekey, id) + .await, + ); + } + + let state_ids_compressed = Arc::new(state_ids_compressed); + + if incoming_pdu.state_key.is_some() { + debug!("Event is a state-event. 
Deriving new room state"); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) + .await; + + let event_id = &incoming_pdu.event_id; + state_after.insert(shortstatekey, event_id.clone()); + } + + let new_room_state = self + .resolve_state(room_id, &room_version_id, state_after) + .await?; + + // Set the new room state to the resolved state + debug!("Forcing new room state"); + let HashSetCompressStateEvent { + shortstatehash, + added, + removed, + } = self + .services + .state_compressor + .save_state(room_id, new_room_state) + .await?; + + self.services + .state + .force_state(room_id, shortstatehash, added, removed, &state_lock) + .await?; + } + + // 14. Check if the event passes auth based on the "current state" of the room, + // if not soft fail it + if soft_fail { + debug!("Soft failing event"); + self.services + .timeline + .append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .await?; + + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {incoming_pdu:?}"); + self.services + .pdu_metadata + .mark_event_soft_failed(&incoming_pdu.event_id); + + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + } + + trace!("Appending pdu to timeline"); + extremities.insert(incoming_pdu.event_id.clone().into()); + + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. 
+ let pdu_id = self + .services + .timeline + .append_incoming_pdu( + &incoming_pdu, + val, + extremities.into_iter().collect(), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .await?; + + // Event has passed all auth/stateres checks + drop(state_lock); + debug_info!( + elapsed = ?timer.elapsed(), + "Accepted", + ); + + Ok(pdu_id) +} From 10be3016466076a76ab0e9270dabb80e2acf1afa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 9 Nov 2024 01:09:09 +0000 Subject: [PATCH 0190/1248] split large notary requests into batches Signed-off-by: Jason Volk --- src/core/config/mod.rs | 8 +++ src/service/server_keys/acquire.rs | 4 ++ src/service/server_keys/get.rs | 4 +- src/service/server_keys/mod.rs | 2 +- src/service/server_keys/request.rs | 89 +++++++++++++++++++----------- 5 files changed, 71 insertions(+), 36 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 43cca4b8..cd9c1b38 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -582,6 +582,12 @@ pub struct Config { #[serde(default)] pub only_query_trusted_key_servers: bool, + /// Maximum number of keys to request in each trusted server query. + /// + /// default: 1024 + #[serde(default = "default_trusted_server_batch_size")] + pub trusted_server_batch_size: usize, + /// max log level for conduwuit. 
allows debug, info, warn, or error /// see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives /// **Caveat**: @@ -2062,3 +2068,5 @@ fn parallelism_scaled_u32(val: u32) -> u32 { } fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) } + +fn default_trusted_server_batch_size() -> usize { 256 } diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index cdaf28b4..190b4239 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -110,6 +110,10 @@ where {requested_servers} total servers; some events may not be verifiable" ); } + + for (server, key_ids) in missing { + debug_warn!(?server, ?key_ids, "missing"); + } } #[implement(super::Service)] diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 441e33d4..dc4627f7 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -89,8 +89,8 @@ pub async fn get_verify_key(&self, origin: &ServerName, key_id: &ServerSigningKe async fn get_verify_key_from_notaries(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> Result { for notary in self.services.globals.trusted_servers() { if let Ok(server_keys) = self.notary_request(notary, origin).await { - for server_key in &server_keys { - self.add_signing_keys(server_key.clone()).await; + for server_key in server_keys.clone() { + self.add_signing_keys(server_key).await; } for server_key in server_keys { diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index dae45a51..333970df 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -7,7 +7,7 @@ mod verify; use std::{collections::BTreeMap, sync::Arc, time::Duration}; -use conduit::{implement, utils::time::timepoint_from_now, Result, Server}; +use conduit::{implement, utils::timepoint_from_now, Result, Server}; use database::{Deserialized, Json, Map}; use ruma::{ 
api::federation::discovery::{ServerSigningKeys, VerifyKey}, diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index 84dd2871..7078f7cd 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -1,6 +1,6 @@ -use std::collections::BTreeMap; +use std::{collections::BTreeMap, fmt::Debug}; -use conduit::{implement, Err, Result}; +use conduit::{debug, implement, Err, Result}; use ruma::{ api::federation::discovery::{ get_remote_server_keys, @@ -25,34 +25,57 @@ where minimum_valid_until_ts: Some(self.minimum_valid_ts()), }; - let mut server_keys = RumaBatch::new(); - for (server, key_ids) in batch { - let entry = server_keys.entry(server.into()).or_default(); - for key_id in key_ids { - entry.insert(key_id.into(), criteria.clone()); - } - } + let mut server_keys = batch.fold(RumaBatch::new(), |mut batch, (server, key_ids)| { + batch + .entry(server.into()) + .or_default() + .extend(key_ids.map(|key_id| (key_id.into(), criteria.clone()))); + + batch + }); debug_assert!(!server_keys.is_empty(), "empty batch request to notary"); - let request = Request { - server_keys, - }; - self.services - .sending - .send_federation_request(notary, request) - .await - .map(|response| response.server_keys) - .map(|keys| { - keys.into_iter() - .map(|key| key.deserialize()) - .filter_map(Result::ok) - .collect() - }) + let mut results = Vec::new(); + while let Some(batch) = server_keys + .keys() + .rev() + .take(self.services.server.config.trusted_server_batch_size) + .last() + .cloned() + { + let request = Request { + server_keys: server_keys.split_off(&batch), + }; + + debug!( + ?notary, + ?batch, + remaining = %server_keys.len(), + requesting = ?request.server_keys.keys(), + "notary request" + ); + + let response = self + .services + .sending + .send_synapse_request(notary, request) + .await? 
+ .server_keys + .into_iter() + .map(|key| key.deserialize()) + .filter_map(Result::ok); + + results.extend(response); + } + + Ok(results) } #[implement(super::Service)] -pub async fn notary_request(&self, notary: &ServerName, target: &ServerName) -> Result> { +pub async fn notary_request( + &self, notary: &ServerName, target: &ServerName, +) -> Result + Clone + Debug + Send> { use get_remote_server_keys::v2::Request; let request = Request { @@ -60,17 +83,17 @@ pub async fn notary_request(&self, notary: &ServerName, target: &ServerName) -> minimum_valid_until_ts: self.minimum_valid_ts(), }; - self.services + let response = self + .services .sending .send_federation_request(notary, request) - .await - .map(|response| response.server_keys) - .map(|keys| { - keys.into_iter() - .map(|key| key.deserialize()) - .filter_map(Result::ok) - .collect() - }) + .await? + .server_keys + .into_iter() + .map(|key| key.deserialize()) + .filter_map(Result::ok); + + Ok(response) } #[implement(super::Service)] From 14fce384034c348c6ba35fc946b6cbffaa970f3e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 9 Nov 2024 02:42:09 +0000 Subject: [PATCH 0191/1248] cork around send_join response processing Signed-off-by: Jason Volk --- src/api/client/membership.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index bf8e5c33..2906d35b 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -877,6 +877,7 @@ async fn join_room_by_id_helper_remote( .await; info!("Going through send_join response room_state"); + let cork = services.db.cork_and_flush(); let mut state = HashMap::new(); for result in send_join_response.room_state.state.iter().map(|pdu| { services @@ -902,8 +903,10 @@ async fn join_room_by_id_helper_remote( state.insert(shortstatekey, pdu.event_id.clone()); } } + drop(cork); info!("Going through send_join response auth_chain"); + let cork = services.db.cork_and_flush(); for result in 
send_join_response.room_state.auth_chain.iter().map(|pdu| { services .server_keys @@ -915,6 +918,7 @@ async fn join_room_by_id_helper_remote( services.rooms.outlier.add_pdu_outlier(&event_id, &value); } + drop(cork); debug!("Running send_join auth check"); let fetch_state = &state; From cc86feded32bb94b5171462bf9ce9c7b1adde04d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 10 Nov 2024 01:49:16 +0000 Subject: [PATCH 0192/1248] bump ruma fixes for key type changes Signed-off-by: Jason Volk --- Cargo.lock | 475 +++++++++++++++++++++++++++++++-------- Cargo.toml | 2 +- src/api/client/keys.rs | 19 +- src/api/server/key.rs | 5 +- src/service/users/mod.rs | 20 +- 5 files changed, 402 insertions(+), 119 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f729d3d4..a1654ff9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -43,15 +43,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "arc-swap" @@ -127,7 +127,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -138,7 +138,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -373,7 +373,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.87", "which", ] @@ -481,9 +481,9 @@ dependencies = 
[ [[package]] name = "cc" -version = "1.1.31" +version = "1.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" dependencies = [ "jobserver", "libc", @@ -569,7 +569,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -713,7 +713,7 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - "thiserror", + "thiserror 1.0.68", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -749,7 +749,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1047,7 +1047,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1074,7 +1074,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1135,6 +1135,17 @@ dependencies = [ "subtle", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "dunce" version = "1.0.5" @@ -1184,7 +1195,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1276,7 +1287,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" dependencies = [ "nonempty", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -1350,7 +1361,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1461,9 +1472,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" [[package]] name = "hdrhistogram" @@ -1537,7 +1548,7 @@ dependencies = [ "ipnet", "once_cell", "rand", - "thiserror", + "thiserror 1.0.68", "tinyvec", "tokio", "tracing", @@ -1560,7 +1571,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -1616,7 +1627,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1753,6 +1764,124 @@ dependencies = [ "tracing", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "idna" version = "0.4.0" @@ -1765,19 +1894,30 @@ dependencies = [ [[package]] name = 
"idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] name = "image" -version = "0.25.4" +version = "0.25.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc144d44a31d753b02ce64093d532f55ff8dc4ebf2ffb8a63c0dda691385acae" +checksum = "cd6f44aed642f18953a158afeb30206f4d50da59fbc66ecb53c66488de73563b" dependencies = [ "bytemuck", "byteorder-lite", @@ -1817,7 +1957,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.1", "serde", ] @@ -1980,7 +2120,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1997,9 +2137,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.161" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -2034,6 +2174,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = 
"0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "lock_api" version = "0.4.12" @@ -2356,7 +2502,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", - "thiserror", + "thiserror 1.0.68", "urlencoding", ] @@ -2399,10 +2545,10 @@ dependencies = [ "glob", "once_cell", "opentelemetry", - "ordered-float 4.4.0", + "ordered-float 4.5.0", "percent-encoding", "rand", - "thiserror", + "thiserror 1.0.68", "tokio", "tokio-stream", ] @@ -2418,9 +2564,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.4.0" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e7ccb95e240b7c9506a3d544f10d935e142cc90b0a1d56954fb44d89ad6b97" +checksum = "c65ee1f9701bf938026630b455d5315f490640234259037edb259798b3bcf85e" dependencies = [ "num-traits", ] @@ -2502,7 +2648,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2595,7 +2741,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2667,7 +2813,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2696,7 +2842,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "version_check", "yansi", ] @@ -2721,7 +2867,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2776,7 +2922,7 @@ dependencies = [ "rustc-hash 2.0.0", "rustls 0.23.16", "socket2", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -2793,17 +2939,18 @@ 
dependencies = [ "rustc-hash 2.0.0", "rustls 0.23.16", "slab", - "thiserror", + "thiserror 1.0.68", "tinyvec", "tracing", ] [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", @@ -2980,7 +3127,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "assign", "js_int", @@ -3002,7 +3149,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "js_int", "ruma-common", @@ -3014,7 +3161,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "as_variant", "assign", @@ -3029,7 +3176,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror", + "thiserror 2.0.1", "url", "web-time 1.1.0", ] @@ -3037,7 +3184,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "as_variant", "base64 0.22.1", @@ -3055,7 +3202,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror", + "thiserror 2.0.1", "time", "tracing", "url", @@ -3067,7 +3214,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3081,7 +3228,7 @@ dependencies = [ "ruma-macros", "serde", "serde_json", - "thiserror", + "thiserror 2.0.1", "tracing", "url", "web-time 1.1.0", @@ -3091,7 +3238,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "bytes", "http", @@ -3109,16 +3256,16 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "js_int", - "thiserror", + "thiserror 2.0.1", ] [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "js_int", "ruma-common", @@ -3128,7 +3275,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "cfg-if", "once_cell", @@ -3137,14 +3284,14 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.85", + "syn 2.0.87", "toml", ] [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "js_int", "ruma-common", @@ -3156,20 +3303,20 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "headers", "http", "http-auth", "ruma-common", - "thiserror", + "thiserror 2.0.1", "tracing", ] [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3179,13 +3326,13 @@ dependencies = [ "serde_json", "sha2", "subslice", - "thiserror", + "thiserror 2.0.1", ] [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a#dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "futures-util", "itertools 0.13.0", @@ -3194,7 +3341,7 @@ dependencies = [ "ruma-events", "serde", "serde_json", - "thiserror", + "thiserror 2.0.1", "tracing", ] @@ -3261,9 +3408,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.38" +version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ "bitflags 2.6.0", "errno", @@ -3358,7 +3505,7 @@ dependencies = [ "futures-util", "pin-project", "thingbuf", - "thiserror", + "thiserror 1.0.68", "unicode-segmentation", "unicode-width", ] @@ -3415,9 +3562,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -3558,7 +3705,7 @@ dependencies = [ "rand", "serde", "serde_json", - "thiserror", + "thiserror 1.0.68", "time", "url", "uuid", @@ -3581,7 +3728,7 @@ checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3764,7 +3911,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 1.0.68", "time", ] @@ -3815,6 +3962,12 @@ dependencies = [ "der", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "strict" version = "0.2.0" @@ -3875,9 +4028,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", @@ -3899,6 +4052,17 @@ dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "tendril" version = "0.4.3" @@ -3922,7 +4086,7 @@ dependencies = [ "lazy-regex", "minimad", "serde", - "thiserror", + "thiserror 1.0.68", "unicode-width", ] @@ -3938,22 +4102,42 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.68", +] + +[[package]] +name = "thiserror" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"07c1e40dd48a282ae8edc36c732cbc219144b87fb6a4c7316d611c6b1f06ec0c" +dependencies = [ + "thiserror-impl 2.0.1", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "874aa7e446f1da8d9c3a5c95b1c5eb41d800045252121dc7f8e0ba370cee55f5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", ] [[package]] @@ -4047,6 +4231,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" version = "1.8.0" @@ -4064,9 +4258,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -4088,7 +4282,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4122,7 +4316,7 @@ checksum = "0d4770b8024672c1101b3f6733eab95b18007dbe0847a8afe341fcf79e06043f" dependencies = [ "either", "futures-util", - "thiserror", + "thiserror 1.0.68", "tokio", ] @@ -4302,7 +4496,7 @@ source = 
"git+https://github.com/girlbossceo/tracing?rev=4d78a14a5e03f539b8c6b47 dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4483,12 +4677,12 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding", "serde", ] @@ -4505,6 +4699,18 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "uuid" version = "1.11.0" @@ -4570,7 +4776,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -4604,7 +4810,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4934,6 +5140,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "xml5ever" version = "0.18.1" @@ -4951,6 +5169,30 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -4969,7 +5211,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", ] [[package]] @@ -4978,6 +5241,28 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "zstd" version = "0.13.2" diff --git a/Cargo.toml b/Cargo.toml index 3ac1556c..5ea6b4e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -316,7 +316,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "dd8b13ed2fa2ec4d9fe5c6fbb18e701ac4d4d08a" +rev = "67ffedabbf43e1ff6934df0fbf770b21e101406f" features = [ "compat", "rand", diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 44d9164c..53ec12f9 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -16,7 +16,7 @@ use ruma::{ federation, }, serde::Raw, - DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, + OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; @@ -36,13 +36,12 @@ use crate::{ pub(crate) async fn upload_keys_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); - for (key_key, key_value) in &body.one_time_keys { + for (key_id, one_time_key) in &body.one_time_keys { services .users - .add_one_time_key(sender_user, sender_device, key_key, key_value) + .add_one_time_key(sender_user, sender_device, key_id, one_time_key) .await?; } @@ -400,16 +399,16 @@ where while let Some((server, response)) = futures.next().await { if let Ok(Ok(response)) = response { - for (user, masterkey) in response.master_keys { - let (master_key_id, mut master_key) = 
parse_master_key(&user, &masterkey)?; + for (user, master_key) in response.master_keys { + let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; if let Ok(our_master_key) = services .users .get_key(&master_key_id, sender_user, &user, &allowed_signatures) .await { - let (_, our_master_key) = parse_master_key(&user, &our_master_key)?; - master_key.signatures.extend(our_master_key.signatures); + let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; + master_key.signatures.append(&mut our_master_key.signatures); } let json = serde_json::to_value(master_key).expect("to_value always works"); let raw = serde_json::from_value(json).expect("Raw::from_value always works"); @@ -467,7 +466,7 @@ fn add_unsigned_device_display_name( } pub(crate) async fn claim_keys_helper( - services: &Services, one_time_keys_input: &BTreeMap>, + services: &Services, one_time_keys_input: &BTreeMap>, ) -> Result { let mut one_time_keys = BTreeMap::new(); diff --git a/src/api/server/key.rs b/src/api/server/key.rs index 5284593d..37fffa9f 100644 --- a/src/api/server/key.rs +++ b/src/api/server/key.rs @@ -1,5 +1,4 @@ use std::{ - collections::BTreeMap, mem::take, time::{Duration, SystemTime}, }; @@ -12,7 +11,7 @@ use ruma::{ OutgoingResponse, }, serde::Raw, - MilliSecondsSinceUnixEpoch, + MilliSecondsSinceUnixEpoch, Signatures, }; /// # `GET /_matrix/key/v2/server` @@ -42,7 +41,7 @@ pub(crate) async fn get_server_keys_route(State(services): State) old_verify_keys, server_name: server_name.to_owned(), valid_until_ts: valid_until_ts(), - signatures: BTreeMap::new(), + signatures: Signatures::new(), }; let server_key = Raw::new(&server_key)?; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b9183e12..1f8c56df 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -12,8 +12,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, 
GlobalAccountDataEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId, - OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, + DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, OneTimeKeyName, OwnedDeviceId, + OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, }; use serde_json::json; @@ -341,9 +341,9 @@ impl Service { } pub async fn add_one_time_key( - &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, + &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &KeyId, one_time_key_value: &Raw, - ) -> Result<()> { + ) -> Result { // All devices have metadata // Only existing devices should be able to call this, but we shouldn't assert // either... @@ -388,8 +388,8 @@ impl Service { } pub async fn take_one_time_key( - &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result<(OwnedDeviceKeyId, Raw)> { + &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &OneTimeKeyAlgorithm, + ) -> Result<(OwnedKeyId, Raw)> { let count = self.services.globals.next_count()?.to_be_bytes(); self.db.userid_lastonetimekeyupdate.insert(user_id, count); @@ -433,23 +433,23 @@ impl Service { pub async fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, - ) -> BTreeMap { + ) -> BTreeMap { type KeyVal<'a> = ((Ignore, Ignore, &'a Unquoted), Ignore); - let mut algorithm_counts = BTreeMap::::new(); + let mut algorithm_counts = BTreeMap::::new(); let query = (user_id, device_id); self.db .onetimekeyid_onetimekeys .stream_prefix(&query) .ignore_err() .ready_for_each(|((Ignore, Ignore, device_key_id), Ignore): KeyVal<'_>| { - let device_key_id: &DeviceKeyId = device_key_id + let one_time_key_id: &OneTimeKeyId = device_key_id .as_str() .try_into() .expect("Invalid DeviceKeyID in database"); let count: &mut UInt = algorithm_counts - .entry(device_key_id.algorithm()) + 
.entry(one_time_key_id.algorithm()) .or_default(); *count = count.saturating_add(1_u32.into()); From 5e74391c6c94e2843f6cf18aaf0b10e2a613690c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 10 Nov 2024 02:29:45 +0000 Subject: [PATCH 0193/1248] fix config generator macro matchers Signed-off-by: Jason Volk --- src/macros/config.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/macros/config.rs b/src/macros/config.rs index 6ccdb73c..d7f11535 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -168,7 +168,7 @@ fn get_default(field: &Field) -> Option { .segments .iter() .next() - .is_none_or(|s| s.ident == "serde") + .is_none_or(|s| s.ident != "serde") { continue; } @@ -218,7 +218,7 @@ fn get_doc_default(field: &Field) -> Option { continue; }; - if path.segments.iter().next().is_none_or(|s| s.ident == "doc") { + if path.segments.iter().next().is_none_or(|s| s.ident != "doc") { continue; } @@ -261,7 +261,7 @@ fn get_doc_comment(field: &Field) -> Option { continue; }; - if path.segments.iter().next().is_none_or(|s| s.ident == "doc") { + if path.segments.iter().next().is_none_or(|s| s.ident != "doc") { continue; } From 7e087bb93c316fb52ab1b0dad77530eaa6608dfa Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 10 Nov 2024 03:25:57 +0000 Subject: [PATCH 0194/1248] Fixes for CI Signed-off-by: Jason Volk --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d299514..f59c5048 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,7 +65,7 @@ permissions: jobs: tests: name: Test - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Free Disk Space (Ubuntu) uses: jlumbroso/free-disk-space@main @@ -231,7 +231,7 @@ jobs: build: name: Build - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: tests strategy: matrix: @@ -245,7 +245,7 @@ jobs: - name: Sync repository uses: actions/checkout@v4 - - 
uses: nixbuild/nix-quick-install-action@v28 + - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store uses: nix-community/cache-nix-action@v5.1.0 @@ -508,7 +508,7 @@ jobs: docker: name: Docker publish - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: build if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' env: From f290d1a9c850008ce932680c91ac5a039d23c9f7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 10 Nov 2024 08:39:30 +0000 Subject: [PATCH 0195/1248] prevent retry for missing keys later in join process Signed-off-by: Jason Volk --- src/api/client/membership.rs | 79 ++++++++++++++++++------------- src/service/server_keys/mod.rs | 28 +++++++++-- src/service/server_keys/verify.rs | 20 ++++++++ 3 files changed, 91 insertions(+), 36 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 2906d35b..97aa1c69 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -878,46 +878,59 @@ async fn join_room_by_id_helper_remote( info!("Going through send_join response room_state"); let cork = services.db.cork_and_flush(); - let mut state = HashMap::new(); - for result in send_join_response.room_state.state.iter().map(|pdu| { - services - .server_keys - .validate_and_add_event_id(pdu, &room_version_id) - }) { - let Ok((event_id, value)) = result.await else { - continue; - }; + let state = send_join_response + .room_state + .state + .iter() + .stream() + .then(|pdu| { + services + .server_keys + .validate_and_add_event_id_no_fetch(pdu, &room_version_id) + }) + .ready_filter_map(Result::ok) + .fold(HashMap::new(), |mut state, (event_id, value)| async move { + let pdu = match PduEvent::from_id_val(&event_id, value.clone()) { + Ok(pdu) => pdu, + Err(e) => { + debug_warn!("Invalid PDU in 
send_join response: {e:?}: {value:#?}"); + return state; + }, + }; - let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { - debug_warn!("Invalid PDU in send_join response: {value:#?}"); - err!(BadServerResponse("Invalid PDU in send_join response: {e:?}")) - })?; + services.rooms.outlier.add_pdu_outlier(&event_id, &value); + if let Some(state_key) = &pdu.state_key { + let shortstatekey = services + .rooms + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) + .await; + + state.insert(shortstatekey, pdu.event_id.clone()); + } + + state + }) + .await; - services.rooms.outlier.add_pdu_outlier(&event_id, &value); - if let Some(state_key) = &pdu.state_key { - let shortstatekey = services - .rooms - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) - .await; - state.insert(shortstatekey, pdu.event_id.clone()); - } - } drop(cork); info!("Going through send_join response auth_chain"); let cork = services.db.cork_and_flush(); - for result in send_join_response.room_state.auth_chain.iter().map(|pdu| { - services - .server_keys - .validate_and_add_event_id(pdu, &room_version_id) - }) { - let Ok((event_id, value)) = result.await else { - continue; - }; + send_join_response + .room_state + .auth_chain + .iter() + .stream() + .then(|pdu| { + services + .server_keys + .validate_and_add_event_id_no_fetch(pdu, &room_version_id) + }) + .ready_filter_map(Result::ok) + .ready_for_each(|(event_id, value)| services.rooms.outlier.add_pdu_outlier(&event_id, &value)) + .await; - services.rooms.outlier.add_pdu_outlier(&event_id, &value); - } drop(cork); debug!("Running send_join auth check"); diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index 333970df..08bcefb6 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -7,13 +7,19 @@ mod verify; use std::{collections::BTreeMap, sync::Arc, time::Duration}; -use conduit::{implement, utils::timepoint_from_now, 
Result, Server}; +use conduit::{ + implement, + utils::{timepoint_from_now, IterStream}, + Result, Server, +}; use database::{Deserialized, Json, Map}; +use futures::StreamExt; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, serde::Raw, signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet}, - MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, ServerName, + ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; @@ -107,7 +113,23 @@ async fn add_signing_keys(&self, new_keys: ServerSigningKeys) { } #[implement(Service)] -async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool { +pub async fn required_keys_exist(&self, object: &CanonicalJsonObject, version: &RoomVersionId) -> bool { + use ruma::signatures::required_keys; + + let Ok(required_keys) = required_keys(object, version) else { + return false; + }; + + required_keys + .iter() + .flat_map(|(server, key_ids)| key_ids.iter().map(move |key_id| (server, key_id))) + .stream() + .all(|(server, key_id)| self.verify_key_exists(server, key_id)) + .await +} + +#[implement(Service)] +pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool { type KeysMap<'a> = BTreeMap<&'a ServerSigningKeyId, &'a RawJsonValue>; let Ok(keys) = self diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs index ad20fec7..c836e324 100644 --- a/src/service/server_keys/verify.rs +++ b/src/service/server_keys/verify.rs @@ -16,6 +16,26 @@ pub async fn validate_and_add_event_id( Ok((event_id, value)) } +#[implement(super::Service)] +pub async fn validate_and_add_event_id_no_fetch( + &self, pdu: &RawJsonValue, room_version: &RoomVersionId, +) -> Result<(OwnedEventId, CanonicalJsonObject)> { + let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?; + if 
!self.required_keys_exist(&value, room_version).await { + return Err!(BadServerResponse(debug_warn!( + "Event {event_id} cannot be verified: missing keys." + ))); + } + + if let Err(e) = self.verify_event(&value, Some(room_version)).await { + return Err!(BadServerResponse(debug_error!("Event {event_id} failed verification: {e:?}"))); + } + + value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into())); + + Ok((event_id, value)) +} + #[implement(super::Service)] pub async fn verify_event( &self, event: &CanonicalJsonObject, room_version: Option<&RoomVersionId>, From 1efc52c4401f3237124495c7120746a8f7aa4909 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 10 Nov 2024 11:09:48 +0000 Subject: [PATCH 0196/1248] increase logging during server keys acquire Signed-off-by: Jason Volk --- src/service/server_keys/acquire.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 190b4239..1080d79e 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -4,7 +4,7 @@ use std::{ time::Duration, }; -use conduit::{debug, debug_error, debug_warn, error, implement, result::FlatOk, trace, warn}; +use conduit::{debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn}; use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, OwnedServerName, @@ -69,7 +69,7 @@ where return; } - debug!("missing {missing_keys} keys for {missing_servers} servers locally"); + info!("{missing_keys} keys for {missing_servers} servers will be acquired"); if notary_first_always || notary_first_on_join { missing = self.acquire_notary(missing.into_iter()).await; @@ -79,7 +79,7 @@ where return; } - debug_warn!("missing {missing_keys} keys for {missing_servers} servers from all notaries first"); + warn!("missing {missing_keys} keys for 
{missing_servers} servers from all notaries first"); } if !notary_only { @@ -107,7 +107,7 @@ where if missing_keys > 0 { warn!( "did not obtain {missing_keys} keys for {missing_servers} servers out of {requested_keys} total keys for \ - {requested_servers} total servers; some events may not be verifiable" + {requested_servers} total servers." ); } From 24a5ecb6b4dedf39184e9c38282ab94db1b12d5d Mon Sep 17 00:00:00 2001 From: OverPhoenix Date: Sun, 10 Nov 2024 21:45:37 +0000 Subject: [PATCH 0197/1248] fix incorrect user id for non-admin invites checking --- src/api/client/membership.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 97aa1c69..bde8dee8 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1306,7 +1306,7 @@ pub(crate) async fn invite_helper( services: &Services, sender_user: &UserId, user_id: &UserId, room_id: &RoomId, reason: Option, is_direct: bool, ) -> Result<()> { - if !services.users.is_admin(user_id).await && services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { info!("User {sender_user} is not an admin and attempted to send an invite to room {room_id}"); return Err(Error::BadRequest( ErrorKind::forbidden(), From 08a4e931a0d5353edc01716e371a489f2c14dba3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 11 Nov 2024 20:12:20 +0000 Subject: [PATCH 0198/1248] supplement a from_str for FmtSpan Signed-off-by: Jason Volk --- src/core/log/fmt_span.rs | 17 +++++++++++++++++ src/core/log/mod.rs | 1 + 2 files changed, 18 insertions(+) create mode 100644 src/core/log/fmt_span.rs diff --git a/src/core/log/fmt_span.rs b/src/core/log/fmt_span.rs new file mode 100644 index 00000000..5a340d0f --- /dev/null +++ b/src/core/log/fmt_span.rs @@ -0,0 +1,17 @@ +use tracing_subscriber::fmt::format::FmtSpan; + +use crate::Result; + +#[inline] +pub fn from_str(str: &str) -> 
Result { + match str.to_uppercase().as_str() { + "ENTER" => Ok(FmtSpan::ENTER), + "EXIT" => Ok(FmtSpan::EXIT), + "NEW" => Ok(FmtSpan::NEW), + "CLOSE" => Ok(FmtSpan::CLOSE), + "ACTIVE" => Ok(FmtSpan::ACTIVE), + "FULL" => Ok(FmtSpan::FULL), + "NONE" => Ok(FmtSpan::NONE), + _ => Err(FmtSpan::NONE), + } +} diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 1cba236f..1c415c6a 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -1,6 +1,7 @@ pub mod capture; pub mod color; pub mod fmt; +pub mod fmt_span; mod reload; mod suppress; From 9790a6edc992d24490e19161394c3041e137331d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 11 Nov 2024 20:33:56 +0000 Subject: [PATCH 0199/1248] add unwrap_or_err to result Signed-off-by: Jason Volk --- src/core/utils/result.rs | 2 ++ src/core/utils/result/unwrap_or_err.rs | 15 +++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 src/core/utils/result/unwrap_or_err.rs diff --git a/src/core/utils/result.rs b/src/core/utils/result.rs index fb1b7b95..6b11ea66 100644 --- a/src/core/utils/result.rs +++ b/src/core/utils/result.rs @@ -7,10 +7,12 @@ mod log_err; mod map_expect; mod not_found; mod unwrap_infallible; +mod unwrap_or_err; pub use self::{ debug_inspect::DebugInspect, filter::Filter, flat_ok::FlatOk, into_is_ok::IntoIsOk, log_debug_err::LogDebugErr, log_err::LogErr, map_expect::MapExpect, not_found::NotFound, unwrap_infallible::UnwrapInfallible, + unwrap_or_err::UnwrapOrErr, }; pub type Result = std::result::Result; diff --git a/src/core/utils/result/unwrap_or_err.rs b/src/core/utils/result/unwrap_or_err.rs new file mode 100644 index 00000000..69901958 --- /dev/null +++ b/src/core/utils/result/unwrap_or_err.rs @@ -0,0 +1,15 @@ +use std::convert::identity; + +use super::Result; + +/// Returns the Ok value or the Err value. Available when the Ok and Err types +/// are the same. This is a way to default the result using the specific Err +/// value rather than unwrap_or_default() using Ok's default. 
+pub trait UnwrapOrErr { + fn unwrap_or_err(self) -> T; +} + +impl UnwrapOrErr for Result { + #[inline] + fn unwrap_or_err(self) -> T { self.unwrap_or_else(identity::) } +} From e2afaa9f039d26b85bcd518013aa6bb80ce11866 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 11 Nov 2024 20:49:25 +0000 Subject: [PATCH 0200/1248] add config item for with_span_events Signed-off-by: Jason Volk --- src/core/config/mod.rs | 9 +++++++++ src/main/tracing.rs | 8 ++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index cd9c1b38..eddab2fe 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -606,6 +606,12 @@ pub struct Config { #[serde(default = "true_fn", alias = "log_colours")] pub log_colors: bool, + /// configures the span events which will be outputted with the log + /// + /// default: "none" + #[serde(default = "default_log_span_events")] + pub log_span_events: String, + /// OpenID token expiration/TTL in seconds /// /// These are the OpenID tokens that are primarily used for Matrix account @@ -1958,6 +1964,9 @@ pub fn default_log() -> String { .to_owned() } +#[must_use] +pub fn default_log_span_events() -> String { "none".into() } + fn default_notification_push_path() -> String { "/_matrix/push/v1/notify".to_owned() } fn default_openid_token_ttl() -> u64 { 60 * 60 } diff --git a/src/main/tracing.rs b/src/main/tracing.rs index 9b4ad659..c28fef6b 100644 --- a/src/main/tracing.rs +++ b/src/main/tracing.rs @@ -3,7 +3,8 @@ use std::sync::Arc; use conduit::{ config::Config, debug_warn, err, - log::{capture, LogLevelReloadHandles}, + log::{capture, fmt_span, LogLevelReloadHandles}, + result::UnwrapOrErr, Result, }; use tracing_subscriber::{layer::SubscriberExt, reload, EnvFilter, Layer, Registry}; @@ -18,7 +19,10 @@ pub(crate) fn init(config: &Config) -> Result<(LogLevelReloadHandles, TracingFla let reload_handles = LogLevelReloadHandles::default(); let console_filter = 
EnvFilter::try_new(&config.log).map_err(|e| err!(Config("log", "{e}.")))?; - let console_layer = tracing_subscriber::fmt::Layer::new().with_ansi(config.log_colors); + let console_span_events = fmt_span::from_str(&config.log_span_events).unwrap_or_err(); + let console_layer = tracing_subscriber::fmt::Layer::new() + .with_ansi(config.log_colors) + .with_span_events(console_span_events); let (console_reload_filter, console_reload_handle) = reload::Layer::new(console_filter.clone()); reload_handles.add("console", Box::new(console_reload_handle)); From 61174dd0d3632f551735bea9c8ea22c0bf218427 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 11 Nov 2024 21:27:40 +0000 Subject: [PATCH 0201/1248] check if lazyset already contains user prior to querying Signed-off-by: Jason Volk --- src/api/client/message.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index e8306de9..cc636511 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -192,6 +192,10 @@ pub(crate) async fn update_lazy( return lazy; } + if lazy.contains(event.sender()) { + return lazy; + } + if !services .rooms .lazy_loading From 396233304328c75d1271465f28f55e4121e956b4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 11 Nov 2024 05:00:29 +0000 Subject: [PATCH 0202/1248] partially revert e507c3130673099692143a59adc30a414ef6ca54 Signed-off-by: Jason Volk --- src/api/client/context.rs | 6 ++---- src/api/client/message.rs | 5 +---- src/api/client/relations.rs | 1 - src/api/client/threads.rs | 1 - src/api/server/backfill.rs | 2 +- src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/threads/mod.rs | 2 +- src/service/rooms/timeline/data.rs | 12 +++++++----- 8 files changed, 13 insertions(+), 18 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index f5f981ba..4359ae12 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -82,7 +82,7 @@ pub(crate) async fn 
get_context_route( let events_before: Vec<_> = services .rooms .timeline - .pdus_rev(Some(sender_user), room_id, Some(base_token.saturating_sub(1))) + .pdus_rev(Some(sender_user), room_id, Some(base_token)) .await? .ready_filter_map(|item| event_filter(item, filter)) .filter_map(|item| ignored_filter(&services, item, sender_user)) @@ -94,7 +94,7 @@ pub(crate) async fn get_context_route( let events_after: Vec<_> = services .rooms .timeline - .pdus(Some(sender_user), room_id, Some(base_token.saturating_add(1))) + .pdus(Some(sender_user), room_id, Some(base_token)) .await? .ready_filter_map(|item| event_filter(item, filter)) .filter_map(|item| ignored_filter(&services, item, sender_user)) @@ -169,14 +169,12 @@ pub(crate) async fn get_context_route( start: events_before .last() .map(at!(0)) - .map(|count| count.saturating_sub(1)) .as_ref() .map(ToString::to_string), end: events_after .last() .map(at!(0)) - .map(|count| count.saturating_add(1)) .as_ref() .map(ToString::to_string), diff --git a/src/api/client/message.rs b/src/api/client/message.rs index cc636511..88453de0 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -138,10 +138,7 @@ pub(crate) async fn get_message_events_route( let start_token = events.first().map(at!(0)).unwrap_or(from); - let next_token = events - .last() - .map(at!(0)) - .map(|count| count.saturating_inc(body.dir)); + let next_token = events.last().map(at!(0)); if !cfg!(feature = "element_hacks") { if let Some(next_token) = next_token { diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index ee62dbfc..902e6be6 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -150,7 +150,6 @@ async fn paginate_relations_with_filter( Direction::Backward => events.first(), } .map(at!(0)) - .map(|count| count.saturating_inc(dir)) .as_ref() .map(ToString::to_string); diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 8d4e399b..906f779d 100644 --- 
a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -46,7 +46,6 @@ pub(crate) async fn get_threads_route( .last() .filter(|_| threads.len() >= limit) .map(at!(0)) - .map(|count| count.saturating_sub(1)) .as_ref() .map(ToString::to_string), diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 2858d9fd..b0bd48e8 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -55,7 +55,7 @@ pub(crate) async fn get_backfill_route( pdus: services .rooms .timeline - .pdus_rev(None, &body.room_id, Some(from)) + .pdus_rev(None, &body.room_id, Some(from.saturating_add(1))) .await? .take(limit) .filter_map(|(_, pdu)| async move { diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index f3e1ced8..b06e988e 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -57,7 +57,7 @@ impl Data { ) -> impl Stream + Send + '_ { let mut current = ArrayVec::::new(); current.extend(target.to_be_bytes()); - current.extend(from.into_unsigned().to_be_bytes()); + current.extend(from.saturating_inc(dir).into_unsigned().to_be_bytes()); let current = current.as_slice(); match dir { Direction::Forward => self.tofrom_relation.raw_keys_from(current).boxed(), diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index fcc629e1..5821f279 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -132,7 +132,7 @@ impl Service { let current: RawPduId = PduId { shortroomid, - shorteventid, + shorteventid: shorteventid.saturating_sub(1), } .into(); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 7f1873ab..22a6c1d0 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -13,7 +13,7 @@ use conduit::{ }; use database::{Database, Deserialized, Json, KeyVal, Map}; use futures::{Stream, StreamExt}; -use ruma::{CanonicalJsonObject, EventId, 
OwnedRoomId, OwnedUserId, RoomId, UserId}; +use ruma::{api::Direction, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use tokio::sync::Mutex; use super::{PduId, RawPduId}; @@ -205,7 +205,9 @@ impl Data { pub(super) async fn pdus_rev<'a>( &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, until: PduCount, ) -> Result + Send + 'a> { - let current = self.count_to_id(room_id, until).await?; + let current = self + .count_to_id(room_id, until, Direction::Backward) + .await?; let prefix = current.shortroomid(); let stream = self .pduid_pdu @@ -220,7 +222,7 @@ impl Data { pub(super) async fn pdus<'a>( &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, from: PduCount, ) -> Result + Send + 'a> { - let current = self.count_to_id(room_id, from).await?; + let current = self.count_to_id(room_id, from, Direction::Forward).await?; let prefix = current.shortroomid(); let stream = self .pduid_pdu @@ -267,7 +269,7 @@ impl Data { } } - async fn count_to_id(&self, room_id: &RoomId, shorteventid: PduCount) -> Result { + async fn count_to_id(&self, room_id: &RoomId, shorteventid: PduCount, dir: Direction) -> Result { let shortroomid: ShortRoomId = self .services .short @@ -278,7 +280,7 @@ impl Data { // +1 so we don't send the base event let pdu_id = PduId { shortroomid, - shorteventid, + shorteventid: shorteventid.saturating_inc(dir), }; Ok(pdu_id.into()) From 999d731a65fe8f1313d6fe63d5139ee9f357a820 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 11 Nov 2024 22:18:14 +0000 Subject: [PATCH 0203/1248] move err macro visitor out-of-line; reduce codegen Signed-off-by: Jason Volk --- src/core/error/err.rs | 72 +++++++++++++++++++++++-------------------- src/core/error/mod.rs | 3 +- 2 files changed, 40 insertions(+), 35 deletions(-) diff --git a/src/core/error/err.rs b/src/core/error/err.rs index baeb992d..a24441e0 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -111,12 +111,8 @@ macro_rules! 
err { #[macro_export] macro_rules! err_log { ($out:ident, $level:ident, $($fields:tt)+) => {{ - use std::{fmt, fmt::Write}; - use $crate::tracing::{ - callsite, callsite2, level_enabled, metadata, valueset, Callsite, Event, __macro_support, - __tracing_log, - field::{Field, ValueSet, Visit}, + callsite, callsite2, metadata, valueset, Callsite, Level, }; @@ -134,34 +130,7 @@ macro_rules! err_log { fields: $($fields)+, }; - let visit = &mut |vs: ValueSet<'_>| { - struct Visitor<'a>(&'a mut String); - impl Visit for Visitor<'_> { - #[inline] - fn record_debug(&mut self, field: &Field, val: &dyn fmt::Debug) { - if field.name() == "message" { - write!(self.0, "{:?}", val).expect("stream error"); - } else { - write!(self.0, " {}={:?}", field.name(), val).expect("stream error"); - } - } - } - - let meta = __CALLSITE.metadata(); - let enabled = level_enabled!(LEVEL) && { - let interest = __CALLSITE.interest(); - !interest.is_never() && __macro_support::__is_enabled(meta, interest) - }; - - if enabled { - Event::dispatch(meta, &vs); - } - - __tracing_log!(LEVEL, __CALLSITE, &vs); - vs.record(&mut Visitor(&mut $out)); - }; - - (visit)(valueset!(__CALLSITE.metadata().fields(), $($fields)+)); + ($crate::error::visit)(&mut $out, LEVEL, &__CALLSITE, &mut valueset!(__CALLSITE.metadata().fields(), $($fields)+)); ($out).into() }} } @@ -192,3 +161,40 @@ macro_rules! 
err_lev { $crate::tracing::Level::ERROR }; } + +use std::{fmt, fmt::Write}; + +use tracing::{ + level_enabled, Callsite, Event, __macro_support, __tracing_log, + callsite::DefaultCallsite, + field::{Field, ValueSet, Visit}, + Level, +}; + +struct Visitor<'a>(&'a mut String); + +impl Visit for Visitor<'_> { + #[inline] + fn record_debug(&mut self, field: &Field, val: &dyn fmt::Debug) { + if field.name() == "message" { + write!(self.0, "{val:?}").expect("stream error"); + } else { + write!(self.0, " {}={val:?}", field.name()).expect("stream error"); + } + } +} + +pub fn visit(out: &mut String, level: Level, __callsite: &'static DefaultCallsite, vs: &mut ValueSet<'_>) { + let meta = __callsite.metadata(); + let enabled = level_enabled!(level) && { + let interest = __callsite.interest(); + !interest.is_never() && __macro_support::__is_enabled(meta, interest) + }; + + if enabled { + Event::dispatch(meta, vs); + } + + __tracing_log!(level, __callsite, vs); + vs.record(&mut Visitor(out)); +} diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 302d0f87..35bf9800 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -6,8 +6,7 @@ mod serde; use std::{any::Any, borrow::Cow, convert::Infallible, fmt, sync::PoisonError}; -pub use self::log::*; -use crate::error; +pub use self::{err::visit, log::*}; #[derive(thiserror::Error)] pub enum Error { From 86694f2d1d55605af2058b5347c71ebf977c5daf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 12 Nov 2024 08:01:23 +0000 Subject: [PATCH 0204/1248] move non-generic code out of generic; reduce codegen Signed-off-by: Jason Volk --- src/api/router/args.rs | 77 +++++++++++++++--------------- src/service/sending/send.rs | 93 ++++++++++++++++++++----------------- 2 files changed, 90 insertions(+), 80 deletions(-) diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 4c0aff4c..0b693956 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -66,6 +66,15 @@ where } } +impl Deref for Args 
+where + T: IncomingRequest + Send + Sync + 'static, +{ + type Target = T; + + fn deref(&self) -> &Self::Target { &self.body } +} + #[async_trait] impl FromRequest for Args where @@ -78,7 +87,7 @@ where let mut json_body = serde_json::from_slice::(&request.body).ok(); let auth = auth::auth(services, &mut request, json_body.as_ref(), &T::METADATA).await?; Ok(Self { - body: make_body::(services, &mut request, &mut json_body, &auth)?, + body: make_body::(services, &mut request, json_body.as_mut(), &auth)?, origin: auth.origin, sender_user: auth.sender_user, sender_device: auth.sender_device, @@ -88,20 +97,11 @@ where } } -impl Deref for Args -where - T: IncomingRequest + Send + Sync + 'static, -{ - type Target = T; - - fn deref(&self) -> &Self::Target { &self.body } -} - fn make_body( - services: &Services, request: &mut Request, json_body: &mut Option, auth: &Auth, + services: &Services, request: &mut Request, json_body: Option<&mut CanonicalJsonValue>, auth: &Auth, ) -> Result where - T: IncomingRequest + Send + Sync + 'static, + T: IncomingRequest, { let body = take_body(services, request, json_body, auth); let http_request = into_http_request(request, body); @@ -125,36 +125,37 @@ fn into_http_request(request: &Request, body: Bytes) -> hyper::Request { http_request } +#[allow(clippy::needless_pass_by_value)] fn take_body( - services: &Services, request: &mut Request, json_body: &mut Option, auth: &Auth, + services: &Services, request: &mut Request, json_body: Option<&mut CanonicalJsonValue>, auth: &Auth, ) -> Bytes { - if let Some(CanonicalJsonValue::Object(json_body)) = json_body { - let user_id = auth.sender_user.clone().unwrap_or_else(|| { - let server_name = services.globals.server_name(); - UserId::parse_with_server_name(EMPTY, server_name).expect("valid user_id") + let Some(CanonicalJsonValue::Object(json_body)) = json_body else { + return mem::take(&mut request.body); + }; + + let user_id = auth.sender_user.clone().unwrap_or_else(|| { + let server_name = 
services.globals.server_name(); + UserId::parse_with_server_name(EMPTY, server_name).expect("valid user_id") + }); + + let uiaa_request = json_body + .get("auth") + .and_then(CanonicalJsonValue::as_object) + .and_then(|auth| auth.get("session")) + .and_then(CanonicalJsonValue::as_str) + .and_then(|session| { + services + .uiaa + .get_uiaa_request(&user_id, auth.sender_device.as_deref(), session) }); - let uiaa_request = json_body - .get("auth") - .and_then(CanonicalJsonValue::as_object) - .and_then(|auth| auth.get("session")) - .and_then(CanonicalJsonValue::as_str) - .and_then(|session| { - services - .uiaa - .get_uiaa_request(&user_id, auth.sender_device.as_deref(), session) - }); - - if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { - for (key, value) in initial_request { - json_body.entry(key).or_insert(value); - } + if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { + for (key, value) in initial_request { + json_body.entry(key).or_insert(value); } - - let mut buf = BytesMut::new().writer(); - serde_json::to_writer(&mut buf, &json_body).expect("value serialization can't fail"); - buf.into_inner().freeze() - } else { - mem::take(&mut request.body) } + + let mut buf = BytesMut::new().writer(); + serde_json::to_writer(&mut buf, &json_body).expect("value serialization can't fail"); + buf.into_inner().freeze() } diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 939d6e73..5bf48aaa 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -1,5 +1,6 @@ -use std::{fmt::Debug, mem}; +use std::mem; +use bytes::Bytes; use conduit::{ debug, debug_error, debug_warn, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, Err, Error, Result, @@ -23,10 +24,10 @@ use crate::{ }; impl super::Service { - #[tracing::instrument(skip(self, client, req), name = "send")] - pub async fn send(&self, client: &Client, dest: &ServerName, req: T) -> Result + 
#[tracing::instrument(skip(self, client, request), name = "send")] + pub async fn send(&self, client: &Client, dest: &ServerName, request: T) -> Result where - T: OutgoingRequest + Debug + Send, + T: OutgoingRequest + Send, { if !self.server.config.allow_federation { return Err!(Config("allow_federation", "Federation is disabled.")); @@ -42,7 +43,8 @@ impl super::Service { } let actual = self.services.resolver.get_actual_dest(dest).await?; - let request = self.prepare::(dest, &actual, req).await?; + let request = into_http_request::(&actual, request)?; + let request = self.prepare(dest, request)?; self.execute::(dest, &actual, request, client).await } @@ -50,7 +52,7 @@ impl super::Service { &self, dest: &ServerName, actual: &ActualDest, request: Request, client: &Client, ) -> Result where - T: OutgoingRequest + Debug + Send, + T: OutgoingRequest + Send, { let url = request.url().clone(); let method = request.method().clone(); @@ -58,25 +60,14 @@ impl super::Service { debug!(?method, ?url, "Sending request"); match client.execute(request).await { Ok(response) => handle_response::(&self.services.resolver, dest, actual, &method, &url, response).await, - Err(error) => handle_error::(dest, actual, &method, &url, error), + Err(error) => Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), } } - async fn prepare(&self, dest: &ServerName, actual: &ActualDest, req: T) -> Result - where - T: OutgoingRequest + Debug + Send, - { - const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_11]; - const SATIR: SendAccessToken<'_> = SendAccessToken::IfRequired(EMPTY); + fn prepare(&self, dest: &ServerName, mut request: http::Request>) -> Result { + self.sign_request(&mut request, dest); - trace!("Preparing request"); - let mut http_request = req - .try_into_http_request::>(actual.string().as_str(), SATIR, &VERSIONS) - .map_err(|e| err!(BadServerResponse("Invalid destination: {e:?}")))?; - - self.sign_request(&mut http_request, dest); - - let request = 
Request::try_from(http_request)?; + let request = Request::try_from(request)?; self.validate_url(request.url())?; Ok(request) @@ -96,11 +87,31 @@ impl super::Service { async fn handle_response( resolver: &resolver::Service, dest: &ServerName, actual: &ActualDest, method: &Method, url: &Url, - mut response: Response, + response: Response, ) -> Result where - T: OutgoingRequest + Debug + Send, + T: OutgoingRequest + Send, { + let response = into_http_response(dest, actual, method, url, response).await?; + let result = T::IncomingResponse::try_from_http_response(response); + + if result.is_ok() && !actual.cached { + resolver.set_cached_destination( + dest.to_owned(), + CachedDest { + dest: actual.dest.clone(), + host: actual.host.clone(), + expire: CachedDest::default_expire(), + }, + ); + } + + result.map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}"))) +} + +async fn into_http_response( + dest: &ServerName, actual: &ActualDest, method: &Method, url: &Url, mut response: Response, +) -> Result> { let status = response.status(); trace!( ?status, ?method, @@ -113,6 +124,7 @@ where let mut http_response_builder = http::Response::builder() .status(status) .version(response.version()); + mem::swap( response.headers_mut(), http_response_builder @@ -137,27 +149,10 @@ where return Err(Error::Federation(dest.to_owned(), RumaError::from_http_response(http_response))); } - let response = T::IncomingResponse::try_from_http_response(http_response); - if response.is_ok() && !actual.cached { - resolver.set_cached_destination( - dest.to_owned(), - CachedDest { - dest: actual.dest.clone(), - host: actual.host.clone(), - expire: CachedDest::default_expire(), - }, - ); - } - - response.map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}"))) + Ok(http_response) } -fn handle_error( - _dest: &ServerName, actual: &ActualDest, method: &Method, url: &Url, mut e: reqwest::Error, -) -> Result -where - T: OutgoingRequest + Debug + Send, -{ +fn 
handle_error(actual: &ActualDest, method: &Method, url: &Url, mut e: reqwest::Error) -> Result { if e.is_timeout() || e.is_connect() { e = e.without_url(); debug_warn!("{e:?}"); @@ -246,3 +241,17 @@ fn sign_request(&self, http_request: &mut http::Request>, dest: &ServerN debug_assert!(authorization.is_none(), "Authorization header already present"); } + +fn into_http_request(actual: &ActualDest, request: T) -> Result>> +where + T: OutgoingRequest + Send, +{ + const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_11]; + const SATIR: SendAccessToken<'_> = SendAccessToken::IfRequired(EMPTY); + + let http_request = request + .try_into_http_request::>(actual.string().as_str(), SATIR, &VERSIONS) + .map_err(|e| err!(BadServerResponse("Invalid destination: {e:?}")))?; + + Ok(http_request) +} From c59f474aff0dbd96d2096d6d163629a7ecf460b5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Tue, 12 Nov 2024 05:01:11 +0000 Subject: [PATCH 0205/1248] fixes for gh workflow Signed-off-by: Jason Volk --- .github/workflows/ci.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f59c5048..2d253f69 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -76,11 +76,10 @@ jobs: # large docker images sudo docker image prune --all --force || true # large packages - sudo apt-get purge -y '^llvm-.*' 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt-get autoremove -y + sudo apt-get purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true sudo apt-get clean # large folders - sudo rm -rf /var/lib/apt/lists/* /usr/local/games /usr/local/sqlpackage /usr/local/.ghcup /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 
/usr/local/share/vcpkg /usr/local/lib/python* /usr/local/lib/node_modules /usr/local/julia* /opt/mssql-tools /etc/skel /usr/share/vim /usr/share/postgresql /usr/share/man /usr/share/apache-maven-* /usr/share/R /usr/share/alsa /usr/share/miniconda /usr/share/grub /usr/share/gradle-* /usr/share/locale /usr/share/texinfo /usr/share/kotlinc /usr/share/swift /usr/share/doc /usr/share/az_9.3.0 /usr/share/sbt /usr/share/ri /usr/share/icons /usr/share/java /usr/share/fonts /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/R /usr/lib/postgresql /usr/lib/heroku /usr/lib/gcc + sudo rm -rf /var/lib/apt/lists/* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/local/share/vcpkg /usr/local/julia* /opt/mssql-tools /usr/share/vim /usr/share/postgresql /usr/share/apache-maven-* /usr/share/R /usr/share/alsa /usr/share/miniconda /usr/share/grub /usr/share/gradle-* /usr/share/locale /usr/share/texinfo /usr/share/kotlinc /usr/share/swift /usr/share/sbt /usr/share/ri /usr/share/icons /usr/share/java /usr/share/fonts /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/R /usr/lib/postgresql /usr/lib/heroku set -o pipefail - name: Sync repository From feefa43e65e56f6d23fa96981128841fef609414 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 12 Nov 2024 22:01:32 +0000 Subject: [PATCH 0206/1248] add pretty/si-unit byte size parsing/printing utils Signed-off-by: Jason Volk --- Cargo.lock | 7 +++++++ Cargo.toml | 3 +++ src/core/Cargo.toml | 1 + src/core/utils/bytes.rs | 30 +++++++++++++++++++++++++++++- 4 files changed, 40 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index a1654ff9..51571264 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -458,6 +458,12 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +[[package]] +name = "bytesize" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" + [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" @@ -683,6 +689,7 @@ dependencies = [ "arrayvec", "axum", "bytes", + "bytesize", "cargo_toml", "checked_ops", "chrono", diff --git a/Cargo.toml b/Cargo.toml index 5ea6b4e0..0173e7cf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -466,6 +466,9 @@ version = "1.0.36" [workspace.dependencies.proc-macro2] version = "1.0.89" +[workspace.dependencies.bytesize] +version = "1.3.0" + # # Patches # diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 4fe413e9..b93f9a77 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -57,6 +57,7 @@ argon2.workspace = true arrayvec.workspace = true axum.workspace = true bytes.workspace = true +bytesize.workspace = true cargo_toml.workspace = true checked_ops.workspace = true chrono.workspace = true diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index e8975a49..441ba422 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -1,4 +1,32 @@ -use crate::Result; +use bytesize::ByteSize; + +use crate::{err, Result}; + +/// Parse a human-writable size string w/ si-unit suffix into integer +#[inline] +pub fn from_str(str: &str) -> Result { + let bytes: ByteSize = str + .parse() + .map_err(|e| err!(Arithmetic("Failed to parse byte size: {e}")))?; + + let bytes: usize = bytes + .as_u64() + .try_into() + .map_err(|e| err!(Arithmetic("Failed to convert u64 to usize: {e}")))?; + + Ok(bytes) +} + +/// Output a human-readable size string w/ si-unit suffix +#[inline] +#[must_use] +pub fn pretty(bytes: usize) -> String { + const SI_UNITS: bool = true; + + let bytes: u64 = bytes.try_into().expect("failed to convert usize to u64"); + + bytesize::to_string(bytes, SI_UNITS) +} #[inline] #[must_use] From 
68582dd868032944a794f4eb7bfa2e71d29891f5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 13 Nov 2024 00:59:53 +0000 Subject: [PATCH 0207/1248] add parallel query for current membership state Signed-off-by: Jason Volk --- src/service/rooms/state_cache/mod.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 11684eab..6e330fdc 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -10,7 +10,7 @@ use conduit::{ warn, Result, }; use database::{serialize_to_vec, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{stream::iter, Stream, StreamExt}; +use futures::{future::join4, stream::iter, Stream, StreamExt}; use itertools::Itertools; use ruma::{ events::{ @@ -566,6 +566,24 @@ impl Service { self.db.userroomid_leftstate.qry(&key).await.is_ok() } + pub async fn user_membership(&self, user_id: &UserId, room_id: &RoomId) -> Option { + let states = join4( + self.is_joined(user_id, room_id), + self.is_left(user_id, room_id), + self.is_invited(user_id, room_id), + self.once_joined(user_id, room_id), + ) + .await; + + match states { + (true, ..) => Some(MembershipState::Join), + (_, true, ..) => Some(MembershipState::Leave), + (_, _, true, ..) 
=> Some(MembershipState::Invite), + (false, false, false, true) => Some(MembershipState::Ban), + _ => None, + } + } + #[tracing::instrument(skip(self), level = "debug")] pub fn servers_invite_via<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { type KeyVal<'a> = (Ignore, Vec<&'a ServerName>); From 77fab2c323b65d7d97e78dcbee946e7860cf3d1d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 13 Nov 2024 01:01:00 +0000 Subject: [PATCH 0208/1248] use ruma visibility enum in directory interface Signed-off-by: Jason Volk --- src/service/rooms/directory/mod.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index f366ffe2..63ed3519 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use conduit::{implement, utils::stream::TryIgnore, Result}; use database::Map; use futures::Stream; -use ruma::RoomId; +use ruma::{api::client::room::Visibility, RoomId}; pub struct Service { db: Data, @@ -32,7 +32,16 @@ pub fn set_public(&self, room_id: &RoomId) { self.db.publicroomids.insert(room_i pub fn set_not_public(&self, room_id: &RoomId) { self.db.publicroomids.remove(room_id); } #[implement(Service)] -pub async fn is_public_room(&self, room_id: &RoomId) -> bool { self.db.publicroomids.get(room_id).await.is_ok() } +pub fn public_rooms(&self) -> impl Stream + Send { self.db.publicroomids.keys().ignore_err() } #[implement(Service)] -pub fn public_rooms(&self) -> impl Stream + Send { self.db.publicroomids.keys().ignore_err() } +pub async fn is_public_room(&self, room_id: &RoomId) -> bool { self.visibility(room_id).await == Visibility::Public } + +#[implement(Service)] +pub async fn visibility(&self, room_id: &RoomId) -> Visibility { + if self.db.publicroomids.get(room_id).await.is_ok() { + Visibility::Public + } else { + Visibility::Private + } +} From 004be3bf00f3d0aa22bb07e03bd6af146ad67c7b Mon 
Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 13 Nov 2024 05:28:15 +0000 Subject: [PATCH 0209/1248] prepare utf-8 check bypass for database deserializer Signed-off-by: Jason Volk --- src/database/de.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/database/de.rs b/src/database/de.rs index d7dc1102..f8a038ef 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -277,7 +277,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { fn deserialize_str>(self, visitor: V) -> Result { let input = self.record_next(); - let out = string::str_from_bytes(input)?; + let out = deserialize_str(input)?; visitor.visit_borrowed_str(out) } @@ -360,3 +360,18 @@ impl<'a, 'de: 'a> de::MapAccess<'de> for &'a mut Deserializer<'de> { seed.deserialize(&mut **self) } } + +// activate when stable; too soon now +//#[cfg(debug_assertions)] +#[inline] +fn deserialize_str(input: &[u8]) -> Result<&str> { string::str_from_bytes(input) } + +//#[cfg(not(debug_assertions))] +#[cfg(disable)] +#[inline] +fn deserialize_str(input: &[u8]) -> Result<&str> { + // SAFETY: Strings were written by the serializer to the database. Assuming no + // database corruption, the string will be valid. Database corruption is + // detected via rocksdb checksums. 
+ unsafe { std::str::from_utf8_unchecked(input) } +} From 6ffdc1b2a654b2225b7ee6563e1defbf5019d32d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 13 Nov 2024 22:01:46 +0000 Subject: [PATCH 0210/1248] bump serde, image, loole, termimad etc Signed-off-by: Jason Volk --- Cargo.lock | 124 +++++++++++++++++++++++++++++------------------------ Cargo.toml | 8 ++-- 2 files changed, 71 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 51571264..0e1845da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -487,9 +487,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.37" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" +checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8" dependencies = [ "jobserver", "libc", @@ -548,9 +548,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -558,9 +558,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstyle", "clap_lex", @@ -580,9 +580,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "cmake" @@ -720,7 
+720,7 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - "thiserror 1.0.68", + "thiserror 1.0.69", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -913,9 +913,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" dependencies = [ "libc", ] @@ -1294,7 +1294,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" dependencies = [ "nonempty", - "thiserror 1.0.68", + "thiserror 1.0.69", ] [[package]] @@ -1555,7 +1555,7 @@ dependencies = [ "ipnet", "once_cell", "rand", - "thiserror 1.0.68", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -1578,7 +1578,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -2205,9 +2205,13 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "loole" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad95468e4700cb37d8d1f198050db18cebe55e4b4c8aa9180a715deedb2f8965" +checksum = "a2998397c725c822c6b2ba605fd9eb4c6a7a0810f1629ba3cc232ef4f0308d96" +dependencies = [ + "futures-core", + "futures-sink", +] [[package]] name = "lru-cache" @@ -2509,7 +2513,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", - "thiserror 1.0.68", + "thiserror 1.0.69", "urlencoding", ] @@ -2555,7 +2559,7 @@ dependencies = [ "ordered-float 4.5.0", "percent-encoding", "rand", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tokio-stream", ] @@ -2918,9 +2922,9 @@ checksum = 
"a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] name = "quinn" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "pin-project-lite", @@ -2929,26 +2933,29 @@ dependencies = [ "rustc-hash 2.0.0", "rustls 0.23.16", "socket2", - "thiserror 1.0.68", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom", "rand", "ring", "rustc-hash 2.0.0", "rustls 0.23.16", + "rustls-pki-types", "slab", - "thiserror 1.0.68", + "thiserror 2.0.3", "tinyvec", "tracing", + "web-time 1.1.0", ] [[package]] @@ -3021,7 +3028,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -3036,9 +3043,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -3183,7 +3190,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "url", "web-time 1.1.0", ] @@ -3209,7 +3216,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "time", "tracing", "url", @@ -3235,7 +3242,7 @@ 
dependencies = [ "ruma-macros", "serde", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "tracing", "url", "web-time 1.1.0", @@ -3266,7 +3273,7 @@ version = "0.9.5" source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" dependencies = [ "js_int", - "thiserror 2.0.1", + "thiserror 2.0.3", ] [[package]] @@ -3316,7 +3323,7 @@ dependencies = [ "http", "http-auth", "ruma-common", - "thiserror 2.0.1", + "thiserror 2.0.3", "tracing", ] @@ -3333,7 +3340,7 @@ dependencies = [ "serde_json", "sha2", "subslice", - "thiserror 2.0.1", + "thiserror 2.0.3", ] [[package]] @@ -3348,7 +3355,7 @@ dependencies = [ "ruma-events", "serde", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "tracing", ] @@ -3415,9 +3422,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.39" +version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" +checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags 2.6.0", "errno", @@ -3483,6 +3490,9 @@ name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time 1.1.0", +] [[package]] name = "rustls-webpki" @@ -3512,7 +3522,7 @@ dependencies = [ "futures-util", "pin-project", "thingbuf", - "thiserror 1.0.68", + "thiserror 1.0.69", "unicode-segmentation", "unicode-width", ] @@ -3712,7 +3722,7 @@ dependencies = [ "rand", "serde", "serde_json", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "url", "uuid", @@ -3720,18 +3730,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", @@ -3918,7 +3928,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", ] @@ -4083,9 +4093,9 @@ dependencies = [ [[package]] name = "termimad" -version = "0.30.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22117210909e9dfff30a558f554c7fb3edb198ef614e7691386785fb7679677c" +checksum = "9cda3a7471f9978706978454c45ef8dda67e9f8f3cdb9319eb2e9323deb6ae62" dependencies = [ "coolor", "crokey", @@ -4093,7 +4103,7 @@ dependencies = [ "lazy-regex", "minimad", "serde", - "thiserror 1.0.68", + "thiserror 1.0.69", "unicode-width", ] @@ -4109,27 +4119,27 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl 1.0.68", + "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" -version = "2.0.1" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c1e40dd48a282ae8edc36c732cbc219144b87fb6a4c7316d611c6b1f06ec0c" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" dependencies = [ - "thiserror-impl 2.0.1", + "thiserror-impl 2.0.3", ] 
[[package]] name = "thiserror-impl" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", @@ -4138,9 +4148,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.1" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874aa7e446f1da8d9c3a5c95b1c5eb41d800045252121dc7f8e0ba370cee55f5" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", @@ -4323,7 +4333,7 @@ checksum = "0d4770b8024672c1101b3f6733eab95b18007dbe0847a8afe341fcf79e06043f" dependencies = [ "either", "futures-util", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 0173e7cf..dde005a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -142,7 +142,7 @@ features = [ ] [workspace.dependencies.serde] -version = "1.0.214" +version = "1.0.215" default-features = false features = ["rc"] @@ -171,7 +171,7 @@ default-features = false # Used to generate thumbnails for images [workspace.dependencies.image] -version = "0.25.1" +version = "0.25.5" default-features = false features = [ "jpeg", @@ -304,7 +304,7 @@ version = "2.1.1" # used to replace the channels of the tokio runtime [workspace.dependencies.loole] -version = "0.3.1" +version = "0.4.0" [workspace.dependencies.async-trait] version = "0.1.81" @@ -449,7 +449,7 @@ version = "0.4.3" default-features = false [workspace.dependencies.termimad] -version = "0.30.1" +version = "0.31.0" default-features = false [workspace.dependencies.checked_ops] From e228dec4f2d5abe02624f3d1a7cf572aab645e90 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 13 Nov 2024 01:01:33 +0000 Subject: [PATCH 0211/1248] add byte counting for compressed state caches Signed-off-by: 
Jason Volk --- src/service/rooms/state_compressor/mod.rs | 48 +++++++++++++++++++---- 1 file changed, 40 insertions(+), 8 deletions(-) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index bf90d5c4..6b520ad3 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,18 +1,22 @@ use std::{ - collections::HashSet, + collections::{HashMap, HashSet}, fmt::Write, mem::size_of, sync::{Arc, Mutex}, }; -use conduit::{checked, err, expected, utils, utils::math::usize_from_f64, Result}; +use conduit::{ + at, checked, err, expected, utils, + utils::{bytes, math::usize_from_f64}, + Result, +}; use database::Map; use lru_cache::LruCache; use ruma::{EventId, RoomId}; use crate::{ rooms, - rooms::short::{ShortStateHash, ShortStateKey}, + rooms::short::{ShortId, ShortStateHash, ShortStateKey}, Dep, }; @@ -53,12 +57,13 @@ pub struct HashSetCompressStateEvent { pub removed: Arc, } -pub(crate) type CompressedState = HashSet; -pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; type StateInfoLruCache = LruCache; type ShortStateInfoVec = Vec; type ParentStatesVec = Vec; +pub(crate) type CompressedState = HashSet; +pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -75,9 +80,28 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { - let stateinfo_cache = self.stateinfo_cache.lock().expect("locked").len(); - writeln!(out, "stateinfo_cache: {stateinfo_cache}")?; + fn memory_usage(&self, out: &mut dyn Write) -> Result { + let (cache_len, ents) = { + let cache = self.stateinfo_cache.lock().expect("locked"); + let ents = cache + .iter() + .map(at!(1)) + .flat_map(|vec| vec.iter()) + .fold(HashMap::new(), |mut ents, ssi| { + ents.insert(Arc::as_ptr(&ssi.added), compressed_state_size(&ssi.added)); + 
ents.insert(Arc::as_ptr(&ssi.removed), compressed_state_size(&ssi.removed)); + ents.insert(Arc::as_ptr(&ssi.full_state), compressed_state_size(&ssi.full_state)); + ents + }); + + (cache.len(), ents) + }; + + let ents_len = ents.len(); + let bytes = ents.values().copied().fold(0_usize, usize::saturating_add); + + let bytes = bytes::pretty(bytes); + writeln!(out, "stateinfo_cache: {cache_len} {ents_len} ({bytes})")?; Ok(()) } @@ -435,3 +459,11 @@ impl Service { .insert(&shortstatehash.to_be_bytes(), &value); } } + +#[inline] +fn compressed_state_size(compressed_state: &CompressedState) -> usize { + compressed_state + .len() + .checked_mul(size_of::()) + .expect("CompressedState size overflow") +} From 4ec5d1e28e6cfff3d98c36c2b02aece196ee93c0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 14 Nov 2024 04:31:29 +0000 Subject: [PATCH 0212/1248] replace additional use tracing:: add log:: to disallowed-macros Signed-off-by: Jason Volk --- clippy.toml | 8 ++++++++ src/api/client/report.rs | 3 +-- src/api/server/make_join.rs | 6 ++++-- src/core/debug.rs | 2 ++ src/core/log/mod.rs | 2 ++ src/core/utils/sys.rs | 4 +--- src/router/serve/plain.rs | 3 +-- 7 files changed, 19 insertions(+), 9 deletions(-) diff --git a/clippy.toml b/clippy.toml index 08641fcc..b93b2377 100644 --- a/clippy.toml +++ b/clippy.toml @@ -5,3 +5,11 @@ future-size-threshold = 7745 # TODO reduce me ALARA stack-size-threshold = 196608 # reduce me ALARA too-many-lines-threshold = 700 # TODO reduce me to <= 100 type-complexity-threshold = 250 # reduce me to ~200 + +disallowed-macros = [ + { path = "log::error", reason = "use conduit_core::error" }, + { path = "log::warn", reason = "use conduit_core::warn" }, + { path = "log::info", reason = "use conduit_core::info" }, + { path = "log::debug", reason = "use conduit_core::debug" }, + { path = "log::trace", reason = "use conduit_core::trace" }, +] diff --git a/src/api/client/report.rs b/src/api/client/report.rs index e20fa8c2..a0133704 100644 --- 
a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,7 +2,7 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{utils::ReadyExt, Err}; +use conduit::{info, utils::ReadyExt, Err}; use rand::Rng; use ruma::{ api::client::{ @@ -13,7 +13,6 @@ use ruma::{ int, EventId, RoomId, UserId, }; use tokio::time::sleep; -use tracing::info; use crate::{ debug_info, diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index c3524f0e..af570064 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,5 +1,8 @@ use axum::extract::State; -use conduit::utils::{IterStream, ReadyExt}; +use conduit::{ + utils::{IterStream, ReadyExt}, + warn, +}; use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::membership::prepare_join_event}, @@ -13,7 +16,6 @@ use ruma::{ CanonicalJsonObject, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; -use tracing::warn; use crate::{ service::{pdu::PduBuilder, Services}, diff --git a/src/core/debug.rs b/src/core/debug.rs index 85574a2f..f7420784 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -1,3 +1,5 @@ +#![allow(clippy::disallowed_macros)] + use std::{any::Any, panic}; // Export debug proc_macros diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 1c415c6a..48b7f0f3 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -1,3 +1,5 @@ +#![allow(clippy::disallowed_macros)] + pub mod capture; pub mod color; pub mod fmt; diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index 6c396921..af8bd70b 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -1,6 +1,4 @@ -use tracing::debug; - -use crate::Result; +use crate::{debug, Result}; /// This is needed for opening lots of file descriptors, which tends to /// happen more often when using RocksDB and making lots of federation diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 08263353..144bff85 
100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -5,9 +5,8 @@ use std::{ use axum::Router; use axum_server::{bind, Handle as ServerHandle}; -use conduit::{debug_info, Result, Server}; +use conduit::{debug_info, info, Result, Server}; use tokio::task::JoinSet; -use tracing::info; pub(super) async fn serve( server: &Arc, app: Router, handle: ServerHandle, addrs: Vec, From 08365bf5f440a4c9f086142c23044f1884c68033 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 10 Nov 2024 20:16:38 -0500 Subject: [PATCH 0213/1248] update config documentation, commit generated example config also removes the no-op/useless "database_backend" config option Signed-off-by: strawberry --- conduwuit-example.toml | 1923 +++++++++++++++++++++------------- src/api/server/make_knock.rs | 107 ++ src/api/server/send_knock.rs | 190 ++++ src/core/config/mod.rs | 573 ++++++---- src/service/migrations.rs | 11 +- 5 files changed, 1846 insertions(+), 958 deletions(-) create mode 100644 src/api/server/make_knock.rs create mode 100644 src/api/server/send_knock.rs diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 11735616..aa0d1e5d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1,605 +1,867 @@ -# ============================================================================= -# This is the official example config for conduwuit. -# If you use it for your server, you will need to adjust it to your own needs. -# At the very least, change the server_name field! -# -# This documentation can also be found at https://conduwuit.puppyirl.gay/configuration.html -# ============================================================================= +### conduwuit Configuration +### +### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL +### BE OVERWRITTEN! +### +### You should rename this file before configuring your server. Changes +### to documentation and defaults can be contributed in source code at +### src/core/config/mod.rs. 
This file is generated when building. +### +### Any values pre-populated are the default values for said config option. +### +### At the minimum, you MUST edit all the config options to your environment +### that say "YOU NEED TO EDIT THIS". +### See https://conduwuit.puppyirl.gay/configuration.html for ways to +### configure conduwuit [global] -# The server_name is the pretty name of this server. It is used as a suffix for user -# and room ids. Examples: matrix.org, conduit.rs - -# The Conduit server needs all /_matrix/ requests to be reachable at -# https://your.server.name/ on port 443 (client-server) and 8448 (federation). - -# If that's not possible for you, you can create /.well-known files to redirect -# requests (delegation). See -# https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient -# and -# https://spec.matrix.org/v1.9/server-server-api/#getwell-knownmatrixserver -# for more information - -# YOU NEED TO EDIT THIS -#server_name = "your.server.name" - -# Servers listed here will be used to gather public keys of other servers (notary trusted key servers). +# The server_name is the pretty name of this server. It is used as a +# suffix for user and room IDs/aliases. # -# The default behaviour for conduwuit is to attempt to query trusted key servers before querying the individual servers. -# This is done for performance reasons, but if you would like to query individual servers before the notary servers -# configured below, set to +# See the docs for reverse proxying and delegation: https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy +# Also see the `[global.well_known]` config section at the very bottom. # -# (Currently, conduwuit doesn't support batched key requests, so this list should only contain Synapse servers) -# Defaults to `matrix.org` -# trusted_servers = ["matrix.org"] - -# Sentry.io crash/panic reporting, performance monitoring/metrics, etc. This is NOT enabled by default. 
-# conduwuit's default Sentry reporting endpoint is o4506996327251968.ingest.us.sentry.io +# Examples of delegation: +# - https://puppygock.gay/.well-known/matrix/server +# - https://puppygock.gay/.well-known/matrix/client # -# Defaults to *false* -#sentry = false - -# Sentry reporting URL if a custom one is desired +# YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE +# WIPE. # -# Defaults to conduwuit's default Sentry endpoint: "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" -#sentry_endpoint = "" - -# Report your Conduwuit server_name in Sentry.io crash reports and metrics +# example: "conduwuit.woof" # -# Defaults to false -#sentry_send_server_name = false +#server_name = -# Performance monitoring/tracing sample rate for Sentry.io +# default address (IPv4 or IPv6) conduwuit will listen on. # -# Note that too high values may impact performance, and can be disabled by setting it to 0.0 (0%) -# This value is read as a percentage to Sentry, represented as a decimal +# If you are using Docker or a container NAT networking setup, this must +# be "0.0.0.0". # -# Defaults to 15% of traces (0.15) -#sentry_traces_sample_rate = 0.15 +# To listen on multiple addresses, specify a vector e.g. ["127.0.0.1", +# "::1"] +# +#address = ["127.0.0.1", "::1"] -# Whether to attach a stacktrace to Sentry reports. -#sentry_attach_stacktrace = false - -# Send panics to sentry. This is true by default, but sentry has to be enabled. -#sentry_send_panic = true - -# Send errors to sentry. This is true by default, but sentry has to be enabled. This option is -# only effective in release-mode; forced to false in debug-mode. -#sentry_send_error = true - -# Controls the tracing log level for Sentry to send things like breadcrumbs and transactions -# Defaults to "info" -#sentry_filter = "info" - - -### Database configuration - -# This is the only directory where conduwuit will save its data, including media. 
-# Note: this was previously "/var/lib/matrix-conduit" -database_path = "/var/lib/conduwuit" - -# Database backend: Only rocksdb is supported. -database_backend = "rocksdb" - - -### Network - -# The port(s) conduwuit will be running on. You need to set up a reverse proxy such as -# Caddy or Nginx so all requests to /_matrix on port 443 and 8448 will be -# forwarded to the conduwuit instance running on this port -# Docker users: Don't change this, you'll need to map an external port to this. +# The port(s) conduwuit will be running on. +# +# See https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy for reverse proxying. +# +# Docker users: Don't change this, you'll need to map an external port to +# this. +# # To listen on multiple ports, specify a vector e.g. [8080, 8448] # -# default if unspecified is 8008 -port = 6167 +#port = 8008 -# default address (IPv4 or IPv6) conduwuit will listen on. Generally you want this to be -# localhost (127.0.0.1 / ::1). If you are using Docker or a container NAT networking setup, you -# likely need this to be 0.0.0.0. -# To listen multiple addresses, specify a vector e.g. ["127.0.0.1", "::1"] +# Uncomment unix_socket_path to listen on a UNIX socket at the specified +# path. If listening on a UNIX socket, you MUST remove/comment the +# 'address' key if definedm AND add your reverse proxy to the 'conduwuit' +# group, unless world RW permissions are specified with unix_socket_perms +# (666 minimum). # -# default if unspecified is both IPv4 and IPv6 localhost: ["127.0.0.1", "::1"] -address = "127.0.0.1" +# example: "/run/conduwuit/conduwuit.sock" +# +#unix_socket_path = -# Max request size for file uploads -max_request_size = 20_000_000 # in bytes - -# Uncomment unix_socket_path to listen on a UNIX socket at the specified path. 
-# If listening on a UNIX socket, you must remove/comment the 'address' key if defined and add your -# reverse proxy to the 'conduwuit' group, unless world RW permissions are specified with unix_socket_perms (666 minimum). -#unix_socket_path = "/run/conduwuit/conduwuit.sock" +# The default permissions (in octal) to create the UNIX socket with. +# #unix_socket_perms = 660 -# Set this to true for conduwuit to compress HTTP response bodies using zstd. -# This option does nothing if conduwuit was not built with `zstd_compression` feature. -# Please be aware that enabling HTTP compression may weaken TLS. -# Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. -zstd_compression = false - -# Set this to true for conduwuit to compress HTTP response bodies using gzip. -# This option does nothing if conduwuit was not built with `gzip_compression` feature. -# Please be aware that enabling HTTP compression may weaken TLS. -# Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. -gzip_compression = false - -# Set this to true for conduwuit to compress HTTP response bodies using brotli. -# This option does nothing if conduwuit was not built with `brotli_compression` feature. -# Please be aware that enabling HTTP compression may weaken TLS. -# Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. -brotli_compression = false - -# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you do not want conduwuit to send outbound requests to. -# Defaults to RFC1918, unroutable, loopback, multicast, and testnet addresses for security. +# This is the only directory where conduwuit will save its data, including +# media. 
+# Note: this was previously "/var/lib/matrix-conduit" # -# To disable, set this to be an empty vector (`[]`). -# Please be aware that this is *not* a guarantee. You should be using a firewall with zones as doing this on the application layer may have bypasses. +# YOU NEED TO EDIT THIS. # -# Currently this does not account for proxies in use like Synapse does. -ip_range_denylist = [ - "127.0.0.0/8", - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - "100.64.0.0/10", - "192.0.0.0/24", - "169.254.0.0/16", - "192.88.99.0/24", - "198.18.0.0/15", - "192.0.2.0/24", - "198.51.100.0/24", - "203.0.113.0/24", - "224.0.0.0/4", - "::1/128", - "fe80::/10", - "fc00::/7", - "2001:db8::/32", - "ff00::/8", - "fec0::/10", -] - - -### Moderation / Privacy / Security - -# Config option to control whether the legacy unauthenticated Matrix media repository endpoints will be enabled. -# These endpoints consist of: -# - /_matrix/media/*/config -# - /_matrix/media/*/upload -# - /_matrix/media/*/preview_url -# - /_matrix/media/*/download/* -# - /_matrix/media/*/thumbnail/* +# example: "/var/lib/conduwuit" # -# The authenticated equivalent endpoints are always enabled. +#database_path = + +# conduwuit supports online database backups using RocksDB's Backup engine +# API. To use this, set a database backup path that conduwuit can write +# to. # -# Defaults to true for now, but this is highly subject to change, likely in the next release. -#allow_legacy_media = true +# See https://conduwuit.puppyirl.gay/maintenance.html#backups for more information. +# +# example: "/opt/conduwuit-db-backups" +# +#database_backup_path = -# Set to true to allow user type "guest" registrations. Element attempts to register guest users automatically. -# Defaults to false -allow_guest_registration = false +# The amount of online RocksDB database backups to keep/retain, if using +# "database_backup_path", before deleting the oldest one. 
+# +#database_backups_to_keep = 1 -# Set to true to log guest registrations in the admin room. -# Defaults to false as it may be noisy or unnecessary. -log_guest_registrations = false +# Set this to any float value in megabytes for conduwuit to tell the +# database engine that this much memory is available for database-related +# caches. +# +# May be useful if you have significant memory to spare to increase +# performance. +# +# Similar to the individual LRU caches, this is scaled up with your CPU +# core count. +# +# This defaults to 128.0 + (64.0 * CPU core count) +# +#db_cache_capacity_mb = -# Set to true to allow guest registrations/users to auto join any rooms specified in `auto_join_rooms` -# Defaults to false -allow_guests_auto_join_rooms = false +# Option to control adding arbitrary text to the end of the user's +# displayname upon registration with a space before the text. This was the +# lightning bolt emoji option, just replaced with support for adding your +# own custom text or emojis. To disable, set this to "" (an empty string). +# +# The default is the trans pride flag. +# +# example: "🏳️⚧️" +# +#new_user_displayname_suffix = "🏳️⚧️" -# Vector list of servers that conduwuit will refuse to download remote media from. -# No default. -# prevent_media_downloads_from = ["example.com", "example.local"] +# If enabled, conduwuit will send a simple GET request periodically to +# `https://pupbrain.dev/check-for-updates/stable` for any new +# announcements made. Despite the name, this is not an update check +# endpoint, it is simply an announcement check endpoint. +# +# This is disabled by default as this is rarely used except for security +# updates or major updates. +# +#allow_check_for_updates = false + +# Set this to any float value to multiply conduwuit's in-memory LRU caches +# with such as "auth_chain_cache_capacity". +# +# May be useful if you have significant memory to spare to increase +# performance. 
This was previously called +# `conduit_cache_capacity_modifier`. +# +# If you have low memory, reducing this may be viable. +# +# By default, the individual caches such as "auth_chain_cache_capacity" +# are scaled by your CPU core count. +# +#cache_capacity_modifier = 1.0 + +# This item is undocumented. Please contribute documentation for it. +# +#pdu_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#auth_chain_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#shorteventid_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#eventidshort_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#shortstatekey_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#statekeyshort_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#server_visibility_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#user_visibility_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#stateinfo_cache_capacity = varies by system + +# This item is undocumented. Please contribute documentation for it. +# +#roomid_spacehierarchy_cache_capacity = varies by system + +# Maximum entries stored in DNS memory-cache. The size of an entry may +# vary so please take care if raising this value excessively. Only +# decrease this when using an external DNS cache. Please note +# that systemd-resolved does *not* count as an external cache, even when +# configured to do so. +# +#dns_cache_entries = 32768 + +# Minimum time-to-live in seconds for entries in the DNS cache. 
The +# default may appear high to most administrators; this is by design as the +# majority of NXDOMAINs are correct for a long time (e.g. the server is no +# longer running Matrix). Only decrease this if you are using an external +# DNS cache. +# +# default_dns_min_ttl: 259200 +# +#dns_min_ttl = + +# Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. +# This value is critical for the server to federate efficiently. +# NXDOMAIN's are assumed to not be returning to the federation +# and aggressively cached rather than constantly rechecked. +# +# Defaults to 3 days as these are *very rarely* false negatives. +# +#dns_min_ttl_nxdomain = 259200 + +# Number of retries after a timeout. +# +#dns_attempts = 10 + +# The number of seconds to wait for a reply to a DNS query. Please note +# that recursive queries can take up to several seconds for some domains, +# so this value should not be too low, especially on slower hardware or +# resolvers. +# +#dns_timeout = 10 + +# Fallback to TCP on DNS errors. Set this to false if unsupported by +# nameserver. +# +#dns_tcp_fallback = true + +# Enable to query all nameservers until the domain is found. Referred to +# as "trust_negative_responses" in hickory_resolver. This can avoid +# useless DNS queries if the first nameserver responds with NXDOMAIN or +# an empty NOERROR response. +# +#query_all_nameservers = true + +# Enables using *only* TCP for querying your specified nameservers instead +# of UDP. +# +# If you are running conduwuit in a container environment, this config option may need to be enabled. See https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker for more details. 
+# +#query_over_tcp_only = false + +# DNS A/AAAA record lookup strategy +# +# Takes a number of one of the following options: +# 1 - Ipv4Only (Only query for A records, no AAAA/IPv6) +# +# 2 - Ipv6Only (Only query for AAAA records, no A/IPv4) +# +# 3 - Ipv4AndIpv6 (Query for A and AAAA records in parallel, uses whatever +# returns a successful response first) +# +# 4 - Ipv6thenIpv4 (Query for AAAA record, if that fails then query the A +# record) +# +# 5 - Ipv4thenIpv6 (Query for A record, if that fails then query the AAAA +# record) +# +# If you don't have IPv6 networking, then for better DNS performance it +# may be suitable to set this to Ipv4Only (1) as you will never ever use +# the AAAA record contents even if the AAAA record is successful instead +# of the A record. +# +#ip_lookup_strategy = 5 + +# Max request size for file uploads in bytes. Defaults to 20MB. +# +#max_request_size = 20971520 + +# This item is undocumented. Please contribute documentation for it. +# +#max_fetch_prev_events = 192 + +# Default/base connection timeout (seconds). This is used only by URL +# previews and update/news endpoint checks. +# +#request_conn_timeout = 10 + +# Default/base request timeout (seconds). The time waiting to receive more +# data from another server. This is used only by URL previews, +# update/news, and misc endpoint checks. +# +#request_timeout = 35 + +# Default/base request total timeout (seconds). The time limit for a whole +# request. This is set very high to not cancel healthy requests while +# serving as a backstop. This is used only by URL previews and +# update/news endpoint checks. +# +#request_total_timeout = 320 + +# Default/base idle connection pool timeout (seconds). This is used only +# by URL previews and update/news endpoint checks. +# +#request_idle_timeout = 5 + +# Default/base max idle connections per host. This is used only by URL +# previews and update/news endpoint checks. 
Defaults to 1 as generally the +# same open connection can be re-used. +# +#request_idle_per_host = 1 + +# Federation well-known resolution connection timeout (seconds) +# +#well_known_conn_timeout = 6 + +# Federation HTTP well-known resolution request timeout (seconds) +# +#well_known_timeout = 10 + +# Federation client request timeout (seconds). You most definitely want +# this to be high to account for extremely large room joins, slow +# homeservers, your own resources etc. +# +#federation_timeout = 300 + +# Federation client idle connection pool timeout (seconds) +# +#federation_idle_timeout = 25 + +# Federation client max idle connections per host. Defaults to 1 as +# generally the same open connection can be re-used +# +#federation_idle_per_host = 1 + +# Federation sender request timeout (seconds). The time it takes for the +# remote server to process sent transactions can take a while. +# +#sender_timeout = 180 + +# Federation sender idle connection pool timeout (seconds) +# +#sender_idle_timeout = 180 + +# Federation sender transaction retry backoff limit (seconds) +# +#sender_retry_backoff_limit = 86400 + +# Appservice URL request connection timeout. Defaults to 35 seconds as +# generally appservices are hosted within the same network. +# +#appservice_timeout = 35 + +# Appservice URL idle connection pool timeout (seconds) +# +#appservice_idle_timeout = 300 + +# Notification gateway pusher idle connection pool timeout +# +#pusher_idle_timeout = 15 # Enables registration. If set to false, no users can register on this # server. # -# If set to true without a token configured, users can register with no form of 2nd- -# step only if you set +# If set to true without a token configured, users can register with no +# form of 2nd-step only if you set # `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to # true in your config. 
# # If you would like registration only via token reg, please configure # `registration_token` or `registration_token_file`. -allow_registration = false -# Please note that an open registration homeserver with no second-step verification -# is highly prone to abuse and potential defederation by homeservers, including -# matrix.org. +# +#allow_registration = false -# A static registration token that new users will have to provide when creating -# an account. If unset and `allow_registration` is true, registration is open -# without any condition. YOU NEED TO EDIT THIS. -registration_token = "change this token/string here or set registration_token_file" +# This item is undocumented. Please contribute documentation for it. +# +#yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false -# Path to a file on the system that gets read for the registration token +# A static registration token that new users will have to provide when +# creating an account. If unset and `allow_registration` is true, +# registration is open without any condition. +# +# YOU NEED TO EDIT THIS OR USE registration_token_file. +# +# example: "o&^uCtes4HPf0Vu@F20jQeeWE7" +# +#registration_token = + +# Path to a file on the system that gets read for the registration token. +# this config option takes precedence/priority over "registration_token". # # conduwuit must be able to access the file, and it must not be empty # -# no default -#registration_token_file = "/etc/conduwuit/.reg_token" - -# controls whether federation is allowed or not -# defaults to true -# allow_federation = true - -# controls whether users are allowed to create rooms. -# appservices and admins are always allowed to create rooms -# defaults to true -# allow_room_creation = true - -# controls whether non-admin local users are forbidden from sending room invites (local and remote), -# and if non-admin users can receive remote room invites. admins are always allowed to send and receive all room invites. 
-# defaults to false -# block_non_admin_invites = false - -# List of forbidden username patterns/strings. Values in this list are matched as *contains*. -# This is checked upon username availability check, registration, and startup as warnings if any local users in your database -# have a forbidden username. -# No default. -# forbidden_usernames = [] - -# List of forbidden room aliases and room IDs as patterns/strings. Values in this list are matched as *contains*. -# This is checked upon room alias creation, custom room ID creation if used, and startup as warnings if any room aliases -# in your database have a forbidden room alias/ID. -# No default. -# forbidden_alias_names = [] - -# List of forbidden server names that we will block incoming AND outgoing federation with, and block client room joins / remote user invites. +# example: "/etc/conduwuit/.reg_token" # -# This check is applied on the room ID, room alias, sender server name, sender user's server name, inbound federation X-Matrix origin, and outbound federation handler. +#registration_token_file = + +# Controls whether encrypted rooms and events are allowed. # -# Basically "global" ACLs. No default. -# forbidden_remote_server_names = [] +#allow_encryption = true -# List of forbidden server names that we will block all outgoing federated room directory requests for. Useful for preventing our users from wandering into bad servers or spaces. -# No default. -# forbidden_remote_room_directory_server_names = [] - -# Set this to true to allow your server's public room directory to be federated. -# Set this to false to protect against /publicRooms spiders, but will forbid external users -# from viewing your server's public room directory. If federation is disabled entirely -# (`allow_federation`), this is inherently false. 
-allow_public_room_directory_over_federation = false - -# Set this to true to allow your server's public room directory to be queried without client -# authentication (access token) through the Client APIs. Set this to false to protect against /publicRooms spiders. -allow_public_room_directory_without_auth = false - -# Set this to true to lock down your server's public room directory and only allow admins to publish rooms to the room directory. -# Unpublishing is still allowed by all users with this enabled. +# Controls whether federation is allowed or not. It is not recommended to +# disable this after the fact due to potential federation breakage. # -# Defaults to false -lockdown_public_room_directory = false +#allow_federation = true -# Set this to true to allow federating device display names / allow external users to see your device display name. -# If federation is disabled entirely (`allow_federation`), this is inherently false. For privacy, this is best disabled. -allow_device_name_federation = false +# This item is undocumented. Please contribute documentation for it. +# +#federation_loopback = false -# Vector list of domains allowed to send requests to for URL previews. Defaults to none. -# Note: this is a *contains* match, not an explicit match. Putting "google.com" will match "https://google.com" and "http://mymaliciousdomainexamplegoogle.com" -# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. -url_preview_domain_contains_allowlist = [] +# Set this to true to require authentication on the normally +# unauthenticated profile retrieval endpoints (GET) +# "/_matrix/client/v3/profile/{userId}". +# +# This can prevent profile scraping. +# +#require_auth_for_profile_requests = false -# Vector list of explicit domains allowed to send requests to for URL previews. Defaults to none. 
-# Note: This is an *explicit* match, not a contains match. Putting "google.com" will match "https://google.com", "http://google.com", but not "https://mymaliciousdomainexamplegoogle.com" -# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. -url_preview_domain_explicit_allowlist = [] +# Set this to true to allow your server's public room directory to be +# federated. Set this to false to protect against /publicRooms spiders, +# but will forbid external users from viewing your server's public room +# directory. If federation is disabled entirely (`allow_federation`), +# this is inherently false. +# +#allow_public_room_directory_over_federation = false -# Vector list of URLs allowed to send requests to for URL previews. Defaults to none. -# Note that this is a *contains* match, not an explicit match. Putting "google.com" will match "https://google.com/", "https://google.com/url?q=https://mymaliciousdomainexample.com", and "https://mymaliciousdomainexample.com/hi/google.com" -# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. -url_preview_url_contains_allowlist = [] +# Set this to true to allow your server's public room directory to be +# queried without client authentication (access token) through the Client +# APIs. Set this to false to protect against /publicRooms spiders. +# +#allow_public_room_directory_without_auth = false -# Vector list of explicit domains not allowed to send requests to for URL previews. Defaults to none. -# Note: This is an *explicit* match, not a contains match. Putting "google.com" will match "https://google.com", "http://google.com", but not "https://mymaliciousdomainexamplegoogle.com" -# The denylist is checked first before allowlist. Setting this to "*" will not do anything. 
-url_preview_domain_explicit_denylist = [] +# allow guests/unauthenticated users to access TURN credentials +# +# this is the equivalent of Synapse's `turn_allow_guests` config option. +# this allows any unauthenticated user to call the endpoint +# `/_matrix/client/v3/voip/turnServer`. +# +# It is unlikely you need to enable this as all major clients support +# authentication for this endpoint and prevents misuse of your TURN server +# from potential bots. +# +#turn_allow_guests = false -# Maximum amount of bytes allowed in a URL preview body size when spidering. Defaults to 384KB (384_000 bytes) -url_preview_max_spider_size = 384_000 +# Set this to true to lock down your server's public room directory and +# only allow admins to publish rooms to the room directory. Unpublishing +# is still allowed by all users with this enabled. +# +#lockdown_public_room_directory = false -# Option to decide whether you would like to run the domain allowlist checks (contains and explicit) on the root domain or not. Does not apply to URL contains allowlist. Defaults to false. -# Example: If this is enabled and you have "wikipedia.org" allowed in the explicit and/or contains domain allowlist, it will allow all subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is checked and matched. -# Useful if the domain contains allowlist is still too broad for you but you still want to allow all the subdomains under a root domain. -url_preview_check_root_domain = false +# Set this to true to allow federating device display names / allow +# external users to see your device display name. If federation is +# disabled entirely (`allow_federation`), this is inherently false. For +# privacy reasons, this is best left disabled. 
+#
+#allow_device_name_federation = false

-# Config option to allow or disallow incoming federation requests that obtain the profiles
-# of our local users from `/_matrix/federation/v1/query/profile`
+# Config option to allow or disallow incoming federation requests that
+# obtain the profiles of our local users from
+# `/_matrix/federation/v1/query/profile`
+#
+# Increases privacy of your local users' data such as display names, but
+# some remote users may get a false "this user does not exist" error when
+# they try to invite you to a DM or room. Also can protect against
+# profile spiders.
#
# This is inherently false if `allow_federation` is disabled
#
-# Defaults to true
-allow_profile_lookup_federation_requests = true
+#allow_inbound_profile_lookup_federation_requests = true

-# Config option to automatically deactivate the account of any user who attempts to join a:
-# - banned room
-# - forbidden room alias
-# - room alias or ID with a forbidden server name
+# controls whether standard users are allowed to create rooms. appservices
+# and admins are always allowed to create rooms
#
-# This may be useful if all your banned lists consist of toxic rooms or servers that no good faith user would ever attempt to join, and
-# to automatically remediate the problem without any admin user intervention.
-#
-# This will also make the user leave all rooms. Federation (e.g. remote room invites) are ignored here.
-#
-# Defaults to false as rooms can be banned for non-moderation-related reasons
-#auto_deactivate_banned_room_attempts = false
+#allow_room_creation = true

-
-### Admin Room and Console
-
-# Controls whether the conduwuit admin room console / CLI will immediately activate on startup.
-# This option can also be enabled with `--console` conduwuit argument
+# Set to false to disable users from joining or creating room versions
+# that aren't 100% officially supported by conduwuit.
# -# Defaults to false -#admin_console_automatic = false +# conduwuit officially supports room versions 6 - 11. +# +# conduwuit has slightly experimental (though works fine in practice) +# support for versions 3 - 5 +# +#allow_unstable_room_versions = true -# Controls what admin commands will be executed on startup. This is a vector list of strings of admin commands to run. +# default room version conduwuit will create rooms with. # -# An example of this can be: `admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` +# per spec, room version 10 is the default. # -# This option can also be configured with the `--execute` conduwuit argument and can take standard shell commands and environment variables -# -# Such example could be: `./conduwuit --execute "server admin-notice conduwuit has started up at $(date)"` -# -# Defaults to nothing. -#admin_execute = [""] +#default_room_version = 10 -# Controls whether conduwuit should error and fail to start if an admin execute command (`--execute` / `admin_execute`) fails +# This item is undocumented. Please contribute documentation for it. # -# Defaults to false -#admin_execute_errors_ignore = false +#allow_jaeger = false -# Controls the max log level for admin command log captures (logs generated from running admin commands) +# This item is undocumented. Please contribute documentation for it. # -# Defaults to "info" on release builds, else "debug" on debug builds -#admin_log_capture = "info" +#jaeger_filter = "info" -# Allows admins to enter commands in rooms other than #admins by prefixing with \!admin. The reply -# will be publicly visible to the room, originating from the sender. -# defaults to true -#admin_escape_commands = true - -# Controls whether admin room notices like account registrations, password changes, account deactivations, -# room directory publications, etc will be sent to the admin room. 
+# If the 'perf_measurements' compile-time feature is enabled, enables +# collecting folded stack trace profile of tracing spans using +# tracing_flame. The resulting profile can be visualized with inferno[1], +# speedscope[2], or a number of other tools. # -# Update notices and normal admin command responses will still be sent. +# [1]: https://github.com/jonhoo/inferno +# [2]: www.speedscope.app # -# defaults to true -#admin_room_notices = true +#tracing_flame = false +# This item is undocumented. Please contribute documentation for it. +# +#tracing_flame_filter = "info" -### Misc +# This item is undocumented. Please contribute documentation for it. +# +#tracing_flame_output_path = "./tracing.folded" + +# Examples: +# - No proxy (default): +# proxy ="none" +# +# - For global proxy, create the section at the bottom of this file: +# [global.proxy] +# global = { url = "socks5h://localhost:9050" } +# +# - To proxy some domains: +# [global.proxy] +# [[global.proxy.by_domain]] +# url = "socks5h://localhost:9050" +# include = ["*.onion", "matrix.myspecial.onion"] +# exclude = ["*.myspecial.onion"] +# +# Include vs. Exclude: +# - If include is an empty list, it is assumed to be `["*"]`. +# - If a domain matches both the exclude and include list, the proxy will +# only be used if it was included because of a more specific rule than +# it was excluded. In the above example, the proxy would be used for +# `ordinary.onion`, `matrix.myspecial.onion`, but not +# `hello.myspecial.onion`. +# +#proxy = "none" + +# This item is undocumented. Please contribute documentation for it. +# +#jwt_secret = + +# Servers listed here will be used to gather public keys of other servers +# (notary trusted key servers). 
+#
+# Currently, conduwuit doesn't support inbound batched key requests, so
+# this list should only contain other Synapse servers
+#
+# example: ["matrix.org", "constellatory.net", "tchncs.de"]
+#
+#trusted_servers = ["matrix.org"]
+
+# Whether to query the servers listed in trusted_servers first or query
+# the origin server first. For best security, querying the origin server
+# first is advised to minimize the exposure to a compromised trusted
+# server. For maximum federation/join performance this can be set to true,
+# however other options exist to query trusted servers first under
+# specific high-load circumstances and should be evaluated before setting
+# this to true.
+#
+#query_trusted_key_servers_first = false
+
+# Whether to query the servers listed in trusted_servers first
+# specifically on room joins. This option limits the exposure to a
+# compromised trusted server to room joins only. The join operation
+# requires gathering keys from many origin servers which can cause
+# significant delays. Therefore this defaults to true to mitigate
+# unexpected delays out-of-the-box. The security-paranoid or those
+# willing to tolerate delays are advised to set this to false. Note that
+# setting query_trusted_key_servers_first to true causes this option to
+# be ignored.
+#
+#query_trusted_key_servers_first_on_join = true
+
+# Only query trusted servers for keys and never the origin server. This is
+# intended for clusters or custom deployments using their trusted_servers
+# as forwarding-agents to cache and deduplicate requests. Notary servers
+# do not act as forwarding-agents by default, therefore do not enable this
+# unless you know exactly what you are doing.
+#
+#only_query_trusted_key_servers = false
+
+# Maximum number of keys to request in each trusted server batch query.
+#
+#trusted_server_batch_size = 1024

# max log level for conduwuit. allows debug, info, warn, or error
# see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
-# **Caveat**:
-# For release builds, the tracing crate is configured to only implement levels higher than error to avoid unnecessary overhead in the compiled binary from trace macros.
-# For debug builds, this restriction is not applied.
#
-# Defaults to "info"
+# **Caveat**:
+# For release builds, the tracing crate is configured to only implement
+# levels higher than error to avoid unnecessary overhead in the compiled
+# binary from trace macros. For debug builds, this restriction is not
+# applied.
+#
#log = "info"

# controls whether logs will be outputted with ANSI colours
#
-# defaults to true
#log_colors = true

-# controls whether encrypted rooms and events are allowed (default true)
-#allow_encryption = false
-
-# if enabled, conduwuit will send a simple GET request periodically to `https://pupbrain.dev/check-for-updates/stable`
-# for any new announcements made. Despite the name, this is not an update check
-# endpoint, it is simply an announcement check endpoint.
-# Defaults to false.
-#allow_check_for_updates = false
-
-# Set to false to disable users from joining or creating room versions that aren't 100% officially supported by conduwuit.
-# conduwuit officially supports room versions 6 - 10. conduwuit has experimental/unstable support for 3 - 5, and 11.
-# Defaults to true.
-#allow_unstable_room_versions = true
-
-# Option to control adding arbitrary text to the end of the user's displayname upon registration with a space before the text.
-# This was the lightning bolt emoji option, just replaced with support for adding your own custom text or emojis.
-# To disable, set this to "" (an empty string) -# Defaults to "🏳️‍⚧️" (trans pride flag) -#new_user_displayname_suffix = "🏳️‍⚧️" - -# Option to control whether conduwuit will query your list of trusted notary key servers (`trusted_servers`) for -# remote homeserver signing keys it doesn't know *first*, or query the individual servers first before falling back to the trusted -# key servers. -# -# The former/default behaviour makes federated/remote rooms joins generally faster because we're querying a single (or list of) server -# that we know works, is reasonably fast, and is reliable for just about all the homeserver signing keys in the room. Querying individual -# servers may take longer depending on the general infrastructure of everyone in there, how many dead servers there are, etc. -# -# However, this does create an increased reliance on one single or multiple large entities as `trusted_servers` should generally -# contain long-term and large servers who know a very large number of homeservers. -# -# If you don't know what any of this means, leave this and `trusted_servers` alone to their defaults. -# -# Defaults to true as this is the fastest option for federation. -#query_trusted_key_servers_first = true - -# List/vector of room **IDs** that conduwuit will make newly registered users join. -# The room IDs specified must be rooms that you have joined at least once on the server, and must be public. -# -# No default. -#auto_join_rooms = [] - -# Retry failed and incomplete messages to remote servers immediately upon startup. This is called bursting. -# If this is disabled, said messages may not be delivered until more messages are queued for that server. -# Do not change this option unless server resources are extremely limited or the scale of the server's -# deployment is huge. Do not disable this unless you know what you are doing. 
-#startup_netburst = true - -# Limit the startup netburst to the most recent (default: 50) messages queued for each remote server. All older -# messages are dropped and not reattempted. The `startup_netburst` option must be enabled for this value to have -# any effect. Do not change this value unless you know what you are doing. Set this value to -1 to reattempt -# every message without trimming the queues; this may consume significant disk. Set this value to 0 to drop all -# messages without any attempt at redelivery. -#startup_netburst_keep = 50 - -# If the 'perf_measurements' feature is enabled, enables collecting folded stack trace profile of tracing spans using -# tracing_flame. The resulting profile can be visualized with inferno[1], speedscope[2], or a number of other tools. -# [1]: https://github.com/jonhoo/inferno -# [2]: www.speedscope.app -# tracing_flame = false - -# If 'tracing_flame' is enabled, sets a filter for which events will be included in the profile. -# Supported syntax is documented at https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives -# tracing_flame_filter = "trace,h2=off" - -# If 'tracing_flame' is enabled, set the path to write the generated profile. -# tracing_flame_output_path = "./tracing.folded" - -# Enable the tokio-console. This option is only relevant to developers. -# See: docs/development.md#debugging-with-tokio-console for more information. -#tokio_console = false - -# Enable backward-compatibility with Conduit's media directory by creating symlinks of media. This -# option is only necessary if you plan on using Conduit again. Otherwise setting this to false -# reduces filesystem clutter and overhead for managing these symlinks in the directory. This is now -# disabled by default. 
You may still return to upstream Conduit but you have to run Conduwuit at -# least once with this set to true and allow the media_startup_check to take place before shutting -# down to return to Conduit. -# -# Disabled by default. -#media_compat_file_link = false - -# Prunes missing media from the database as part of the media startup checks. This means if you -# delete files from the media directory the corresponding entries will be removed from the -# database. This is disabled by default because if the media directory is accidentally moved or -# inaccessible the metadata entries in the database will be lost with sadness. -# -# Disabled by default. -#prune_missing_media = false - -# Checks consistency of the media directory at startup: -# 1. When `media_compat_file_link` is enbled, this check will upgrade media when switching back -# and forth between Conduit and Conduwuit. Both options must be enabled to handle this. -# 2. When media is deleted from the directory, this check will also delete its database entry. -# -# If none of these checks apply to your use cases, and your media directory is significantly large -# setting this to false may reduce startup time. -# -# Enabled by default. -#media_startup_check = true - # OpenID token expiration/TTL in seconds # -# These are the OpenID tokens that are primarily used for Matrix account integrations, *not* OIDC/OpenID Connect/etc +# These are the OpenID tokens that are primarily used for Matrix account +# integrations (e.g. Vector Integrations in Element), *not* OIDC/OpenID +# Connect/etc # -# Defaults to 3600 (1 hour) #openid_token_ttl = 3600 -# Emergency password feature. This password set here will let you login to the server service account (e.g. `@conduit`) -# and let you run admin commands, invite yourself to the admin room, etc. +# static TURN username to provide the client if not using a shared secret +# ("turn_secret"), It is recommended to use a shared secret over static +# credentials. # -# no default. 
-#emergency_password = "" +#turn_username = false - -### Generic database options - -# Set this to any float value to multiply conduwuit's in-memory LRU caches with. -# By default, the caches scale automatically with cpu-core-count. -# May be useful if you have significant memory to spare to increase performance. +# static TURN password to provide the client if not using a shared secret +# ("turn_secret"). It is recommended to use a shared secret over static +# credentials. # -# This was previously called `conduit_cache_capacity_modifier` +#turn_password = false + +# vector list of TURN URIs/servers to use # -# Defaults to 1.0. -#cache_capacity_modifier = 1.0 - -# Set this to any float value in megabytes for conduwuit to tell the database engine that this much memory is available for database-related caches. -# May be useful if you have significant memory to spare to increase performance. -# Defaults to 128.0 + (64.0 * CPU core count). -#db_cache_capacity_mb = 256.0 - - -### RocksDB options - -# Set this to true to use RocksDB config options that are tailored to HDDs (slower device storage) +# replace "example.turn.uri" with your TURN domain, such as the coturn +# "realm" config option. if using TURN over TLS, replace the URI prefix +# "turn:" with "turns:" # -# It is worth noting that by default, conduwuit will use RocksDB with Direct IO enabled. *Generally* speaking this improves performance as it bypasses buffered I/O (system page cache). -# However there is a potential chance that Direct IO may cause issues with database operations if your setup is uncommon. This has been observed with FUSE filesystems, and possibly ZFS filesystem. -# RocksDB generally deals/corrects these issues but it cannot account for all setups. -# If you experience any weird RocksDB issues, try enabling this option as it turns off Direct IO and feel free to report in the conduwuit Matrix room if this option fixes your DB issues. 
-# See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information.
+# example: ["turn:example.turn.uri?transport=udp",
+# "turn:example.turn.uri?transport=tcp"]
#
-# Defaults to false
-#rocksdb_optimize_for_spinning_disks = false
+#turn_uris = []

-# Enables direct-io to increase database performance. This is enabled by default. Set this option to false if the
-# database resides on a filesystem which does not support direct-io.
-#rocksdb_direct_io = true
+# TURN secret to use for generating the HMAC-SHA1 hash as part of username
+# and password generation
+#
+# this is more secure, but if needed you can use traditional
+# static username/password credentials.
+#
+#turn_secret = false

-# RocksDB log level. This is not the same as conduwuit's log level. This is the log level for the RocksDB engine/library
-# which show up in your database folder/path as `LOG` files. Defaults to error. conduwuit will typically log RocksDB errors as normal.
+# TURN secret to use that's read from the file path specified
+#
+# this takes priority over "turn_secret" first, and falls back to
+# "turn_secret" if invalid or failed to open.
+#
+# example: "/etc/conduwuit/.turn_secret"
+#
+#turn_secret_file =
+
+# TURN TTL in seconds
+#
+#turn_ttl = 86400
+
+# List/vector of room IDs or room aliases that conduwuit will make newly
+# registered users join. The rooms specified must be rooms that you
+# have joined at least once on the server, and must be public.
+#
+# example: ["#conduwuit:puppygock.gay",
+# "!eoIzvAvVwY23LPDay8:puppygock.gay"]
+#
+#auto_join_rooms = []
+
+# Config option to automatically deactivate the account of any user who
+# attempts to join a:
+# - banned room
+# - forbidden room alias
+# - room alias or ID with a forbidden server name
+#
+# This may be useful if all your banned lists consist of toxic rooms or
+# servers that no good faith user would ever attempt to join, and
+# to automatically remediate the problem without any admin user
+# intervention.
+# +# This will also make the user leave all rooms. Federation (e.g. remote +# room invites) are ignored here. +# +# Defaults to false as rooms can be banned for non-moderation-related +# reasons +# +#auto_deactivate_banned_room_attempts = false + +# RocksDB log level. This is not the same as conduwuit's log level. This +# is the log level for the RocksDB engine/library which show up in your +# database folder/path as `LOG` files. conduwuit will log RocksDB errors +# as normal through tracing. +# #rocksdb_log_level = "error" -# Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB. +# This item is undocumented. Please contribute documentation for it. +# +#rocksdb_log_stderr = false + +# Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB in +# bytes. +# #rocksdb_max_log_file_size = 4194304 -# Time in seconds before RocksDB will forcibly rotate logs. Defaults to 0. +# Time in seconds before RocksDB will forcibly rotate logs. +# #rocksdb_log_time_to_roll = 0 -# Amount of threads that RocksDB will use for parallelism on database operatons such as cleanup, sync, flush, compaction, etc. Set to 0 to use all your logical threads. +# Set this to true to use RocksDB config options that are tailored to HDDs +# (slower device storage) +# +# It is worth noting that by default, conduwuit will use RocksDB with +# Direct IO enabled. *Generally* speaking this improves performance as it +# bypasses buffered I/O (system page cache). However there is a potential +# chance that Direct IO may cause issues with database operations if your +# setup is uncommon. This has been observed with FUSE filesystems, and +# possibly ZFS filesystem. RocksDB generally deals/corrects these issues +# but it cannot account for all setups. If you experience any weird +# RocksDB issues, try enabling this option as it turns off Direct IO and +# feel free to report in the conduwuit Matrix room if this option fixes +# your DB issues. 
+#
+# See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information.
+#
+#rocksdb_optimize_for_spinning_disks = false
+
+# Enables direct-io to increase database performance via unbuffered I/O.
+#
+# See https://github.com/facebook/rocksdb/wiki/Direct-IO for more details about Direct IO and RocksDB.
+#
+# Set this option to false if the database resides on a filesystem which
+# does not support direct-io like FUSE, or any form of complex filesystem
+# setup such as possibly ZFS.
+#
+#rocksdb_direct_io = true
+
+# Amount of threads that RocksDB will use for parallelism on database
+# operations such as cleanup, sync, flush, compaction, etc. Set to 0 to
+# use all your logical threads. Defaults to your CPU logical thread count.
#
-# Defaults to your CPU logical thread count.
#rocksdb_parallelism_threads = 0

-# Enables idle IO priority for compaction thread. This prevents any unexpected lag in the server's operation and
-# is usually a good idea. Enabled by default.
-#rocksdb_compaction_ioprio_idle = true
-
-# Enables idle CPU priority for compaction thread. This is not enabled by default to prevent compaction from
-# falling too far behind on busy systems.
-#rocksdb_compaction_prio_idle = false
-
-# Maximum number of LOG files RocksDB will keep. This must *not* be set to 0. It must be at least 1.
-# Defaults to 3 as these are not very useful.
+# Maximum number of LOG files RocksDB will keep. This must *not* be set to
+# 0. It must be at least 1. Defaults to 3 as these are not very useful
+# unless troubleshooting/debugging a RocksDB bug.
+#
#rocksdb_max_log_files = 3

# Type of RocksDB database compression to use.
+#
# Available options are "zstd", "zlib", "bz2", "lz4", or "none"
-# It is best to use ZSTD as an overall good balance between speed/performance, storage, IO amplification, and CPU usage.
-# For more performance but less compression (more storage used) and less CPU usage, use LZ4.
-# See https://github.com/facebook/rocksdb/wiki/Compression for more details. +# +# It is best to use ZSTD as an overall good balance between +# speed/performance, storage, IO amplification, and CPU usage. +# For more performance but less compression (more storage used) and less +# CPU usage, use LZ4. See https://github.com/facebook/rocksdb/wiki/Compression for more details. # # "none" will disable compression. # -# Defaults to "zstd" #rocksdb_compression_algo = "zstd" -# Level of compression the specified compression algorithm for RocksDB to use. -# Default is 32767, which is internally read by RocksDB as the default magic number and -# translated to the library's default compression level as they all differ. +# Level of compression the specified compression algorithm for RocksDB to +# use. +# +# Default is 32767, which is internally read by RocksDB as the +# default magic number and translated to the library's default +# compression level as they all differ. # See their `kDefaultCompressionLevel`. # #rocksdb_compression_level = 32767 -# Level of compression the specified compression algorithm for the bottommost level/data for RocksDB to use. -# Default is 32767, which is internally read by RocksDB as the default magic number and -# translated to the library's default compression level as they all differ. +# Level of compression the specified compression algorithm for the +# bottommost level/data for RocksDB to use. Default is 32767, which is +# internally read by RocksDB as the default magic number and translated +# to the library's default compression level as they all differ. # See their `kDefaultCompressionLevel`. # -# Since this is the bottommost level (generally old and least used data), it may be desirable to have a very -# high compression level here as it's lesss likely for this data to be used. Research your chosen compression algorithm. 
+# Since this is the bottommost level (generally old and least used data),
+# it may be desirable to have a very high compression level here as it's
+# less likely for this data to be used. Research your chosen compression
+# algorithm.
#
#rocksdb_bottommost_compression_level = 32767

-# Whether to enable RocksDB "bottommost_compression".
-# At the expense of more CPU usage, this will further compress the database to reduce more storage.
-# It is recommended to use ZSTD compression with this for best compression results.
+# Whether to enable RocksDB's "bottommost_compression".
+#
+# At the expense of more CPU usage, this will further compress the
+# database to reduce more storage. It is recommended to use ZSTD
+# compression with this for best compression results. This may be useful
+# if you're trying to reduce storage usage from the database.
+#
# See https://github.com/facebook/rocksdb/wiki/Compression for more details.
#
-# Defaults to false as this uses more CPU when compressing.
#rocksdb_bottommost_compression = false

-# Level of statistics collection. Some admin commands to display database statistics may require
-# this option to be set. Database performance may be impacted by higher settings.
+# Database recovery mode (for RocksDB WAL corruption)
+#
+# Use this option when the server reports corruption and refuses to start.
+# Set mode 2 (PointInTime) to cleanly recover from this corruption. The
+# server will continue from the last good state, several seconds or
+# minutes prior to the crash. Clients may have to run "clear-cache &
+# reload" to account for the rollback. Upon success, you may reset the
+# mode back to default and restart again. Please note in some cases the
+# corruption error may not be cleared for at least 30 minutes of
+# operation in PointInTime mode.
+# +# As a very last ditch effort, if PointInTime does not fix or resolve +# anything, you can try mode 3 (SkipAnyCorruptedRecord) but this will +# leave the server in a potentially inconsistent state. +# +# The default mode 1 (TolerateCorruptedTailRecords) will automatically +# drop the last entry in the database if corrupted during shutdown, but +# nothing more. It is extraordinarily unlikely this will desynchronize +# clients. To disable any form of silent rollback set mode 0 +# (AbsoluteConsistency). +# +# The options are: +# 0 = AbsoluteConsistency +# 1 = TolerateCorruptedTailRecords (default) +# 2 = PointInTime (use me if trying to recover) +# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) +# +# See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information on these modes. +# +# See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. +# +#rocksdb_recovery_mode = 1 + +# Database repair mode (for RocksDB SST corruption) +# +# Use this option when the server reports corruption while running or +# panics. If the server refuses to start use the recovery mode options +# first. Corruption errors containing the acronym 'SST' which occur after +# startup will likely require this option. +# +# - Backing up your database directory is recommended prior to running the +# repair. +# - Disabling repair mode and restarting the server is recommended after +# running the repair. +# +# See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. +# +#rocksdb_repair = false + +# This item is undocumented. Please contribute documentation for it. +# +#rocksdb_read_only = false + +# This item is undocumented. Please contribute documentation for it. +# +#rocksdb_secondary = false + +# Enables idle CPU priority for compaction thread. 
This is not enabled by +# default to prevent compaction from falling too far behind on busy +# systems. +# +#rocksdb_compaction_prio_idle = false + +# Enables idle IO priority for compaction thread. This prevents any +# unexpected lag in the server's operation and is usually a good idea. +# Enabled by default. +# +#rocksdb_compaction_ioprio_idle = true + +# Config option to disable RocksDB compaction. You should never ever have +# to disable this. If you for some reason find yourself needing to disable +# this as part of troubleshooting or a bug, please reach out to us in the +# conduwuit Matrix room with information and details. +# +# Disabling compaction will lead to a significantly bloated and +# explosively large database, gradually poor performance, unnecessarily +# excessive disk read/writes, and slower shutdowns and startups. +# +#rocksdb_compaction = true + +# Level of statistics collection. Some admin commands to display database +# statistics may require this option to be set. Database performance may +# be impacted by higher settings. # # Option is a number ranging from 0 to 6: # 0 = No statistics. @@ -608,338 +870,509 @@ allow_profile_lookup_federation_requests = true # 3 to 5 = Statistics with possible performance impact. # 6 = All statistics. # -# Defaults to 1 (No statistics, except in debug-mode) #rocksdb_stats_level = 1 -# Database repair mode (for RocksDB SST corruption) +# This is a password that can be configured that will let you login to the +# server bot account (currently `@conduit`) for emergency troubleshooting +# purposes such as recovering/recreating your admin room, or inviting +# yourself back. # -# Use this option when the server reports corruption while running or panics. If the server refuses -# to start use the recovery mode options first. Corruption errors containing the acronym 'SST' which -# occur after startup will likely require this option. 
+# See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. # -# - Backing up your database directory is recommended prior to running the repair. -# - Disabling repair mode and restarting the server is recommended after running the repair. +# Once this password is unset, all sessions will be logged out for +# security purposes. # -# Defaults to false -#rocksdb_repair = false - -# Database recovery mode (for RocksDB WAL corruption) +# example: "F670$2CP@Hw8mG7RY1$%!#Ic7YA" # -# Use this option when the server reports corruption and refuses to start. Set mode 2 (PointInTime) -# to cleanly recover from this corruption. The server will continue from the last good state, -# several seconds or minutes prior to the crash. Clients may have to run "clear-cache & reload" to -# account for the rollback. Upon success, you may reset the mode back to default and restart again. -# Please note in some cases the corruption error may not be cleared for at least 30 minutes of -# operation in PointInTime mode. +#emergency_password = + +# This item is undocumented. Please contribute documentation for it. # -# As a very last ditch effort, if PointInTime does not fix or resolve anything, you can try mode -# 3 (SkipAnyCorruptedRecord) but this will leave the server in a potentially inconsistent state. -# -# The default mode 1 (TolerateCorruptedTailRecords) will automatically drop the last entry in the -# database if corrupted during shutdown, but nothing more. It is extraordinarily unlikely this will -# desynchronize clients. To disable any form of silent rollback set mode 0 (AbsoluteConsistency). 
-# -# The options are: -# 0 = AbsoluteConsistency -# 1 = TolerateCorruptedTailRecords (default) -# 2 = PointInTime (use me if trying to recover) -# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) -# -# See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information -# -# Defaults to 1 (TolerateCorruptedTailRecords) -#rocksdb_recovery_mode = 1 +#notification_push_path = "/_matrix/push/v1/notify" - -### Domain Name Resolution and Caching - -# Maximum entries stored in DNS memory-cache. The size of an entry may vary so please take care if -# raising this value excessively. Only decrease this when using an external DNS cache. Please note -# that systemd does *not* count as an external cache, even when configured to do so. -#dns_cache_entries = 32768 - -# Minimum time-to-live in seconds for entries in the DNS cache. The default may appear high to most -# administrators; this is by design. Only decrease this if you are using an external DNS cache. -#dns_min_ttl = 10800 - -# Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. This value is critical for -# the server to federate efficiently. NXDOMAIN's are assumed to not be returning to the federation -# and aggressively cached rather than constantly rechecked. -# -# Defaults to 3 days as these are *very rarely* false negatives. -#dns_min_ttl_nxdomain = 259200 - -# The number of seconds to wait for a reply to a DNS query. Please note that recursive queries can -# take up to several seconds for some domains, so this value should not be too low. -#dns_timeout = 10 - -# Number of retries after a timeout. -#dns_attempts = 10 - -# Fallback to TCP on DNS errors. Set this to false if unsupported by nameserver. -#dns_tcp_fallback = true - -# Enable to query all nameservers until the domain is found. Referred to as "trust_negative_responses" in hickory_resolver. 
-# This can avoid useless DNS queries if the first nameserver responds with NXDOMAIN or an empty NOERROR response. -# -# The default is to query one nameserver and stop (false). -#query_all_nameservers = true - -# Enables using *only* TCP for querying your specified nameservers instead of UDP. -# -# You very likely do *not* want this. hickory-resolver already falls back to TCP on UDP errors. -# Defaults to false -#query_over_tcp_only = false - -# DNS A/AAAA record lookup strategy -# -# Takes a number of one of the following options: -# 1 - Ipv4Only (Only query for A records, no AAAA/IPv6) -# 2 - Ipv6Only (Only query for AAAA records, no A/IPv4) -# 3 - Ipv4AndIpv6 (Query for A and AAAA records in parallel, uses whatever returns a successful response first) -# 4 - Ipv6thenIpv4 (Query for AAAA record, if that fails then query the A record) -# 5 - Ipv4thenIpv6 (Query for A record, if that fails then query the AAAA record) -# -# If you don't have IPv6 networking, then for better performance it may be suitable to set this to Ipv4Only (1) as -# you will never ever use the AAAA record contents even if the AAAA record is successful instead of the A record. -# -# Defaults to 5 - Ipv4ThenIpv6 as this is the most compatible and IPv4 networking is currently the most prevalent. -#ip_lookup_strategy = 5 - - -### Request Timeouts, Connection Timeouts, and Connection Pooling - -## Request Timeouts are HTTP response timeouts -## Connection Timeouts are TCP connection timeouts -## -## Connection Pooling Timeouts are timeouts for keeping an open idle connection alive. -## Connection pooling and keepalive is very useful for federation or other places where for performance reasons, -## we want to keep connections open that we will re-use frequently due to TCP and TLS 1.3 overhead/expensiveness. -## -## Generally these defaults are the best, but if you find a reason to need to change these they are here. - -# Default/base connection timeout. 
-# This is used only by URL previews and update/news endpoint checks -# -# Defaults to 10 seconds -#request_conn_timeout = 10 - -# Default/base request timeout. The time waiting to receive more data from another server. -# This is used only by URL previews, update/news, and misc endpoint checks -# -# Defaults to 35 seconds -#request_timeout = 35 - -# Default/base request total timeout. The time limit for a whole request. This is set very high to not -# cancel healthy requests while serving as a backstop. -# This is used only by URL previews and update/news endpoint checks -# -# Defaults to 320 seconds -#request_total_timeout = 320 - -# Default/base idle connection pool timeout -# This is used only by URL previews and update/news endpoint checks -# -# Defaults to 5 seconds -#request_idle_timeout = 5 - -# Default/base max idle connections per host -# This is used only by URL previews and update/news endpoint checks -# -# Defaults to 1 as generally the same open connection can be re-used -#request_idle_per_host = 1 - -# Federation well-known resolution connection timeout -# -# Defaults to 6 seconds -#well_known_conn_timeout = 6 - -# Federation HTTP well-known resolution request timeout -# -# Defaults to 10 seconds -#well_known_timeout = 10 - -# Federation client request timeout -# You most definitely want this to be high to account for extremely large room joins, slow homeservers, your own resources etc. -# -# Defaults to 300 seconds -#federation_timeout = 300 - -# Federation client idle connection pool timeout -# -# Defaults to 25 seconds -#federation_idle_timeout = 25 - -# Federation client max idle connections per host -# -# Defaults to 1 as generally the same open connection can be re-used -#federation_idle_per_host = 1 - -# Federation sender request timeout -# The time it takes for the remote server to process sent transactions can take a while. 
-# -# Defaults to 180 seconds -#sender_timeout = 180 - -# Federation sender idle connection pool timeout -# -# Defaults to 180 seconds -#sender_idle_timeout = 180 - -# Federation sender transaction retry backoff limit -# -# Defaults to 86400 seconds -#sender_retry_backoff_limit = 86400 - -# Appservice URL request connection timeout -# -# Defaults to 35 seconds as generally appservices are hosted within the same network -#appservice_timeout = 35 - -# Appservice URL idle connection pool timeout -# -# Defaults to 300 seconds -#appservice_idle_timeout = 300 - -# Notification gateway pusher idle connection pool timeout -# -# Defaults to 15 seconds -#pusher_idle_timeout = 15 - - -### Presence / Typing Indicators / Read Receipts - -# Config option to control local (your server only) presence updates/requests. Defaults to true. -# Note that presence on conduwuit is very fast unlike Synapse's. -# If using outgoing presence, this MUST be enabled. +# Config option to control local (your server only) presence +# updates/requests. Note that presence on conduwuit is +# very fast unlike Synapse's. If using outgoing presence, this MUST be +# enabled. # #allow_local_presence = true -# Config option to control incoming federated presence updates/requests. Defaults to true. -# This option receives presence updates from other servers, but does not send any unless `allow_outgoing_presence` is true. +# Config option to control incoming federated presence updates/requests. +# +# This option receives presence updates from other +# servers, but does not send any unless `allow_outgoing_presence` is true. # Note that presence on conduwuit is very fast unlike Synapse's. # #allow_incoming_presence = true -# Config option to control outgoing presence updates/requests. Defaults to true. -# This option sends presence updates to other servers, but does not receive any unless `allow_incoming_presence` is true. +# Config option to control outgoing presence updates/requests. 
+# +# This option sends presence updates to other servers, but does not +# receive any unless `allow_incoming_presence` is true. # Note that presence on conduwuit is very fast unlike Synapse's. -# If using outgoing presence, you MUST enable `allow_local_presence` as well. +# If using outgoing presence, you MUST enable `allow_local_presence` as +# well. # #allow_outgoing_presence = true -# Config option to enable the presence idle timer for remote users. Disabling is offered as an optimization for -# servers participating in many large rooms or when resources are limited. Disabling it may cause incorrect -# presence states (i.e. stuck online) to be seen for some remote users. Defaults to true. -#presence_timeout_remote_users = true - -# Config option to control how many seconds before presence updates that you are idle. Defaults to 5 minutes. +# Config option to control how many seconds before presence updates that +# you are idle. Defaults to 5 minutes. +# #presence_idle_timeout_s = 300 -# Config option to control how many seconds before presence updates that you are offline. Defaults to 30 minutes. +# Config option to control how many seconds before presence updates that +# you are offline. Defaults to 30 minutes. +# #presence_offline_timeout_s = 1800 -# Config option to control whether we should receive remote incoming read receipts. -# Defaults to true. +# Config option to enable the presence idle timer for remote users. +# Disabling is offered as an optimization for servers participating in +# many large rooms or when resources are limited. Disabling it may cause +# incorrect presence states (i.e. stuck online) to be seen for some +# remote users. +# +#presence_timeout_remote_users = true + +# Config option to control whether we should receive remote incoming read +# receipts. +# #allow_incoming_read_receipts = true -# Config option to control whether we should send read receipts to remote servers. -# Defaults to true. 
+# Config option to control whether we should send read receipts to remote +# servers. +# #allow_outgoing_read_receipts = true -# Config option to control outgoing typing updates to federation. Defaults to true. +# Config option to control outgoing typing updates to federation. +# #allow_outgoing_typing = true -# Config option to control incoming typing updates from federation. Defaults to true. +# Config option to control incoming typing updates from federation. +# #allow_incoming_typing = true -# Config option to control maximum time federation user can indicate typing. +# Config option to control maximum time federation user can indicate +# typing. +# #typing_federation_timeout_s = 30 -# Config option to control minimum time local client can indicate typing. This does not override -# a client's request to stop typing. It only enforces a minimum value in case of no stop request. +# Config option to control minimum time local client can indicate typing. +# This does not override a client's request to stop typing. It only +# enforces a minimum value in case of no stop request. +# #typing_client_timeout_min_s = 15 # Config option to control maximum time local client can indicate typing. +# #typing_client_timeout_max_s = 45 +# Set this to true for conduwuit to compress HTTP response bodies using +# zstd. This option does nothing if conduwuit was not built with +# `zstd_compression` feature. Please be aware that enabling HTTP +# compression may weaken TLS. Most users should not need to enable this. +# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH +# before deciding to enable this. +# +#zstd_compression = false -### TURN / VoIP +# Set this to true for conduwuit to compress HTTP response bodies using +# gzip. This option does nothing if conduwuit was not built with +# `gzip_compression` feature. Please be aware that enabling HTTP +# compression may weaken TLS. Most users should not need to enable this. 
+# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before +# deciding to enable this. +# +# If you are in a large amount of rooms, you may find that enabling this +# is necessary to reduce the significantly large response bodies. +# +#gzip_compression = false -# vector list of TURN URIs/servers to use +# Set this to true for conduwuit to compress HTTP response bodies using +# brotli. This option does nothing if conduwuit was not built with +# `brotli_compression` feature. Please be aware that enabling HTTP +# compression may weaken TLS. Most users should not need to enable this. +# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before +# deciding to enable this. # -# replace "example.turn.uri" with your TURN domain, such as the coturn "realm". -# if using TURN over TLS, replace "turn:" with "turns:" -# -# No default -#turn_uris = ["turn:example.turn.uri?transport=udp", "turn:example.turn.uri?transport=tcp"] +#brotli_compression = false -# TURN secret to use that's read from the file path specified +# Set to true to allow user type "guest" registrations. Some clients like +# Element attempt to register guest users automatically. # -# this takes priority over "turn_secret" first, and falls back to "turn_secret" if invalid or -# failed to open. -# -# no default -#turn_secret_file = "/path/to/secret.txt" +#allow_guest_registration = false -# TURN secret to use for generating the HMAC-SHA1 hash apart of username and password generation +# Set to true to log guest registrations in the admin room. Note that +# these may be noisy or unnecessary if you're a public homeserver. # -# this is more secure, but if needed you can use traditional username/password below. -# -# no default -#turn_secret = "" +#log_guest_registrations = false -# TURN username to provide the client +# Set to true to allow guest registrations/users to auto join any rooms +# specified in `auto_join_rooms`. 
#
-# no default
-#turn_username = ""
+#allow_guests_auto_join_rooms = false

-# TURN password to provide the client
+# Config option to control whether the legacy unauthenticated Matrix media
+# repository endpoints will be enabled. These endpoints consist of:
+# - /_matrix/media/*/config
+# - /_matrix/media/*/upload
+# - /_matrix/media/*/preview_url
+# - /_matrix/media/*/download/*
+# - /_matrix/media/*/thumbnail/*
#
-# no default
-#turn_password = ""
+# The authenticated equivalent endpoints are always enabled.
+#
+# Defaults to true for now, but this is highly subject to change, likely
+# in the next release.
+#
+#allow_legacy_media = true

-# TURN TTL
+# This item is undocumented. Please contribute documentation for it.
#
-# Default is 86400 seconds
-#turn_ttl = 86400
+#freeze_legacy_media = true

-# allow guests/unauthenticated users to access TURN credentials
+# Checks consistency of the media directory at startup:
+# 1. When `media_compat_file_link` is enabled, this check will upgrade
+# media when switching back and forth between Conduit and conduwuit.
+# Both options must be enabled to handle this.
+# 2. When media is deleted from the directory, this check will also delete
+# its database entry.
#
-# this is the equivalent of Synapse's `turn_allow_guests` config option. this allows
-# any unauthenticated user to call `/_matrix/client/v3/voip/turnServer`.
+# If none of these checks apply to your use cases, and your media
+# directory is significantly large, setting this to false may reduce
+# startup time.
#
-# defaults to false
-#turn_allow_guests = false
+#media_startup_check = true

+# Enable backward-compatibility with Conduit's media directory by creating
+# symlinks of media. This option is only necessary if you plan on using
+# Conduit again. Otherwise setting this to false reduces filesystem
+# clutter and overhead for managing these symlinks in the directory. This
+# is now disabled by default.
You may still return to upstream Conduit +# but you have to run conduwuit at least once with this set to true and +# allow the media_startup_check to take place before shutting +# down to return to Conduit. +# +#media_compat_file_link = false -# Other options not in [global]: +# Prunes missing media from the database as part of the media startup +# checks. This means if you delete files from the media directory the +# corresponding entries will be removed from the database. This is +# disabled by default because if the media directory is accidentally moved +# or inaccessible, the metadata entries in the database will be lost with +# sadness. +# +#prune_missing_media = false + +# Vector list of servers that conduwuit will refuse to download remote +# media from. +# +#prevent_media_downloads_from = [] + +# List of forbidden server names that we will block incoming AND outgoing +# federation with, and block client room joins / remote user invites. +# +# This check is applied on the room ID, room alias, sender server name, +# sender user's server name, inbound federation X-Matrix origin, and +# outbound federation handler. +# +# Basically "global" ACLs. +# +#forbidden_remote_server_names = [] + +# List of forbidden server names that we will block all outgoing federated +# room directory requests for. Useful for preventing our users from +# wandering into bad servers or spaces. +# +#forbidden_remote_room_directory_server_names = [] + +# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you +# do not want conduwuit to send outbound requests to. Defaults to +# RFC1918, unroutable, loopback, multicast, and testnet addresses for +# security. +# +# Please be aware that this is *not* a guarantee. You should be using a +# firewall with zones as doing this on the application layer may have +# bypasses. +# +# Currently this does not account for proxies in use like Synapse does. +# +# To disable, set this to be an empty vector (`[]`). 
+#
+#ip_range_denylist = ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12",
+# "192.168.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "169.254.0.0/16",
+# "192.88.99.0/24", "198.18.0.0/15", "192.0.2.0/24", "198.51.100.0/24",
+# "203.0.113.0/24", "224.0.0.0/4", "::1/128", "fe80::/10", "fc00::/7",
+# "2001:db8::/32", "ff00::/8", "fec0::/10"]
+#
+
+# Vector list of domains allowed to send requests to for URL previews.
+# Defaults to none. Note: this is a *contains* match, not an explicit
+# match. Putting "google.com" will match "https://google.com" and
+# "http://mymaliciousdomainexamplegoogle.com" Setting this to "*" will
+# allow all URL previews. Please note that this opens up significant
+# attack surface to your server, you are expected to be aware of the
+# risks by doing so.
+#
+#url_preview_domain_contains_allowlist = []
+
+# Vector list of explicit domains allowed to send requests to for URL
+# previews. Defaults to none. Note: This is an *explicit* match, not a
+# contains match. Putting "google.com" will match "https://google.com",
+# "http://google.com", but not
+# "https://mymaliciousdomainexamplegoogle.com". Setting this to "*" will
+# allow all URL previews. Please note that this opens up significant
+# attack surface to your server, you are expected to be aware of the
+# risks by doing so.
+#
+#url_preview_domain_explicit_allowlist = []
+
+# Vector list of explicit domains not allowed to send requests to for URL
+# previews. Defaults to none. Note: This is an *explicit* match, not a
+# contains match. Putting "google.com" will match "https://google.com",
+# "http://google.com", but not
+# "https://mymaliciousdomainexamplegoogle.com". The denylist is checked
+# first before allowlist. Setting this to "*" will not do anything.
+#
+#url_preview_domain_explicit_denylist = []
+
+# Vector list of URLs allowed to send requests to for URL previews.
+# Defaults to none. Note that this is a *contains* match, not an
+# explicit match.
Putting "google.com" will match +# "https://google.com/", +# "https://google.com/url?q=https://mymaliciousdomainexample.com", and +# "https://mymaliciousdomainexample.com/hi/google.com" Setting this to +# "*" will allow all URL previews. Please note that this opens up +# significant attack surface to your server, you are expected to be +# aware of the risks by doing so. +# +#url_preview_url_contains_allowlist = [] + +# Maximum amount of bytes allowed in a URL preview body size when +# spidering. Defaults to 384KB in bytes. +# +#url_preview_max_spider_size = 384000 + +# Option to decide whether you would like to run the domain allowlist +# checks (contains and explicit) on the root domain or not. Does not apply +# to URL contains allowlist. Defaults to false. +# +# Example usecase: If this is +# enabled and you have "wikipedia.org" allowed in the explicit and/or +# contains domain allowlist, it will allow all subdomains under +# "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is +# checked and matched. Useful if the domain contains allowlist is still +# too broad for you but you still want to allow all the subdomains under a +# root domain. +# +#url_preview_check_root_domain = false + +# List of forbidden room aliases and room IDs as strings of regex +# patterns. +# +# Regex can be used or explicit contains matches can be done by +# just specifying the words (see example). +# +# This is checked upon room alias creation, custom room ID creation if +# used, and startup as warnings if any room aliases in your database have +# a forbidden room alias/ID. +# +# example: ["19dollarfortnitecards", "b[4a]droom"] +# +#forbidden_alias_names = [] + +# List of forbidden username patterns/strings. +# +# Regex can be used or explicit contains matches can be done by just +# specifying the words (see example). 
+#
+# This is checked upon username availability check, registration, and
+# startup as warnings if any local users in your database have a forbidden
+# username.
+#
+# example: ["administrator", "b[a4]dusernam[3e]"]
+#
+#forbidden_usernames = []
+
+# Retry failed and incomplete messages to remote servers immediately upon
+# startup. This is called bursting. If this is disabled, said messages
+# may not be delivered until more messages are queued for that server. Do
+# not change this option unless server resources are extremely limited or
+# the scale of the server's deployment is huge. Do not disable this
+# unless you know what you are doing.
+#
+#startup_netburst = true
+
+# Messages beyond this limit are dropped and not reattempted. The `startup_netburst` option
+# must be enabled for this value to have any effect. Do not change this
+# value unless you know what you are doing. Set this value to -1 to
+# reattempt every message without trimming the queues; this may consume
+# significant disk. Set this value to 0 to drop all messages without any
+# attempt at redelivery.
+#
+#startup_netburst_keep = 50
+
+# controls whether non-admin local users are forbidden from sending room
+# invites (local and remote), and if non-admin users can receive remote
+# room invites. admins are always allowed to send and receive all room
+# invites.
+#
+#block_non_admin_invites = false
+
+# Allows admins to enter commands in rooms other than "#admins" (admin
+# room) by prefixing your message with "\!admin" or "\\!admin" followed
+# by a normal conduwuit admin command. The reply will be publicly visible
+# to the room, originating from the sender.
+#
+# example: \\!admin debug ping puppygock.gay
+#
+#admin_escape_commands = true
+
+# Controls whether the conduwuit admin room console / CLI will immediately
+# activate on startup. This option can also be enabled with `--console`
+# conduwuit argument.
+# +#admin_console_automatic = false + +# Controls what admin commands will be executed on startup. This is a +# vector list of strings of admin commands to run. # # -# Enables running conduwuit with direct TLS support -# It is strongly recommended you use a reverse proxy instead. This is primarily relevant for test suites like complement that require a private CA setup. -# [global.tls] -# certs = "/path/to/my/certificate.crt" -# key = "/path/to/my/private_key.key" +# This option can also be configured with the `--execute` conduwuit +# argument and can take standard shell commands and environment variables # +# Such example could be: `./conduwuit --execute "server admin-notice +# conduwuit has started up at $(date)"` +# +# example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` +# +#admin_execute = [] + +# Controls whether conduwuit should error and fail to start if an admin +# execute command (`--execute` / `admin_execute`) fails. +# +#admin_execute_errors_ignore = false + +# Controls the max log level for admin command log captures (logs +# generated from running admin commands). Defaults to "info" on release +# builds, else "debug" on debug builds. +# +#admin_log_capture = "info" + +# The default room tag to apply on the admin room. +# +# On some clients like Element, the room tag "m.server_notice" is a +# special pinned room at the very bottom of your room list. The conduwuit +# admin room can be pinned here so you always have an easy-to-access +# shortcut dedicated to your admin room. +# +#admin_room_tag = "m.server_notice" + +# Sentry.io crash/panic reporting, performance monitoring/metrics, etc. +# This is NOT enabled by default. 
conduwuit's default Sentry reporting +# endpoint is o4506996327251968.ingest.us.sentry.io +# +#sentry = false + +# Sentry reporting URL if a custom one is desired +# +#sentry_endpoint = "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" + +# Report your conduwuit server_name in Sentry.io crash reports and metrics +# +#sentry_send_server_name = false + +# Performance monitoring/tracing sample rate for Sentry.io +# +# Note that too high values may impact performance, and can be disabled by +# setting it to 0.0 (0%) This value is read as a percentage to Sentry, +# represented as a decimal. Defaults to 15% of traces (0.15) +# +#sentry_traces_sample_rate = 0.15 + +# Whether to attach a stacktrace to Sentry reports. +# +#sentry_attach_stacktrace = false + +# Send panics to sentry. This is true by default, but sentry has to be +# enabled. The global "sentry" config option must be enabled to send any +# data. +# +#sentry_send_panic = true + +# Send errors to sentry. This is true by default, but sentry has to be +# enabled. This option is only effective in release-mode; forced to false +# in debug-mode. +# +#sentry_send_error = true + +# Controls the tracing log level for Sentry to send things like +# breadcrumbs and transactions +# +#sentry_filter = "info" + +# Enable the tokio-console. This option is only relevant to developers. +# See https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console for more information. +# +#tokio_console = false + +# This item is undocumented. Please contribute documentation for it. +# +#test = false + +# Controls whether admin room notices like account registrations, password +# changes, account deactivations, room directory publications, etc will +# be sent to the admin room. Update notices and normal admin command +# responses will still be sent. +# +#admin_room_notices = true + +[global.tls] + +# Path to a valid TLS certificate file. 
+# +# example: "/path/to/my/certificate.crt" +# +#certs = + +# Path to a valid TLS certificate private key. +# +# example: "/path/to/my/certificate.key" +# +#key = + # Whether to listen and allow for HTTP and HTTPS connections (insecure!) -# This config option is only available if conduwuit was built with `axum_dual_protocol` feature (not default feature) -# Defaults to false +# #dual_protocol = false +[global.well_known] -# If you are using delegation via well-known files and you cannot serve them from your reverse proxy, you can -# uncomment these to serve them directly from conduwuit. This requires proxying all requests to conduwuit, not just `/_matrix` to work. +# The server base domain of the URL with a specific port that the server +# well-known file will serve. This should contain a port at the end, and +# should not be a URL. # -#[global.well_known] -#server = "matrix.example.com:443" -#client = "https://matrix.example.com" +# example: "matrix.example.com:443" # -# A single contact and/or support page for /.well-known/matrix/support -# All options here are strings. Currently only supports 1 single contact. -# No default. +#server = + +# The server URL that the client well-known file will serve. This should +# not contain a port, and should just be a valid HTTPS URL. # -#support_page = "" -#support_role = "" -#support_email = "" -#support_mxid = "" +# example: "https://matrix.example.com" +# +#client = + +# This item is undocumented. Please contribute documentation for it. +# +#support_page = + +# This item is undocumented. Please contribute documentation for it. +# +#support_role = + +# This item is undocumented. Please contribute documentation for it. +# +#support_email = + +# This item is undocumented. Please contribute documentation for it. 
+# +#support_mxid = diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs new file mode 100644 index 00000000..c1875a1f --- /dev/null +++ b/src/api/server/make_knock.rs @@ -0,0 +1,107 @@ +use axum::extract::State; +use conduit::Err; +use ruma::{ + api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, + events::room::member::{MembershipState, RoomMemberEventContent}, + RoomVersionId, +}; +use serde_json::value::to_raw_value; +use tracing::warn; +use RoomVersionId::*; + +use crate::{service::pdu::PduBuilder, Error, Result, Ruma}; + +/// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` +/// +/// Creates a knock template. +pub(crate) async fn create_knock_event_template_route( + State(services): State, body: Ruma, +) -> Result { + if !services.rooms.metadata.exists(&body.room_id).await { + return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + } + + if body.user_id.server_name() != body.origin() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Not allowed to knock on behalf of another server/user", + )); + } + + // ACL check origin server + services + .rooms + .event_handler + .acl_check(body.origin(), &body.room_id) + .await?; + + if services + .globals + .config + .forbidden_remote_server_names + .contains(body.origin()) + { + warn!( + "Server {} for remote user {} tried knocking room ID {} which has a server name that is globally \ + forbidden. 
Rejecting.", + body.origin(), + &body.user_id, + &body.room_id, + ); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); + } + + if let Some(server) = body.room_id.server_name() { + if services + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + { + return Err!(Request(Forbidden("Server is banned on this homeserver."))); + } + } + + let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; + + if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: room_version_id, + }, + "Room version does not support knocking.", + )); + } + + if !body.ver.contains(&room_version_id) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: room_version_id, + }, + "Your homeserver does not support the features required to knock on this room.", + )); + } + + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + let (_pdu, mut pdu_json) = services + .rooms + .timeline + .create_hash_and_sign_event( + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Knock)), + &body.user_id, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + // room v3 and above removed the "event_id" field from remote PDU format + super::maybe_strip_event_id(&mut pdu_json, &room_version_id)?; + + Ok(create_knock_event_template::v1::Response { + room_version: room_version_id, + event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), + }) +} diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs new file mode 100644 index 00000000..c57998ae --- /dev/null +++ b/src/api/server/send_knock.rs @@ -0,0 +1,190 @@ +use axum::extract::State; +use conduit::{err, pdu::gen_event_id_canonical_json, warn, Err, Error, PduEvent, Result}; +use ruma::{ + api::{client::error::ErrorKind, 
federation::knock::send_knock}, + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, + }, + serde::JsonObject, + OwnedServerName, OwnedUserId, + RoomVersionId::*, +}; + +use crate::Ruma; + +/// # `PUT /_matrix/federation/v1/send_knock/{roomId}/{eventId}` +/// +/// Submits a signed knock event. +pub(crate) async fn create_knock_event_v1_route( + State(services): State, body: Ruma, +) -> Result { + if services + .globals + .config + .forbidden_remote_server_names + .contains(body.origin()) + { + warn!( + "Server {} tried knocking room ID {} who has a server name that is globally forbidden. Rejecting.", + body.origin(), + &body.room_id, + ); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); + } + + if let Some(server) = body.room_id.server_name() { + if services + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + { + warn!( + "Server {} tried knocking room ID {} which has a server name that is globally forbidden. 
Rejecting.", + body.origin(), + &body.room_id, + ); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); + } + } + + if !services.rooms.metadata.exists(&body.room_id).await { + return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + } + + // ACL check origin server + services + .rooms + .event_handler + .acl_check(body.origin(), &body.room_id) + .await?; + + let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; + + if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6) { + return Err!(Request(Forbidden("Room version does not support knocking."))); + } + + let Ok((event_id, value)) = gen_event_id_canonical_json(&body.pdu, &room_version_id) else { + // Event could not be converted to canonical json + return Err!(Request(InvalidParam("Could not convert event to canonical json."))); + }; + + let event_type: StateEventType = serde_json::from_value( + value + .get("type") + .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing type property."))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event has invalid event type."))?; + + if event_type != StateEventType::RoomMember { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Not allowed to send non-membership state event to knock endpoint.", + )); + } + + let content: RoomMemberEventContent = serde_json::from_value( + value + .get("content") + .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing content property"))? 
+ .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event content is empty or invalid"))?; + + if content.membership != MembershipState::Knock { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Not allowed to send a non-knock membership event to knock endpoint.", + )); + } + + // ACL check sender server name + let sender: OwnedUserId = serde_json::from_value( + value + .get("sender") + .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing sender property."))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "sender is not a valid user ID."))?; + + services + .rooms + .event_handler + .acl_check(sender.server_name(), &body.room_id) + .await?; + + // check if origin server is trying to send for another server + if sender.server_name() != body.origin() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Not allowed to knock on behalf of another server.", + )); + } + + let state_key: OwnedUserId = serde_json::from_value( + value + .get("state_key") + .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing state_key property."))? 
+ .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "state_key is invalid or not a user ID."))?; + + if state_key != sender { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "State key does not match sender user", + )); + }; + + let origin: OwnedServerName = serde_json::from_value( + serde_json::to_value( + value + .get("origin") + .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing origin property."))?, + ) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?; + + let mut event: JsonObject = serde_json::from_str(body.pdu.get()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid knock event PDU."))?; + + event.insert("event_id".to_owned(), "$placeholder".into()); + + let pdu: PduEvent = serde_json::from_value(event.into()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid knock event PDU."))?; + + let mutex_lock = services + .rooms + .event_handler + .mutex_federation + .lock(&body.room_id) + .await; + + let pdu_id = services + .rooms + .event_handler + .handle_incoming_pdu(&origin, &body.room_id, &event_id, value.clone(), true) + .await? + .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; + + drop(mutex_lock); + + let knock_room_state = services.rooms.state.summary_stripped(&pdu).await; + + services + .sending + .send_pdu_room(&body.room_id, &pdu_id) + .await?; + + Ok(send_knock::v1::Response { + knock_room_state, + }) +} diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index eddab2fe..4bba1455 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -36,45 +36,51 @@ use crate::{err, error::Error, utils::sys, Result}; filename = "conduwuit-example.toml", section = "global", undocumented = "# This item is undocumented. 
Please contribute documentation for it.", - header = "### Conduwuit Configuration\n###\n### THIS FILE IS GENERATED. YOUR CHANGES WILL BE OVERWRITTEN!\n### \ - You should rename this file before configuring your server. Changes\n### to documentation and defaults \ - can be contributed in sourcecode at\n### src/core/config/mod.rs. This file is generated when \ - building.\n###\n", + header = "### conduwuit Configuration\n###\n### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL\n### BE \ + OVERWRITTEN!\n###\n### You should rename this file before configuring your server. Changes\n### to \ + documentation and defaults can be contributed in source code at\n### src/core/config/mod.rs. This file \ + is generated when building.\n###\n### Any values pre-populated are the default values for said config \ + option.\n###\n### At the minimum, you MUST edit all the config options to your environment\n### that say \ + \"YOU NEED TO EDIT THIS\".\n### See https://conduwuit.puppyirl.gay/configuration.html for ways to\n### configure conduwuit\n", ignore = "catchall well_known tls" )] pub struct Config { /// The server_name is the pretty name of this server. It is used as a - /// suffix for user and room ids. Examples: matrix.org, conduit.rs + /// suffix for user and room IDs/aliases. /// - /// The Conduit server needs all /_matrix/ requests to be reachable at - /// https://your.server.name/ on port 443 (client-server) and 8448 (federation). + /// See the docs for reverse proxying and delegation: https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy + /// Also see the `[global.well_known]` config section at the very bottom. /// - /// If that's not possible for you, you can create /.well-known files to - /// redirect requests (delegation). See - /// https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient - /// and - /// https://spec.matrix.org/v1.9/server-server-api/#getwell-knownmatrixserver - /// for more information. 
+ /// Examples of delegation: + /// - https://puppygock.gay/.well-known/matrix/server + /// - https://puppygock.gay/.well-known/matrix/client /// - /// YOU NEED TO EDIT THIS + /// YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE + /// WIPE. + /// + /// example: "conduwuit.woof" pub server_name: OwnedServerName, - /// default address (IPv4 or IPv6) conduwuit will listen on. Generally you - /// want this to be localhost (127.0.0.1 / ::1). If you are using Docker or - /// a container NAT networking setup, you likely need this to be 0.0.0.0. - /// To listen multiple addresses, specify a vector e.g. ["127.0.0.1", "::1"] - /// Default if unspecified is both IPv4 and IPv6 localhost. + /// default address (IPv4 or IPv6) conduwuit will listen on. + /// + /// If you are using Docker or a container NAT networking setup, this must + /// be "0.0.0.0". + /// + /// To listen on multiple addresses, specify a vector e.g. ["127.0.0.1", + /// "::1"] /// /// default: ["127.0.0.1", "::1"] #[serde(default = "default_address")] address: ListeningAddr, - /// The port(s) conduwuit will be running on. You need to set up a reverse - /// proxy such as Caddy or Nginx so all requests to /_matrix on port 443 - /// and 8448 will be forwarded to the conduwuit instance running on this - /// port Docker users: Don't change this, you'll need to map an external - /// port to this. To listen on multiple ports, specify a vector e.g. [8080, - /// 8448] + /// The port(s) conduwuit will be running on. + /// + /// See https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy for reverse proxying. + /// + /// Docker users: Don't change this, you'll need to map an external port to + /// this. + /// + /// To listen on multiple ports, specify a vector e.g. 
[8080, 8448] /// /// default: 8008 #[serde(default = "default_port")] @@ -84,108 +90,155 @@ pub struct Config { pub tls: Option, /// Uncomment unix_socket_path to listen on a UNIX socket at the specified - /// path. If listening on a UNIX socket, you must remove/comment the - /// 'address' key if defined and add your reverse proxy to the 'conduwuit' + /// path. If listening on a UNIX socket, you MUST remove/comment the + /// 'address' key if defined, AND add your reverse proxy to the 'conduwuit' /// group, unless world RW permissions are specified with unix_socket_perms /// (666 minimum). + /// + /// example: "/run/conduwuit/conduwuit.sock" pub unix_socket_path: Option, + /// The default permissions (in octal) to create the UNIX socket with. + /// /// default: 660 #[serde(default = "default_unix_socket_perms")] pub unix_socket_perms: u32, - /// Database backend: Only rocksdb is supported. - /// - /// default: rocksdb - #[serde(default = "default_database_backend")] - pub database_backend: String, - /// This is the only directory where conduwuit will save its data, including - /// media. Note: this was previously "/var/lib/matrix-conduit" + /// media. + /// Note: this was previously "/var/lib/matrix-conduit" + /// + /// YOU NEED TO EDIT THIS. + /// + /// example: "/var/lib/conduwuit" pub database_path: PathBuf, + /// conduwuit supports online database backups using RocksDB's Backup engine + /// API. To use this, set a database backup path that conduwuit can write + /// to. + /// + /// See https://conduwuit.puppyirl.gay/maintenance.html#backups for more information. + /// + /// example: "/opt/conduwuit-db-backups" pub database_backup_path: Option, + /// The amount of online RocksDB database backups to keep/retain, if using + /// "database_backup_path", before deleting the oldest one.
+ /// + /// default: 1 #[serde(default = "default_database_backups_to_keep")] pub database_backups_to_keep: i16, /// Set this to any float value in megabytes for conduwuit to tell the /// database engine that this much memory is available for database-related - /// caches. May be useful if you have significant memory to spare to - /// increase performance. + /// caches. /// - /// default: 256.0 + /// May be useful if you have significant memory to spare to increase + /// performance. + /// + /// Similar to the individual LRU caches, this is scaled up with your CPU + /// core count. + /// + /// This defaults to 128.0 + (64.0 * CPU core count) #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, /// Option to control adding arbitrary text to the end of the user's /// displayname upon registration with a space before the text. This was the /// lightning bolt emoji option, just replaced with support for adding your - /// own custom text or emojis. To disable, set this to "" (an empty string) - /// Defaults to "🏳️⚧️" (trans pride flag) + /// own custom text or emojis. To disable, set this to "" (an empty string). /// - /// default: 🏳️⚧️ + /// The default is the trans pride flag. + /// + /// example: "🏳️⚧️" + /// + /// default: "🏳️⚧️" #[serde(default = "default_new_user_displayname_suffix")] pub new_user_displayname_suffix: String, /// If enabled, conduwuit will send a simple GET request periodically to /// `https://pupbrain.dev/check-for-updates/stable` for any new /// announcements made. Despite the name, this is not an update check - /// endpoint, it is simply an announcement check endpoint. Defaults to - /// false. + /// endpoint, it is simply an announcement check endpoint. + /// + /// This is disabled by default as this is rarely used except for security + /// updates or major updates. 
#[serde(default)] pub allow_check_for_updates: bool, - #[serde(default = "default_pdu_cache_capacity")] - pub pdu_cache_capacity: u32, - /// Set this to any float value to multiply conduwuit's in-memory LRU caches - /// with. May be useful if you have significant memory to spare to increase + /// with such as "auth_chain_cache_capacity". + /// + /// May be useful if you have significant memory to spare to increase /// performance. This was previously called /// `conduit_cache_capacity_modifier`. /// - /// default: 1.0. + /// If you have low memory, reducing this may be viable. + /// + /// By default, the individual caches such as "auth_chain_cache_capacity" + /// are scaled by your CPU core count. + /// + /// default: 1.0 #[serde(default = "default_cache_capacity_modifier", alias = "conduit_cache_capacity_modifier")] pub cache_capacity_modifier: f64, + /// default: varies by system + #[serde(default = "default_pdu_cache_capacity")] + pub pdu_cache_capacity: u32, + + /// default: varies by system #[serde(default = "default_auth_chain_cache_capacity")] pub auth_chain_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_shorteventid_cache_capacity")] pub shorteventid_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_eventidshort_cache_capacity")] pub eventidshort_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_shortstatekey_cache_capacity")] pub shortstatekey_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_statekeyshort_cache_capacity")] pub statekeyshort_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_server_visibility_cache_capacity")] pub server_visibility_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_user_visibility_cache_capacity")] pub user_visibility_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_stateinfo_cache_capacity")] pub 
stateinfo_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_roomid_spacehierarchy_cache_capacity")] pub roomid_spacehierarchy_cache_capacity: u32, /// Maximum entries stored in DNS memory-cache. The size of an entry may /// vary so please take care if raising this value excessively. Only /// decrease this when using an external DNS cache. Please note - /// that systemd does *not* count as an external cache, even when configured - /// to do so. + /// that systemd-resolved does *not* count as an external cache, even when + /// configured to do so. + /// + /// default: 32768 #[serde(default = "default_dns_cache_entries")] pub dns_cache_entries: u32, /// Minimum time-to-live in seconds for entries in the DNS cache. The - /// default may appear high to most administrators; this is by design. Only - /// decrease this if you are using an external DNS cache. + /// default may appear high to most administrators; this is by design as the + /// majority of NXDOMAINs are correct for a long time (e.g. the server is no + /// longer running Matrix). Only decrease this if you are using an external + /// DNS cache. + /// + /// default: 259200 #[serde(default = "default_dns_min_ttl")] pub dns_min_ttl: u64, @@ -195,16 +248,23 @@ pub struct Config { /// and aggressively cached rather than constantly rechecked. /// /// Defaults to 3 days as these are *very rarely* false negatives. + /// + /// default: 259200 #[serde(default = "default_dns_min_ttl_nxdomain")] pub dns_min_ttl_nxdomain: u64, /// Number of retries after a timeout. + /// + /// default: 10 #[serde(default = "default_dns_attempts")] pub dns_attempts: u16, /// The number of seconds to wait for a reply to a DNS query. Please note /// that recursive queries can take up to several seconds for some domains, - /// so this value should not be too low. + /// so this value should not be too low, especially on slower hardware or + /// resolvers.
+ /// + /// default: 10 #[serde(default = "default_dns_timeout")] pub dns_timeout: u64, @@ -223,8 +283,7 @@ pub struct Config { /// Enables using *only* TCP for querying your specified nameservers instead /// of UDP. /// - /// You very likely do *not* want this. hickory-resolver already falls back - /// to TCP on UDP errors. Defaults to false + /// If you are running conduwuit in a container environment, this config option may need to be enabled. See https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker for more details. #[serde(default)] pub query_over_tcp_only: bool, @@ -232,30 +291,34 @@ pub struct Config { /// /// Takes a number of one of the following options: /// 1 - Ipv4Only (Only query for A records, no AAAA/IPv6) + /// /// 2 - Ipv6Only (Only query for AAAA records, no A/IPv4) + /// /// 3 - Ipv4AndIpv6 (Query for A and AAAA records in parallel, uses whatever - /// returns a successful response first) 4 - Ipv6thenIpv4 (Query for AAAA - /// record, if that fails then query the A record) 5 - Ipv4thenIpv6 (Query - /// for A record, if that fails then query the AAAA record) + /// returns a successful response first) /// - /// If you don't have IPv6 networking, then for better performance it may be - /// suitable to set this to Ipv4Only (1) as you will never ever use the - /// AAAA record contents even if the AAAA record is successful instead of - /// the A record. + /// 4 - Ipv6thenIpv4 (Query for AAAA record, if that fails then query the A + /// record) /// - /// Defaults to 5 - Ipv4ThenIpv6 as this is the most compatible and IPv4 - /// networking is currently the most prevalent. + /// 5 - Ipv4thenIpv6 (Query for A record, if that fails then query the AAAA + /// record) + /// + /// If you don't have IPv6 networking, then for better DNS performance it + /// may be suitable to set this to Ipv4Only (1) as you will never ever use + /// the AAAA record contents even if the AAAA record is successful instead + /// of the A record. 
/// /// default: 5 #[serde(default = "default_ip_lookup_strategy")] pub ip_lookup_strategy: u8, - /// Max request size for file uploads + /// Max request size for file uploads in bytes. Defaults to 20MB. /// /// default: 20971520 #[serde(default = "default_max_request_size")] pub max_request_size: usize, + /// default: 192 #[serde(default = "default_max_fetch_prev_events")] pub max_fetch_prev_events: u16, @@ -365,7 +428,7 @@ pub struct Config { /// Notification gateway pusher idle connection pool timeout /// - /// Defaults to 15 seconds + /// default: 15 #[serde(default = "default_pusher_idle_timeout")] pub pusher_idle_timeout: u64, @@ -373,7 +436,7 @@ pub struct Config { /// server. /// /// If set to true without a token configured, users can register with no - /// form of 2nd- step only if you set + /// form of 2nd-step only if you set /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to /// true in your config. /// @@ -387,21 +450,27 @@ pub struct Config { /// A static registration token that new users will have to provide when /// creating an account. If unset and `allow_registration` is true, - /// registration is open without any condition. YOU NEED TO EDIT THIS. + /// registration is open without any condition. + /// + /// YOU NEED TO EDIT THIS OR USE registration_token_file. + /// + /// example: "o&^uCtes4HPf0Vu@F20jQeeWE7" pub registration_token: Option, - /// Path to a file on the system that gets read for the registration token + /// Path to a file on the system that gets read for the registration token. + /// this config option takes precedence/priority over "registration_token". /// /// conduwuit must be able to access the file, and it must not be empty /// - /// no default + /// example: "/etc/conduwuit/.reg_token" pub registration_token_file: Option, /// Controls whether encrypted rooms and events are allowed. #[serde(default = "true_fn")] pub allow_encryption: bool, - /// Controls whether federation is allowed or not. 
+ /// Controls whether federation is allowed or not. It is not recommended to + /// disable this after the fact due to potential federation breakage. #[serde(default = "true_fn")] pub allow_federation: bool, @@ -433,25 +502,25 @@ pub struct Config { /// allow guests/unauthenticated users to access TURN credentials /// /// this is the equivalent of Synapse's `turn_allow_guests` config option. - /// this allows any unauthenticated user to call + /// this allows any unauthenticated user to call the endpoint /// `/_matrix/client/v3/voip/turnServer`. /// - /// defaults to false + /// It is unlikely you need to enable this as all major clients support + /// authentication for this endpoint and prevents misuse of your TURN server + /// from potential bots. #[serde(default)] pub turn_allow_guests: bool, /// Set this to true to lock down your server's public room directory and /// only allow admins to publish rooms to the room directory. Unpublishing /// is still allowed by all users with this enabled. - /// - /// Defaults to false #[serde(default)] pub lockdown_public_room_directory: bool, /// Set this to true to allow federating device display names / allow /// external users to see your device display name. If federation is /// disabled entirely (`allow_federation`), this is inherently false. For - /// privacy, this is best disabled. + /// privacy reasons, this is best left disabled. #[serde(default)] pub allow_device_name_federation: bool, @@ -464,25 +533,29 @@ pub struct Config { /// try to invite you to a DM or room. Also can protect against profile /// spiders. /// - /// Defaults to true. - /// /// This is inherently false if `allow_federation` is disabled #[serde(default = "true_fn", alias = "allow_profile_lookup_federation_requests")] pub allow_inbound_profile_lookup_federation_requests: bool, - /// controls whether users are allowed to create rooms. 
- /// appservices and admins are always allowed to create rooms - /// defaults to true + /// controls whether standard users are allowed to create rooms. appservices + /// and admins are always allowed to create rooms #[serde(default = "true_fn")] pub allow_room_creation: bool, /// Set to false to disable users from joining or creating room versions /// that aren't 100% officially supported by conduwuit. - /// conduwuit officially supports room versions 6 - 10. conduwuit has - /// experimental/unstable support for 3 - 5, and 11. Defaults to true. + /// + /// conduwuit officially supports room versions 6 - 11. + /// + /// conduwuit has slightly experimental (though works fine in practice) + /// support for versions 3 - 5 #[serde(default = "true_fn")] pub allow_unstable_room_versions: bool, + /// default room version conduwuit will create rooms with. + /// + /// per spec, room version 10 is the default. + /// /// default: 10 #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, @@ -498,10 +571,12 @@ pub struct Config { #[serde(default = "default_jaeger_filter")] pub jaeger_filter: String, - /// If the 'perf_measurements' feature is enabled, enables collecting folded - /// stack trace profile of tracing spans using tracing_flame. The resulting - /// profile can be visualized with inferno[1], speedscope[2], or a number of - /// other tools. [1]: https://github.com/jonhoo/inferno + /// If the 'perf_measurements' compile-time feature is enabled, enables + /// collecting folded stack trace profile of tracing spans using + /// tracing_flame. The resulting profile can be visualized with inferno[1], + /// speedscope[2], or a number of other tools. + /// + /// [1]: https://github.com/jonhoo/inferno /// [2]: www.speedscope.app #[serde(default)] pub tracing_flame: bool, @@ -546,8 +621,10 @@ pub struct Config { /// Servers listed here will be used to gather public keys of other servers /// (notary trusted key servers). 
/// - /// (Currently, conduwuit doesn't support batched key requests, so this list - /// should only contain other Synapse servers) Defaults to `matrix.org` + /// Currently, conduwuit doesn't support inbound batched key requests, so + /// this list should only contain other Synapse servers + /// + /// example: ["matrix.org", "constellatory.net", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] @@ -556,9 +633,10 @@ pub struct Config { /// Whether to query the servers listed in trusted_servers first or query /// the origin server first. For best security, querying the origin server /// first is advised to minimize the exposure to a compromised trusted - /// server. For maximum performance this can be set to true, however other - /// options exist to query trusted servers first under specific high-load - /// circumstances and should be evaluated before setting this to true. + /// server. For maximum federation/join performance this can be set to true, + /// however other options exist to query trusted servers first under + /// specific high-load circumstances and should be evaluated before setting + /// this to true. #[serde(default)] pub query_trusted_key_servers_first: bool, @@ -582,7 +660,7 @@ pub struct Config { #[serde(default)] pub only_query_trusted_key_servers: bool, - /// Maximum number of keys to request in each trusted server query. + /// Maximum number of keys to request in each trusted server batch query. /// /// default: 1024 #[serde(default = "default_trusted_server_batch_size")] @@ -590,6 +668,7 @@ pub struct Config { /// max log level for conduwuit. 
allows debug, info, warn, or error /// see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives + /// /// **Caveat**: /// For release builds, the tracing crate is configured to only implement /// levels higher than error to avoid unnecessary overhead in the compiled @@ -601,8 +680,6 @@ pub struct Config { pub log: String, /// controls whether logs will be outputted with ANSI colours - /// - /// default: true #[serde(default = "true_fn", alias = "log_colours")] pub log_colors: bool, @@ -615,40 +692,43 @@ pub struct Config { /// OpenID token expiration/TTL in seconds /// /// These are the OpenID tokens that are primarily used for Matrix account - /// integrations, *not* OIDC/OpenID Connect/etc + /// integrations (e.g. Vector Integrations in Element), *not* OIDC/OpenID + /// Connect/etc /// /// default: 3600 #[serde(default = "default_openid_token_ttl")] pub openid_token_ttl: u64, - /// TURN username to provide the client - /// - /// no default + /// static TURN username to provide the client if not using a shared secret + /// ("turn_secret"). It is recommended to use a shared secret over static + /// credentials. #[serde(default)] pub turn_username: String, - /// TURN password to provide the client - /// - /// no default + /// static TURN password to provide the client if not using a shared secret + /// ("turn_secret"). It is recommended to use a shared secret over static + /// credentials. #[serde(default)] pub turn_password: String, /// vector list of TURN URIs/servers to use /// /// replace "example.turn.uri" with your TURN domain, such as the coturn
if using TURN over TLS, replace the URI prefix + /// "turn:" with "turns:" /// - /// No default - #[serde(default = "Vec::new")] + /// example: ["turn:example.turn.uri?transport=udp", + /// "turn:example.turn.uri?transport=tcp"] + /// + /// default: [] + #[serde(default)] pub turn_uris: Vec, /// TURN secret to use for generating the HMAC-SHA1 hash apart of username /// and password generation /// /// this is more secure, but if needed you can use traditional - /// username/password below. - /// - /// no default + /// static username/password credentials. #[serde(default)] pub turn_secret: String, @@ -657,7 +737,7 @@ pub struct Config { /// this takes priority over "turn_secret" first, and falls back to /// "turn_secret" if invalid or failed to open. /// - /// no default + /// example: "/etc/conduwuit/.turn_secret" pub turn_secret_file: Option, /// TURN TTL in seconds @@ -670,7 +750,10 @@ pub struct Config { /// registered users join. The rooms specified must be rooms that you /// have joined at least once on the server, and must be public. /// - /// No default. + /// example: ["#conduwuit:puppygock.gay", + /// "!eoIzvAvVwY23LPDay8:puppygock.gay"] + /// + /// default: [] #[serde(default = "Vec::new")] pub auto_join_rooms: Vec, @@ -695,15 +778,18 @@ pub struct Config { /// RocksDB log level. This is not the same as conduwuit's log level. This /// is the log level for the RocksDB engine/library which show up in your - /// database folder/path as `LOG` files. Defaults to error. conduwuit will - /// typically log RocksDB errors as normal. + /// database folder/path as `LOG` files. conduwuit will log RocksDB errors + /// as normal through tracing. + /// + /// default: "error" #[serde(default = "default_rocksdb_log_level")] pub rocksdb_log_level: String, #[serde(default)] pub rocksdb_log_stderr: bool, - /// Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB. + /// Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB in + /// bytes. 
/// /// default: 4194304 #[serde(default = "default_rocksdb_max_log_file_size")] @@ -727,13 +813,19 @@ pub struct Config { /// but it cannot account for all setups. If you experience any weird /// RocksDB issues, try enabling this option as it turns off Direct IO and /// feel free to report in the conduwuit Matrix room if this option fixes - /// your DB issues. See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information. + /// your DB issues. + /// + /// See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information. #[serde(default)] pub rocksdb_optimize_for_spinning_disks: bool, - /// Enables direct-io to increase database performance. This is enabled by - /// default. Set this option to false if the database resides on a - /// filesystem which does not support direct-io. + /// Enables direct-io to increase database performance via unbuffered I/O. + /// + /// See https://github.com/facebook/rocksdb/wiki/Direct-IO for more details about Direct IO and RocksDB. + /// + /// Set this option to false if the database resides on a filesystem which + /// does not support direct-io like FUSE, or any form of complex filesystem + /// setup such as possibly ZFS. #[serde(default = "true_fn")] pub rocksdb_direct_io: bool, @@ -746,14 +838,17 @@ pub struct Config { pub rocksdb_parallelism_threads: usize, /// Maximum number of LOG files RocksDB will keep. This must *not* be set to - /// 0. It must be at least 1. Defaults to 3 as these are not very useful. + /// 0. It must be at least 1. Defaults to 3 as these are not very useful + /// unless troubleshooting/debugging a RocksDB bug. /// /// default: 3 #[serde(default = "default_rocksdb_max_log_files")] pub rocksdb_max_log_files: usize, /// Type of RocksDB database compression to use. + /// /// Available options are "zstd", "zlib", "bz2", "lz4", or "none" + /// /// It is best to use ZSTD as an overall good balance between /// speed/performance, storage, IO amplification, and CPU usage. 
/// For more performance but less compression (more storage used) and less @@ -766,10 +861,14 @@ pub struct Config { pub rocksdb_compression_algo: String, /// Level of compression the specified compression algorithm for RocksDB to - /// use. Default is 32767, which is internally read by RocksDB as the + /// use. + /// + /// Default is 32767, which is internally read by RocksDB as the /// default magic number and translated to the library's default /// compression level as they all differ. /// See their `kDefaultCompressionLevel`. + /// + /// default: 32767 #[serde(default = "default_rocksdb_compression_level")] pub rocksdb_compression_level: i32, @@ -783,15 +882,19 @@ pub struct Config { /// it may be desirable to have a very high compression level here as it's /// lesss likely for this data to be used. Research your chosen compression /// algorithm. + /// + /// default: 32767 #[serde(default = "default_rocksdb_bottommost_compression_level")] pub rocksdb_bottommost_compression_level: i32, - /// Whether to enable RocksDB "bottommost_compression". + /// Whether to enable RocksDB's "bottommost_compression". + /// /// At the expense of more CPU usage, this will further compress the /// database to reduce more storage. It is recommended to use ZSTD - /// compression with this for best compression results. See https://github.com/facebook/rocksdb/wiki/Compression for more details. + /// compression with this for best compression results. This may be useful + /// if you're trying to reduce storage usage from the database. /// - /// Defaults to false as this uses more CPU when compressing. + /// See https://github.com/facebook/rocksdb/wiki/Compression for more details. 
#[serde(default)] pub rocksdb_bottommost_compression: bool, @@ -822,9 +925,9 @@ pub struct Config { /// 2 = PointInTime (use me if trying to recover) /// 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) /// - /// See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information + /// See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information on these modes. /// - /// Defaults to 1 (TolerateCorruptedTailRecords) + /// See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. /// /// default: 1 #[serde(default = "default_rocksdb_recovery_mode")] @@ -841,6 +944,8 @@ pub struct Config { /// repair. /// - Disabling repair mode and restarting the server is recommended after /// running the repair. + /// + /// See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. #[serde(default)] pub rocksdb_repair: bool, @@ -862,6 +967,14 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_compaction_ioprio_idle: bool, + /// Config option to disable RocksDB compaction. You should never ever have + /// to disable this. If you for some reason find yourself needing to disable + /// this as part of troubleshooting or a bug, please reach out to us in the + /// conduwuit Matrix room with information and details. + /// + /// Disabling compaction will lead to a significantly bloated and + /// explosively large database, gradually poor performance, unnecessarily + /// excessive disk read/writes, and slower shutdowns and startups. #[serde(default = "true_fn")] pub rocksdb_compaction: bool, @@ -876,33 +989,45 @@ pub struct Config { /// 3 to 5 = Statistics with possible performance impact. /// 6 = All statistics. 
/// - /// Defaults to 1 (No statistics, except in debug-mode) - /// /// default: 1 #[serde(default = "default_rocksdb_stats_level")] pub rocksdb_stats_level: u8, + /// This is a password that can be configured that will let you login to the + /// server bot account (currently `@conduit`) for emergency troubleshooting + /// purposes such as recovering/recreating your admin room, or inviting + /// yourself back. + /// + /// See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. + /// + /// Once this password is unset, all sessions will be logged out for + /// security purposes. + /// + /// example: "F670$2CP@Hw8mG7RY1$%!#Ic7YA" pub emergency_password: Option, + /// default: "/_matrix/push/v1/notify" #[serde(default = "default_notification_push_path")] pub notification_push_path: String, /// Config option to control local (your server only) presence - /// updates/requests. Defaults to true. Note that presence on conduwuit is + /// updates/requests. Note that presence on conduwuit is /// very fast unlike Synapse's. If using outgoing presence, this MUST be /// enabled. #[serde(default = "true_fn")] pub allow_local_presence: bool, /// Config option to control incoming federated presence updates/requests. - /// Defaults to true. This option receives presence updates from other + /// + /// This option receives presence updates from other /// servers, but does not send any unless `allow_outgoing_presence` is true. /// Note that presence on conduwuit is very fast unlike Synapse's. #[serde(default = "true_fn")] pub allow_incoming_presence: bool, - /// Config option to control outgoing presence updates/requests. Defaults to - /// true. This option sends presence updates to other servers, but does not + /// Config option to control outgoing presence updates/requests. + /// + /// This option sends presence updates to other servers, but does not /// receive any unless `allow_incoming_presence` is true. 
/// Note that presence on conduwuit is very fast unlike Synapse's. /// If using outgoing presence, you MUST enable `allow_local_presence` as @@ -986,6 +1111,9 @@ pub struct Config { /// compression may weaken TLS. Most users should not need to enable this. /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before /// deciding to enable this. + /// + /// If you are in a large amount of rooms, you may find that enabling this + /// is necessary to reduce the significantly large response bodies. #[serde(default)] pub gzip_compression: bool, @@ -998,18 +1126,18 @@ pub struct Config { #[serde(default)] pub brotli_compression: bool, - /// Set to true to allow user type "guest" registrations. Element attempts - /// to register guest users automatically. Defaults to false. + /// Set to true to allow user type "guest" registrations. Some clients like + /// Element attempt to register guest users automatically. #[serde(default)] pub allow_guest_registration: bool, - /// Set to true to log guest registrations in the admin room. - /// Defaults to false as it may be noisy or unnecessary. + /// Set to true to log guest registrations in the admin room. Note that + /// these may be noisy or unnecessary if you're a public homeserver. #[serde(default)] pub log_guest_registrations: bool, /// Set to true to allow guest registrations/users to auto join any rooms - /// specified in `auto_join_rooms` Defaults to false. + /// specified in `auto_join_rooms`. #[serde(default)] pub allow_guests_auto_join_rooms: bool, @@ -1033,7 +1161,7 @@ pub struct Config { /// Checks consistency of the media directory at startup: /// 1. When `media_compat_file_link` is enbled, this check will upgrade - /// media when switching back and forth between Conduit and Conduwuit. + /// media when switching back and forth between Conduit and conduwuit. /// Both options must be enabled to handle this. /// 2. 
When media is deleted from the directory, this check will also delete /// its database entry. @@ -1041,8 +1169,6 @@ pub struct Config { /// If none of these checks apply to your use cases, and your media /// directory is significantly large setting this to false may reduce /// startup time. - /// - /// Enabled by default. #[serde(default = "true_fn")] pub media_startup_check: bool, @@ -1051,9 +1177,9 @@ pub struct Config { /// Conduit again. Otherwise setting this to false reduces filesystem /// clutter and overhead for managing these symlinks in the directory. This /// is now disabled by default. You may still return to upstream Conduit - /// but you have to run Conduwuit at least once with this set to true and + /// but you have to run conduwuit at least once with this set to true and /// allow the media_startup_check to take place before shutting - /// down to return to Conduit. Disabled by default. + /// down to return to Conduit. #[serde(default)] pub media_compat_file_link: bool, @@ -1061,14 +1187,16 @@ pub struct Config { /// checks. This means if you delete files from the media directory the /// corresponding entries will be removed from the database. This is /// disabled by default because if the media directory is accidentally moved - /// or inaccessible the metadata entries in the database will be lost with - /// sadness. Disabled by default. + /// or inaccessible, the metadata entries in the database will be lost with + /// sadness. #[serde(default)] pub prune_missing_media: bool, /// Vector list of servers that conduwuit will refuse to download remote - /// media from. No default. - #[serde(default = "HashSet::new")] + /// media from. + /// + /// default: [] + #[serde(default)] pub prevent_media_downloads_from: HashSet, /// List of forbidden server names that we will block incoming AND outgoing @@ -1078,13 +1206,17 @@ pub struct Config { /// sender user's server name, inbound federation X-Matrix origin, and /// outbound federation handler. 
/// - /// Basically "global" ACLs. No default. - #[serde(default = "HashSet::new")] + /// Basically "global" ACLs. + /// + /// default: [] + #[serde(default)] pub forbidden_remote_server_names: HashSet, /// List of forbidden server names that we will block all outgoing federated /// room directory requests for. Useful for preventing our users from - /// wandering into bad servers or spaces. No default. + /// wandering into bad servers or spaces. + /// + /// default: [] #[serde(default = "HashSet::new")] pub forbidden_remote_room_directory_server_names: HashSet, @@ -1100,28 +1232,12 @@ pub struct Config { /// Currently this does not account for proxies in use like Synapse does. /// /// To disable, set this to be an empty vector (`[]`). - /// The default is: - /// [ - /// "127.0.0.0/8", - /// "10.0.0.0/8", - /// "172.16.0.0/12", - /// "192.168.0.0/16", - /// "100.64.0.0/10", - /// "192.0.0.0/24", - /// "169.254.0.0/16", - /// "192.88.99.0/24", - /// "198.18.0.0/15", - /// "192.0.2.0/24", - /// "198.51.100.0/24", - /// "203.0.113.0/24", - /// "224.0.0.0/4", - /// "::1/128", - /// "fe80::/10", - /// "fc00::/7", - /// "2001:db8::/32", - /// "ff00::/8", - /// "fec0::/10", - /// ] + /// + /// default: ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", + /// "192.168.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "169.254.0.0/16", + /// "192.88.99.0/24", "198.18.0.0/15", "192.0.2.0/24", "198.51.100.0/24", + /// "203.0.113.0/24", "224.0.0.0/4", "::1/128", "fe80::/10", "fc00::/7", + /// "2001:db8::/32", "ff00::/8", "fec0::/10"] #[serde(default = "default_ip_range_denylist")] pub ip_range_denylist: Vec, @@ -1132,7 +1248,9 @@ pub struct Config { /// allow all URL previews. Please note that this opens up significant /// attack surface to your server, you are expected to be aware of the /// risks by doing so. 
- #[serde(default = "Vec::new")] + /// + /// default: [] + #[serde(default)] pub url_preview_domain_contains_allowlist: Vec, /// Vector list of explicit domains allowed to send requests to for URL @@ -1143,7 +1261,9 @@ pub struct Config { /// allow all URL previews. Please note that this opens up significant /// attack surface to your server, you are expected to be aware of the /// risks by doing so. - #[serde(default = "Vec::new")] + /// + /// default: [] + #[serde(default)] pub url_preview_domain_explicit_allowlist: Vec, /// Vector list of explicit domains not allowed to send requests to for URL @@ -1152,7 +1272,9 @@ pub struct Config { /// "http://google.com", but not /// "https://mymaliciousdomainexamplegoogle.com". The denylist is checked /// first before allowlist. Setting this to "*" will not do anything. - #[serde(default = "Vec::new")] + /// + /// default: [] + #[serde(default)] pub url_preview_domain_explicit_denylist: Vec, /// Vector list of URLs allowed to send requests to for URL previews. @@ -1164,19 +1286,23 @@ pub struct Config { /// "*" will allow all URL previews. Please note that this opens up /// significant attack surface to your server, you are expected to be /// aware of the risks by doing so. - #[serde(default = "Vec::new")] + /// + /// default: [] + #[serde(default)] pub url_preview_url_contains_allowlist: Vec, /// Maximum amount of bytes allowed in a URL preview body size when - /// spidering. Defaults to 384KB. + /// spidering. Defaults to 384KB in bytes. /// - /// defaukt: 384000 + /// default: 384000 #[serde(default = "default_url_preview_max_spider_size")] pub url_preview_max_spider_size: usize, /// Option to decide whether you would like to run the domain allowlist /// checks (contains and explicit) on the root domain or not. Does not apply - /// to URL contains allowlist. Defaults to false. Example: If this is + /// to URL contains allowlist. Defaults to false. 
+ /// + /// Example use case: If this is /// enabled and you have "wikipedia.org" allowed in the explicit and/or /// contains domain allowlist, it will allow all subdomains under /// "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is /// @@ -1186,21 +1312,36 @@ #[serde(default)] pub url_preview_check_root_domain: bool, - /// List of forbidden room aliases and room IDs as patterns/strings. Values - /// in this list are matched as *contains*. This is checked upon room alias - /// creation, custom room ID creation if used, and startup as warnings if - /// any room aliases in your database have a forbidden room alias/ID. - /// No default. - #[serde(default = "RegexSet::empty")] + /// List of forbidden room aliases and room IDs as strings of regex + /// patterns. + /// + /// Regex can be used or explicit contains matches can be done by + /// just specifying the words (see example). + /// + /// This is checked upon room alias creation, custom room ID creation if + /// used, and startup as warnings if any room aliases in your database have + /// a forbidden room alias/ID. + /// + /// example: ["19dollarfortnitecards", "b[4a]droom"] + /// + /// default: [] + #[serde(default)] #[serde(with = "serde_regex")] pub forbidden_alias_names: RegexSet, - /// List of forbidden username patterns/strings. Values in this list are - /// matched as *contains*. This is checked upon username availability - /// check, registration, and startup as warnings if any local users in your - /// database have a forbidden username. - /// No default. - #[serde(default = "RegexSet::empty")] + /// List of forbidden username patterns/strings. + /// + /// Regex can be used or explicit contains matches can be done by just + /// specifying the words (see example). + /// + /// This is checked upon username availability check, registration, and + /// startup as warnings if any local users in your database have a forbidden + /// username. 
+ /// + /// example: ["administrator", "b[a4]dusernam[3e]"] + /// + /// default: [] + #[serde(default)] #[serde(with = "serde_regex")] pub forbidden_usernames: RegexSet, @@ -1231,9 +1372,12 @@ pub struct Config { #[serde(default)] pub block_non_admin_invites: bool, - /// Allows admins to enter commands in rooms other than #admins by prefixing - /// with \!admin. The reply will be publicly visible to the room, - /// originating from the sender. + /// Allows admins to enter commands in rooms other than "#admins" (admin + /// room) by prefixing your message with "\!admin" or "\\!admin" followed + /// up with a normal conduwuit admin command. The reply will be publicly visible + /// to the room, originating from the sender. + /// + /// example: \\!admin debug ping puppygock.gay #[serde(default = "true_fn")] pub admin_escape_commands: bool, @@ -1246,8 +1390,6 @@ pub struct Config { /// Controls what admin commands will be executed on startup. This is a /// vector list of strings of admin commands to run. /// - /// An example of this can be: `admin_execute = ["debug ping puppygock.gay", - /// "debug echo hi"]` /// /// This option can also be configured with the `--execute` conduwuit /// argument and can take standard shell commands and environment variables /// Such example could be: `./conduwuit --execute "server admin-notice /// conduwuit has started up at $(date)"` /// + /// example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"] + /// + /// default: [] #[serde(default)] pub admin_execute: Vec, @@ -1272,6 +1416,14 @@ pub struct Config { #[serde(default = "default_admin_log_capture")] pub admin_log_capture: String, + /// The default room tag to apply on the admin room. + /// + /// On some clients like Element, the room tag "m.server_notice" is a + /// special pinned room at the very bottom of your room list. 
The conduwuit + /// admin room can be pinned here so you always have an easy-to-access + /// shortcut dedicated to your admin room. + /// + /// default: "m.server_notice" #[serde(default = "default_admin_room_tag")] pub admin_room_tag: String, @@ -1283,12 +1435,11 @@ pub struct Config { /// Sentry reporting URL if a custom one is desired /// - /// Defaults to conduwuit's default Sentry endpoint: - /// "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" + /// default: "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, - /// Report your Conduwuit server_name in Sentry.io crash reports and metrics + /// Report your conduwuit server_name in Sentry.io crash reports and metrics #[serde(default)] pub sentry_send_server_name: bool, @@ -1307,7 +1458,8 @@ pub struct Config { pub sentry_attach_stacktrace: bool, /// Send panics to sentry. This is true by default, but sentry has to be - /// enabled. + /// enabled. The global "sentry" config option must be enabled to send any + /// data. #[serde(default = "true_fn")] pub sentry_send_panic: bool, @@ -1318,13 +1470,14 @@ pub struct Config { pub sentry_send_error: bool, /// Controls the tracing log level for Sentry to send things like - /// breadcrumbs and transactions Defaults to "info" + /// breadcrumbs and transactions + /// + /// default: "info" #[serde(default = "default_sentry_filter")] pub sentry_filter: String, /// Enable the tokio-console. This option is only relevant to developers. - /// See: docs/development.md#debugging-with-tokio-console for more - /// information. + /// See https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console for more information. 
#[serde(default)] pub tokio_console: bool, @@ -1346,18 +1499,33 @@ pub struct Config { #[derive(Clone, Debug, Deserialize)] #[config_example_generator(filename = "conduwuit-example.toml", section = "global.tls")] pub struct TlsConfig { + /// Path to a valid TLS certificate file. + /// + /// example: "/path/to/my/certificate.crt" pub certs: String, + /// Path to a valid TLS certificate private key. + /// + /// example: "/path/to/my/certificate.key" pub key: String, - #[serde(default)] /// Whether to listen and allow for HTTP and HTTPS connections (insecure!) + #[serde(default)] pub dual_protocol: bool, } #[derive(Clone, Debug, Deserialize, Default)] #[config_example_generator(filename = "conduwuit-example.toml", section = "global.well_known")] pub struct WellKnownConfig { - pub client: Option, + /// The server base domain of the URL with a specific port that the server + /// well-known file will serve. This should contain a port at the end, and + /// should not be a URL. + /// + /// example: "matrix.example.com:443" pub server: Option, + /// The server URL that the client well-known file will serve. This should + /// not contain a port, and should just be a valid HTTPS URL. 
+ /// + /// example: "https://matrix.example.com" + pub client: Option, pub support_page: Option, pub support_role: Option, pub support_email: Option, @@ -1460,7 +1628,6 @@ impl fmt::Display for Config { }; line("Server name", self.server_name.host()); - line("Database backend", &self.database_backend); line("Database path", &self.database_path.to_string_lossy()); line( "Database backup path", @@ -1861,8 +2028,6 @@ fn default_unix_socket_perms() -> u32 { 660 } fn default_database_backups_to_keep() -> i16 { 1 } -fn default_database_backend() -> String { "rocksdb".to_owned() } - fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) } fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } diff --git a/src/service/migrations.rs b/src/service/migrations.rs index d6c342f8..4c821fa3 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -59,7 +59,6 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { async fn fresh(services: &Services) -> Result<()> { let db = &services.db; - let config = &services.server.config; services .globals @@ -73,10 +72,7 @@ async fn fresh(services: &Services) -> Result<()> { // Create the admin room and server user on first run crate::admin::create_admin_room(services).boxed().await?; - warn!( - "Created new {} database with version {DATABASE_VERSION}", - config.database_backend, - ); + warn!("Created new RocksDB database with version {DATABASE_VERSION}"); Ok(()) } @@ -201,10 +197,7 @@ async fn migrate(services: &Services) -> Result<()> { } } - info!( - "Loaded {} database with schema version {DATABASE_VERSION}", - config.database_backend, - ); + info!("Loaded RocksDB database with schema version {DATABASE_VERSION}"); Ok(()) } From 4fe47903c24f3f9a0f33e871caecd96e7294dc49 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 10 Nov 2024 20:20:27 -0500 Subject: [PATCH 0214/1248] misc docs changes/improvements from example config Signed-off-by: 
strawberry --- docs/deploying/docker-compose.for-traefik.yml | 1 - docs/deploying/docker-compose.with-caddy.yml | 1 - docs/deploying/docker-compose.yml | 1 - docs/deploying/docker.md | 1 - docs/deploying/generic.md | 14 +++++++++++++- docs/troubleshooting.md | 9 +++++---- 6 files changed, 18 insertions(+), 9 deletions(-) diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index ae93d52f..b4316426 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -14,7 +14,6 @@ services: environment: CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_DATABASE_BACKEND: rocksdb CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 36924212..c080293f 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -30,7 +30,6 @@ services: environment: CONDUWUIT_SERVER_NAME: example.com # EDIT THIS CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_DATABASE_BACKEND: rocksdb CONDUWUIT_PORT: 6167 CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index 26145c5a..3b7d84ed 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -14,7 +14,6 @@ services: environment: CONDUWUIT_SERVER_NAME: your.server.name # EDIT THIS CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_DATABASE_BACKEND: rocksdb CONDUWUIT_PORT: 6167 CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index 
7b8fd1a2..e9c49c71 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -40,7 +40,6 @@ When you have the image you can simply run it with docker run -d -p 8448:6167 \ -v db:/var/lib/conduwuit/ \ -e CONDUWUIT_SERVER_NAME="your.server.name" \ - -e CONDUWUIT_DATABASE_BACKEND="rocksdb" \ -e CONDUWUIT_ALLOW_REGISTRATION=false \ --name conduit $LINK ``` diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 31dc1845..6fe9709b 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -42,6 +42,9 @@ replace the binary / container image / etc. this will **NOT** work on conduwuit and you must configure delegation manually. This is not a mistake and no support for this feature will be added. +If you are using SQLite, you **MUST** migrate to RocksDB. You can use this +tool to migrate from SQLite to RocksDB: <https://github.com/shadowjonathan/conduit_toolbox/> + See the `[global.well_known]` config section, or configure your web server appropriately to send the delegation responses. @@ -137,11 +140,20 @@ You will need to reverse proxy everything under following routes: You can optionally reverse proxy the following individual routes: - `/.well-known/matrix/client` and `/.well-known/matrix/server` if using -conduwuit to perform delegation +conduwuit to perform delegation (see the `[global.well_known]` config section) - `/.well-known/matrix/support` if using conduwuit to send the homeserver admin contact and support page (formerly known as MSC1929) - `/` if you would like to see `hewwo from conduwuit woof!` at the root +See the following spec pages for more details on these files: +- [`/.well-known/matrix/server`](https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixserver) +- [`/.well-known/matrix/client`](https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient) +- [`/.well-known/matrix/support`](https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixsupport) + +Examples of delegation: +- <https://puppygock.gay/.well-known/matrix/server> +- <https://puppygock.gay/.well-known/matrix/client> + ### Caddy Create 
`/etc/caddy/conf.d/conduwuit_caddyfile` and enter this (substitute for diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index c1499f3a..74e19de7 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -47,10 +47,11 @@ and communicate with your host's DNS servers (host's `/etc/resolv.conf`) Some filesystems may not like RocksDB using [Direct IO](https://github.com/facebook/rocksdb/wiki/Direct-IO). Direct IO is for -non-buffered I/O which improves conduwuit performance, but at least FUSE is a -filesystem potentially known to not like this. See the [example -config](configuration/examples.md) for disabling it if needed. Issues from -Direct IO on unsupported filesystems are usually shown as startup errors. +non-buffered I/O which improves conduwuit performance and reduces system CPU +usage, but at least FUSE and possibly ZFS are filesystems potentially known +to not like this. See the [example config](configuration/examples.md) for +disabling it if needed. Issues from Direct IO on unsupported filesystems are +usually shown as startup errors. 
#### Database corruption From 4296d7174f97e380f4d8e28e0ebf7d89c26c9c4a Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 2 Nov 2024 21:25:13 -0400 Subject: [PATCH 0215/1248] add receive_ephemeral check for appservice EDU sending (if it even works) Signed-off-by: strawberry --- src/service/sending/sender.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 464d186b..f4268293 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -524,8 +524,13 @@ impl Service { } }, SendingEvent::Edu(edu) => { - if let Ok(edu) = serde_json::from_slice(edu) { - edu_jsons.push(edu); + if appservice + .receive_ephemeral + .is_some_and(|receive_edus| receive_edus) + { + if let Ok(edu) = serde_json::from_slice(edu) { + edu_jsons.push(edu); + } } }, SendingEvent::Flush => {}, // flush only; no new content From fd2a0024809d5029fc679ff4008f97794fba4075 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 10 Nov 2024 20:30:34 -0500 Subject: [PATCH 0216/1248] dont build sentry or perf_measurements features for complement Signed-off-by: strawberry --- nix/pkgs/complement/default.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index 399c4449..36f12400 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -25,6 +25,7 @@ let "tokio_console" # sentry telemetry isn't useful for complement, disabled by default anyways "sentry_telemetry" + "perf_measurements" # the containers don't use or need systemd signal support "systemd" # this is non-functional on nix for some reason From 4f0bdb5194b8b496618fb8744a128f38b14a02b5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 10 Nov 2024 21:20:38 -0500 Subject: [PATCH 0217/1248] general misc bug fixes and slight improvements Signed-off-by: strawberry --- src/api/client/membership.rs | 21 +++++----- src/api/router/auth.rs | 48 
+++++++++++++---------- src/api/server/invite.rs | 26 ++++++++++++- src/api/server/make_join.rs | 18 ++++----- src/api/server/send_leave.rs | 4 +- src/service/rooms/spaces/mod.rs | 51 +++++++++++-------------- src/service/rooms/state/mod.rs | 10 +++-- src/service/rooms/state_accessor/mod.rs | 41 +++++++++++--------- src/service/sending/send.rs | 2 +- src/service/sending/sender.rs | 6 ++- 10 files changed, 127 insertions(+), 100 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index bde8dee8..10e69f58 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1240,8 +1240,8 @@ async fn make_join_request( ) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> { let mut make_join_response_and_server = Err!(BadServerResponse("No server available to assist in joining.")); - let mut make_join_counter: u16 = 0; - let mut incompatible_room_version_count: u8 = 0; + let mut make_join_counter: usize = 0; + let mut incompatible_room_version_count: usize = 0; for remote_server in servers { if services.globals.server_is_ours(remote_server) { @@ -1264,28 +1264,25 @@ async fn make_join_request( make_join_counter = make_join_counter.saturating_add(1); if let Err(ref e) = make_join_response { - trace!("make_join ErrorKind string: {:?}", e.kind().to_string()); - - // converting to a string is necessary (i think) because ruma is forcing us to - // fill in the struct for M_INCOMPATIBLE_ROOM_VERSION - if e.kind().to_string().contains("M_INCOMPATIBLE_ROOM_VERSION") - || e.kind().to_string().contains("M_UNSUPPORTED_ROOM_VERSION") - { + if matches!( + e.kind(), + ErrorKind::IncompatibleRoomVersion { .. 
} | ErrorKind::UnsupportedRoomVersion + ) { incompatible_room_version_count = incompatible_room_version_count.saturating_add(1); } if incompatible_room_version_count > 15 { info!( "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or M_UNSUPPORTED_ROOM_VERSION, \ - assuming that Conduwuit does not support the room {room_id}: {e}" + assuming that conduwuit does not support the room {room_id}: {e}" ); make_join_response_and_server = Err!(BadServerResponse("Room version is not supported by Conduwuit")); return make_join_response_and_server; } - if make_join_counter > 50 { + if make_join_counter > 40 { warn!( - "50 servers failed to provide valid make_join response, assuming no server can assist in joining." + "40 servers failed to provide valid make_join response, assuming no server can assist in joining." ); make_join_response_and_server = Err!(BadServerResponse("No server available to assist in joining.")); return make_join_response_and_server; diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 2552dded..68abf5e2 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -13,6 +13,7 @@ use ruma::{ profile::{get_avatar_url, get_display_name, get_profile, get_profile_key, get_timezone_key}, voip::get_turn_server_info, }, + federation::openid::get_openid_userinfo, AuthScheme, IncomingRequest, Metadata, }, server_util::authorization::XMatrix, @@ -102,26 +103,6 @@ pub(super) async fn auth( } match (metadata.authentication, token) { - (_, Token::Invalid) => { - // OpenID endpoint uses a query param with the same name, drop this once query - // params for user auth are removed from the spec. This is required to make - // integration manager work. 
- if request.query.access_token.is_some() && request.parts.uri.path().contains("/openid/") { - Ok(Auth { - origin: None, - sender_user: None, - sender_device: None, - appservice_info: None, - }) - } else { - Err(Error::BadRequest( - ErrorKind::UnknownToken { - soft_logout: false, - }, - "Unknown access token.", - )) - } - }, (AuthScheme::AccessToken, Token::Appservice(info)) => Ok(auth_appservice(services, request, info).await?), (AuthScheme::None | AuthScheme::AccessTokenOptional | AuthScheme::AppserviceToken, Token::Appservice(info)) => { Ok(Auth { @@ -132,7 +113,6 @@ pub(super) async fn auth( }) }, (AuthScheme::AccessToken, Token::None) => match metadata { - // TODO: can we check this better? &get_turn_server_info::v3::Request::METADATA => { if services.globals.config.turn_allow_guests { Ok(Auth { @@ -171,6 +151,32 @@ pub(super) async fn auth( ErrorKind::Unauthorized, "Only appservice access tokens should be used on this endpoint.", )), + (AuthScheme::None, Token::Invalid) => { + // OpenID federation endpoint uses a query param with the same name, drop this + // once query params for user auth are removed from the spec. This is + // required to make integration manager work. 
+ if request.query.access_token.is_some() && metadata == &get_openid_userinfo::v1::Request::METADATA { + Ok(Auth { + origin: None, + sender_user: None, + sender_device: None, + appservice_info: None, + }) + } else { + Err(Error::BadRequest( + ErrorKind::UnknownToken { + soft_logout: false, + }, + "Unknown access token.", + )) + } + }, + (_, Token::Invalid) => Err(Error::BadRequest( + ErrorKind::UnknownToken { + soft_logout: false, + }, + "Unknown access token.", + )), } } diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index b30a1b58..edf80cd6 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,5 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; +use base64::{engine::general_purpose, Engine as _}; use conduit::{err, utils, warn, Err, Error, PduEvent, Result}; use ruma::{ api::{client::error::ErrorKind, federation::membership::create_invite}, @@ -125,8 +126,10 @@ pub(crate) async fn create_invite_route( invite_state.push(pdu.to_stripped_state_event()); - // If we are active in the room, the remote server will notify us about the join - // via /send + // If we are active in the room, the remote server will notify us about the + // join/invite through /send. If we are not in the room, we need to manually + // record the invited state for client /sync through update_membership(), and + // send the invite PDU to the relevant appservices. 
if !services .rooms .state_cache @@ -148,6 +151,25 @@ pub(crate) async fn create_invite_route( .await?; } + for appservice in services.appservice.read().await.values() { + if appservice.is_user_match(&invited_user) { + services + .sending + .send_appservice_request( + appservice.registration.clone(), + ruma::api::appservice::event::push_events::v1::Request { + events: vec![pdu.to_room_event()], + txn_id: general_purpose::URL_SAFE_NO_PAD + .encode(utils::calculate_hash(&[pdu.event_id.as_bytes()])) + .into(), + ephemeral: Vec::new(), + to_device: Vec::new(), + }, + ) + .await?; + } + } + Ok(create_invite::v2::Response { event: services .sending diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index af570064..d5ea675e 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -80,6 +80,14 @@ pub(crate) async fn create_join_event_template_route( } let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; + if !body.ver.contains(&room_version_id) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: room_version_id, + }, + "Room version not supported.", + )); + } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; @@ -118,16 +126,6 @@ pub(crate) async fn create_join_event_template_route( None }; - let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; - if !body.ver.contains(&room_version_id) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, - "Room version not supported.", - )); - } - let (_pdu, mut pdu_json) = services .rooms .timeline diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index 448e5de3..e4f41833 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -157,7 +157,5 @@ async fn create_leave_event( .room_servers(room_id) .ready_filter(|server| !services.globals.server_is_ours(server)); - 
services.sending.send_pdu_servers(servers, &pdu_id).await?; - - Ok(()) + services.sending.send_pdu_servers(servers, &pdu_id).await } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 37272dca..0ef7ddf5 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -8,7 +8,7 @@ use std::{ }; use conduit::{ - checked, debug, debug_info, err, + checked, debug_info, err, utils::{math::usize_from_f64, IterStream}, Error, Result, }; @@ -234,27 +234,25 @@ impl Service { }); } - Ok( - if let Some(children_pdus) = self.get_stripped_space_child_events(current_room).await? { - let summary = self - .get_room_summary(current_room, children_pdus, &identifier) - .await; - if let Ok(summary) = summary { - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { - summary: summary.clone(), - }), - ); + if let Some(children_pdus) = self.get_stripped_space_child_events(current_room).await? { + let summary = self + .get_room_summary(current_room, children_pdus, &identifier) + .await; + if let Ok(summary) = summary { + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.clone(), + Some(CachedSpaceHierarchySummary { + summary: summary.clone(), + }), + ); - Some(SummaryAccessibility::Accessible(Box::new(summary))) - } else { - None - } + Ok(Some(SummaryAccessibility::Accessible(Box::new(summary)))) } else { - None - }, - ) + Ok(None) + } + } else { + Ok(None) + } } /// Gets the summary of a space using solely federation @@ -393,7 +391,7 @@ impl Service { .is_accessible_child(current_room, &join_rule.clone().into(), identifier, &allowed_room_ids) .await { - debug!("User is not allowed to see room {room_id}"); + debug_info!("User is not allowed to see room {room_id}"); // This error will be caught later return Err(Error::BadRequest(ErrorKind::forbidden(), "User is not allowed to see the room")); } @@ -615,16 +613,13 @@ impl Service { &self, current_room: 
&OwnedRoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>, allowed_room_ids: &Vec, ) -> bool { - // Note: unwrap_or_default for bool means false match identifier { Identifier::ServerName(server_name) => { - let room_id: &RoomId = current_room; - // Checks if ACLs allow for the server to participate if self .services .event_handler - .acl_check(server_name, room_id) + .acl_check(server_name, current_room) .await .is_err() { @@ -645,8 +640,9 @@ impl Service { return true; } }, - } // Takes care of join rules - match join_rule { + } + match &join_rule { + SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true, SpaceRoomJoinRule::Restricted => { for room in allowed_room_ids { match identifier { @@ -664,7 +660,6 @@ impl Service { } false }, - SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true, // Invite only, Private, or Custom join rule _ => false, } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 71a3900c..7d8200f0 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -295,20 +295,22 @@ impl Service { } #[tracing::instrument(skip_all, level = "debug")] - pub async fn summary_stripped(&self, invite: &PduEvent) -> Vec> { + pub async fn summary_stripped(&self, event: &PduEvent) -> Vec> { let cells = [ (&StateEventType::RoomCreate, ""), (&StateEventType::RoomJoinRules, ""), (&StateEventType::RoomCanonicalAlias, ""), (&StateEventType::RoomName, ""), (&StateEventType::RoomAvatar, ""), - (&StateEventType::RoomMember, invite.sender.as_str()), // Add recommended events + (&StateEventType::RoomMember, event.sender.as_str()), // Add recommended events + (&StateEventType::RoomEncryption, ""), + (&StateEventType::RoomTopic, ""), ]; let fetches = cells.iter().map(|(event_type, state_key)| { self.services .state_accessor - .room_state_get(&invite.room_id, event_type, state_key) + .room_state_get(&event.room_id, 
event_type, state_key) }); join_all(fetches) @@ -316,7 +318,7 @@ impl Service { .into_iter() .filter_map(Result::ok) .map(|e| e.to_stripped_state_event()) - .chain(once(invite.to_stripped_state_event())) + .chain(once(event.to_stripped_state_event())) .collect() } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index d51da8af..4958c4ea 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -10,7 +10,7 @@ use conduit::{ err, error, pdu::PduBuilder, utils::{math::usize_from_f64, ReadyExt}, - Error, PduEvent, Result, + Err, Error, Event, PduEvent, Result, }; use futures::StreamExt; use lru_cache::LruCache; @@ -29,7 +29,7 @@ use ruma::{ power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, topic::RoomTopicEventContent, }, - StateEventType, + StateEventType, TimelineEventType, }, room::RoomType, space::SpaceRoomJoinRule, @@ -408,34 +408,41 @@ impl Service { pub async fn user_can_redact( &self, redacts: &EventId, sender: &UserId, room_id: &RoomId, federation: bool, ) -> Result { - if let Ok(event) = self + let redacting_event = self.services.timeline.get_pdu(redacts).await; + + if redacting_event + .as_ref() + .is_ok_and(|event| event.event_type() == &TimelineEventType::RoomCreate) + { + return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); + } + + if let Ok(pl_event_content) = self .room_state_get_content::(room_id, &StateEventType::RoomPowerLevels, "") .await { - let event: RoomPowerLevels = event.into(); - Ok(event.user_can_redact_event_of_other(sender) - || event.user_can_redact_own_event(sender) - && if let Ok(pdu) = self.services.timeline.get_pdu(redacts).await { + let pl_event: RoomPowerLevels = pl_event_content.into(); + Ok(pl_event.user_can_redact_event_of_other(sender) + || pl_event.user_can_redact_own_event(sender) + && if let Ok(redacting_event) = redacting_event { if federation { - pdu.sender.server_name() == 
sender.server_name() + redacting_event.sender.server_name() == sender.server_name() } else { - pdu.sender == sender + redacting_event.sender == sender } } else { false }) } else { // Falling back on m.room.create to judge power level - if let Ok(pdu) = self + if let Ok(room_create) = self .room_state_get(room_id, &StateEventType::RoomCreate, "") .await { - Ok(pdu.sender == sender - || if let Ok(pdu) = self.services.timeline.get_pdu(redacts).await { - pdu.sender == sender - } else { - false - }) + Ok(room_create.sender == sender + || redacting_event + .as_ref() + .is_ok_and(|redacting_event| redacting_event.sender == sender)) } else { Err(Error::bad_database( "No m.room.power_levels or m.room.create events in database for room", @@ -454,7 +461,7 @@ impl Service { /// Returns an empty vec if not a restricted room pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { - let mut room_ids = vec![]; + let mut room_ids = Vec::with_capacity(1); if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { for rule in r.allow { if let AllowRule::RoomMembership(RoomMembership { diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 5bf48aaa..6a8f1b1b 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -39,7 +39,7 @@ impl super::Service { .forbidden_remote_server_names .contains(dest) { - return Err!(Request(Forbidden(debug_warn!("Federation with this {dest} is not allowed.")))); + return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } let actual = self.services.resolver.get_actual_dest(dest).await?; diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index f4268293..f5d87504 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -235,13 +235,15 @@ impl Service { fn select_events_current(&self, dest: Destination, statuses: &mut CurTransactionStatus) -> Result<(bool, bool)> { let (mut allow, mut retry) = (true, false); 
statuses - .entry(dest) + .entry(dest.clone()) // TODO: can we avoid cloning? .and_modify(|e| match e { TransactionStatus::Failed(tries, time) => { // Fail if a request has failed recently (exponential backoff) let min = self.server.config.sender_timeout; let max = self.server.config.sender_retry_backoff_limit; - if continue_exponential_backoff_secs(min, max, time.elapsed(), *tries) { + if continue_exponential_backoff_secs(min, max, time.elapsed(), *tries) + && !matches!(dest, Destination::Appservice(_)) + { allow = false; } else { retry = true; From 72fb8371f9828b5c039883467ea35e3b41b4b42c Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 13 Nov 2024 17:08:16 -0500 Subject: [PATCH 0218/1248] link to migrating from conduit on the README Signed-off-by: strawberry --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 962139d6..4e97f1f0 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/bad Visit the [conduwuit documentation](https://conduwuit.puppyirl.gay/) for more -information. +information and how to deploy/setup conduwuit. @@ -63,7 +63,9 @@ and we have no plans in stopping or slowing down any time soon! conduwuit is a complete drop-in replacement for Conduit. As long as you are using RocksDB, the only "migration" you need to do is replace the binary or container image. There -is no harm or additional steps required for using conduwuit. +is no harm or additional steps required for using conduwuit. See the +[Migrating from Conduit](https://conduwuit.puppyirl.gay/deploying/generic.html#migrating-from-conduit) section +on the generic deploying guide. 
From 011d44b749bc206732572c826b4ca9ae2a6111e3 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 13 Nov 2024 20:06:25 -0500 Subject: [PATCH 0219/1248] add missing declared support for MSC3952 Signed-off-by: strawberry --- src/api/client/unversioned.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index d5bb14e5..3aee30c8 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -53,6 +53,7 @@ pub(crate) async fn get_supported_versions_route( ("org.matrix.msc2946".to_owned(), true), /* spaces/hierarchy summaries (https://github.com/matrix-org/matrix-spec-proposals/pull/2946) */ ("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */ ("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */ + ("org.matrix.msc3952_intentional_mentions".to_owned(), true), /* intentional mentions (https://github.com/matrix-org/matrix-spec-proposals/pull/3952) */ ("org.matrix.msc3575".to_owned(), true), /* sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/3575/files#r1588877046) */ ("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */ ("org.matrix.msc4180".to_owned(), true), /* stable flag for 3916 (https://github.com/matrix-org/matrix-spec-proposals/pull/4180) */ From 44a7ac07036915263f93e43f572dca8446d7ef9f Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 09:40:04 -0500 Subject: [PATCH 0220/1248] add debug_assert is_sorted for inline content types Signed-off-by: strawberry --- src/core/utils/content_disposition.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/core/utils/content_disposition.rs b/src/core/utils/content_disposition.rs index a2fe923c..3a264a74 100644 --- 
a/src/core/utils/content_disposition.rs +++ b/src/core/utils/content_disposition.rs @@ -45,9 +45,10 @@ pub fn content_disposition_type(content_type: Option<&str>) -> ContentDispositio return ContentDispositionType::Attachment; }; - // is_sorted is unstable - /* debug_assert!(ALLOWED_INLINE_CONTENT_TYPES.is_sorted(), - * "ALLOWED_INLINE_CONTENT_TYPES is not sorted"); */ + debug_assert!( + ALLOWED_INLINE_CONTENT_TYPES.is_sorted(), + "ALLOWED_INLINE_CONTENT_TYPES is not sorted" + ); let content_type: Cow<'_, str> = content_type .split(';') From dac1a01216e53c4aca4e54057db487dc7682f30a Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 09:43:58 -0500 Subject: [PATCH 0221/1248] update generated example config Signed-off-by: strawberry --- conduwuit-example.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index aa0d1e5d..2f3da71f 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -592,6 +592,10 @@ # #log_colors = true +# configures the span events which will be outputted with the log +# +#log_span_events = "none" + # OpenID token expiration/TTL in seconds # # These are the OpenID tokens that are primarily used for Matrix account From 3f69f2ee73960a8cc0f0a3f7a2e1202ad43928e6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 09:44:29 -0500 Subject: [PATCH 0222/1248] replace deprecated sha-1 crate, try to reduce some unnecessary crates/features Signed-off-by: strawberry --- Cargo.lock | 49 +++++------------------------------- Cargo.toml | 62 +++++++++++++++++++++++++++------------------- src/api/Cargo.toml | 2 +- 3 files changed, 43 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e1845da..65eab0b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -676,7 +676,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "sha-1", + "sha1", "tokio", "tracing", ] @@ -720,7 +720,7 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - 
"thiserror 1.0.69", + "thiserror 2.0.3", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -1753,9 +1753,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1766,7 +1766,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -2080,11 +2079,9 @@ checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ "base64 0.21.7", "js-sys", - "pem", "ring", "serde", "serde_json", - "simple_asn1", ] [[package]] @@ -2662,16 +2659,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "pem" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" -dependencies = [ - "base64 0.22.1", - "serde", -] - [[package]] name = "percent-encoding" version = "2.3.1" @@ -3535,11 +3522,10 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "sanitize-filename" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ed72fbaf78e6f2d41744923916966c4fbe3d7c74e3037a8ee482f1115572603" +checksum = "bc984f4f9ceb736a7bb755c3e3bd17dc56370af2600c9780dcc48c66453da34d" dependencies = [ - "lazy_static", "regex", ] @@ -3827,17 +3813,6 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - [[package]] name = "sha1" version = "0.10.6" @@ 
-3920,18 +3895,6 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint", - "num-traits", - "thiserror 1.0.69", - "time", -] - [[package]] name = "siphasher" version = "0.3.11" diff --git a/Cargo.toml b/Cargo.toml index dde005a3..a84ff79f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,20 +46,20 @@ default-features = false features = ["parse"] [workspace.dependencies.sanitize-filename] -version = "0.5.0" +version = "0.6.0" [workspace.dependencies.jsonwebtoken] version = "9.3.0" +default-features = false [workspace.dependencies.base64] version = "0.22.1" +default-features = false # used for TURN server authentication [workspace.dependencies.hmac] version = "0.12.1" - -[workspace.dependencies.sha-1] -version = "0.10.1" +default-features = false # used for checking if an IP is in specific subnets / CIDR ranges easier [workspace.dependencies.ipaddress] @@ -70,16 +70,16 @@ version = "0.8.5" # Used for the http request / response body type for Ruma endpoints used with reqwest [workspace.dependencies.bytes] -version = "1.7.2" +version = "1.8.0" [workspace.dependencies.http-body-util] -version = "0.1.1" +version = "0.1.2" [workspace.dependencies.http] version = "1.1.0" [workspace.dependencies.regex] -version = "1.10.6" +version = "1.11.1" [workspace.dependencies.axum] version = "0.7.5" @@ -95,7 +95,7 @@ features = [ ] [workspace.dependencies.axum-extra] -version = "0.9.3" +version = "0.9.4" default-features = false features = ["typed-header", "tracing"] @@ -116,7 +116,7 @@ default-features = false features = ["util"] [workspace.dependencies.tower-http] -version = "0.6.0" +version = "0.6.1" default-features = false features = [ 
"add-extension", @@ -130,6 +130,8 @@ features = [ [workspace.dependencies.rustls] version = "0.23.16" +default-features = false +features = ["aws_lc_rs"] [workspace.dependencies.reqwest] version = "0.12.9" @@ -147,7 +149,7 @@ default-features = false features = ["rc"] [workspace.dependencies.serde_json] -version = "1.0.124" +version = "1.0.132" default-features = false features = ["raw_value"] @@ -189,9 +191,11 @@ version = "0.1.40" default-features = false [workspace.dependencies.tracing-subscriber] version = "0.3.18" -features = ["env-filter"] +default-features = false +features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] [workspace.dependencies.tracing-core] version = "0.1.32" +default-features = false # for URL previews [workspace.dependencies.webpage] @@ -200,12 +204,14 @@ default-features = false # used for conduit's CLI and admin room command parsing [workspace.dependencies.clap] -version = "4.5.20" +version = "4.5.21" default-features = false features = [ "std", "derive", "help", + #"color", Do we need these? 
+ #"unicode", "usage", "error-context", "string", @@ -217,7 +223,7 @@ default-features = false features = ["std", "async-await"] [workspace.dependencies.tokio] -version = "1.40.0" +version = "1.41.1" default-features = false features = [ "fs", @@ -238,7 +244,7 @@ version = "0.8.5" # Validating urls in config, was already a transitive dependency [workspace.dependencies.url] -version = "2.5.0" +version = "2.5.3" default-features = false features = ["serde"] @@ -258,26 +264,23 @@ features = [ ] [workspace.dependencies.hyper-util] -# 0.1.9 and above causes DNS issues -version = "=0.1.8" +version = "0.1.10" default-features = false features = [ - "client", "server-auto", "server-graceful", - "service", "tokio", ] # to support multiple variations of setting a config option [workspace.dependencies.either] -version = "1.11.0" +version = "1.13.0" default-features = false features = ["serde"] # Used for reading the configuration from conduwuit.toml & environment variables [workspace.dependencies.figment] -version = "0.10.18" +version = "0.10.19" default-features = false features = ["env", "toml"] @@ -287,11 +290,13 @@ default-features = false # Used for conduit::Error type [workspace.dependencies.thiserror] -version = "1.0.63" +version = "2.0.3" +default-features = false # Used when hashing the state [workspace.dependencies.ring] version = "0.17.8" +default-features = false # Used to make working with iterators easier, was already a transitive depdendency [workspace.dependencies.itertools] @@ -307,7 +312,7 @@ version = "2.1.1" version = "0.4.0" [workspace.dependencies.async-trait] -version = "0.1.81" +version = "0.1.83" [workspace.dependencies.lru-cache] version = "0.1.2" @@ -363,9 +368,13 @@ features = [ "bzip2", ] -# optional SHA256 media keys feature [workspace.dependencies.sha2] version = "0.10.8" +default-features = false + +[workspace.dependencies.sha1] +version = "0.10.6" +default-features = false # optional opentelemetry, performance measurements, flamegraphs, etc 
for performance measurements and monitoring [workspace.dependencies.opentelemetry] @@ -433,7 +442,8 @@ default-features = false features = ["resource"] [workspace.dependencies.sd-notify] -version = "0.4.1" +version = "0.4.3" +default-features = false [workspace.dependencies.hardened_malloc-rs] version = "0.1.2" @@ -456,12 +466,12 @@ default-features = false version = "0.1" [workspace.dependencies.syn] -version = "2.0.76" +version = "2.0.87" default-features = false features = ["full", "extra-traits"] [workspace.dependencies.quote] -version = "1.0.36" +version = "1.0.37" [workspace.dependencies.proc-macro2] version = "1.0.89" diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 6e37cb40..a0fc09de 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -59,7 +59,7 @@ ruma.workspace = true serde_html_form.workspace = true serde_json.workspace = true serde.workspace = true -sha-1.workspace = true +sha1.workspace = true tokio.workspace = true tracing.workspace = true From b4d809c68157a09bbfd2381bcea04407bf3b3dd2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 09:49:54 -0500 Subject: [PATCH 0223/1248] add more checks for gh pages deployment workflow Signed-off-by: strawberry --- .github/workflows/documentation.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 506a87d9..ea720c43 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -39,7 +39,7 @@ concurrency: jobs: docs: name: Documentation and GitHub Pages - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 permissions: pages: write @@ -57,7 +57,7 @@ jobs: uses: actions/checkout@v4 - name: Setup GitHub Pages - if: github.event_name != 'pull_request' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') && (github.event.pull_request.user.login == 'girlbossceo') uses: 
actions/configure-pages@v5 - uses: nixbuild/nix-quick-install-action@master @@ -139,12 +139,12 @@ jobs: compression-level: 0 - name: Upload generated documentation (book) as GitHub Pages artifact - if: github.event_name != 'pull_request' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') && (github.event.pull_request.user.login == 'girlbossceo') uses: actions/upload-pages-artifact@v3 with: path: public - name: Deploy to GitHub Pages - if: github.event_name != 'pull_request' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') && (github.event.pull_request.user.login == 'girlbossceo') id: deployment uses: actions/deploy-pages@v4 From c1f553cf4f938da469b69bfd37060ca65a827762 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 12:25:47 -0500 Subject: [PATCH 0224/1248] bump rocksdb to v9.7.4, and ruwuma Signed-off-by: strawberry --- Cargo.lock | 55 ++++++++++++++++++------------------ Cargo.toml | 2 +- deps/rust-rocksdb/Cargo.toml | 2 +- flake.lock | 8 +++--- flake.nix | 2 +- 5 files changed, 34 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 65eab0b5..b56005ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,9 +191,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" +checksum = "49c41b948da08fb481a94546cd874843adc1142278b0af4badf9b1b78599d68d" dependencies = [ "async-trait", "axum-core", @@ -257,9 +257,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73c3220b188aea709cf1b6c5f9b01c3bd936bb08bd2b5184a12b35ac8131b1f9" +checksum = "37634d71e9f3c35cfb1c30c87c7cba500d55892f04c2dbe6a99383c664b820b0" 
dependencies = [ "axum", "axum-core", @@ -275,7 +275,6 @@ dependencies = [ "tower 0.5.1", "tower-layer", "tower-service", - "tracing", ] [[package]] @@ -487,9 +486,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8" +checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" dependencies = [ "jobserver", "libc", @@ -1264,9 +1263,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "miniz_oxide", @@ -1740,9 +1739,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", "hyper-util", @@ -3128,7 +3127,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "assign", "js_int", @@ -3150,7 +3149,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "js_int", "ruma-common", @@ -3162,7 +3161,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "as_variant", "assign", @@ -3185,7 +3184,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "as_variant", "base64 0.22.1", @@ -3215,7 +3214,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3239,7 +3238,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "bytes", "http", @@ -3257,7 +3256,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "js_int", "thiserror 2.0.3", @@ -3266,7 +3265,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "js_int", "ruma-common", @@ -3276,7 +3275,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "cfg-if", "once_cell", @@ -3292,7 +3291,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "js_int", "ruma-common", @@ -3304,7 +3303,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "headers", "http", @@ -3317,7 
+3316,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3333,7 +3332,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=67ffedabbf43e1ff6934df0fbf770b21e101406f#67ffedabbf43e1ff6934df0fbf770b21e101406f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" dependencies = [ "futures-util", "itertools 0.13.0", @@ -3348,8 +3347,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.28.0+9.7.3" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=c1e5523eae095a893deaf9056128c7dbc2d5fd73#c1e5523eae095a893deaf9056128c7dbc2d5fd73" +version = "0.29.0+9.7.4" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2bc5495a9f8f75073390c326b47ee5928ab7c7f0#2bc5495a9f8f75073390c326b47ee5928ab7c7f0" dependencies = [ "bindgen", "bzip2-sys", @@ -3365,8 +3364,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.31.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=c1e5523eae095a893deaf9056128c7dbc2d5fd73#c1e5523eae095a893deaf9056128c7dbc2d5fd73" +version = "0.33.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2bc5495a9f8f75073390c326b47ee5928ab7c7f0#2bc5495a9f8f75073390c326b47ee5928ab7c7f0" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index a84ff79f..814a435b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -321,7 +321,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = 
"https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "67ffedabbf43e1ff6934df0fbf770b21e101406f" +rev = "2ab432fba19eb8862c594d24af39d8f9f6b4eac6" features = [ "compat", "rand", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 8c168b24..908a2911 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "c1e5523eae095a893deaf9056128c7dbc2d5fd73" +rev = "2bc5495a9f8f75073390c326b47ee5928ab7c7f0" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index 271a2154..7740e925 100644 --- a/flake.lock +++ b/flake.lock @@ -922,16 +922,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1729712930, - "narHash": "sha256-jlp4kPkRTpoJaUdobEoHd8rCGAQNBy4ZHZ6y5zL/ibw=", + "lastModified": 1731690620, + "narHash": "sha256-Xd4TJYqPERMJLXaGa6r6Ny1Wlw8Uy5Cyf/8q7nS58QM=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "871eda6953c3f399aae39808dcfccdd014885beb", + "rev": "292446aa2bc41699204d817a1e4b091679a886eb", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.7.3", + "ref": "v9.7.4", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 85b7baa0..113757a7 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.7.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.7.4"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From a9c280bd4cd0616c57c9df28a11c3bc48ae8b5ba Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 12:58:02 -0500 Subject: [PATCH 
0225/1248] document NAT hairpinning/loopback if needed Signed-off-by: strawberry --- docs/deploying/generic.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 6fe9709b..f0b85a25 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -68,13 +68,25 @@ sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit ## Forwarding ports in the firewall or the router -conduwuit uses the ports 443 and 8448 both of which need to be open in the -firewall. +Matrix's default federation port is port 8448, and clients must be using port 443. +If you would like to use only port 443, or a different port, you will need to setup +delegation. conduwuit has config options for doing delegation, or you can configure +your reverse proxy to manually serve the necessary JSON files to do delegation +(see the `[global.well_known]` config section). If conduwuit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. +Note for NAT users; if you have trouble connecting to your server from the inside +of your network, you need to research your router and see if it supports "NAT +hairpinning" or "NAT loopback". + +If your router does not support this feature, you need to research doing local +DNS overrides and force your Matrix DNS records to use your local IP internally. +This can be done at the host level using `/etc/hosts`. If you need this to be +on the network level, consider something like NextDNS or Pi-Hole. 
+ ## Setting up a systemd service The systemd unit for conduwuit can be found From c23786d37f207f45632d8288affbcd51bfb5e5c8 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 12:59:05 -0500 Subject: [PATCH 0226/1248] dont try to backfill empty, private rooms Signed-off-by: strawberry --- src/api/client/membership.rs | 2 +- src/service/rooms/timeline/mod.rs | 28 ++++++++++++++++------------ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 10e69f58..c61185a7 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1481,7 +1481,7 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, .await { if let Err(e) = remote_leave_room(services, user_id, room_id).await { - warn!("Failed to leave room {user_id} remotely: {e}"); + warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); // Don't tell the client about this error } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 281879d2..2faa1c40 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1033,6 +1033,22 @@ impl Service { #[tracing::instrument(skip(self))] pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { + if self + .services + .state_cache + .room_joined_count(room_id) + .await + .is_ok_and(|count| count <= 1) + && !self + .services + .state_accessor + .is_world_readable(room_id) + .await + { + // Room is empty (1 user or none), there is no one that can backfill + return Ok(()); + } + let first_pdu = self .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id) .await? 
@@ -1060,20 +1076,8 @@ impl Service { } }); - let room_alias_servers = self - .services - .alias - .local_aliases_for_room(room_id) - .ready_filter_map(|alias| { - self.services - .globals - .server_is_ours(alias.server_name()) - .then_some(alias.server_name()) - }); - let mut servers = room_mods .stream() - .chain(room_alias_servers) .map(ToOwned::to_owned) .chain( self.services From 9783bc78ba096f75db2c529f3e2a7f6cb76f51fe Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 13:58:44 -0500 Subject: [PATCH 0227/1248] remove sentry_telemetry from default features Signed-off-by: strawberry --- src/main/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index b3390bfb..b9122942 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -44,7 +44,6 @@ default = [ "jemalloc", "jemalloc_stats", "release_max_log_level", - "sentry_telemetry", "systemd", "zstd_compression", ] From 666989f74ce8a80b3d24132393c2e2e331fe719c Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 13:19:32 -0500 Subject: [PATCH 0228/1248] delete trivy as lately its been terribly unreliable Signed-off-by: strawberry --- .github/workflows/trivy.yml | 42 ------------------------------------- docs/differences.md | 3 +-- 2 files changed, 1 insertion(+), 44 deletions(-) delete mode 100644 .github/workflows/trivy.yml diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml deleted file mode 100644 index 1f0dd7df..00000000 --- a/.github/workflows/trivy.yml +++ /dev/null @@ -1,42 +0,0 @@ -name: Trivy code and vulnerability scanning - -on: - pull_request: - push: - branches: - - main - tags: - - '*' - schedule: - - cron: '00 12 * * *' - -permissions: - contents: read - -jobs: - trivy-scan: - name: Trivy Scan - runs-on: ubuntu-latest - permissions: - contents: read - security-events: write - actions: read - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Run Trivy code and vulnerability scanner 
on repo - uses: aquasecurity/trivy-action@0.28.0 - with: - scan-type: repo - format: sarif - output: trivy-results.sarif - severity: CRITICAL,HIGH,MEDIUM,LOW - - - name: Run Trivy code and vulnerability scanner on filesystem - uses: aquasecurity/trivy-action@0.28.0 - with: - scan-type: fs - format: sarif - output: trivy-results.sarif - severity: CRITICAL,HIGH,MEDIUM,LOW diff --git a/docs/differences.md b/docs/differences.md index 6815d248..18ea7a1f 100644 --- a/docs/differences.md +++ b/docs/differences.md @@ -241,8 +241,7 @@ both new users and power users - Fixed every single clippy (default lints) and rustc warnings, including some that were performance related or potential safety issues / unsoundness - Add a **lot** of other clippy and rustc lints and a rustfmt.toml file -- Repo uses [Renovate](https://docs.renovatebot.com/), -[Trivy](https://github.com/aquasecurity/trivy-action), and keeps ALL +- Repo uses [Renovate](https://docs.renovatebot.com/) and keeps ALL dependencies as up to date as possible - Purge unmaintained/irrelevant/broken database backends (heed, sled, persy) and other unnecessary code or overhead From f897b4daeea8147a96286e96e6d828d364136dd8 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 13:20:47 -0500 Subject: [PATCH 0229/1248] ci: remove all free runner space steps due to flakiness Signed-off-by: strawberry --- .github/workflows/ci.yml | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2d253f69..f4dcb88f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,21 +67,6 @@ jobs: name: Test runs-on: ubuntu-24.04 steps: - - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main - - - name: Free up more runner space - run: | - set +o pipefail - # large docker images - sudo docker image prune --all --force || true - # large packages - sudo apt-get purge -y 'php.*' '^mongodb-.*' '^mysql-.*' 
azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt-get clean - # large folders - sudo rm -rf /var/lib/apt/lists/* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/local/share/vcpkg /usr/local/julia* /opt/mssql-tools /usr/share/vim /usr/share/postgresql /usr/share/apache-maven-* /usr/share/R /usr/share/alsa /usr/share/miniconda /usr/share/grub /usr/share/gradle-* /usr/share/locale /usr/share/texinfo /usr/share/kotlinc /usr/share/swift /usr/share/sbt /usr/share/ri /usr/share/icons /usr/share/java /usr/share/fonts /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/R /usr/lib/postgresql /usr/lib/heroku - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 @@ -238,9 +223,6 @@ jobs: - target: aarch64-linux-musl - target: x86_64-linux-musl steps: - - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main - - name: Sync repository uses: actions/checkout@v4 @@ -449,6 +431,7 @@ jobs: steps: - name: Sync repository uses: actions/checkout@v4 + - name: Tag comparison check if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} run: | @@ -459,14 +442,17 @@ jobs: echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' 
>> $GITHUB_STEP_SUMMARY exit 1 fi + # use sccache for Rust - name: Run sccache-cache if: (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') uses: mozilla-actions/sccache-action@main + # use rust-cache - uses: Swatinem/rust-cache@v2 with: cache-all-crates: "true" + # Nix can't do portable macOS builds yet - name: Build macOS x86_64 binary if: ${{ matrix.os == 'macos-13' }} @@ -474,22 +460,26 @@ jobs: CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short HEAD)" cargo build --release cp -v -f target/release/conduit conduwuit-macos-x86_64 otool -L conduwuit-macos-x86_64 + # quick smoke test of the x86_64 macOS binary - name: Run x86_64 macOS release binary if: ${{ matrix.os == 'macos-13' }} run: | ./conduwuit-macos-x86_64 --version + - name: Build macOS arm64 binary if: ${{ matrix.os == 'macos-latest' }} run: | CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short HEAD)" cargo build --release cp -v -f target/release/conduit conduwuit-macos-arm64 otool -L conduwuit-macos-arm64 + # quick smoke test of the arm64 macOS binary - name: Run arm64 macOS release binary if: ${{ matrix.os == 'macos-latest' }} run: | ./conduwuit-macos-arm64 --version + - name: Upload macOS x86_64 binary if: ${{ matrix.os == 'macos-13' }} uses: actions/upload-artifact@v4 @@ -497,6 +487,7 @@ jobs: name: conduwuit-macos-x86_64 path: conduwuit-macos-x86_64 if-no-files-found: error + - name: Upload macOS arm64 binary if: ${{ matrix.os == 'macos-latest' }} uses: actions/upload-artifact@v4 From 6b1b464abcecacea98e06040aba679e9bdc3cec9 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 14:48:10 -0500 Subject: [PATCH 0230/1248] add missing knock_restricted room type to /publicRooms Signed-off-by: strawberry --- src/api/client/directory.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/api/client/directory.rs 
b/src/api/client/directory.rs index 6cf7b13f..6120c7b3 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -407,7 +407,8 @@ async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> Public .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { JoinRule::Public => PublicRoomJoinRule::Public, - JoinRule::Knock => PublicRoomJoinRule::Knock, + JoinRule::Knock => "knock".into(), + JoinRule::KnockRestricted(_) => "knock_restricted".into(), _ => "invite".into(), }) .await From 9c95a74d56bf80fb1f09984a9c05b16d25d320da Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 16:35:58 -0500 Subject: [PATCH 0231/1248] fix getting canonical alias server for backfill Signed-off-by: strawberry --- src/service/rooms/timeline/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2faa1c40..59fc8e93 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -4,6 +4,7 @@ use std::{ cmp, collections::{BTreeMap, HashSet}, fmt::Write, + iter::once, sync::Arc, }; @@ -1076,9 +1077,20 @@ impl Service { } }); + let canonical_room_alias_server = once( + self.services + .state_accessor + .get_canonical_alias(room_id) + .await, + ) + .filter_map(Result::ok) + .map(|alias| alias.server_name().to_owned()) + .stream(); + let mut servers = room_mods .stream() .map(ToOwned::to_owned) + .chain(canonical_room_alias_server) .chain( self.services .server From be5a04f47cf1da5629239804601978ddcb2a1db1 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 17:09:36 -0500 Subject: [PATCH 0232/1248] ci: install liburing-dev Signed-off-by: strawberry --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f4dcb88f..96a1188b 100644 --- a/.github/workflows/ci.yml +++ 
b/.github/workflows/ci.yml @@ -67,6 +67,10 @@ jobs: name: Test runs-on: ubuntu-24.04 steps: + - name: Install liburing + run: | + sudo apt install liburing-dev -y + - name: Sync repository uses: actions/checkout@v4 From 4b652f5236f3482316fc8dfc522f705c3b85b586 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 17:50:39 -0500 Subject: [PATCH 0233/1248] ok cargo doc Signed-off-by: strawberry --- src/core/config/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 4bba1455..cb9d087b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1524,7 +1524,7 @@ pub struct WellKnownConfig { /// The server URL that the client well-known file will serve. This should /// not contain a port, and should just be a valid HTTPS URL. /// - /// example: "https://matrix.example.com" + /// example: "" pub client: Option, pub support_page: Option, pub support_role: Option, From 59834a4b05784c6e5e9ba12c5c5cc06f5ba98825 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 14 Nov 2024 22:43:18 +0000 Subject: [PATCH 0234/1248] add is_read_only()/is_secondary() to Engine Signed-off-by: Jason Volk --- src/database/database.rs | 4 ++-- src/database/engine.rs | 8 ++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/database/database.rs b/src/database/database.rs index bf8c8855..40aec312 100644 --- a/src/database/database.rs +++ b/src/database/database.rs @@ -38,11 +38,11 @@ impl Database { #[inline] #[must_use] - pub fn is_read_only(&self) -> bool { self.db.secondary || self.db.read_only } + pub fn is_read_only(&self) -> bool { self.db.is_read_only() } #[inline] #[must_use] - pub fn is_secondary(&self) -> bool { self.db.secondary } + pub fn is_secondary(&self) -> bool { self.db.is_secondary() } } impl Index<&str> for Database { diff --git a/src/database/engine.rs b/src/database/engine.rs index 99d971ed..b57fd75e 100644 --- a/src/database/engine.rs +++ 
b/src/database/engine.rs @@ -274,6 +274,14 @@ impl Engine { result(self.db.property_value_cf(cf, name)) .and_then(|val| val.map_or_else(|| Err!("Property {name:?} not found."), Ok)) } + + #[inline] + #[must_use] + pub fn is_read_only(&self) -> bool { self.secondary || self.read_only } + + #[inline] + #[must_use] + pub fn is_secondary(&self) -> bool { self.secondary } } pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result<()> { From 20836cc3dbc2e22c6d7da99ec199930b6d4c7ad4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 14 Nov 2024 22:44:18 +0000 Subject: [PATCH 0235/1248] flush=false for database-backup in read-only/secondary modes; improve error Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 2 +- src/database/engine.rs | 14 ++++++++------ src/service/globals/data.rs | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index f5879b03..94f695ce 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -107,7 +107,7 @@ pub(super) async fn backup_database(&self) -> Result { .runtime() .spawn_blocking(move || match globals.db.backup() { Ok(()) => String::new(), - Err(e) => (*e).to_string(), + Err(e) => e.to_string(), }) .await?; diff --git a/src/database/engine.rs b/src/database/engine.rs index b57fd75e..1fa53b01 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -17,6 +17,7 @@ use rocksdb::{ use crate::{ opts::{cf_options, db_options}, or_else, result, + util::map_err, }; pub struct Engine { @@ -183,19 +184,20 @@ impl Engine { } #[tracing::instrument(skip(self))] - pub fn backup(&self) -> Result<(), Box> { + pub fn backup(&self) -> Result { let config = &self.server.config; let path = config.database_backup_path.as_ref(); if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { return Ok(()); } - let options = BackupEngineOptions::new(path.expect("valid database backup path"))?; - let mut engine = 
BackupEngine::open(&options, &self.env)?; + let options = BackupEngineOptions::new(path.expect("valid database backup path")).map_err(map_err)?; + let mut engine = BackupEngine::open(&options, &self.env).map_err(map_err)?; if config.database_backups_to_keep > 0 { - if let Err(e) = engine.create_new_backup_flush(&self.db, true) { - return Err(Box::new(e)); - } + let flush = !self.is_read_only(); + engine + .create_new_backup_flush(&self.db, flush) + .map_err(map_err)?; let engine_info = engine.get_backup_info(); let info = &engine_info.last().expect("backup engine info is not empty"); diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index bcfe101e..f715e944 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -73,7 +73,7 @@ impl Data { } #[inline] - pub fn backup(&self) -> Result<(), Box> { self.db.db.backup() } + pub fn backup(&self) -> Result { self.db.db.backup() } #[inline] pub fn backup_list(&self) -> Result { self.db.db.backup_list() } From 5f625216aa027aa000037c26fee461684e25689c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 14 Nov 2024 23:35:53 +0000 Subject: [PATCH 0236/1248] slight optimizations for statediff calculate with_capacity for set/get_statediff() etc Signed-off-by: Jason Volk --- src/service/rooms/state_compressor/mod.rs | 50 +++++++++++++++-------- 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 6b520ad3..06054f0d 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -5,6 +5,7 @@ use std::{ sync::{Arc, Mutex}, }; +use arrayvec::ArrayVec; use conduit::{ at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64}, @@ -37,7 +38,7 @@ struct Data { #[derive(Clone)] struct StateDiff { - parent: Option, + parent: Option, added: Arc, removed: Arc, } @@ -165,17 +166,20 @@ impl Service { } pub async fn compress_state_event(&self, 
shortstatekey: ShortStateKey, event_id: &EventId) -> CompressedStateEvent { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .services - .short - .get_or_create_shorteventid(event_id) - .await - .to_be_bytes(), - ); + const SIZE: usize = size_of::(); - v.try_into().expect("we checked the size above") + let shorteventid = self + .services + .short + .get_or_create_shorteventid(event_id) + .await; + + let mut v = ArrayVec::::new(); + v.extend(shortstatekey.to_be_bytes()); + v.extend(shorteventid.to_be_bytes()); + v.as_ref() + .try_into() + .expect("failed to create CompressedStateEvent") } /// Returns shortstatekey, event id @@ -185,11 +189,12 @@ impl Service { ) -> Result<(ShortStateKey, Arc)> { use utils::u64_from_u8; - let shortstatekey = u64_from_u8(&compressed_event[0..size_of::()]); + let shortstatekey = u64_from_u8(&compressed_event[0..size_of::()]); + let shorteventid = u64_from_u8(&compressed_event[size_of::()..]); let event_id = self .services .short - .get_eventid_from_short(u64_from_u8(&compressed_event[size_of::()..])) + .get_eventid_from_short(shorteventid) .await?; Ok((shortstatekey, event_id)) @@ -415,9 +420,12 @@ impl Service { .ok() .take_if(|parent| *parent != 0); + debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride"); + let num_values = value.len() / STRIDE; + let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); + let mut added = HashSet::with_capacity(num_values); + let mut removed = HashSet::with_capacity(num_values); let mut i = STRIDE; while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { @@ -434,6 +442,8 @@ impl Service { i = expected!(i + 2 * STRIDE); } + added.shrink_to_fit(); + removed.shrink_to_fit(); Ok(StateDiff { parent, added: Arc::new(added), @@ -442,7 +452,15 @@ impl Service { } fn save_statediff(&self, shortstatehash: ShortStateHash, diff: &StateDiff) { - let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); + let mut 
value = Vec::::with_capacity( + 2_usize + .saturating_add(diff.added.len()) + .saturating_add(diff.removed.len()), + ); + + let parent = diff.parent.unwrap_or(0_u64); + value.extend_from_slice(&parent.to_be_bytes()); + for new in diff.added.iter() { value.extend_from_slice(&new[..]); } From 9f7a4a012b38c4e3a59ea72bbb5d291ecdbf37f4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 15 Nov 2024 03:41:08 +0000 Subject: [PATCH 0237/1248] improve tracing/logging for state_compressor Signed-off-by: Jason Volk --- src/service/rooms/state_compressor/mod.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 06054f0d..f0c851de 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -7,7 +7,7 @@ use std::{ use arrayvec::ArrayVec; use conduit::{ - at, checked, err, expected, utils, + at, checked, debug, err, expected, utils, utils::{bytes, math::usize_from_f64}, Result, }; @@ -157,6 +157,13 @@ impl Service { }] }; + debug!( + ?parent, + ?shortstatehash, + vec_len = %response.len(), + "cache update" + ); + self.stateinfo_cache .lock() .expect("locked") @@ -218,7 +225,6 @@ impl Service { /// for this layer /// * `parent_states` - A stack with info on shortstatehash, full state, /// added diff and removed diff for each parent layer - #[tracing::instrument(skip_all, level = "debug")] pub fn save_state_from_diff( &self, shortstatehash: ShortStateHash, statediffnew: Arc>, statediffremoved: Arc>, diff_to_sibling: usize, @@ -335,6 +341,7 @@ impl Service { /// Returns the new shortstatehash, and the state diff from the previous /// room state + #[tracing::instrument(skip(self, new_state_ids_compressed), level = "debug")] pub async fn save_state( &self, room_id: &RoomId, new_state_ids_compressed: Arc>, ) -> Result { @@ -405,6 +412,7 @@ impl Service { }) } + #[tracing::instrument(skip(self), level = "debug", name = 
"get")] async fn get_statediff(&self, shortstatehash: ShortStateHash) -> Result { const BUFSIZE: usize = size_of::(); const STRIDE: usize = size_of::(); From 14e3b242dfafbb31b9a9c58b586989a828a022f3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 15 Nov 2024 03:44:04 +0000 Subject: [PATCH 0238/1248] add database get_batch stream wrapper Signed-off-by: Jason Volk --- src/database/map/get.rs | 22 +++++++++++++++------- src/service/rooms/auth_chain/mod.rs | 12 ++++++------ src/service/rooms/short/mod.rs | 13 +++++++------ 3 files changed, 28 insertions(+), 19 deletions(-) diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 72382e36..2f7df031 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,8 +1,8 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write}; use arrayvec::ArrayVec; -use conduit::{err, implement, Result}; -use futures::future::ready; +use conduit::{err, implement, utils::IterStream, Result}; +use futures::{future::ready, Stream}; use rocksdb::DBPinnableSlice; use serde::Serialize; @@ -50,6 +50,7 @@ where /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. #[implement(super::Map)] +#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] pub fn get(&self, key: &K) -> impl Future>> + Send where K: AsRef<[u8]> + ?Sized + Debug, @@ -61,10 +62,9 @@ where /// The key is referenced directly to perform the query. This is a thread- /// blocking call. 
#[implement(super::Map)] -#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] pub fn get_blocking(&self, key: &K) -> Result> where - K: AsRef<[u8]> + ?Sized + Debug, + K: AsRef<[u8]> + ?Sized, { let res = self .db @@ -76,10 +76,19 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, keys), fields(%self), level = "trace")] -pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> Vec>> +pub fn get_batch<'a, I, K>(&self, keys: I) -> impl Stream>> where I: Iterator + ExactSizeIterator + Send + Debug, - K: AsRef<[u8]> + Sized + Debug + 'a, + K: AsRef<[u8]> + Send + Sync + Sized + Debug + 'a, +{ + self.get_batch_blocking(keys).stream() +} + +#[implement(super::Map)] +pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator>> +where + I: Iterator + ExactSizeIterator + Send, + K: AsRef<[u8]> + Sized + 'a, { // Optimization can be `true` if key vector is pre-sorted **by the column // comparator**. @@ -91,7 +100,6 @@ where .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) .into_iter() .map(into_result_handle) - .collect() } fn into_result_handle(result: RocksdbResult<'_>) -> Result> { diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index c22732c2..cabb6f0c 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use conduit::{debug, debug_error, trace, utils::IterStream, validated, warn, Err, Result}; -use futures::Stream; +use futures::{Stream, StreamExt}; use ruma::{EventId, RoomId}; use self::data::Data; @@ -69,15 +69,15 @@ impl Service { const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); let started = std::time::Instant::now(); - let mut buckets = [BUCKET; NUM_BUCKETS]; - for (i, &short) in self + let mut starting_ids = self .services .short .multi_get_or_create_shorteventid(starting_events) - .await - .iter() .enumerate() - { + .boxed(); + + let mut buckets = [BUCKET; NUM_BUCKETS]; + while let 
Some((i, short)) = starting_ids.next().await { let bucket: usize = short.try_into()?; let bucket: usize = validated!(bucket % NUM_BUCKETS); buckets[bucket].insert((short, starting_events[i])); diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index e8b00d9b..703df796 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -3,6 +3,7 @@ use std::{mem::size_of_val, sync::Arc}; pub use conduit::pdu::{ShortEventId, ShortId, ShortRoomId}; use conduit::{err, implement, utils, Result}; use database::{Deserialized, Map}; +use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; use crate::{globals, Dep}; @@ -71,11 +72,12 @@ pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEvent } #[implement(Service)] -pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Vec { +pub fn multi_get_or_create_shorteventid<'a>( + &'a self, event_ids: &'a [&EventId], +) -> impl Stream + Send + 'a { self.db .eventid_shorteventid - .get_batch_blocking(event_ids.iter()) - .into_iter() + .get_batch(event_ids.iter()) .enumerate() .map(|(i, result)| match result { Ok(ref short) => utils::u64_from_u8(short), @@ -95,7 +97,6 @@ pub async fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> short }, }) - .collect() } #[implement(Service)] @@ -163,10 +164,10 @@ pub async fn multi_get_eventid_from_short(&self, shorteventid: &[ShortEventId]) self.db .shorteventid_eventid - .get_batch_blocking(keys.iter()) - .into_iter() + .get_batch(keys.iter()) .map(Deserialized::deserialized) .collect() + .await } #[implement(Service)] From 887ae84f1e3b3e0254e04afe011083e692af7e00 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 15 Nov 2024 22:20:28 +0000 Subject: [PATCH 0239/1248] optimize sha256 interface gather/vector inputs Signed-off-by: Jason Volk --- src/api/server/invite.rs | 4 +- src/core/utils/hash.rs | 9 +-- src/core/utils/hash/sha256.rs | 67 
++++++++++++++++++++--- src/core/utils/mod.rs | 2 +- src/service/rooms/state/mod.rs | 7 +-- src/service/rooms/state_compressor/mod.rs | 7 +-- src/service/sending/sender.rs | 39 ++++++------- 7 files changed, 85 insertions(+), 50 deletions(-) diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index edf80cd6..0ceb914f 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,7 +1,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use base64::{engine::general_purpose, Engine as _}; -use conduit::{err, utils, warn, Err, Error, PduEvent, Result}; +use conduit::{err, utils, utils::hash::sha256, warn, Err, Error, PduEvent, Result}; use ruma::{ api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, @@ -160,7 +160,7 @@ pub(crate) async fn create_invite_route( ruma::api::appservice::event::push_events::v1::Request { events: vec![pdu.to_room_event()], txn_id: general_purpose::URL_SAFE_NO_PAD - .encode(utils::calculate_hash(&[pdu.event_id.as_bytes()])) + .encode(sha256::hash(pdu.event_id.as_bytes())) .into(), ephemeral: Vec::new(), to_device: Vec::new(), diff --git a/src/core/utils/hash.rs b/src/core/utils/hash.rs index 5a3664cb..c12d4f66 100644 --- a/src/core/utils/hash.rs +++ b/src/core/utils/hash.rs @@ -1,13 +1,10 @@ mod argon; -mod sha256; +pub mod sha256; use crate::Result; -pub fn password(password: &str) -> Result { argon::password(password) } - -pub fn verify_password(password: &str, password_hash: &str) -> Result<()> { +pub fn verify_password(password: &str, password_hash: &str) -> Result { argon::verify_password(password, password_hash) } -#[must_use] -pub fn calculate_hash(keys: &[&[u8]]) -> Vec { sha256::hash(keys) } +pub fn password(password: &str) -> Result { argon::password(password) } diff --git a/src/core/utils/hash/sha256.rs b/src/core/utils/hash/sha256.rs index b2e5a94c..06e210a7 100644 --- a/src/core/utils/hash/sha256.rs +++ 
b/src/core/utils/hash/sha256.rs @@ -1,9 +1,62 @@ -use ring::{digest, digest::SHA256}; +use ring::{ + digest, + digest::{Context, SHA256, SHA256_OUTPUT_LEN}, +}; -#[tracing::instrument(skip_all, level = "debug")] -pub(super) fn hash(keys: &[&[u8]]) -> Vec { - // We only hash the pdu's event ids, not the whole pdu - let bytes = keys.join(&0xFF); - let hash = digest::digest(&SHA256, &bytes); - hash.as_ref().to_owned() +pub type Digest = [u8; SHA256_OUTPUT_LEN]; + +/// Sha256 hash (input gather joined by 0xFF bytes) +#[must_use] +#[tracing::instrument(skip(inputs), level = "trace")] +pub fn delimited<'a, T, I>(mut inputs: I) -> Digest +where + I: Iterator + 'a, + T: AsRef<[u8]> + 'a, +{ + let mut ctx = Context::new(&SHA256); + if let Some(input) = inputs.next() { + ctx.update(input.as_ref()); + for input in inputs { + ctx.update(b"\xFF"); + ctx.update(input.as_ref()); + } + } + + ctx.finish() + .as_ref() + .try_into() + .expect("failed to return Digest buffer") +} + +/// Sha256 hash (input gather) +#[must_use] +#[tracing::instrument(skip(inputs), level = "trace")] +pub fn concat<'a, T, I>(inputs: I) -> Digest +where + I: Iterator + 'a, + T: AsRef<[u8]> + 'a, +{ + inputs + .fold(Context::new(&SHA256), |mut ctx, input| { + ctx.update(input.as_ref()); + ctx + }) + .finish() + .as_ref() + .try_into() + .expect("failed to return Digest buffer") +} + +/// Sha256 hash +#[inline] +#[must_use] +#[tracing::instrument(skip(input), level = "trace")] +pub fn hash(input: T) -> Digest +where + T: AsRef<[u8]>, +{ + digest::digest(&SHA256, input.as_ref()) + .as_ref() + .try_into() + .expect("failed to return Digest buffer") } diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index b8640f3a..18c2dd6f 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -28,7 +28,7 @@ pub use self::{ bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, debug::slice_truncated as debug_slice_truncated, future::TryExtExt as TryFutureExtExt, - hash::calculate_hash, + 
hash::sha256::delimited as calculate_hash, html::Escape as HtmlEscape, json::{deserialize_from_str, to_canonical_object}, math::clamp, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 7d8200f0..29ffedfc 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -157,12 +157,7 @@ impl Service { let previous_shortstatehash = self.get_room_shortstatehash(room_id).await; - let state_hash = calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); + let state_hash = calculate_hash(state_ids_compressed.iter().map(|s| &s[..])); let (shortstatehash, already_existed) = self .services diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index f0c851de..0466fb12 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -352,12 +352,7 @@ impl Service { .await .ok(); - let state_hash = utils::calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); + let state_hash = utils::calculate_hash(new_state_ids_compressed.iter().map(|bytes| &bytes[..])); let (new_shortstatehash, already_existed) = self .services diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index f5d87504..ee818289 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -539,16 +539,13 @@ impl Service { } } - let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEvent::Edu(b) => &**b, - SendingEvent::Pdu(b) => b.as_ref(), - SendingEvent::Flush => &[], - }) - .collect::>(), - )); + let txn_hash = calculate_hash(events.iter().filter_map(|e| match e { + SendingEvent::Edu(b) => Some(&**b), + SendingEvent::Pdu(b) => Some(b.as_ref()), + SendingEvent::Flush => None, + })); + + let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); //debug_assert!(pdu_jsons.len() + 
edu_jsons.len() > 0, "sending empty // transaction"); @@ -664,23 +661,21 @@ impl Service { //debug_assert!(pdu_jsons.len() + edu_jsons.len() > 0, "sending empty // transaction"); - let transaction_id = &*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEvent::Edu(b) => &**b, - SendingEvent::Pdu(b) => b.as_ref(), - SendingEvent::Flush => &[], - }) - .collect::>(), - )); + + let txn_hash = calculate_hash(events.iter().filter_map(|e| match e { + SendingEvent::Edu(b) => Some(&**b), + SendingEvent::Pdu(b) => Some(b.as_ref()), + SendingEvent::Flush => None, + })); + + let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); let request = send_transaction_message::v1::Request { origin: self.server.config.server_name.clone(), pdus: pdu_jsons, edus: edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: transaction_id.into(), + transaction_id: txn_id.into(), }; let client = &self.services.client.sender; @@ -692,7 +687,7 @@ impl Service { .iter() .filter(|(_, res)| res.is_err()) .for_each( - |(pdu_id, res)| warn!(%transaction_id, %server, "error sending PDU {pdu_id} to remote server: {res:?}"), + |(pdu_id, res)| warn!(%txn_id, %server, "error sending PDU {pdu_id} to remote server: {res:?}"), ); }) .map(|_| dest.clone()) From cd2c473bfe627389f7e24f822ca4a19e696ca555 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 21:00:26 -0500 Subject: [PATCH 0240/1248] add missing fix_referencedevents_missing_sep key on fresh db creations Signed-off-by: strawberry --- src/service/migrations.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 4c821fa3..126d3c7e 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -68,6 +68,7 @@ async fn fresh(services: &Services) -> Result<()> { db["global"].insert(b"feat_sha256_media", []); db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); 
db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); + db["global"].insert(b"fix_referencedevents_missing_sep", []); // Create the admin room and server user on first run crate::admin::create_admin_room(services).boxed().await?; From ead9d667970f77c6d4e1c9747e607d7d711a57e0 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 21:28:08 -0500 Subject: [PATCH 0241/1248] send the actual unsupported room version in join errors Signed-off-by: strawberry --- src/api/client/membership.rs | 69 ++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 31 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index c61185a7..9478e383 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -702,18 +702,20 @@ async fn join_room_by_id_helper_remote( info!("make_join finished"); - let room_version_id = match make_join_response.room_version { - Some(room_version) - if services - .globals - .supported_room_versions() - .contains(&room_version) => - { - room_version - }, - _ => return Err!(BadServerResponse("Room version is not supported")), + let Some(room_version_id) = make_join_response.room_version else { + return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); }; + if !services + .globals + .supported_room_versions() + .contains(&room_version_id) + { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + let mut join_event_stub: CanonicalJsonObject = serde_json::from_str(make_join_response.event.get()) .map_err(|e| err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")))?; @@ -1116,17 +1118,20 @@ async fn join_room_by_id_helper_local( warn!("We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements"); let (make_join_response, remote_server) = make_join_request(services, sender_user, room_id, servers).await?; - 
let room_version_id = match make_join_response.room_version { - Some(room_version_id) - if services - .globals - .supported_room_versions() - .contains(&room_version_id) => - { - room_version_id - }, - _ => return Err!(BadServerResponse("Room version is not supported")), + let Some(room_version_id) = make_join_response.room_version else { + return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); }; + + if !services + .globals + .supported_room_versions() + .contains(&room_version_id) + { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + let mut join_event_stub: CanonicalJsonObject = serde_json::from_str(make_join_response.event.get()) .map_err(|e| err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")))?; let join_authorized_via_users_server = join_event_stub @@ -1274,7 +1279,7 @@ async fn make_join_request( if incompatible_room_version_count > 15 { info!( "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or M_UNSUPPORTED_ROOM_VERSION, \ - assuming that conduwuit does not support the room {room_id}: {e}" + assuming that conduwuit does not support the room version {room_id}: {e}" ); make_join_response_and_server = Err!(BadServerResponse("Room version is not supported by Conduwuit")); return make_join_response_and_server; @@ -1607,18 +1612,20 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room let (make_leave_response, remote_server) = make_leave_response_and_server?; - let room_version_id = match make_leave_response.room_version { - Some(version) - if services - .globals - .supported_room_versions() - .contains(&version) => - { - version - }, - _ => return Err!(BadServerResponse("Room version is not supported")), + let Some(room_version_id) = make_leave_response.room_version else { + return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); }; + if !services + .globals + 
.supported_room_versions() + .contains(&room_version_id) + { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + let mut leave_event_stub = serde_json::from_str::(make_leave_response.event.get()) .map_err(|e| err!(BadServerResponse("Invalid make_leave event json received from server: {e:?}")))?; From 63193840729ab1dea026514d807aa39a98b3f1b1 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 21:40:13 -0500 Subject: [PATCH 0242/1248] implement `GET /_matrix/client/v3/pushrules/global/` Signed-off-by: strawberry --- src/api/client/push.rs | 69 +++++++++++++++++++++++++++++++++++++++++- src/api/router.rs | 1 + 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/src/api/client/push.rs b/src/api/client/push.rs index de280b32..97243ab4 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -5,7 +5,7 @@ use ruma::{ error::ErrorKind, push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, - set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, + get_pushrules_global_scope, set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, }, }, events::{ @@ -67,6 +67,73 @@ pub(crate) async fn get_pushrules_all_route( }) } +/// # `GET /_matrix/client/r0/pushrules/global/` +/// +/// Retrieves the push rules event for this user. +/// +/// This appears to be the exact same as `GET /_matrix/client/r0/pushrules/`. +pub(crate) async fn get_pushrules_global_route( + State(services): State, body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let Some(content_value) = services + .account_data + .get_global::(sender_user, GlobalAccountDataEventType::PushRules) + .await + .ok() + .and_then(|event| event.get("content").cloned()) + .filter(CanonicalJsonValue::is_object) + else { + // user somehow has non-existent push rule event. 
recreate it and return server + // default silently + services + .account_data + .update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(PushRulesEvent { + content: PushRulesEventContent { + global: Ruleset::server_default(sender_user), + }, + }) + .expect("to json always works"), + ) + .await?; + + return Ok(get_pushrules_global_scope::v3::Response { + global: Ruleset::server_default(sender_user), + }); + }; + + let account_data_content = serde_json::from_value::(content_value.into()) + .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; + + let mut global_ruleset = account_data_content.global; + + // remove old deprecated mentions push rules as per MSC4210 + #[allow(deprecated)] + { + use ruma::push::RuleKind::*; + + global_ruleset + .remove(Override, PredefinedOverrideRuleId::ContainsDisplayName) + .ok(); + global_ruleset + .remove(Override, PredefinedOverrideRuleId::RoomNotif) + .ok(); + + global_ruleset + .remove(Content, PredefinedContentRuleId::ContainsUserName) + .ok(); + }; + + Ok(get_pushrules_global_scope::v3::Response { + global: global_ruleset, + }) +} + /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Retrieves a single specified push rule for this user. 
diff --git a/src/api/router.rs b/src/api/router.rs index ddd91d11..1df4342f 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -45,6 +45,7 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&client::check_registration_token_validity) .ruma_route(&client::get_capabilities_route) .ruma_route(&client::get_pushrules_all_route) + .ruma_route(&client::get_pushrules_global_route) .ruma_route(&client::set_pushrule_route) .ruma_route(&client::get_pushrule_route) .ruma_route(&client::set_pushrule_enabled_route) From b92b4e043c03dfcc6f0163af92a219251b745351 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 22:16:11 -0500 Subject: [PATCH 0243/1248] drop hyper-util back down to 0.1.8 due to DNS issues Signed-off-by: strawberry --- Cargo.lock | 17 +++++++++-------- Cargo.toml | 3 ++- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b56005ff..3a95f83a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -164,9 +164,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" +checksum = "fe7c2840b66236045acd2607d5866e274380afd87ef99d6226e961e2cb47df45" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -176,9 +176,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" +checksum = "ad3a619a9de81e1d7de1f1186dcba4506ed661a0e483d84410fdef0ee87b2f96" dependencies = [ "bindgen", "cc", @@ -1739,9 +1739,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ "hyper", "hyper-util", @@ -1752,9 +1752,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -1765,6 +1765,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", + "tower 0.4.13", "tower-service", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index 814a435b..68c87c57 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -264,7 +264,8 @@ features = [ ] [workspace.dependencies.hyper-util] -version = "0.1.10" +# hyper-util >=0.1.9 seems to have DNS issues +version = "=0.1.8" default-features = false features = [ "server-auto", From 7f96b2f92ad11e9a357c5c0607f7be403f7c8a85 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 23:18:12 -0500 Subject: [PATCH 0244/1248] nix: remove libllvm, libgcc, and llvm from OCI images as well aarch64 OCI images love llvm?? 
Signed-off-by: strawberry --- nix/pkgs/main/default.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 1088b03c..d11c36cc 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -176,7 +176,7 @@ commonAttrs = { # # postInstall = with pkgsBuildHost; '' - find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${rustc.unwrapped} -t ${rustc} -t ${libidn2} -t ${libunistring} '{}' + + find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${libgcc} -t ${llvm} -t ${libllvm} -t ${rustc.unwrapped} -t ${rustc} -t ${libidn2} -t ${libunistring} '{}' + ''; }; in From 8f140485287adb8534d299ce553d15994ba1fab7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 15 Nov 2024 23:48:55 -0500 Subject: [PATCH 0245/1248] ci: free up a bit of runner space safely (again) Signed-off-by: strawberry --- .github/workflows/ci.yml | 9 +++++++++ .github/workflows/documentation.yml | 10 ++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 96a1188b..9385c5e3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,6 +71,15 @@ jobs: run: | sudo apt install liburing-dev -y + - name: Free up a bit of runner space + run: | + set +o pipefail + sudo docker image prune --all --force || true + sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true + sudo apt clean + sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku + set -o pipefail + - name: Sync repository uses: actions/checkout@v4 diff --git a/.github/workflows/documentation.yml 
b/.github/workflows/documentation.yml index ea720c43..17b1f9c1 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -50,8 +50,14 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} steps: - - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main + - name: Free up a bit of runner space + run: | + set +o pipefail + sudo docker image prune --all --force || true + sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true + sudo apt clean + sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku + set -o pipefail - name: Sync repository uses: actions/checkout@v4 From 2b2793fac6b2384217e70424dbd16752060dec14 Mon Sep 17 00:00:00 2001 From: Tamara Schmitz Date: Mon, 4 Nov 2024 14:27:43 +0100 Subject: [PATCH 0246/1248] docs: add note about the nixos service defaulting to sqlite MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: June 🍓🦴 --- docs/deploying/nixos.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index 9147db7f..61fb3916 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -39,6 +39,15 @@ The `flake.nix` and `default.nix` do not currently provide a NixOS module (contr welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure conduwuit. +### Conduit NixOS Config Module and SQLite + +Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend. +Conduwuit dropped SQLite support in favor of exclusively supporting the much faster RocksDB. +Make sure that you are using the RocksDB backend before migrating! 
+ +There is a [tool to migrate a Conduit SQLite database to +RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/). + If you want to run the latest code, you should get conduwuit from the `flake.nix` or `default.nix` and set [`services.matrix-conduit.package`][package] appropriately to use conduwuit instead of Conduit. From 85a6d8fc6bd25adaaebb395bc925dea043e3852a Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 16 Nov 2024 01:57:31 -0500 Subject: [PATCH 0247/1248] ci: fix github pages publish check Signed-off-by: strawberry --- .github/workflows/documentation.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 17b1f9c1..20b1e4c6 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -63,7 +63,7 @@ jobs: uses: actions/checkout@v4 - name: Setup GitHub Pages - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') && (github.event.pull_request.user.login == 'girlbossceo') + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') uses: actions/configure-pages@v5 - uses: nixbuild/nix-quick-install-action@master @@ -145,12 +145,12 @@ jobs: compression-level: 0 - name: Upload generated documentation (book) as GitHub Pages artifact - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') && (github.event.pull_request.user.login == 'girlbossceo') + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') uses: actions/upload-pages-artifact@v3 with: path: public - name: Deploy to GitHub Pages - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') && (github.event.pull_request.user.login == 'girlbossceo') 
+ if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') id: deployment uses: actions/deploy-pages@v4 From 26bcc7e3127129e845e97e95913430df6f5b37fc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 17 Nov 2024 00:13:20 +0000 Subject: [PATCH 0248/1248] fix default stateinfo cache size Signed-off-by: Jason Volk --- src/core/config/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index cb9d087b..355881b3 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2048,7 +2048,7 @@ fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(50 fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } -fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } +fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) } fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } From a05dc0310056c5efc5789158d643951964f8fce4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 17 Nov 2024 07:46:36 +0000 Subject: [PATCH 0249/1248] use debug_warn for backfill event evals Signed-off-by: Jason Volk --- src/service/rooms/timeline/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 59fc8e93..a3fc6a0b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -9,7 +9,7 @@ use std::{ }; use conduit::{ - debug, err, error, implement, info, + debug, debug_warn, err, error, implement, info, pdu::{EventHash, PduBuilder, PduCount, PduEvent}, utils, utils::{stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, @@ -1128,7 +1128,7 @@ impl Service { Ok(response) => { for pdu in response.pdus { if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await { - warn!("Failed to add 
backfilled pdu in room {room_id}: {e}"); + debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}"); } } return Ok(()); From 90106c4c33946cfda514e9a2cdf75f36605c08bd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 16 Nov 2024 23:13:27 +0000 Subject: [PATCH 0250/1248] streamline batch insertions Signed-off-by: Jason Volk --- src/database/map/insert.rs | 2 +- src/service/rooms/search/mod.rs | 6 ++- src/service/sending/data.rs | 84 ++++++++++++++++++++------------- src/service/sending/mod.rs | 16 +++---- src/service/sending/sender.rs | 4 +- 5 files changed, 63 insertions(+), 49 deletions(-) diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 39a0c422..b8b08b34 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -203,7 +203,7 @@ where #[tracing::instrument(skip(self, iter), fields(%self), level = "trace")] pub fn insert_batch<'a, I, K, V>(&'a self, iter: I) where - I: Iterator + Send + Debug, + I: Iterator + Send + Debug, K: AsRef<[u8]> + Sized + Debug + 'a, V: AsRef<[u8]> + Sized + 'a, { diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 1af37d9e..d59d1d11 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -73,11 +73,13 @@ pub fn index_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_bod key.extend_from_slice(word.as_bytes()); key.push(0xFF); key.extend_from_slice(pdu_id.as_ref()); // TODO: currently we save the room id a second time here - (key, Vec::::new()) + key }) .collect::>(); - self.db.tokenids.insert_batch(batch.iter()); + self.db + .tokenids + .insert_batch(batch.iter().map(|k| (k.as_slice(), &[]))); } #[implement(Service)] diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index cd25776a..ca7ca19a 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,7 +1,7 @@ -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use conduit::{ - utils, + at, utils, 
utils::{stream::TryIgnore, ReadyExt}, Error, Result, }; @@ -69,20 +69,22 @@ impl Data { .await; } - pub(super) fn mark_as_active(&self, events: &[QueueItem]) { - for (key, e) in events { - if key.is_empty() { - continue; - } + pub(super) fn mark_as_active<'a, I>(&self, events: I) + where + I: Iterator, + { + events + .filter(|(key, _)| !key.is_empty()) + .for_each(|(key, val)| { + let val = if let SendingEvent::Edu(val) = &val { + &**val + } else { + &[] + }; - let value = if let SendingEvent::Edu(value) = &e { - &**value - } else { - &[] - }; - self.servercurrentevent_data.insert(key, value); - self.servernameevent_data.remove(key); - } + self.servercurrentevent_data.insert(key, val); + self.servernameevent_data.remove(key); + }); } #[inline] @@ -110,26 +112,40 @@ impl Data { }) } - pub(super) fn queue_requests(&self, requests: &[(&SendingEvent, &Destination)]) -> Vec> { - let mut batch = Vec::new(); - let mut keys = Vec::new(); - for (event, destination) in requests { - let mut key = destination.get_prefix(); - if let SendingEvent::Pdu(value) = event { - key.extend(value.as_ref()); - } else { - key.extend(&self.services.globals.next_count().unwrap().to_be_bytes()); - } - let value = if let SendingEvent::Edu(value) = &event { - &**value - } else { - &[] - }; - batch.push((key.clone(), value.to_owned())); - keys.push(key); - } + pub(super) fn queue_requests<'a, I>(&self, requests: I) -> Vec> + where + I: Iterator + Clone + Debug + Send, + { + let keys: Vec<_> = requests + .clone() + .map(|(event, dest)| { + let mut key = dest.get_prefix(); + if let SendingEvent::Pdu(value) = event { + key.extend(value.as_ref()); + } else { + let count = self.services.globals.next_count().unwrap(); + key.extend(&count.to_be_bytes()); + } + + key + }) + .collect(); + + self.servernameevent_data.insert_batch( + keys.iter() + .map(Vec::as_slice) + .zip(requests.map(at!(0))) + .map(|(key, event)| { + let value = if let SendingEvent::Edu(value) = &event { + &**value + } else { + &[] + }; 
+ + (key, value) + }), + ); - self.servernameevent_data.insert_batch(batch.iter()); keys } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 77997f69..5a070306 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -4,7 +4,7 @@ mod dest; mod send; mod sender; -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, iter::once, sync::Arc}; use async_trait::async_trait; use conduit::{ @@ -117,7 +117,7 @@ impl Service { let dest = Destination::Push(user.to_owned(), pushkey); let event = SendingEvent::Pdu(*pdu_id); let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(&[(&event, &dest)]); + let keys = self.db.queue_requests(once((&event, &dest))); self.dispatch(Msg { dest, event, @@ -130,7 +130,7 @@ impl Service { let dest = Destination::Appservice(appservice_id); let event = SendingEvent::Pdu(pdu_id); let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(&[(&event, &dest)]); + let keys = self.db.queue_requests(once((&event, &dest))); self.dispatch(Msg { dest, event, @@ -160,9 +160,7 @@ impl Service { .collect::>() .await; - let keys = self - .db - .queue_requests(&requests.iter().map(|(o, e)| (e, o)).collect::>()); + let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { self.dispatch(Msg { @@ -180,7 +178,7 @@ impl Service { let dest = Destination::Normal(server.to_owned()); let event = SendingEvent::Edu(serialized); let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(&[(&event, &dest)]); + let keys = self.db.queue_requests(once((&event, &dest))); self.dispatch(Msg { dest, event, @@ -210,9 +208,7 @@ impl Service { .collect::>() .await; - let keys = self - .db - .queue_requests(&requests.iter().map(|(o, e)| (e, o)).collect::>()); + let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { self.dispatch(Msg { diff --git 
a/src/service/sending/sender.rs b/src/service/sending/sender.rs index ee818289..0a0aae39 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -118,7 +118,7 @@ impl Service { // Insert any pdus we found if !new_events.is_empty() { - self.db.mark_as_active(&new_events); + self.db.mark_as_active(new_events.iter()); let new_events_vec = new_events.into_iter().map(|(_, event)| event).collect(); futures.push(self.send_events(dest.clone(), new_events_vec).boxed()); @@ -213,7 +213,7 @@ impl Service { // Compose the next transaction let _cork = self.db.db.cork(); if !new_events.is_empty() { - self.db.mark_as_active(&new_events); + self.db.mark_as_active(new_events.iter()); for (_, e) in new_events { events.push(e); } From 8fedc358e063cfc90a4f7598107d0e04685bb10a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 15 Nov 2024 22:23:42 +0000 Subject: [PATCH 0251/1248] typename additional shortids cleanup/split state_compressor load Signed-off-by: Jason Volk --- src/service/rooms/state/mod.rs | 19 ++++-- src/service/rooms/state_compressor/mod.rs | 83 ++++++++++++----------- 2 files changed, 58 insertions(+), 44 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 29ffedfc..622b8325 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -23,8 +23,14 @@ use ruma::{ EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; -use super::state_compressor::CompressedStateEvent; -use crate::{globals, rooms, Dep}; +use crate::{ + globals, rooms, + rooms::{ + short::{ShortEventId, ShortStateHash}, + state_compressor::CompressedStateEvent, + }, + Dep, +}; pub struct Service { pub mutex: RoomMutexMap, @@ -146,8 +152,9 @@ impl Service { #[tracing::instrument(skip(self, state_ids_compressed), level = "debug")] pub async fn set_event_state( &self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: Arc>, - ) -> Result { - const BUFSIZE: usize = size_of::(); + ) -> Result 
{ + const KEY_LEN: usize = size_of::(); + const VAL_LEN: usize = size_of::(); let shorteventid = self .services @@ -202,7 +209,7 @@ impl Service { self.db .shorteventid_shortstatehash - .aput::(shorteventid, shortstatehash); + .aput::(shorteventid, shortstatehash); Ok(shortstatehash) } @@ -343,7 +350,7 @@ impl Service { .map_err(|e| err!(Request(NotFound("No create event found: {e:?}")))) } - pub async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result { + pub async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result { self.db .roomid_shortstatehash .get(room_id) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 0466fb12..f9db6f67 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -89,9 +89,10 @@ impl crate::Service for Service { .map(at!(1)) .flat_map(|vec| vec.iter()) .fold(HashMap::new(), |mut ents, ssi| { - ents.insert(Arc::as_ptr(&ssi.added), compressed_state_size(&ssi.added)); - ents.insert(Arc::as_ptr(&ssi.removed), compressed_state_size(&ssi.removed)); - ents.insert(Arc::as_ptr(&ssi.full_state), compressed_state_size(&ssi.full_state)); + for cs in &[&ssi.added, &ssi.removed, &ssi.full_state] { + ents.insert(Arc::as_ptr(cs), compressed_state_size(cs)); + } + ents }); @@ -125,51 +126,57 @@ impl Service { return Ok(r.clone()); } - let StateDiff { - parent, - added, - removed, - } = self.get_statediff(shortstatehash).await?; - - let response = if let Some(parent) = parent { - let mut response = Box::pin(self.load_shortstatehash_info(parent)).await?; - let mut state = (*response.last().expect("at least one response").full_state).clone(); - state.extend(added.iter().copied()); - let removed = (*removed).clone(); - for r in &removed { - state.remove(r); - } - - response.push(ShortStateInfo { - shortstatehash, - full_state: Arc::new(state), - added, - removed: Arc::new(removed), - }); - - response - } else { - vec![ShortStateInfo { - 
shortstatehash, - full_state: added.clone(), - added, - removed, - }] - }; + let stack = self.new_shortstatehash_info(shortstatehash).await?; debug!( - ?parent, ?shortstatehash, - vec_len = %response.len(), + len = %stack.len(), "cache update" ); self.stateinfo_cache .lock() .expect("locked") - .insert(shortstatehash, response.clone()); + .insert(shortstatehash, stack.clone()); - Ok(response) + Ok(stack) + } + + async fn new_shortstatehash_info(&self, shortstatehash: ShortStateHash) -> Result { + let StateDiff { + parent, + added, + removed, + } = self.get_statediff(shortstatehash).await?; + + let Some(parent) = parent else { + return Ok(vec![ShortStateInfo { + shortstatehash, + full_state: added.clone(), + added, + removed, + }]); + }; + + let mut stack = Box::pin(self.load_shortstatehash_info(parent)).await?; + let top = stack.last().expect("at least one frame"); + + let mut full_state = (*top.full_state).clone(); + full_state.extend(added.iter().copied()); + + let removed = (*removed).clone(); + for r in &removed { + full_state.remove(r); + } + + stack.push(ShortStateInfo { + shortstatehash, + added, + removed: Arc::new(removed), + full_state: Arc::new(full_state), + }); + + Ok(stack) } pub async fn compress_state_event(&self, shortstatekey: ShortStateKey, event_id: &EventId) -> CompressedStateEvent { From 7680d1bd5e856630e670a739b912c4d183061911 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 19 Nov 2024 08:14:02 +0000 Subject: [PATCH 0252/1248] replace yields point with consume_budget Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 1 - src/database/map/get.rs | 6 ++++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index ea487d8e..77ba4c3f 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -757,7 +757,6 @@ async fn load_joined_room( }; delta_state_events.push(pdu); - tokio::task::yield_now().await; } } } diff --git a/src/database/map/get.rs 
b/src/database/map/get.rs index 2f7df031..a3c6c492 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -2,9 +2,10 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write}; use arrayvec::ArrayVec; use conduit::{err, implement, utils::IterStream, Result}; -use futures::{future::ready, Stream}; +use futures::{FutureExt, Stream}; use rocksdb::DBPinnableSlice; use serde::Serialize; +use tokio::task; use crate::{ser, util, Handle}; @@ -55,7 +56,8 @@ pub fn get(&self, key: &K) -> impl Future>> + Send where K: AsRef<[u8]> + ?Sized + Debug, { - ready(self.get_blocking(key)) + let result = self.get_blocking(key); + task::consume_budget().map(move |()| result) } /// Fetch a value from the database into cache, returning a reference-handle. From 411c60009da6858f40e9f623c9c2420c09d346d8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 17 Nov 2024 03:57:21 +0000 Subject: [PATCH 0253/1248] enrich state iteration interface Signed-off-by: Jason Volk --- src/service/rooms/state/mod.rs | 32 ++--- src/service/rooms/state_accessor/data.rs | 155 ++++++++++++---------- src/service/rooms/state_accessor/mod.rs | 6 + src/service/rooms/state_compressor/mod.rs | 31 ++--- 4 files changed, 117 insertions(+), 107 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 622b8325..9d702cd7 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -27,7 +27,7 @@ use crate::{ globals, rooms, rooms::{ short::{ShortEventId, ShortStateHash}, - state_compressor::CompressedStateEvent, + state_compressor::{parse_compressed_state_event, CompressedStateEvent}, }, Dep, }; @@ -98,12 +98,12 @@ impl Service { _statediffremoved: Arc>, state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex ) -> Result { - let event_ids = statediffnew.iter().stream().filter_map(|new| { - self.services - .state_compressor - .parse_compressed_state_event(*new) - .map_ok_or_else(|_| None, |(_, 
event_id)| Some(event_id)) - }); + let event_ids = statediffnew + .iter() + .stream() + .map(|&new| parse_compressed_state_event(new).1) + .then(|shorteventid| self.services.short.get_eventid_from_short(shorteventid)) + .ignore_err(); pin_mut!(event_ids); while let Some(event_id) = event_ids.next().await { @@ -428,17 +428,19 @@ impl Service { .full_state; let mut ret = HashMap::new(); - for compressed in full_state.iter() { - let Ok((shortstatekey, event_id)) = self - .services - .state_compressor - .parse_compressed_state_event(*compressed) - .await - else { + for &compressed in full_state.iter() { + let (shortstatekey, shorteventid) = parse_compressed_state_event(compressed); + + let Some((ty, state_key)) = sauthevents.remove(&shortstatekey) else { continue; }; - let Some((ty, state_key)) = sauthevents.remove(&shortstatekey) else { + let Ok(event_id) = self + .services + .short + .get_eventid_from_short(shorteventid) + .await + else { continue; }; diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 06cd648c..8df0d8b0 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,11 +1,22 @@ use std::{collections::HashMap, sync::Arc}; -use conduit::{err, PduEvent, Result}; +use conduit::{ + err, + utils::{future::TryExtExt, IterStream}, + PduEvent, Result, +}; use database::{Deserialized, Map}; -use futures::TryFutureExt; +use futures::{StreamExt, TryFutureExt}; use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{rooms, rooms::short::ShortStateHash, Dep}; +use crate::{ + rooms, + rooms::{ + short::{ShortEventId, ShortStateHash, ShortStateKey}, + state_compressor::parse_compressed_state_event, + }, + Dep, +}; pub(super) struct Data { eventid_shorteventid: Arc, @@ -35,9 +46,55 @@ impl Data { } } - #[allow(unused_qualifications)] // async traits + pub(super) async fn state_full( + &self, shortstatehash: ShortStateHash, + ) -> Result>> { + Ok(self + 
.state_full_pdus(shortstatehash) + .await? + .into_iter() + .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu))) + .collect()) + } + + pub(super) async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result>> { + Ok(self + .state_full_shortids(shortstatehash) + .await? + .iter() + .stream() + .filter_map(|(_, shorteventid)| { + self.services + .short + .get_eventid_from_short(*shorteventid) + .ok() + }) + .filter_map(|eventid| async move { self.services.timeline.get_pdu(&eventid).await.ok() }) + .collect() + .await) + } + pub(super) async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result>> { - let full_state = self + Ok(self + .state_full_shortids(shortstatehash) + .await? + .iter() + .stream() + .filter_map(|(shortstatekey, shorteventid)| { + self.services + .short + .get_eventid_from_short(*shorteventid) + .map_ok(move |eventid| (*shortstatekey, eventid)) + .ok() + }) + .collect() + .await) + } + + pub(super) async fn state_full_shortids( + &self, shortstatehash: ShortStateHash, + ) -> Result> { + Ok(self .services .state_compressor .load_shortstatehash_info(shortstatehash) @@ -45,63 +102,11 @@ impl Data { .map_err(|e| err!(Database("Missing state IDs: {e}")))? .pop() .expect("there is always one layer") - .full_state; - - let mut result = HashMap::new(); - let mut i: u8 = 0; - for compressed in full_state.iter() { - let parsed = self - .services - .state_compressor - .parse_compressed_state_event(*compressed) - .await?; - - result.insert(parsed.0, parsed.1); - - i = i.wrapping_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - #[allow(unused_qualifications)] // async traits - pub(super) async fn state_full( - &self, shortstatehash: ShortStateHash, - ) -> Result>> { - let full_state = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await? 
- .pop() - .expect("there is always one layer") - .full_state; - - let mut result = HashMap::new(); - let mut i: u8 = 0; - for compressed in full_state.iter() { - let (_, eventid) = self - .services - .state_compressor - .parse_compressed_state_event(*compressed) - .await?; - - if let Ok(pdu) = self.services.timeline.get_pdu(&eventid).await { - if let Some(state_key) = pdu.state_key.as_ref() { - result.insert((pdu.kind.to_string().into(), state_key.clone()), pdu); - } - } - - i = i.wrapping_add(1); - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) + .full_state + .iter() + .copied() + .map(parse_compressed_state_event) + .collect()) } /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). @@ -130,18 +135,11 @@ impl Data { .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) .ok_or(err!(Database("No shortstatekey in compressed state")))?; + let (_, shorteventid) = parse_compressed_state_event(*compressed); + self.services - .state_compressor - .parse_compressed_state_event(*compressed) - .map_ok(|(_, id)| id) - .map_err(|e| { - err!(Database(error!( - ?event_type, - ?state_key, - ?shortstatekey, - "Failed to parse compressed: {e:?}" - ))) - }) + .short + .get_eventid_from_short(shorteventid) .await } @@ -176,6 +174,17 @@ impl Data { .await } + /// Returns the full room state's pdus. + #[allow(unused_qualifications)] // async traits + pub(super) async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result>> { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_full_pdus(shortstatehash)) + .map_err(|e| err!(Database("Missing state pdus for {room_id:?}: {e:?}"))) + .await + } + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
pub(super) async fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 4958c4ea..697f7236 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -301,6 +301,12 @@ impl Service { self.db.room_state_full(room_id).await } + /// Returns the full room state pdus + #[tracing::instrument(skip(self), level = "debug")] + pub async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result>> { + self.db.room_state_full_pdus(room_id).await + } + /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index f9db6f67..52ad5437 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -17,7 +17,7 @@ use ruma::{EventId, RoomId}; use crate::{ rooms, - rooms::short::{ShortId, ShortStateHash, ShortStateKey}, + rooms::short::{ShortEventId, ShortId, ShortStateHash, ShortStateKey}, Dep, }; @@ -196,24 +196,6 @@ impl Service { .expect("failed to create CompressedStateEvent") } - /// Returns shortstatekey, event id - #[inline] - pub async fn parse_compressed_state_event( - &self, compressed_event: CompressedStateEvent, - ) -> Result<(ShortStateKey, Arc)> { - use utils::u64_from_u8; - - let shortstatekey = u64_from_u8(&compressed_event[0..size_of::()]); - let shorteventid = u64_from_u8(&compressed_event[size_of::()..]); - let event_id = self - .services - .short - .get_eventid_from_short(shorteventid) - .await?; - - Ok((shortstatekey, event_id)) - } - /// Creates a new shortstatehash that often is just a diff to an already /// existing shortstatehash and therefore very efficient. 
/// @@ -488,6 +470,17 @@ impl Service { } } +#[inline] +#[must_use] +pub fn parse_compressed_state_event(compressed_event: CompressedStateEvent) -> (ShortStateKey, ShortEventId) { + use utils::u64_from_u8; + + let shortstatekey = u64_from_u8(&compressed_event[0..size_of::()]); + let shorteventid = u64_from_u8(&compressed_event[size_of::()..]); + + (shortstatekey, shorteventid) +} + #[inline] fn compressed_state_size(compressed_state: &CompressedState) -> usize { compressed_state From e257512aa7c3a19ec2c4f695288c8de3d13663df Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 17 Nov 2024 04:45:45 +0000 Subject: [PATCH 0254/1248] relax state visibility for invited modes Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 697f7236..89db88a6 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -287,7 +287,11 @@ impl Service { c.history_visibility }); - history_visibility == HistoryVisibility::WorldReadable + match history_visibility { + HistoryVisibility::Invited => self.services.state_cache.is_invited(user_id, room_id).await, + HistoryVisibility::WorldReadable => true, + _ => false, + } } /// Returns the state hash for this pdu. 
From 2f2cebe84d319608631273cef32c8236868e5baa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 12 Nov 2024 03:46:31 +0000 Subject: [PATCH 0255/1248] implement local room preview Signed-off-by: Jason Volk --- src/api/client/room/initial_sync.rs | 72 +++++++++++++++++++++++++++++ src/api/client/room/mod.rs | 3 +- src/api/router.rs | 7 +-- 3 files changed, 75 insertions(+), 7 deletions(-) create mode 100644 src/api/client/room/initial_sync.rs diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs new file mode 100644 index 00000000..16b3a53b --- /dev/null +++ b/src/api/client/room/initial_sync.rs @@ -0,0 +1,72 @@ +use axum::extract::State; +use conduit::{at, utils::BoolExt, Err, Result}; +use futures::StreamExt; +use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; + +use crate::Ruma; + +const LIMIT_MAX: usize = 100; + +pub(crate) async fn room_initial_sync_route( + State(services): State, body: Ruma, +) -> Result { + let room_id = &body.room_id; + + if !services + .rooms + .state_accessor + .user_can_see_state_events(body.sender_user(), room_id) + .await + { + return Err!(Request(Forbidden("No room preview available."))); + } + + let limit = LIMIT_MAX; + let events: Vec<_> = services + .rooms + .timeline + .pdus_rev(None, room_id, None) + .await? + .take(limit) + .collect() + .await; + + let state: Vec<_> = services + .rooms + .state_accessor + .room_state_full_pdus(room_id) + .await? 
+ .into_iter() + .map(|pdu| pdu.to_state_event()) + .collect(); + + let messages = PaginationChunk { + start: events.last().map(at!(0)).as_ref().map(ToString::to_string), + + end: events + .first() + .map(at!(0)) + .as_ref() + .map(ToString::to_string) + .unwrap_or_default(), + + chunk: events + .into_iter() + .map(at!(1)) + .map(|pdu| pdu.to_room_event()) + .collect(), + }; + + Ok(Response { + room_id: room_id.to_owned(), + account_data: None, + state: state.into(), + messages: messages.chunk.is_empty().or_some(messages), + visibility: services.rooms.directory.visibility(room_id).await.into(), + membership: services + .rooms + .state_cache + .user_membership(body.sender_user(), room_id) + .await, + }) +} diff --git a/src/api/client/room/mod.rs b/src/api/client/room/mod.rs index fa2d168f..16fcadab 100644 --- a/src/api/client/room/mod.rs +++ b/src/api/client/room/mod.rs @@ -1,9 +1,10 @@ mod aliases; mod create; mod event; +mod initial_sync; mod upgrade; pub(crate) use self::{ aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route, - upgrade::upgrade_room_route, + initial_sync::room_initial_sync_route, upgrade::upgrade_room_route, }; diff --git a/src/api/router.rs b/src/api/router.rs index 1df4342f..4bdd692d 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -183,8 +183,7 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&client::well_known_support) .ruma_route(&client::well_known_client) .route("/_conduwuit/server_version", get(client::conduwuit_server_version)) - .route("/_matrix/client/r0/rooms/:room_id/initialSync", get(initial_sync)) - .route("/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync)) + .ruma_route(&client::room_initial_sync_route) .route("/client/server.json", get(client::syncv3_client_server_json)); if config.allow_federation { @@ -285,10 +284,6 @@ async fn redirect_legacy_preview(uri: Uri) -> impl IntoResponse { Redirect::temporary(&uri) } -async fn initial_sync(_uri: Uri) 
-> impl IntoResponse { - err!(Request(GuestAccessForbidden("Guest access not implemented"))) -} - async fn legacy_media_disabled() -> impl IntoResponse { err!(Request(Forbidden("Unauthenticated media is disabled."))) } async fn federation_disabled() -> impl IntoResponse { err!(Request(Forbidden("Federation is disabled."))) } From 876c6e933cafd17d870ac51ff52c7a9ad052db16 Mon Sep 17 00:00:00 2001 From: nisbet-hubbard <87453615+nisbet-hubbard@users.noreply.github.com> Date: Sun, 17 Nov 2024 11:12:57 +0800 Subject: [PATCH 0256/1248] A minimal caveat --- docs/deploying/generic.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index f0b85a25..9eafbc46 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -54,13 +54,13 @@ While conduwuit can run as any user it is better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. -In Debian or Fedora/RHEL, you can use this command to create a conduwuit user: +In Debian, you can use this command to create a conduwuit user: ```bash sudo adduser --system conduwuit --group --disabled-login --no-create-home ``` -For distros without `adduser`: +For distros without `adduser` (or where it's a symlink to `useradd`): ```bash sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit @@ -142,8 +142,8 @@ If using Nginx, you need to give conduwuit the request URI using `$request_uri`, - `proxy_pass http://127.0.0.1:6167$request_uri;` - `proxy_pass http://127.0.0.1:6167;` -Nginx users may need to set `proxy_buffering off;` if there are issues with -uploading media like images. This is due to Nginx storing the entire POST content in-memory (`/tmp`) and running out of memory if on low memory hardware. +Nginx users need to increase `client_max_body_size` (default is 1M) to match +`max_request_size` defined in conduwuit.toml. 
You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs From ee3c58f78f88464c1ba8b01812c8bd7de7f1b29a Mon Sep 17 00:00:00 2001 From: emily Date: Wed, 20 Nov 2024 16:29:42 +0100 Subject: [PATCH 0257/1248] docs: add workaround to use unix sockets with the nixos module --- docs/deploying/nixos.md | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index 61fb3916..0372228d 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -55,15 +55,31 @@ appropriately to use conduwuit instead of Conduit. ### UNIX sockets Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module -it is not possible to use UNIX sockets. This is because the UNIX socket option does not exist -in Conduit, and their module forces listening on `[::1]:6167` by default if unspecified. +a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX +socket option does not exist in Conduit, and the module forcibly sets the `address` and +`port` config options. + +```nix +options.services.matrix-conduit.settings = lib.mkOption { + apply = old: old // ( + if (old.global ? "unix_socket_path") + then { global = builtins.removeAttrs old.global [ "address" "port" ]; } + else { } + ); +}; + +``` Additionally, the [`matrix-conduit` systemd unit][systemd-unit] in the module does not allow the `AF_UNIX` socket address family in their systemd unit's `RestrictAddressFamilies=` which -disallows the namespace from accessing or creating UNIX sockets. +disallows the namespace from accessing or creating UNIX sockets and has to be enabled like so: -There is no known workaround these. A conduwuit NixOS configuration module must be developed and -published by the community. 
+```nix +systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ]; +``` + +Even though those workarounds are feasible a conduwuit NixOS configuration module, developed and +published by the community, would be appreciated. ### jemalloc and hardened profile From 336de49e6aeca51d886018aa39ca25f222c49695 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 20 Nov 2024 15:58:07 -0500 Subject: [PATCH 0258/1248] tiny optimisation in append_pdu push notif Signed-off-by: strawberry --- src/service/rooms/timeline/mod.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a3fc6a0b..5d5566cb 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -11,8 +11,7 @@ use std::{ use conduit::{ debug, debug_warn, err, error, implement, info, pdu::{EventHash, PduBuilder, PduCount, PduEvent}, - utils, - utils::{stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, + utils::{self, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, validated, warn, Err, Error, Result, Server, }; pub use conduit::{PduId, RawPduId}; @@ -386,17 +385,19 @@ impl Service { let sync_pdu = pdu.to_sync_room_event(); - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - let mut push_target: HashSet<_> = self .services .state_cache .active_local_users_in_room(&pdu.room_id) + // Don't notify the sender of their own events + .ready_filter(|user| user != &pdu.sender) .map(ToOwned::to_owned) .collect() .await; + let mut notifies = Vec::with_capacity(push_target.len().saturating_add(1)); + let mut highlights = Vec::with_capacity(push_target.len().saturating_add(1)); + if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { let target_user_id = UserId::parse(state_key.clone())?; @@ -408,11 +409,6 @@ impl Service { } for user in &push_target { - // Don't notify the user of their 
own events - if user == &pdu.sender { - continue; - } - let rules_for_user = self .services .account_data @@ -436,6 +432,11 @@ impl Service { }, _ => {}, }; + + // Break early if both conditions are true + if notify && highlight { + break; + } } if notify { From b6d53e97a6c560109e4bd270de80a73e60ca8605 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 20 Nov 2024 16:31:36 -0500 Subject: [PATCH 0259/1248] bump ruwuma and a few http deps Signed-off-by: strawberry --- Cargo.lock | 68 +++++++++++++++++++++++++++++------------------------- Cargo.toml | 19 +++++++-------- 2 files changed, 47 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a95f83a..d39375d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,9 +191,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49c41b948da08fb481a94546cd874843adc1142278b0af4badf9b1b78599d68d" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", @@ -257,9 +257,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37634d71e9f3c35cfb1c30c87c7cba500d55892f04c2dbe6a99383c664b820b0" +checksum = "c794b30c904f0a1c2fb7740f7df7f7972dfaa14ef6f57cb6178dc63e5dca2f04" dependencies = [ "axum", "axum-core", @@ -1048,9 +1048,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" +checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", "syn 2.0.87", @@ -1699,9 +1699,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "1.5.0" +version = "1.5.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -3128,7 +3128,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "assign", "js_int", @@ -3150,7 +3150,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "js_int", "ruma-common", @@ -3162,7 +3162,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "as_variant", "assign", @@ -3185,7 +3185,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "as_variant", "base64 0.22.1", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "ruma-events" 
version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3239,7 +3239,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "bytes", "http", @@ -3257,7 +3257,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "js_int", "thiserror 2.0.3", @@ -3266,7 +3266,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "js_int", "ruma-common", @@ -3276,7 +3276,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" 
dependencies = [ "cfg-if", "once_cell", @@ -3292,7 +3292,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "js_int", "ruma-common", @@ -3304,7 +3304,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "headers", "http", @@ -3317,7 +3317,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3333,7 +3333,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=2ab432fba19eb8862c594d24af39d8f9f6b4eac6#2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" dependencies = [ "futures-util", "itertools 0.13.0", @@ -3502,16 +3502,16 @@ checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rustyline-async" version = "0.4.3" -source = 
"git+https://github.com/girlbossceo/rustyline-async?rev=9654cc84e19241f6e19021eb8e677892656f5071#9654cc84e19241f6e19021eb8e677892656f5071" +source = "git+https://github.com/girlbossceo/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" dependencies = [ "crossterm", "futures-channel", "futures-util", "pin-project", "thingbuf", - "thiserror 1.0.69", + "thiserror 2.0.3", "unicode-segmentation", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] @@ -3749,9 +3749,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -4056,9 +4056,9 @@ dependencies = [ [[package]] name = "termimad" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cda3a7471f9978706978454c45ef8dda67e9f8f3cdb9319eb2e9323deb6ae62" +checksum = "ea6a5d4cf55d9f1cb04fcda48f725772d0733ae34e030dfc4dd36e738a5965f4" dependencies = [ "coolor", "crokey", @@ -4067,7 +4067,7 @@ dependencies = [ "minimad", "serde", "thiserror 1.0.69", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -4425,9 +4425,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", "bitflags 2.6.0", @@ -4628,6 +4628,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" 
+version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unsafe-libyaml" version = "0.2.11" diff --git a/Cargo.toml b/Cargo.toml index 68c87c57..ed24435f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ features = ["std", "serde"] version = "0.5.7" [workspace.dependencies.ctor] -version = "0.2.8" +version = "0.2.9" [workspace.dependencies.cargo_toml] version = "0.20" @@ -82,7 +82,7 @@ version = "1.1.0" version = "1.11.1" [workspace.dependencies.axum] -version = "0.7.5" +version = "0.7.9" default-features = false features = [ "form", @@ -95,7 +95,7 @@ features = [ ] [workspace.dependencies.axum-extra] -version = "0.9.4" +version = "0.9.6" default-features = false features = ["typed-header", "tracing"] @@ -116,7 +116,7 @@ default-features = false features = ["util"] [workspace.dependencies.tower-http] -version = "0.6.1" +version = "0.6.2" default-features = false features = [ "add-extension", @@ -149,7 +149,7 @@ default-features = false features = ["rc"] [workspace.dependencies.serde_json] -version = "1.0.132" +version = "1.0.133" default-features = false features = ["raw_value"] @@ -255,7 +255,7 @@ features = ["alloc", "std"] default-features = false [workspace.dependencies.hyper] -version = "1.5.0" +version = "1.5.1" default-features = false features = [ "server", @@ -322,7 +322,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "2ab432fba19eb8862c594d24af39d8f9f6b4eac6" +rev = "97e2fb6df13f65532d33fc2f0f097ad5a449dd70" features = [ "compat", "rand", @@ -335,6 +335,7 @@ features = [ "server-util", "unstable-exhaustive-types", "ring-compat", + "compat-upload-signatures", "identifiers-validation", "unstable-unspecified", "unstable-msc2409", @@ -460,7 +461,7 @@ version = "0.4.3" default-features = false [workspace.dependencies.termimad] -version = 
"0.31.0" +version = "0.31.1" default-features = false [workspace.dependencies.checked_ops] @@ -504,7 +505,7 @@ rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91" # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b [patch.crates-io.rustyline-async] git = "https://github.com/girlbossceo/rustyline-async" -rev = "9654cc84e19241f6e19021eb8e677892656f5071" +rev = "deaeb0694e2083f53d363b648da06e10fc13900c" # # Our crates From 9100af9974c12d06a917ce0715b636b5d1b237f2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 21 Nov 2024 23:45:16 -0500 Subject: [PATCH 0260/1248] add eventid_pdu database cf cache Signed-off-by: strawberry --- src/core/config/mod.rs | 6 ++++++ src/database/opts.rs | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 355881b3..753a08fa 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -198,6 +198,10 @@ pub struct Config { #[serde(default = "default_eventidshort_cache_capacity")] pub eventidshort_cache_capacity: u32, + /// default: varies by system + #[serde(default = "default_eventid_pdu_cache_capacity")] + pub eventid_pdu_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_shortstatekey_cache_capacity")] pub shortstatekey_cache_capacity: u32, @@ -2040,6 +2044,8 @@ fn default_shorteventid_cache_capacity() -> u32 { parallelism_scaled_u32(50_000) fn default_eventidshort_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(100_000) } +fn default_eventid_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(100_000) } + fn default_shortstatekey_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } fn default_statekeyshort_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } diff --git a/src/database/opts.rs b/src/database/opts.rs index 46fb4c54..732f571f 100644 --- 
a/src/database/opts.rs +++ b/src/database/opts.rs @@ -136,6 +136,14 @@ pub(crate) fn cf_options( cache_size(cfg, cfg.eventidshort_cache_capacity, 64)?, ), + "eventid_pduid" => set_table_with_new_cache( + &mut opts, + cfg, + cache, + name, + cache_size(cfg, cfg.eventid_pdu_cache_capacity, 64)?, + ), + "shorteventid_authchain" => { set_table_with_new_cache( &mut opts, From 5256cad396b97b520a1078e327b8c2adfb423e9b Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 21 Nov 2024 23:49:46 -0500 Subject: [PATCH 0261/1248] ignore bare_urls lint for well_known client config option Signed-off-by: strawberry --- src/core/config/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 753a08fa..66c78440 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1516,6 +1516,7 @@ pub struct TlsConfig { pub dual_protocol: bool, } +#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] #[derive(Clone, Debug, Deserialize, Default)] #[config_example_generator(filename = "conduwuit-example.toml", section = "global.well_known")] pub struct WellKnownConfig { @@ -1528,7 +1529,7 @@ pub struct WellKnownConfig { /// The server URL that the client well-known file will serve. This should /// not contain a port, and should just be a valid HTTPS URL. 
/// - /// example: "" + /// example: "https://matrix.example.com" pub client: Option, pub support_page: Option, pub support_role: Option, From bae06670661ada619f9a72c0ee4dcad2621c9dd4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 20 Nov 2024 19:42:34 +0000 Subject: [PATCH 0262/1248] limit sync response events to within the since/next_batch window fixes #606 Signed-off-by: Jason Volk --- src/api/client/sync/mod.rs | 6 ++++-- src/api/client/sync/v3.rs | 3 ++- src/api/client/sync/v4.rs | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 3201b827..a9715b84 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -9,7 +9,8 @@ pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route}; use crate::{service::Services, Error, PduEvent, Result}; async fn load_timeline( - services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: usize, + services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, + next_batch: Option, limit: usize, ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { let last_timeline_count = services .rooms @@ -26,7 +27,8 @@ async fn load_timeline( .timeline .pdus_rev(Some(sender_user), room_id, None) .await? 
- .ready_take_while(|(pducount, _)| *pducount > roomsincecount); + .ready_skip_while(|&(pducount, _)| pducount > next_batch.unwrap_or_else(PduCount::max)) + .ready_take_while(|&(pducount, _)| pducount > roomsincecount); // Take the last events for the timeline let timeline_pdus: Vec<_> = non_timeline_pdus diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 77ba4c3f..7a78ea74 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -540,7 +540,8 @@ async fn load_joined_room( let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; drop(insert_lock); - let (timeline_pdus, limited) = load_timeline(services, sender_user, room_id, sincecount, 10_usize).await?; + let (timeline_pdus, limited) = + load_timeline(services, sender_user, room_id, sincecount, Some(next_batchcount), 10_usize).await?; let send_notification_counts = !timeline_pdus.is_empty() || services diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 91abd24e..57edc953 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -473,7 +473,7 @@ pub(crate) async fn sync_events_v4_route( (timeline_pdus, limited) = (Vec::new(), true); } else { (timeline_pdus, limited) = - match load_timeline(&services, sender_user, room_id, roomsincecount, *timeline_limit).await { + match load_timeline(&services, sender_user, room_id, roomsincecount, None, *timeline_limit).await { Ok(value) => value, Err(err) => { warn!("Encountered missing timeline in {}, error {}", room_id, err); From aea82183b2deb7318f6c0b1f8e4c2c86be9fbdcc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 21 Nov 2024 05:51:25 +0000 Subject: [PATCH 0263/1248] add set intersection util for two sorted streams Signed-off-by: Jason Volk --- src/core/utils/set.rs | 32 +++++++++++++++++++++++++++++++- src/core/utils/tests.rs | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/src/core/utils/set.rs 
b/src/core/utils/set.rs index 563f9df5..ddcf05ff 100644 --- a/src/core/utils/set.rs +++ b/src/core/utils/set.rs @@ -1,4 +1,10 @@ -use std::cmp::{Eq, Ord}; +use std::{ + cmp::{Eq, Ord}, + pin::Pin, + sync::Arc, +}; + +use futures::{Stream, StreamExt}; use crate::{is_equal_to, is_less_than}; @@ -45,3 +51,27 @@ where }) }) } + +/// Intersection of sets +/// +/// Outputs the set of elements common to both streams. Streams must be sorted. +pub fn intersection_sorted_stream2(a: S, b: S) -> impl Stream + Send +where + S: Stream + Send + Unpin, + Item: Eq + PartialOrd + Send + Sync, +{ + use tokio::sync::Mutex; + + let b = Arc::new(Mutex::new(b.peekable())); + a.map(move |ai| (ai, b.clone())) + .filter_map(|(ai, b)| async move { + let mut lock = b.lock().await; + while let Some(bi) = Pin::new(&mut *lock).next_if(|bi| *bi <= ai).await.as_ref() { + if ai == *bi { + return Some(ai); + } + } + + None + }) +} diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index 84d35936..f4f78b02 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -237,3 +237,42 @@ fn set_intersection_sorted_all() { let r = intersection_sorted(i.into_iter()); assert!(r.eq(["bar", "baz", "foo"].iter())); } + +#[tokio::test] +async fn set_intersection_sorted_stream2() { + use futures::StreamExt; + use utils::{set::intersection_sorted_stream2, IterStream}; + + let a = ["bar"]; + let b = ["bar", "foo"]; + let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream()) + .collect::>() + .await; + assert!(r.eq(&["bar"])); + + let r = intersection_sorted_stream2(b.iter().stream(), a.iter().stream()) + .collect::>() + .await; + assert!(r.eq(&["bar"])); + + let a = ["aaa", "ccc", "xxx", "yyy"]; + let b = ["hhh", "iii", "jjj", "zzz"]; + let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream()) + .collect::>() + .await; + assert!(r.is_empty()); + + let a = ["aaa", "ccc", "eee", "ggg"]; + let b = ["aaa", "bbb", "ccc", "ddd", "eee"]; + let r = 
intersection_sorted_stream2(a.iter().stream(), b.iter().stream()) + .collect::>() + .await; + assert!(r.eq(&["aaa", "ccc", "eee"])); + + let a = ["aaa", "ccc", "eee", "ggg", "hhh", "iii"]; + let b = ["bbb", "ccc", "ddd", "fff", "ggg", "iii"]; + let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream()) + .collect::>() + .await; + assert!(r.eq(&["ccc", "ggg", "iii"])); +} From 3968d038683592745799caca89e00e5547b6fde8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 20 Nov 2024 20:21:31 +0000 Subject: [PATCH 0264/1248] move and improve common-rooms related Signed-off-by: Jason Volk --- src/api/client/presence.rs | 4 ++-- src/api/client/sync/mod.rs | 2 +- src/api/client/unstable.rs | 2 +- src/api/client/user_directory.rs | 4 ++-- src/service/rooms/state_cache/mod.rs | 25 +++++++++++++++---------- src/service/rooms/user/mod.rs | 22 ---------------------- 6 files changed, 21 insertions(+), 38 deletions(-) diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index ba48808b..948d6caa 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -52,8 +52,8 @@ pub(crate) async fn get_presence_route( let has_shared_rooms = services .rooms - .user - .has_shared_rooms(sender_user, &body.user_id) + .state_cache + .user_sees_user(sender_user, &body.user_id) .await; if has_shared_rooms { diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index a9715b84..ba50d77c 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -52,7 +52,7 @@ async fn share_encrypted_room( ) -> bool { services .rooms - .user + .state_cache .get_shared_rooms(sender_user, user_id) .ready_filter(|&room_id| Some(room_id) != ignore_room) .any(|other_room_id| { diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index dc570295..5de41f44 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -55,7 +55,7 @@ pub(crate) async fn get_mutual_rooms_route( let mutual_rooms: Vec = services .rooms - 
.user + .state_cache .get_shared_rooms(sender_user, &body.user_id) .map(ToOwned::to_owned) .collect() diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 868811a3..f3fee8d1 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -71,8 +71,8 @@ pub(crate) async fn search_users_route( } else { let user_is_in_shared_rooms = services .rooms - .user - .has_shared_rooms(sender_user, &user.user_id) + .state_cache + .user_sees_user(sender_user, &user.user_id) .await; if user_is_in_shared_rooms { diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 6e330fdc..156345fe 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -10,7 +10,7 @@ use conduit::{ warn, Result, }; use database::{serialize_to_vec, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{future::join4, stream::iter, Stream, StreamExt}; +use futures::{future::join4, pin_mut, stream::iter, Stream, StreamExt}; use itertools::Itertools; use ruma::{ events::{ @@ -385,16 +385,21 @@ impl Service { /// Returns true if user_a and user_b share at least one room. 
#[tracing::instrument(skip(self), level = "debug")] pub async fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> bool { - // Minimize number of point-queries by iterating user with least nr rooms - let (a, b) = if self.rooms_joined(user_a).count().await < self.rooms_joined(user_b).count().await { - (user_a, user_b) - } else { - (user_b, user_a) - }; + let get_shared_rooms = self.get_shared_rooms(user_a, user_b); - self.rooms_joined(a) - .any(|room_id| self.is_joined(b, room_id)) - .await + pin_mut!(get_shared_rooms); + get_shared_rooms.next().await.is_some() + } + + /// List the rooms common between two users + pub fn get_shared_rooms<'a>( + &'a self, user_a: &'a UserId, user_b: &'a UserId, + ) -> impl Stream + Send + 'a { + use conduit::utils::set; + + let a = self.rooms_joined(user_a); + let b = self.rooms_joined(user_b); + set::intersection_sorted_stream2(a, b) } /// Returns an iterator of all joined members of a room. diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 99587134..948baa5e 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use conduit::{implement, Result}; use database::{Deserialized, Map}; -use futures::{pin_mut, Stream, StreamExt}; use ruma::{RoomId, UserId}; use crate::{globals, rooms, rooms::short::ShortStateHash, Dep}; @@ -22,7 +21,6 @@ struct Data { struct Services { globals: Dep, short: Dep, - state_cache: Dep, } impl crate::Service for Service { @@ -38,7 +36,6 @@ impl crate::Service for Service { services: Services { globals: args.depend::("globals"), short: args.depend::("rooms::short"), - state_cache: args.depend::("rooms::state_cache"), }, })) } @@ -118,22 +115,3 @@ pub async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Re .await .deserialized() } - -#[implement(Service)] -pub async fn has_shared_rooms<'a>(&'a self, user_a: &'a UserId, user_b: &'a UserId) -> bool { - let get_shared_rooms = 
self.get_shared_rooms(user_a, user_b); - - pin_mut!(get_shared_rooms); - get_shared_rooms.next().await.is_some() -} - -//TODO: optimize; replace point-queries with dual iteration -#[implement(Service)] -pub fn get_shared_rooms<'a>( - &'a self, user_a: &'a UserId, user_b: &'a UserId, -) -> impl Stream + Send + 'a { - self.services - .state_cache - .rooms_joined(user_a) - .filter(|room_id| self.services.state_cache.is_joined(user_b, room_id)) -} From b94eeb9580af01d68d0c916a01a304b58e20e4ab Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 22 Nov 2024 03:57:29 +0000 Subject: [PATCH 0265/1248] fix deletions on readreceipt update Signed-off-by: Jason Volk --- src/service/rooms/read_receipt/data.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index 80a35e88..1194598d 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -42,16 +42,14 @@ impl Data { } pub(super) async fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) { - type KeyVal<'a> = (&'a RoomId, u64, &'a UserId); - // Remove old entry let last_possible_key = (room_id, u64::MAX); self.readreceiptid_readreceipt - .rev_keys_from(&last_possible_key) + .rev_keys_from_raw(&last_possible_key) .ignore_err() - .ready_take_while(|(r, ..): &KeyVal<'_>| *r == room_id) - .ready_filter_map(|(r, c, u): KeyVal<'_>| (u == user_id).then_some((r, c, u))) - .ready_for_each(|old: KeyVal<'_>| self.readreceiptid_readreceipt.del(old)) + .ready_take_while(|key| key.starts_with(room_id.as_bytes())) + .ready_filter_map(|key| key.ends_with(user_id.as_bytes()).then_some(key)) + .ready_for_each(|key| self.readreceiptid_readreceipt.del(key)) .await; let count = self.services.globals.next_count().unwrap(); From f15370027e9712eb0fce07f0619a8d74cead149a Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 23 Nov 2024 12:05:52 -0500 Subject: [PATCH 
0266/1248] improve DNS error messages Signed-off-by: strawberry --- src/service/resolver/actual.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 5dc03d14..fec29133 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -4,7 +4,7 @@ use std::{ sync::Arc, }; -use conduit::{debug, debug_error, debug_info, debug_warn, err, trace, Err, Result}; +use conduit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; use hickory_resolver::{error::ResolveError, lookup::SrvLookup}; use ipaddress::IPAddress; use ruma::ServerName; @@ -313,7 +313,6 @@ impl super::Service { Ok(None) } - #[allow(clippy::single_match_else)] fn handle_resolve_error(e: &ResolveError) -> Result<()> { use hickory_resolver::error::ResolveErrorKind; @@ -322,10 +321,21 @@ impl super::Service { .. } => { // Raise to debug_warn if we can find out the result wasn't from cache - debug!("{e}"); + debug!("No DNS records found: {e}"); Ok(()) }, - _ => Err!(error!("DNS {e}")), + ResolveErrorKind::Timeout => { + Err!(warn!("DNS {e}")) + }, + ResolveErrorKind::NoConnections => { + error!( + "Your DNS server is overloaded and has ran out of connections. It is strongly recommended you \ + remediate this issue to ensure proper federation connectivity." 
+ ); + + Err!(error!("DNS error: {e}")) + }, + _ => Err!(error!("DNS error: {e}")), } } From 9d23a2b6f5e5a721d02c818f2c1bbd0caa716357 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 23 Nov 2024 12:53:26 -0500 Subject: [PATCH 0267/1248] add missing length checks on pushkey/appid, improve error msgs for pusher Signed-off-by: strawberry --- src/api/client/push.rs | 4 +-- src/service/pusher/mod.rs | 54 ++++++++++++++++++++++----------------- 2 files changed, 33 insertions(+), 25 deletions(-) diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 97243ab4..f2376e7c 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -441,9 +441,9 @@ pub(crate) async fn set_pushers_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services.pusher.set_pusher(sender_user, &body.action); + services.pusher.set_pusher(sender_user, &body.action)?; - Ok(set_pusher::v3::Response::default()) + Ok(set_pusher::v3::Response::new()) } /// user somehow has bad push rules, these must always exist per spec. 
diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 2b90319e..fb43fdb8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -2,9 +2,9 @@ use std::{fmt::Debug, mem, sync::Arc}; use bytes::BytesMut; use conduit::{ - debug_error, err, trace, + debug_warn, err, trace, utils::{stream::TryIgnore, string_from_bytes}, - Err, PduEvent, Result, + warn, Err, PduEvent, Result, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; @@ -65,17 +65,29 @@ impl crate::Service for Service { } impl Service { - pub fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) { + pub fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) -> Result { match pusher { set_pusher::v3::PusherAction::Post(data) => { - let key = (sender, &data.pusher.ids.pushkey); + let pushkey = data.pusher.ids.pushkey.as_str(); + + if pushkey.len() > 512 { + return Err!(Request(InvalidParam("Push key length cannot be greater than 512 bytes."))); + } + + if data.pusher.ids.app_id.as_str().len() > 64 { + return Err!(Request(InvalidParam("App ID length cannot be greater than 64 bytes."))); + } + + let key = (sender, data.pusher.ids.pushkey.as_str()); self.db.senderkey_pusher.put(key, Json(pusher)); }, set_pusher::v3::PusherAction::Delete(ids) => { - let key = (sender, &ids.pushkey); + let key = (sender, ids.pushkey.as_str()); self.db.senderkey_pusher.del(key); }, } + + Ok(()) } pub async fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result { @@ -166,8 +178,8 @@ impl Service { let body = response.bytes().await?; // TODO: handle timeout if !status.is_success() { - debug_error!("Push gateway response body: {:?}", string_from_bytes(&body)); - return Err!(BadServerResponse(error!( + debug_warn!("Push gateway response body: {:?}", string_from_bytes(&body)); + return Err!(BadServerResponse(warn!( "Push gateway {dest} returned unsuccessful HTTP response: {status}" ))); } @@ -178,10 +190,10 
@@ impl Service { .expect("reqwest body is valid http body"), ); response - .map_err(|e| err!(BadServerResponse(error!("Push gateway {dest} returned invalid response: {e}")))) + .map_err(|e| err!(BadServerResponse(warn!("Push gateway {dest} returned invalid response: {e}")))) }, Err(e) => { - debug_error!("Could not send request to pusher {dest}: {e}"); + warn!("Could not send request to pusher {dest}: {e}"); Err(e.into()) }, } @@ -278,11 +290,7 @@ impl Service { // TODO: email match &pusher.kind { PusherKind::Http(http) => { - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add - // more info - // 2. can pusher/devices have conflicting formats + // TODO (timo): can pusher/devices have conflicting formats let event_id_only = http.format == Some(PushFormat::EventIdOnly); let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); @@ -297,24 +305,24 @@ impl Service { let d = vec![device]; let mut notifi = Notification::new(d); - notifi.prio = NotificationPriority::Low; notifi.event_id = Some((*event.event_id).to_owned()); notifi.room_id = Some((*event.room_id).to_owned()); // TODO: missed calls notifi.counts = NotificationCounts::new(unread, uint!(0)); - if event.kind == TimelineEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High; - } - if event_id_only { self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) .await?; } else { + if event.kind == TimelineEventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + { + notifi.prio = NotificationPriority::High; + } else { + notifi.prio = NotificationPriority::Low; + } notifi.sender = Some(event.sender.clone()); notifi.event_type = Some(event.kind.clone()); notifi.content = serde_json::value::to_raw_value(&event.content).ok(); From 
3fe98f35f2571f80d39d251977208b1b074c1622 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 23 Nov 2024 13:45:27 -0500 Subject: [PATCH 0268/1248] remove queued push keys on pusher deletion, use more refs Signed-off-by: strawberry --- src/api/client/push.rs | 5 ++++- src/service/appservice/mod.rs | 14 ++++++-------- src/service/pusher/mod.rs | 12 ++++++++++-- src/service/sending/mod.rs | 35 ++++++++++++++++++++++++++++------- 4 files changed, 48 insertions(+), 18 deletions(-) diff --git a/src/api/client/push.rs b/src/api/client/push.rs index f2376e7c..f27ead1f 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -441,7 +441,10 @@ pub(crate) async fn set_pushers_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services.pusher.set_pusher(sender_user, &body.action)?; + services + .pusher + .set_pusher(sender_user, &body.action) + .await?; Ok(set_pusher::v3::Response::new()) } diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 1617e6e6..4a20b130 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -79,26 +79,24 @@ impl Service { /// /// # Arguments /// - /// * `service_name` - the name you send to register the service previously - pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> { + /// * `service_name` - the registration ID of the appservice + pub async fn unregister_appservice(&self, appservice_id: &str) -> Result<()> { // removes the appservice registration info self.registration_info .write() .await - .remove(service_name) + .remove(appservice_id) .ok_or(err!("Appservice not found"))?; // remove the appservice from the database - self.db.id_appserviceregistrations.remove(service_name); + self.db.id_appserviceregistrations.del(appservice_id); // deletes all active requests for the appservice if there are any so we stop // sending to the URL self.services .sending - .cleanup_events(service_name.to_owned()) - .await; 
- - Ok(()) + .cleanup_events(Some(appservice_id), None, None) + .await } pub async fn get_registration(&self, id: &str) -> Option { diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index fb43fdb8..6b02c7f8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -26,7 +26,7 @@ use ruma::{ uint, RoomId, UInt, UserId, }; -use crate::{client, globals, rooms, users, Dep}; +use crate::{client, globals, rooms, sending, users, Dep}; pub struct Service { db: Data, @@ -39,6 +39,7 @@ struct Services { state_accessor: Dep, state_cache: Dep, users: Dep, + sending: Dep, } struct Data { @@ -57,6 +58,7 @@ impl crate::Service for Service { state_accessor: args.depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), users: args.depend::("users"), + sending: args.depend::("sending"), }, })) } @@ -65,7 +67,7 @@ impl crate::Service for Service { } impl Service { - pub fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) -> Result { + pub async fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) -> Result { match pusher { set_pusher::v3::PusherAction::Post(data) => { let pushkey = data.pusher.ids.pushkey.as_str(); @@ -84,6 +86,12 @@ impl Service { set_pusher::v3::PusherAction::Delete(ids) => { let key = (sender, ids.pushkey.as_str()); self.db.senderkey_pusher.del(key); + + self.services + .sending + .cleanup_events(None, Some(sender), Some(ids.pushkey.as_str())) + .await + .ok(); }, } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 5a070306..611940be 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -8,7 +8,7 @@ use std::{fmt::Debug, iter::once, sync::Arc}; use async_trait::async_trait; use conduit::{ - err, + debug_warn, err, utils::{ReadyExt, TryReadyExt}, warn, Result, Server, }; @@ -285,13 +285,34 @@ impl Service { appservice::send_request(client, registration, request).await } - /// Cleanup event data - /// Used for 
instance after we remove an appservice registration + /// Clean up queued sending event data + /// + /// Used after we remove an appservice registration or a user deletes a push + /// key #[tracing::instrument(skip(self), level = "debug")] - pub async fn cleanup_events(&self, appservice_id: String) { - self.db - .delete_all_requests_for(&Destination::Appservice(appservice_id)) - .await; + pub async fn cleanup_events( + &self, appservice_id: Option<&str>, user_id: Option<&UserId>, push_key: Option<&str>, + ) -> Result { + match (appservice_id, user_id, push_key) { + (None, Some(user_id), Some(push_key)) => { + self.db + .delete_all_requests_for(&Destination::Push(user_id.to_owned(), push_key.to_owned())) + .await; + + Ok(()) + }, + (Some(appservice_id), None, None) => { + self.db + .delete_all_requests_for(&Destination::Appservice(appservice_id.to_owned())) + .await; + + Ok(()) + }, + _ => { + debug_warn!("cleanup_events called with too many or too few arguments"); + Ok(()) + }, + } } fn dispatch(&self, msg: Msg) -> Result<()> { From af772b0240c471d8b2e902d9cef0deab08d5c6fb Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 23 Nov 2024 22:35:54 -0500 Subject: [PATCH 0269/1248] various misc documentation improvements Signed-off-by: strawberry --- CONTRIBUTING.md | 3 ++- README.md | 3 +-- debian/README.md | 15 ++++++++----- docs/SUMMARY.md | 1 + docs/deploying/docker.md | 37 +++++++++++++++++++++---------- docs/deploying/kubernetes.md | 8 +++++-- docs/development.md | 24 ++++++++++++--------- docs/development/testing.md | 4 ++-- docs/troubleshooting.md | 42 +++++++++++++++++++++++++++--------- 9 files changed, 94 insertions(+), 43 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0a11394e..fb540011 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -131,7 +131,8 @@ allowed to be licenced under the Apache-2.0 licence and all of your conduct is in line with the Contributor's Covenant, and conduwuit's Code of Conduct. 
Contribution by users who violate either of these code of conducts will not have -their contributions accepted. +their contributions accepted. This includes users who have been banned from +conduwuit Matrix rooms for Code of Conduct violations. [issues]: https://github.com/girlbossceo/conduwuit/issues [conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay diff --git a/README.md b/README.md index 4e97f1f0..4faf1ad7 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,6 @@ # conduwuit -`main`: [![CI and -Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) +[![conduwuit main room](https://img.shields.io/matrix/conduwuit%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit:puppygock.gay) [![conduwuit space](https://img.shields.io/matrix/conduwuit-space%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit-space%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit-space:puppygock.gay) [![CI and Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) diff --git a/debian/README.md b/debian/README.md index 62aa2112..89354469 100644 --- a/debian/README.md +++ b/debian/README.md @@ -1,17 +1,22 @@ # conduwuit for Debian -Information about downloading and deploying the Debian package. This may also be referenced for other `apt`-based distros such as Ubuntu. +Information about downloading and deploying the Debian package. This may also be +referenced for other `apt`-based distros such as Ubuntu. 
### Installation -It is recommended to see the [generic deployment guide](../deploying/generic.md) for further information if needed as usage of the Debian package is generally related. +It is recommended to see the [generic deployment guide](../deploying/generic.md) +for further information if needed as usage of the Debian package is generally +related. ### Configuration -When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml` as the default config. At the minimum, you will need to change your `server_name` here. +When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml` +as the default config. The config mentions things required to be changed before +starting. -You can tweak more detailed settings by uncommenting and setting the config options -in `/etc/conduwuit/conduwuit.toml`. +You can tweak more detailed settings by uncommenting and setting the config +options in `/etc/conduwuit/conduwuit.toml`. ### Running diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 5e4155c4..8e07adc2 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -8,6 +8,7 @@ - [Generic](deploying/generic.md) - [NixOS](deploying/nixos.md) - [Docker](deploying/docker.md) + - [Kubernetes](deploying/kubernetes.md) - [Arch Linux](deploying/arch-linux.md) - [Debian](deploying/debian.md) - [FreeBSD](deploying/freebsd.md) diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index e9c49c71..fffa7770 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -11,9 +11,9 @@ OCI images for conduwuit are available in the registries listed below. | Registry | Image | Size | Notes | | --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable tagged image. 
| -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable tagged image. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable tagged image. | +| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable latest tagged image. | +| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. | +| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable latest tagged image. | | GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Stable main branch. | | GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl] | ![Image Size][shield-main] | Stable main branch. | | Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Stable main branch. | @@ -92,16 +92,28 @@ Additional info about deploying conduwuit can be found [here](generic.md). ### Build -To build the conduwuit image with docker-compose, you first need to open and -modify the `docker-compose.yml` file. There you need to comment the `image:` -option and uncomment the `build:` option. Then call docker compose with: +Official conduwuit images are built using Nix's +[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are +repeatable and reproducible by anyone, keeps the images lightweight, and can be +built offline. -```bash -docker compose up -``` +This also ensures portability of our images because `buildLayeredImage` builds +OCI images, not Docker images, and works with other container software. -This will also start the container right afterwards, so if want it to run in -detached mode, you also should use the `-d` flag. +The OCI images are OS-less with only a very minimal environment of the `tini` +init system, CA certificates, and the conduwuit binary. 
This does mean there is +not a shell, but in theory you can get a shell by adding the necessary layers +to the layered image. However it's very unlikely you will need a shell for any +real troubleshooting. + +The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def]. + +To build an OCI image using Nix, the following outputs can be built: +- `nix build -L .#oci-image` (default features, x86_64 glibc) +- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl) +- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl) +- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl) +- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl) ### Run @@ -136,3 +148,6 @@ those two files. ## Voice communication See the [TURN](../turn.md) page. + +[nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage +[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md index 2a1bcb51..d7721722 100644 --- a/docs/deploying/kubernetes.md +++ b/docs/deploying/kubernetes.md @@ -1,4 +1,8 @@ # conduwuit for Kubernetes -conduwuit doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run conduwuit on Kubernetes: - +conduwuit doesn't support horizontal scalability or distributed loading +natively, however a community maintained Helm Chart is available here to run +conduwuit on Kubernetes: + +Should changes need to be made, please reach out to the maintainer in our +Matrix room as this is not maintained/controlled by the conduwuit maintainers. 
diff --git a/docs/development.md b/docs/development.md index e1f36c0c..28b07667 100644 --- a/docs/development.md +++ b/docs/development.md @@ -75,21 +75,21 @@ development (unresponsive or slow upstream), conduwuit-specific usecases, or lack of time to upstream some things. - [ruma/ruma][1]: - various performance -improvements, more features, faster-paced development, client/server interop +improvements, more features, faster-paced development, better client/server interop hacks upstream won't accept, etc - [facebook/rocksdb][2]: - liburing -build fixes, GCC build fix, and logging callback C API for Rust tracing -integration +build fixes and GCC debug build fix - [tikv/jemallocator][3]: - musl -builds seem to be broken on upstream +builds seem to be broken on upstream, fixes some broken/suspicious code in +places, additional safety measures, and support redzones for Valgrind - [zyansheep/rustyline-async][4]: - tab completion callback and -`CTRL+\` signal quit event for CLI +`CTRL+\` signal quit event for conduwuit console CLI - [rust-rocksdb/rust-rocksdb][5]: - - [`@zaidoon1`'s][8] fork -has quicker updates, more up to date dependencies. Our changes fix musl build -issues, Rust part of the logging callback C API, removes unnecessary `gtest` -include, and uses our RocksDB and jemallocator + - [`@zaidoon1`][8]'s fork +has quicker updates, more up to date dependencies, etc. Our fork fixes musl build +issues, removes unnecessary `gtest` include, and uses our RocksDB and jemallocator +forks. - [tokio-rs/tracing][6]: - Implements `Clone` for `EnvFilter` to support dynamically changing tracing envfilter's alongside other logging/metrics things @@ -103,12 +103,16 @@ tokio_unstable` flag to enable experimental tokio APIs. 
A build might look like this: ```bash -RUSTFLAGS="--cfg tokio_unstable" cargo build \ +RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \ --release \ --no-default-features \ --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console ``` +You will also need to enable the `tokio_console` config option in conduwuit when +starting it. This was due to tokio-console causing gradual memory leak/usage +if left enabled. + [1]: https://github.com/ruma/ruma/ [2]: https://github.com/facebook/rocksdb/ [3]: https://github.com/tikv/jemallocator/ diff --git a/docs/development/testing.md b/docs/development/testing.md index 06720dd8..2d421767 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -5,8 +5,8 @@ Have a look at [Complement's repository][complement] for an explanation of what it is. -To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv installed -and set up, you can: +To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv +installed and set up, you can: * Run `./bin/complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl ./path/to/results.jsonl` to build a Complement image, run the tests, and output diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 74e19de7..c8655e06 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -106,24 +106,46 @@ Various debug commands can be found in `!admin debug`. #### Debug/Trace log level -conduwuit builds without debug or trace log levels by default for at least -performance reasons. This may change in the future and/or binaries providing -such configurations may be provided. If you need to access debug/trace log -levels, you will need to build without the `release_max_log_level` feature. +conduwuit builds without debug or trace log levels at compile time by default +for substantial performance gains in CPU usage and improved compile times. 
If +you need to access debug/trace log levels, you will need to build without the +`release_max_log_level` feature or use our provided static debug binaries. #### Changing log level dynamically conduwuit supports changing the tracing log environment filter on-the-fly using -the admin command `!admin debug change-log-level`. This accepts a string -**without quotes** the same format as the `log` config option. +the admin command `!admin debug change-log-level `. This accepts +a string **without quotes** the same format as the `log` config option. + +Example: `!admin debug change-log-level debug` + +This can also accept complex filters such as: +`!admin debug change-log-level info,conduit_service[{dest="example.com"}]=trace,ruma_state_res=trace` +`!admin debug change-log-level info,conduit_service[{dest="example.com"}]=trace,conduit_service[send{dest="example.org"}]=trace` + +And to reset the log level to the one that was set at startup / last config +load, simply pass the `--reset` flag. + +`!admin debug change-log-level --reset` #### Pinging servers -conduwuit can ping other servers using `!admin debug ping`. This takes a server -name and goes through the server discovery process and queries +conduwuit can ping other servers using `!admin debug ping `. This takes +a server name and goes through the server discovery process and queries `/_matrix/federation/v1/version`. Errors are outputted. +While it does measure the latency of the request, it is not indicative of +server performance on either side as that endpoint is completely unauthenticated +and simply fetches a string on a static JSON endpoint. It is very low cost both +bandwidth and computationally. 
+ #### Allocator memory stats -When using jemalloc with jemallocator's `stats` feature, you can see conduwuit's -jemalloc memory stats by using `!admin debug memory-stats` +When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you +can see conduwuit's high-level allocator stats by using +`!admin server memory-usage` at the bottom. + +If you are a developer, you can also view the raw jemalloc statistics with +`!admin debug memory-stats`. Please note that this output is extremely large +which may only be visible in the conduwuit console CLI due to PDU size limits, +and is not easy for non-developers to understand. From 175e1c6453ac5ebe89f630acb7cbda0bec921d93 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 23 Nov 2024 22:36:22 -0500 Subject: [PATCH 0270/1248] correct admin cmd getting version and bin name Signed-off-by: strawberry --- src/admin/admin.rs | 2 +- src/main/clap.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/admin/admin.rs b/src/admin/admin.rs index fa497205..d1d8d394 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -9,7 +9,7 @@ use crate::{ }; #[derive(Debug, Parser)] -#[command(name = "admin", version = env!("CARGO_PKG_VERSION"))] +#[command(name = "conduwuit", version = conduit::version())] pub(super) enum AdminCommand { #[command(subcommand)] /// - Commands for managing appservices diff --git a/src/main/clap.rs b/src/main/clap.rs index 86b9fbd6..638398dd 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -10,7 +10,7 @@ use conduit::{ /// Commandline arguments #[derive(Parser, Debug)] -#[clap(version = conduit::version(), about, long_about = None)] +#[clap(version = conduit::version(), about, long_about = None, name = "conduwuit")] pub(crate) struct Args { #[arg(short, long)] /// Path to the config TOML file (optional) From 5f1cab6850cb78d4d4722315c914faf37fb809e3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 23 Nov 2024 03:45:11 +0000 Subject: [PATCH 0271/1248] passthru 
worker thread count from env Signed-off-by: Jason Volk --- Cargo.toml | 9 ++++----- src/main/clap.rs | 8 +++++++- src/main/main.rs | 5 ++--- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ed24435f..058a15e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -207,14 +207,13 @@ default-features = false version = "4.5.21" default-features = false features = [ - "std", "derive", - "help", - #"color", Do we need these? - #"unicode", - "usage", + "env", "error-context", + "help", + "std", "string", + "usage", ] [workspace.dependencies.futures] diff --git a/src/main/clap.rs b/src/main/clap.rs index 638398dd..b10242be 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -5,7 +5,9 @@ use std::path::PathBuf; use clap::Parser; use conduit::{ config::{Figment, FigmentValue}, - err, toml, Err, Result, + err, toml, + utils::available_parallelism, + Err, Result, }; /// Commandline arguments @@ -32,6 +34,10 @@ pub(crate) struct Args { /// Set functional testing modes if available. Ex '--test=smoke' #[arg(long, hide(true))] pub(crate) test: Vec, + + /// Override the tokio worker_thread count. 
+ #[arg(long, hide(true), env = "TOKIO_WORKER_THREADS", default_value = available_parallelism().to_string())] + pub(crate) worker_threads: usize, } /// Parse commandline arguments into structured data diff --git a/src/main/main.rs b/src/main/main.rs index 8e644a15..32d122f6 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -9,12 +9,11 @@ mod tracing; extern crate conduit_core as conduit; use std::{ - cmp, sync::{atomic::Ordering, Arc}, time::Duration, }; -use conduit::{debug_info, error, rustc_flags_capture, utils::available_parallelism, Error, Result}; +use conduit::{debug_info, error, rustc_flags_capture, Error, Result}; use server::Server; use tokio::runtime; @@ -30,7 +29,7 @@ fn main() -> Result<(), Error> { .enable_io() .enable_time() .thread_name(WORKER_NAME) - .worker_threads(cmp::max(WORKER_MIN, available_parallelism())) + .worker_threads(args.worker_threads.max(WORKER_MIN)) .thread_keep_alive(Duration::from_secs(WORKER_KEEPALIVE)) .build() .expect("built runtime"); From f30b08f015b00de2292d2b6938e352f0eb8d7daa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 24 Nov 2024 00:19:55 +0000 Subject: [PATCH 0272/1248] fix optional config section related split api/client well_known simplify well_known config access Signed-off-by: Jason Volk --- src/api/client/mod.rs | 2 + src/api/client/session.rs | 6 +- src/api/client/unversioned.rs | 104 +-------------------------------- src/api/client/well_known.rs | 105 ++++++++++++++++++++++++++++++++++ src/api/server/well_known.rs | 2 +- src/core/config/mod.rs | 26 ++++++--- src/router/serve/mod.rs | 2 +- src/router/serve/tls.rs | 19 +++--- src/service/globals/mod.rs | 16 +----- 9 files changed, 144 insertions(+), 138 deletions(-) create mode 100644 src/api/client/well_known.rs diff --git a/src/api/client/mod.rs b/src/api/client/mod.rs index 9ee88bec..3c9736ea 100644 --- a/src/api/client/mod.rs +++ b/src/api/client/mod.rs @@ -37,6 +37,7 @@ pub(super) mod unstable; pub(super) mod unversioned; pub(super) mod 
user_directory; pub(super) mod voip; +pub(super) mod well_known; pub use account::full_user_deactivate; pub(super) use account::*; @@ -80,6 +81,7 @@ pub(super) use unstable::*; pub(super) use unversioned::*; pub(super) use user_directory::*; pub(super) use voip::*; +pub(super) use well_known::*; /// generated device ID length const DEVICE_ID_LENGTH: usize = 10; diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 6347a2c9..573f3d97 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -198,8 +198,10 @@ pub(crate) async fn login_route( // send client well-known if specified so the client knows to reconfigure itself let client_discovery_info: Option = services - .globals - .well_known_client() + .server + .config + .well_known + .client .as_ref() .map(|server| DiscoveryInfo::new(HomeserverInfo::new(server.to_string()))); diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 3aee30c8..ed3ce37a 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -2,16 +2,9 @@ use std::collections::BTreeMap; use axum::{extract::State, response::IntoResponse, Json}; use futures::StreamExt; -use ruma::api::client::{ - discovery::{ - discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, - discover_support::{self, Contact}, - get_supported_versions, - }, - error::ErrorKind, -}; +use ruma::api::client::discovery::get_supported_versions; -use crate::{Error, Result, Ruma}; +use crate::{Result, Ruma}; /// # `GET /_matrix/client/versions` /// @@ -65,99 +58,6 @@ pub(crate) async fn get_supported_versions_route( Ok(resp) } -/// # `GET /.well-known/matrix/client` -/// -/// Returns the .well-known URL if it is configured, otherwise returns 404. 
-pub(crate) async fn well_known_client( - State(services): State, _body: Ruma, -) -> Result { - let client_url = match services.globals.well_known_client() { - Some(url) => url.to_string(), - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), - }; - - Ok(discover_homeserver::Response { - homeserver: HomeserverInfo { - base_url: client_url.clone(), - }, - identity_server: None, - sliding_sync_proxy: Some(SlidingSyncProxyInfo { - url: client_url, - }), - tile_server: None, - }) -} - -/// # `GET /.well-known/matrix/support` -/// -/// Server support contact and support page of a homeserver's domain. -pub(crate) async fn well_known_support( - State(services): State, _body: Ruma, -) -> Result { - let support_page = services - .globals - .well_known_support_page() - .as_ref() - .map(ToString::to_string); - - let role = services.globals.well_known_support_role().clone(); - - // support page or role must be either defined for this to be valid - if support_page.is_none() && role.is_none() { - return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); - } - - let email_address = services.globals.well_known_support_email().clone(); - let matrix_id = services.globals.well_known_support_mxid().clone(); - - // if a role is specified, an email address or matrix id is required - if role.is_some() && (email_address.is_none() && matrix_id.is_none()) { - return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); - } - - // TOOD: support defining multiple contacts in the config - let mut contacts: Vec = vec![]; - - if let Some(role) = role { - let contact = Contact { - role, - email_address, - matrix_id, - }; - - contacts.push(contact); - } - - // support page or role+contacts must be either defined for this to be valid - if contacts.is_empty() && support_page.is_none() { - return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); - } - - Ok(discover_support::Response { - contacts, - support_page, - }) -} - -/// # `GET /client/server.json` 
-/// -/// Endpoint provided by sliding sync proxy used by some clients such as Element -/// Web as a non-standard health check. -pub(crate) async fn syncv3_client_server_json(State(services): State) -> Result { - let server_url = match services.globals.well_known_client() { - Some(url) => url.to_string(), - None => match services.globals.well_known_server() { - Some(url) => url.to_string(), - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), - }, - }; - - Ok(Json(serde_json::json!({ - "server": server_url, - "version": conduit::version(), - }))) -} - /// # `GET /_conduwuit/server_version` /// /// Conduwuit-specific API to get the server version, results akin to diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs new file mode 100644 index 00000000..674c9bb0 --- /dev/null +++ b/src/api/client/well_known.rs @@ -0,0 +1,105 @@ +use axum::{extract::State, response::IntoResponse, Json}; +use ruma::api::client::{ + discovery::{ + discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, + discover_support::{self, Contact}, + }, + error::ErrorKind, +}; + +use crate::{Error, Result, Ruma}; + +/// # `GET /.well-known/matrix/client` +/// +/// Returns the .well-known URL if it is configured, otherwise returns 404. +pub(crate) async fn well_known_client( + State(services): State, _body: Ruma, +) -> Result { + let client_url = match services.server.config.well_known.client.as_ref() { + Some(url) => url.to_string(), + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + }; + + Ok(discover_homeserver::Response { + homeserver: HomeserverInfo { + base_url: client_url.clone(), + }, + identity_server: None, + sliding_sync_proxy: Some(SlidingSyncProxyInfo { + url: client_url, + }), + tile_server: None, + }) +} + +/// # `GET /.well-known/matrix/support` +/// +/// Server support contact and support page of a homeserver's domain. 
+pub(crate) async fn well_known_support( + State(services): State, _body: Ruma, +) -> Result { + let support_page = services + .server + .config + .well_known + .support_page + .as_ref() + .map(ToString::to_string); + + let role = services.server.config.well_known.support_role.clone(); + + // support page or role must be either defined for this to be valid + if support_page.is_none() && role.is_none() { + return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); + } + + let email_address = services.server.config.well_known.support_email.clone(); + let matrix_id = services.server.config.well_known.support_mxid.clone(); + + // if a role is specified, an email address or matrix id is required + if role.is_some() && (email_address.is_none() && matrix_id.is_none()) { + return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); + } + + // TOOD: support defining multiple contacts in the config + let mut contacts: Vec = vec![]; + + if let Some(role) = role { + let contact = Contact { + role, + email_address, + matrix_id, + }; + + contacts.push(contact); + } + + // support page or role+contacts must be either defined for this to be valid + if contacts.is_empty() && support_page.is_none() { + return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); + } + + Ok(discover_support::Response { + contacts, + support_page, + }) +} + +/// # `GET /client/server.json` +/// +/// Endpoint provided by sliding sync proxy used by some clients such as Element +/// Web as a non-standard health check. 
+pub(crate) async fn syncv3_client_server_json(State(services): State) -> Result { + let server_url = match services.server.config.well_known.client.as_ref() { + Some(url) => url.to_string(), + None => match services.server.config.well_known.server.as_ref() { + Some(url) => url.to_string(), + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + }, + }; + + Ok(Json(serde_json::json!({ + "server": server_url, + "version": conduit::version(), + }))) +} diff --git a/src/api/server/well_known.rs b/src/api/server/well_known.rs index 2cc8f238..e6145aea 100644 --- a/src/api/server/well_known.rs +++ b/src/api/server/well_known.rs @@ -10,7 +10,7 @@ pub(crate) async fn well_known_server( State(services): State, _body: Ruma, ) -> Result { Ok(discover_homeserver::Response { - server: match services.globals.well_known_server() { + server: match services.server.config.well_known.server.as_ref() { Some(server_name) => server_name.to_owned(), None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), }, diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 66c78440..1754581d 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -87,7 +87,8 @@ pub struct Config { port: ListeningPort, // external structure; separate section - pub tls: Option, + #[serde(default)] + pub tls: TlsConfig, /// Uncomment unix_socket_path to listen on a UNIX socket at the specified /// path. If listening on a UNIX socket, you MUST remove/comment the @@ -1500,17 +1501,19 @@ pub struct Config { catchall: BTreeMap, } -#[derive(Clone, Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize, Default)] #[config_example_generator(filename = "conduwuit-example.toml", section = "global.tls")] pub struct TlsConfig { /// Path to a valid TLS certificate file. /// /// example: "/path/to/my/certificate.crt" - pub certs: String, + pub certs: Option, + /// Path to a valid TLS certificate private key. 
/// /// example: "/path/to/my/certificate.key" - pub key: String, + pub key: Option, + /// Whether to listen and allow for HTTP and HTTPS connections (insecure!) #[serde(default)] pub dual_protocol: bool, @@ -1520,20 +1523,25 @@ pub struct TlsConfig { #[derive(Clone, Debug, Deserialize, Default)] #[config_example_generator(filename = "conduwuit-example.toml", section = "global.well_known")] pub struct WellKnownConfig { + /// The server URL that the client well-known file will serve. This should + /// not contain a port, and should just be a valid HTTPS URL. + /// + /// example: "https://matrix.example.com" + pub client: Option, + /// The server base domain of the URL with a specific port that the server /// well-known file will serve. This should contain a port at the end, and /// should not be a URL. /// /// example: "matrix.example.com:443" pub server: Option, - /// The server URL that the client well-known file will serve. This should - /// not contain a port, and should just be a valid HTTPS URL. 
- /// - /// example: "https://matrix.example.com" - pub client: Option, + pub support_page: Option, + pub support_role: Option, + pub support_email: Option, + pub support_mxid: Option, } diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs index 858d3455..b0254772 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -23,7 +23,7 @@ pub(super) async fn serve( if cfg!(unix) && config.unix_socket_path.is_some() { unix::serve(server, app, shutdown).await - } else if config.tls.is_some() { + } else if config.tls.certs.is_some() { #[cfg(feature = "direct_tls")] return tls::serve(server, app, handle, addrs).await; diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index 08c5e7b6..f8d69048 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -6,17 +6,20 @@ use axum_server_dual_protocol::{ axum_server::{bind_rustls, tls_rustls::RustlsConfig}, ServerExt, }; -use conduit::{Result, Server}; +use conduit::{err, Result, Server}; use tokio::task::JoinSet; use tracing::{debug, info, warn}; -pub(super) async fn serve( - server: &Arc, app: Router, handle: ServerHandle, addrs: Vec, -) -> Result<()> { - let config = &server.config; - let tls = config.tls.as_ref().expect("TLS configuration"); - let certs = &tls.certs; - let key = &tls.key; +pub(super) async fn serve(server: &Arc, app: Router, handle: ServerHandle, addrs: Vec) -> Result { + let tls = &server.config.tls; + let certs = tls + .certs + .as_ref() + .ok_or(err!(Config("tls.certs", "Missing required value in tls config section")))?; + let key = tls + .key + .as_ref() + .ok_or(err!(Config("tls.key", "Missing required value in tls config section")))?; // we use ring for ruma and hashing state, but aws-lc-rs is the new default. // without this, TLS mode will panic. 
diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 55dd10aa..3eefe4b7 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -12,11 +12,9 @@ use data::Data; use ipaddress::IPAddress; use regex::RegexSet; use ruma::{ - api::client::discovery::discover_support::ContactRole, OwnedEventId, OwnedRoomAliasId, OwnedServerName, - OwnedUserId, RoomAliasId, RoomVersionId, ServerName, UserId, + OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, RoomVersionId, ServerName, UserId, }; use tokio::sync::Mutex; -use url::Url; use crate::service; @@ -243,14 +241,6 @@ impl Service { pub fn allow_outgoing_read_receipts(&self) -> bool { self.config.allow_outgoing_read_receipts } - pub fn well_known_support_page(&self) -> &Option { &self.config.well_known.support_page } - - pub fn well_known_support_role(&self) -> &Option { &self.config.well_known.support_role } - - pub fn well_known_support_email(&self) -> &Option { &self.config.well_known.support_email } - - pub fn well_known_support_mxid(&self) -> &Option { &self.config.well_known.support_mxid } - pub fn block_non_admin_invites(&self) -> bool { self.config.block_non_admin_invites } pub fn supported_room_versions(&self) -> Vec { @@ -265,10 +255,6 @@ impl Service { } } - pub fn well_known_client(&self) -> &Option { &self.config.well_known.client } - - pub fn well_known_server(&self) -> &Option { &self.config.well_known.server } - #[inline] pub fn valid_cidr_range(&self, ip: &IPAddress) -> bool { for cidr in &self.cidr_range_denylist { From fd4c447a2d9f847c5b0e4de183e4ee95fcbda677 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 23 Nov 2024 23:25:14 +0000 Subject: [PATCH 0273/1248] move attribute argument extractor to utils Signed-off-by: Jason Volk --- src/macros/config.rs | 47 ++++++-------------------------------------- src/macros/utils.rs | 34 +++++++++++++++++++++++++++++++- 2 files changed, 39 insertions(+), 42 deletions(-) diff --git a/src/macros/config.rs 
b/src/macros/config.rs index d7f11535..2934a0b2 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -1,9 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Write as _, - fs::OpenOptions, - io::Write as _, -}; +use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _}; use proc_macro::TokenStream; use proc_macro2::Span; @@ -13,7 +8,10 @@ use syn::{ ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, }; -use crate::{utils::is_cargo_build, Result}; +use crate::{ + utils::{get_simple_settings, is_cargo_build}, + Result, +}; const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it."; @@ -29,7 +27,7 @@ pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result Result<()> { - let settings = get_settings(args); + let settings = get_simple_settings(args); let filename = settings .get("filename") @@ -120,39 +118,6 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { Ok(()) } -fn get_settings(args: &[Meta]) -> HashMap { - let mut map = HashMap::new(); - for arg in args { - let Meta::NameValue(MetaNameValue { - path, - value, - .. - }) = arg - else { - continue; - }; - - let Expr::Lit( - ExprLit { - lit: Lit::Str(str), - .. 
- }, - .., - ) = value - else { - continue; - }; - - let Some(key) = path.segments.iter().next().map(|s| s.ident.clone()) else { - continue; - }; - - map.insert(key.to_string(), str.value()); - } - - map -} - fn get_default(field: &Field) -> Option { for attr in &field.attrs { let Meta::List(MetaList { diff --git a/src/macros/utils.rs b/src/macros/utils.rs index e4ffc622..23c4c16f 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -1,7 +1,39 @@ -use syn::{parse_str, Expr, Generics, Lit, Meta}; +use std::collections::HashMap; + +use syn::{parse_str, Expr, ExprLit, Generics, Lit, Meta, MetaNameValue}; use crate::Result; +pub(crate) fn get_simple_settings(args: &[Meta]) -> HashMap { + args.iter().fold(HashMap::new(), |mut map, arg| { + let Meta::NameValue(MetaNameValue { + path, + value, + .. + }) = arg + else { + return map; + }; + + let Expr::Lit( + ExprLit { + lit: Lit::Str(str), + .. + }, + .., + ) = value + else { + return map; + }; + + if let Some(key) = path.segments.iter().next().map(|s| s.ident.clone()) { + map.insert(key.to_string(), str.value()); + } + + map + }) +} + pub(crate) fn is_cargo_build() -> bool { std::env::args() .find(|flag| flag.starts_with("--emit")) From 5da42fb859ba987c3488cfcbb7c1f65bca06ffa8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 19 Nov 2024 03:08:09 +0000 Subject: [PATCH 0274/1248] refactor account_data.changes_since to stream Signed-off-by: Jason Volk --- src/admin/query/account_data.rs | 6 ++-- src/api/client/sync/v3.rs | 14 ++++---- src/api/client/sync/v4.rs | 21 +++++------- src/service/account_data/mod.rs | 59 +++++++++++---------------------- 4 files changed, 39 insertions(+), 61 deletions(-) diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index ea45eb16..91217334 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,5 +1,6 @@ use clap::Subcommand; use conduit::Result; +use futures::StreamExt; use 
ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; use crate::Command; @@ -39,10 +40,11 @@ pub(super) async fn process(subcommand: AccountDataCommand, context: &Command<'_ room_id, } => { let timer = tokio::time::Instant::now(); - let results = services + let results: Vec<_> = services .account_data .changes_since(room_id.as_deref(), &user_id, since) - .await?; + .collect() + .await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 7a78ea74..614970f0 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -275,10 +275,9 @@ pub(crate) async fn sync_events_route( events: services .account_data .changes_since(None, &sender_user, since) - .await? - .into_iter() - .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) - .collect(), + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) + .collect() + .await, }, device_lists: DeviceLists { changed: device_list_updates.into_iter().collect(), @@ -1023,10 +1022,9 @@ async fn load_joined_room( events: services .account_data .changes_since(Some(room_id), sender_user, since) - .await? - .into_iter() - .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect(), + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect() + .await, }, summary: RoomSummary { heroes, diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 57edc953..78b0b277 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -136,10 +136,9 @@ pub(crate) async fn sync_events_v4_route( account_data.global = services .account_data .changes_since(None, sender_user, globalsince) - .await? 
- .into_iter() - .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) - .collect(); + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) + .collect() + .await; if let Some(rooms) = body.extensions.account_data.rooms { for room in rooms { @@ -148,10 +147,9 @@ pub(crate) async fn sync_events_v4_route( services .account_data .changes_since(Some(&room), sender_user, globalsince) - .await? - .into_iter() - .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect(), + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect() + .await, ); } } @@ -487,10 +485,9 @@ pub(crate) async fn sync_events_v4_route( services .account_data .changes_since(Some(room_id), sender_user, *roomsince) - .await? - .into_iter() - .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect(), + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect() + .await, ); let vector: Vec<_> = services diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index ac3f5f83..b752f9b8 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,12 +1,12 @@ -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use conduit::{ - implement, - utils::{stream::TryIgnore, ReadyExt}, - Err, Error, Result, + err, implement, + utils::{result::LogErr, stream::TryIgnore, ReadyExt}, + Err, Result, }; -use database::{Deserialized, Handle, Json, Map}; -use futures::{StreamExt, TryFutureExt}; +use database::{Deserialized, Handle, Interfix, Json, Map}; +use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ events::{ AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, GlobalAccountDataEventType, @@ -112,46 +112,27 @@ pub async fn get_raw(&self, room_id: Option<&RoomId>, user_id: &UserId, kind: &s /// Returns all changes to the account data that happened after `since`. 
#[implement(Service)] -pub async fn changes_since( - &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, -) -> Result> { - let mut userdata = HashMap::new(); - - let mut prefix = room_id - .map(ToString::to_string) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xFF); +pub fn changes_since<'a>( + &'a self, room_id: Option<&'a RoomId>, user_id: &'a UserId, since: u64, +) -> impl Stream + Send + 'a { + let prefix = (room_id, user_id, Interfix); + let prefix = database::serialize_to_vec(prefix).expect("failed to serialize prefix"); // Skip the data that's exactly at since, because we sent that last time - let mut first_possible = prefix.clone(); - first_possible.extend_from_slice(&(since.saturating_add(1)).to_be_bytes()); + let first_possible = (room_id, user_id, since.saturating_add(1)); self.db .roomuserdataid_accountdata - .raw_stream_from(&first_possible) + .stream_from_raw(&first_possible) .ignore_err() .ready_take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(k, v)| { - let v = match room_id { - None => serde_json::from_slice::>(v) - .map(AnyRawAccountDataEvent::Global) - .map_err(|_| Error::bad_database("Database contains invalid account data."))?, - Some(_) => serde_json::from_slice::>(v) - .map(AnyRawAccountDataEvent::Room) - .map_err(|_| Error::bad_database("Database contains invalid account data."))?, - }; - - Ok((k.to_owned(), v)) + .map(move |(_, v)| { + match room_id { + Some(_) => serde_json::from_slice::>(v).map(AnyRawAccountDataEvent::Room), + None => serde_json::from_slice::>(v).map(AnyRawAccountDataEvent::Global), + } + .map_err(|e| err!(Database("Database contains invalid account data: {e}"))) + .log_err() }) .ignore_err() - .ready_for_each(|(kind, data)| { - userdata.insert(kind, data); - }) - .await; - - Ok(userdata.into_values().collect()) } From 3789d60b6abf7d758bb75f898ccbaa7f1b4251aa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 22 
Nov 2024 12:25:46 +0000 Subject: [PATCH 0275/1248] refactor to iterator inputs for auth_chain/short batch functions Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 3 +- src/api/server/event_auth.rs | 4 +- src/api/server/send_join.rs | 6 +- src/api/server/state.rs | 4 +- src/api/server/state_ids.rs | 4 +- src/database/map/get.rs | 8 +- src/service/rooms/auth_chain/mod.rs | 32 +++++--- .../rooms/event_handler/resolve_state.rs | 4 +- .../rooms/event_handler/state_at_incoming.rs | 2 +- src/service/rooms/short/mod.rs | 80 +++++++++---------- 10 files changed, 76 insertions(+), 71 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index f9d4a521..89e47d4e 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -1,6 +1,7 @@ use std::{ collections::HashMap, fmt::Write, + iter::once, sync::Arc, time::{Instant, SystemTime}, }; @@ -43,7 +44,7 @@ pub(super) async fn get_auth_chain(&self, event_id: Box) -> Result = state_ids.values().map(Borrow::borrow).collect(); + let starting_events = state_ids.values().map(Borrow::borrow); let auth_chain = services .rooms .auth_chain - .event_ids_iter(room_id, &starting_events) + .event_ids_iter(room_id, starting_events) .await? .map(Ok) .and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) diff --git a/src/api/server/state.rs b/src/api/server/state.rs index 06a44a99..b21fce68 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,4 +1,4 @@ -use std::borrow::Borrow; +use std::{borrow::Borrow, iter::once}; use axum::extract::State; use conduit::{err, result::LogErr, utils::IterStream, Result}; @@ -52,7 +52,7 @@ pub(crate) async fn get_room_state_route( let auth_chain = services .rooms .auth_chain - .event_ids_iter(&body.room_id, &[body.event_id.borrow()]) + .event_ids_iter(&body.room_id, once(body.event_id.borrow())) .await? 
.map(Ok) .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await }) diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 52d8e7cc..0c023bf0 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,4 +1,4 @@ -use std::borrow::Borrow; +use std::{borrow::Borrow, iter::once}; use axum::extract::State; use conduit::{err, Result}; @@ -44,7 +44,7 @@ pub(crate) async fn get_room_state_ids_route( let auth_chain_ids = services .rooms .auth_chain - .event_ids_iter(&body.room_id, &[body.event_id.borrow()]) + .event_ids_iter(&body.room_id, once(body.event_id.borrow())) .await? .map(|id| (*id).to_owned()) .collect() diff --git a/src/database/map/get.rs b/src/database/map/get.rs index a3c6c492..3ee2a194 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -80,8 +80,8 @@ where #[tracing::instrument(skip(self, keys), fields(%self), level = "trace")] pub fn get_batch<'a, I, K>(&self, keys: I) -> impl Stream>> where - I: Iterator + ExactSizeIterator + Send + Debug, - K: AsRef<[u8]> + Send + Sync + Sized + Debug + 'a, + I: Iterator + ExactSizeIterator + Debug + Send, + K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, { self.get_batch_blocking(keys).stream() } @@ -89,8 +89,8 @@ where #[implement(super::Map)] pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator>> where - I: Iterator + ExactSizeIterator + Send, - K: AsRef<[u8]> + Sized + 'a, + I: Iterator + ExactSizeIterator + Debug + Send, + K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, { // Optimization can be `true` if key vector is pre-sorted **by the column // comparator**. 
diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index cabb6f0c..1d0490c2 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -2,6 +2,7 @@ mod data; use std::{ collections::{BTreeSet, HashSet}, + fmt::Debug, sync::Arc, }; @@ -37,9 +38,12 @@ impl crate::Service for Service { } impl Service { - pub async fn event_ids_iter( - &self, room_id: &RoomId, starting_events: &[&EventId], - ) -> Result> + Send + '_> { + pub async fn event_ids_iter<'a, I>( + &'a self, room_id: &RoomId, starting_events: I, + ) -> Result> + Send + '_> + where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, + { let stream = self .get_event_ids(room_id, starting_events) .await? @@ -49,12 +53,15 @@ impl Service { Ok(stream) } - pub async fn get_event_ids(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result>> { + pub async fn get_event_ids<'a, I>(&'a self, room_id: &RoomId, starting_events: I) -> Result>> + where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, + { let chain = self.get_auth_chain(room_id, starting_events).await?; let event_ids = self .services .short - .multi_get_eventid_from_short(&chain) + .multi_get_eventid_from_short(chain.into_iter()) .await .into_iter() .filter_map(Result::ok) @@ -64,7 +71,10 @@ impl Service { } #[tracing::instrument(skip_all, name = "auth_chain")] - pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result> { + pub async fn get_auth_chain<'a, I>(&'a self, room_id: &RoomId, starting_events: I) -> Result> + where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, + { const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? 
const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); @@ -72,19 +82,19 @@ impl Service { let mut starting_ids = self .services .short - .multi_get_or_create_shorteventid(starting_events) - .enumerate() + .multi_get_or_create_shorteventid(starting_events.clone()) + .zip(starting_events.clone().stream()) .boxed(); let mut buckets = [BUCKET; NUM_BUCKETS]; - while let Some((i, short)) = starting_ids.next().await { + while let Some((short, starting_event)) = starting_ids.next().await { let bucket: usize = short.try_into()?; let bucket: usize = validated!(bucket % NUM_BUCKETS); - buckets[bucket].insert((short, starting_events[i])); + buckets[bucket].insert((short, starting_event)); } debug!( - starting_events = ?starting_events.len(), + starting_events = ?starting_events.count(), elapsed = ?started.elapsed(), "start", ); diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 0c9525dd..4863e340 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -35,12 +35,12 @@ pub async fn resolve_state( let fork_states = [current_state_ids, incoming_state]; let mut auth_chain_sets = Vec::with_capacity(fork_states.len()); for state in &fork_states { - let starting_events: Vec<&EventId> = state.values().map(Borrow::borrow).collect(); + let starting_events = state.values().map(Borrow::borrow); let auth_chain: HashSet> = self .services .auth_chain - .get_event_ids(room_id, &starting_events) + .get_event_ids(room_id, starting_events) .await? 
.into_iter() .collect(); diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index a200ab56..05a9d8ca 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -139,7 +139,7 @@ pub(super) async fn state_at_incoming_resolved( let auth_chain: HashSet> = self .services .auth_chain - .get_event_ids(room_id, &starting_events) + .get_event_ids(room_id, starting_events.into_iter()) .await? .into_iter() .collect(); diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 703df796..e4ff2975 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ -use std::{mem::size_of_val, sync::Arc}; +use std::{fmt::Debug, mem::size_of_val, sync::Arc}; pub use conduit::pdu::{ShortEventId, ShortId, ShortRoomId}; -use conduit::{err, implement, utils, Result}; +use conduit::{err, implement, utils, utils::stream::ReadyExt, Result}; use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; @@ -51,52 +51,46 @@ impl crate::Service for Service { #[implement(Service)] pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEventId { - const BUFSIZE: usize = size_of::(); - if let Ok(shorteventid) = self.get_shorteventid(event_id).await { return shorteventid; } - let shorteventid = self.services.globals.next_count().unwrap(); - debug_assert!(size_of_val(&shorteventid) == BUFSIZE, "buffer requirement changed"); - - self.db - .eventid_shorteventid - .raw_aput::(event_id, shorteventid); - - self.db - .shorteventid_eventid - .aput_raw::(shorteventid, event_id); - - shorteventid + self.create_shorteventid(event_id) } #[implement(Service)] -pub fn multi_get_or_create_shorteventid<'a>( - &'a self, event_ids: &'a [&EventId], -) -> impl Stream + Send + 'a { +pub fn multi_get_or_create_shorteventid<'a, I>(&'a self, event_ids: 
I) -> impl Stream + Send + '_ +where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, + ::Item: AsRef<[u8]> + Send + Sync + 'a, +{ self.db .eventid_shorteventid - .get_batch(event_ids.iter()) - .enumerate() - .map(|(i, result)| match result { - Ok(ref short) => utils::u64_from_u8(short), - Err(_) => { - const BUFSIZE: usize = size_of::(); - - let short = self.services.globals.next_count().unwrap(); - debug_assert!(size_of_val(&short) == BUFSIZE, "buffer requirement changed"); - - self.db - .eventid_shorteventid - .raw_aput::(event_ids[i], short); - self.db - .shorteventid_eventid - .aput_raw::(short, event_ids[i]); - - short - }, + .get_batch(event_ids.clone()) + .ready_scan(event_ids, |event_ids, result| { + event_ids.next().map(|event_id| (event_id, result)) }) + .map(|(event_id, result)| match result { + Ok(ref short) => utils::u64_from_u8(short), + Err(_) => self.create_shorteventid(event_id), + }) +} + +#[implement(Service)] +fn create_shorteventid(&self, event_id: &EventId) -> ShortEventId { + const BUFSIZE: usize = size_of::(); + + let short = self.services.globals.next_count().unwrap(); + debug_assert!(size_of_val(&short) == BUFSIZE, "buffer requirement changed"); + + self.db + .eventid_shorteventid + .raw_aput::(event_id, short); + self.db + .shorteventid_eventid + .aput_raw::(short, event_id); + + short } #[implement(Service)] @@ -154,13 +148,13 @@ pub async fn get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result } #[implement(Service)] -pub async fn multi_get_eventid_from_short(&self, shorteventid: &[ShortEventId]) -> Vec>> { +pub async fn multi_get_eventid_from_short(&self, shorteventid: I) -> Vec>> +where + I: Iterator + Send, +{ const BUFSIZE: usize = size_of::(); - let keys: Vec<[u8; BUFSIZE]> = shorteventid - .iter() - .map(|short| short.to_be_bytes()) - .collect(); + let keys: Vec<[u8; BUFSIZE]> = shorteventid.map(u64::to_be_bytes).collect(); self.db .shorteventid_eventid From c519a40cb8d220e9d3d18702f4b4f593e0280505 
Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 22 Nov 2024 16:01:27 +0000 Subject: [PATCH 0276/1248] use multiget for shortid conversions Signed-off-by: Jason Volk --- src/service/rooms/state/mod.rs | 71 +++++++++++------------ src/service/rooms/state_accessor/data.rs | 73 ++++++++++++++---------- src/service/rooms/state_accessor/mod.rs | 12 +++- 3 files changed, 86 insertions(+), 70 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 9d702cd7..4429e912 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use conduit::{ - err, + at, err, result::FlatOk, utils::{calculate_hash, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, warn, PduEvent, Result, @@ -398,59 +398,52 @@ impl Service { return Ok(HashMap::new()); }; - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content)?; - - let mut sauthevents: HashMap<_, _> = auth_events + let mut sauthevents: HashMap<_, _> = state_res::auth_types_for_event(kind, sender, state_key, content)? .iter() .stream() .filter_map(|(event_type, state_key)| { self.services .short .get_shortstatekey(event_type, state_key) - .map_ok(move |s| (s, (event_type, state_key))) + .map_ok(move |ssk| (ssk, (event_type, state_key))) .map(Result::ok) }) + .map(|(ssk, (event_type, state_key))| (ssk, (event_type.to_owned(), state_key.to_owned()))) .collect() .await; - let full_state = self + let auth_state: Vec<_> = self .services - .state_compressor - .load_shortstatehash_info(shortstatehash) + .state_accessor + .state_full_shortids(shortstatehash) .await - .map_err(|e| { - err!(Database( - "Missing shortstatehash info for {room_id:?} at {shortstatehash:?}: {e:?}" - )) - })? - .pop() - .expect("there is always one layer") - .full_state; + .map_err(|e| err!(Database(error!(?room_id, ?shortstatehash, "{e:?}"))))? 
+ .into_iter() + .filter_map(|(shortstatekey, shorteventid)| { + sauthevents + .remove(&shortstatekey) + .map(|(event_type, state_key)| ((event_type, state_key), shorteventid)) + }) + .collect(); - let mut ret = HashMap::new(); - for &compressed in full_state.iter() { - let (shortstatekey, shorteventid) = parse_compressed_state_event(compressed); + let auth_pdus: Vec<_> = self + .services + .short + .multi_get_eventid_from_short(auth_state.iter().map(at!(1))) + .await + .into_iter() + .stream() + .and_then(|event_id| async move { self.services.timeline.get_pdu(&event_id).await }) + .collect() + .await; - let Some((ty, state_key)) = sauthevents.remove(&shortstatekey) else { - continue; - }; + let auth_pdus = auth_state + .into_iter() + .map(at!(0)) + .zip(auth_pdus.into_iter()) + .filter_map(|((event_type, state_key), pdu)| Some(((event_type, state_key), pdu.ok()?))) + .collect(); - let Ok(event_id) = self - .services - .short - .get_eventid_from_short(shorteventid) - .await - else { - continue; - }; - - let Ok(pdu) = self.services.timeline.get_pdu(&event_id).await else { - continue; - }; - - ret.insert((ty.to_owned(), state_key.to_owned()), pdu); - } - - Ok(ret) + Ok(auth_pdus) } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 8df0d8b0..80046d77 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,8 +1,8 @@ use std::{collections::HashMap, sync::Arc}; use conduit::{ - err, - utils::{future::TryExtExt, IterStream}, + at, err, + utils::stream::{IterStream, ReadyExt}, PduEvent, Result, }; use database::{Deserialized, Map}; @@ -49,52 +49,63 @@ impl Data { pub(super) async fn state_full( &self, shortstatehash: ShortStateHash, ) -> Result>> { - Ok(self + let state = self .state_full_pdus(shortstatehash) .await? 
.into_iter() .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu))) - .collect()) + .collect(); + + Ok(state) } pub(super) async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result>> { - Ok(self + let short_ids = self .state_full_shortids(shortstatehash) .await? - .iter() + .into_iter() + .map(at!(1)); + + let event_ids = self + .services + .short + .multi_get_eventid_from_short(short_ids) + .await; + + let full_pdus = event_ids + .into_iter() .stream() - .filter_map(|(_, shorteventid)| { - self.services - .short - .get_eventid_from_short(*shorteventid) - .ok() - }) - .filter_map(|eventid| async move { self.services.timeline.get_pdu(&eventid).await.ok() }) + .ready_filter_map(Result::ok) + .filter_map(|event_id| async move { self.services.timeline.get_pdu(&event_id).await.ok() }) .collect() - .await) + .await; + + Ok(full_pdus) } pub(super) async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result>> { - Ok(self - .state_full_shortids(shortstatehash) - .await? 
- .iter() - .stream() - .filter_map(|(shortstatekey, shorteventid)| { - self.services - .short - .get_eventid_from_short(*shorteventid) - .map_ok(move |eventid| (*shortstatekey, eventid)) - .ok() - }) - .collect() - .await) + let short_ids = self.state_full_shortids(shortstatehash).await?; + + let event_ids = self + .services + .short + .multi_get_eventid_from_short(short_ids.iter().map(at!(1))) + .await; + + let full_ids = short_ids + .into_iter() + .map(at!(0)) + .zip(event_ids.into_iter()) + .filter_map(|(shortstatekey, event_id)| Some((shortstatekey, event_id.ok()?))) + .collect(); + + Ok(full_ids) } pub(super) async fn state_full_shortids( &self, shortstatehash: ShortStateHash, ) -> Result> { - Ok(self + let shortids = self .services .state_compressor .load_shortstatehash_info(shortstatehash) @@ -106,7 +117,9 @@ impl Data { .iter() .copied() .map(parse_compressed_state_event) - .collect()) + .collect(); + + Ok(shortids) } /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 89db88a6..e08fac66 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -41,7 +41,10 @@ use serde::Deserialize; use self::data::Data; use crate::{ rooms, - rooms::{short::ShortStateHash, state::RoomMutexGuard}, + rooms::{ + short::{ShortEventId, ShortStateHash, ShortStateKey}, + state::RoomMutexGuard, + }, Dep, }; @@ -102,6 +105,13 @@ impl Service { self.db.state_full_ids(shortstatehash).await } + #[inline] + pub async fn state_full_shortids( + &self, shortstatehash: ShortStateHash, + ) -> Result> { + self.db.state_full_shortids(shortstatehash).await + } + pub async fn state_full( &self, shortstatehash: ShortStateHash, ) -> Result>> { From 97ad9afc866d420354c44c9ffddf99d622c38f4b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 24 Nov 2024 06:04:54 +0000 Subject: [PATCH 0277/1248] default to main event for batch tokens fix prev_batch token for legacy sync timeline Signed-off-by: Jason Volk --- src/api/client/context.rs | 2 ++ src/api/client/message.rs | 4 +--- src/api/client/sync/v3.rs | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 4359ae12..5b6b516e 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -169,12 +169,14 @@ pub(crate) async fn get_context_route( start: events_before .last() .map(at!(0)) + .or(Some(base_token)) .as_ref() .map(ToString::to_string), end: events_after .last() .map(at!(0)) + .or(Some(base_token)) .as_ref() .map(ToString::to_string), diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 88453de0..f1a10aa2 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -136,8 +136,6 @@ pub(crate) async fn get_message_events_route( .collect() .await; - let start_token = events.first().map(at!(0)).unwrap_or(from); - let next_token = events.last().map(at!(0)); if 
!cfg!(feature = "element_hacks") { @@ -156,7 +154,7 @@ pub(crate) async fn get_message_events_route( .collect(); Ok(get_message_events::v3::Response { - start: start_token.to_string(), + start: from.to_string(), end: next_token.as_ref().map(ToString::to_string), chunk, state, diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 614970f0..80aa8184 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -945,7 +945,6 @@ async fn load_joined_room( let prev_batch = timeline_pdus .first() .map(at!(0)) - .map(|count| count.saturating_sub(1)) .as_ref() .map(ToString::to_string); From 4e74a1811b3f61a18445740a4d3a84a5006e8499 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 24 Nov 2024 22:15:25 +0000 Subject: [PATCH 0278/1248] ci: set cancel-in-progress to true Signed-off-by: Jason Volk --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9385c5e3..b4b2c1f1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ on: concurrency: group: ${{ github.head_ref || github.ref_name }} - cancel-in-progress: false + cancel-in-progress: true env: # sccache only on main repo From a582d0559a7e8cdf6f98a33082fe39984d959666 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 23 Nov 2024 22:31:44 +0000 Subject: [PATCH 0279/1248] bump url and cargo lock Signed-off-by: Jason Volk --- Cargo.lock | 202 +++++++++++++++++++++++++---------------------------- Cargo.toml | 2 +- 2 files changed, 98 insertions(+), 106 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d39375d1..e3af8ae0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -94,9 +94,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-compression" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" +checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" dependencies = [ "brotli", "flate2", @@ -127,7 +127,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -138,7 +138,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -164,21 +164,20 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe7c2840b66236045acd2607d5866e274380afd87ef99d6226e961e2cb47df45" +checksum = "f47bb8cc16b669d267eeccf585aea077d0882f4777b1c1f740217885d6e6e5a3" dependencies = [ "aws-lc-sys", - "mirai-annotations", "paste", "zeroize", ] [[package]] name = "aws-lc-sys" -version = "0.23.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad3a619a9de81e1d7de1f1186dcba4506ed661a0e483d84410fdef0ee87b2f96" +checksum = "a2101df3813227bbaaaa0b04cd61c534c7954b22bd68d399b440be937dc63ff7" dependencies = [ "bindgen", "cc", @@ -215,7 +214,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tower 0.5.1", "tower-layer", @@ -249,7 +248,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tower-layer", "tower-service", "tracing", @@ -292,7 +291,7 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "rustls 0.23.16", + "rustls 0.23.18", "rustls-pemfile", "rustls-pki-types", "tokio", @@ -312,7 +311,7 @@ dependencies = [ "http", "http-body-util", "pin-project", - "rustls 0.23.16", + "rustls 0.23.18", "tokio", 
"tokio-rustls", "tokio-util", @@ -372,7 +371,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.87", + "syn 2.0.89", "which", ] @@ -435,9 +434,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" +checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" [[package]] name = "byteorder" @@ -574,7 +573,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -755,7 +754,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -779,7 +778,7 @@ dependencies = [ "hyper-util", "log", "ruma", - "rustls 0.23.16", + "rustls 0.23.18", "sd-notify", "sentry", "sentry-tower", @@ -896,9 +895,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.4" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" dependencies = [ "core-foundation-sys", "libc", @@ -912,9 +911,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -1053,7 +1052,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] 
@@ -1080,7 +1079,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1149,7 +1148,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1201,7 +1200,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1367,7 +1366,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1447,9 +1446,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -1633,7 +1632,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1728,7 +1727,7 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls 0.23.16", + "rustls 0.23.18", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -1885,7 +1884,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2031,9 +2030,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" [[package]] name = "jobserver" @@ -2086,9 +2085,9 @@ dependencies = [ [[package]] name = 
"konst" -version = "0.3.9" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50a0ba6de5f7af397afff922f22c149ff605c766cd3269cf6c1cd5e466dbe3b9" +checksum = "b65f00fb3910881e52bf0850ae2a82aea411488a557e1c02820ceaa60963dce3" dependencies = [ "const_panic", "konst_kernel", @@ -2097,9 +2096,9 @@ dependencies = [ [[package]] name = "konst_kernel" -version = "0.3.9" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0a455a1719220fd6adf756088e1c69a85bf14b6a9e24537a5cc04f503edb2b" +checksum = "599c1232f55c72c7fc378335a3efe1c878c92720838c8e6a4fd87784ef7764de" dependencies = [ "typewit", ] @@ -2124,7 +2123,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2141,9 +2140,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.162" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libloading" @@ -2180,9 +2179,9 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" @@ -2338,12 +2337,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "mirai-annotations" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" - [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -2656,7 +2649,7 @@ 
dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2739,7 +2732,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2811,7 +2804,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2825,9 +2818,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -2840,7 +2833,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "version_check", "yansi", ] @@ -2865,7 +2858,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2918,7 +2911,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.16", + "rustls 0.23.18", "socket2", "thiserror 2.0.3", "tokio", @@ -2936,7 +2929,7 @@ dependencies = [ "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.16", + "rustls 0.23.18", "rustls-pki-types", "slab", "thiserror 2.0.3", @@ -3079,14 +3072,14 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.16", + "rustls 0.23.18", "rustls-native-certs", "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tokio-rustls", "tokio-socks", @@ -3285,7 +3278,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.87", 
+ "syn 2.0.89", "toml", ] @@ -3409,9 +3402,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.40" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -3436,9 +3429,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "aws-lc-rs", "log", @@ -3452,12 +3445,11 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ "openssl-probe", - "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework", @@ -3531,9 +3523,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -3552,9 +3544,9 @@ checksum = "1be20c5f7f393ee700f8b2f28ea35812e4e212f40774b550cd2a93ea91684451" [[package]] name = "security-framework" -version = "2.11.1" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" 
dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -3731,7 +3723,7 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4008,9 +4000,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.87" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -4025,9 +4017,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -4040,7 +4032,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4106,7 +4098,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4117,7 +4109,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4262,7 +4254,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4283,7 +4275,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 
0.23.16", + "rustls 0.23.18", "rustls-pki-types", "tokio", ] @@ -4476,7 +4468,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=4d78a14a5e03f539b8c6b47 dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4603,9 +4595,9 @@ checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" @@ -4655,7 +4647,7 @@ dependencies = [ "base64 0.22.1", "log", "once_cell", - "rustls 0.23.16", + "rustls 0.23.18", "rustls-pki-types", "url", "webpki-roots", @@ -4663,9 +4655,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna 1.0.3", @@ -4762,7 +4754,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -4796,7 +4788,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4851,9 +4843,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -5157,9 +5149,9 @@ checksum = 
"cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", @@ -5169,13 +5161,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -5197,27 +5189,27 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "zerofrom" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -5246,7 +5238,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 058a15e0..02c3b5ab 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -243,7 +243,7 @@ version = "0.8.5" # Validating urls in config, was already a transitive dependency [workspace.dependencies.url] -version = "2.5.3" +version = "2.5.4" default-features = false features = ["serde"] From 1c751168c6e237a29dd0cf00b771e8ea3ca961f1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 24 Nov 2024 23:49:09 +0000 Subject: [PATCH 0280/1248] check-in missed example config changes Signed-off-by: Jason Volk --- conduwuit-example.toml | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 2f3da71f..78136efb 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -158,6 +158,10 @@ # #eventidshort_cache_capacity = varies by system +# This item is undocumented. Please contribute documentation for it. +# +#eventid_pdu_cache_capacity = varies by system + # This item is undocumented. Please contribute documentation for it. # #shortstatekey_cache_capacity = varies by system @@ -1350,6 +1354,13 @@ [global.well_known] +# The server URL that the client well-known file will serve. This should +# not contain a port, and should just be a valid HTTPS URL. +# +# example: "https://matrix.example.com" +# +#client = + # The server base domain of the URL with a specific port that the server # well-known file will serve. This should contain a port at the end, and # should not be a URL. @@ -1358,13 +1369,6 @@ # #server = -# The server URL that the client well-known file will serve. This should -# not contain a port, and should just be a valid HTTPS URL. -# -# example: "https://matrix.example.com" -# -#client = - # This item is undocumented. Please contribute documentation for it. 
# #support_page = From b87362cbf1377592e7939d9667721183c25d14a9 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 24 Nov 2024 19:17:07 -0500 Subject: [PATCH 0281/1248] ci: add test for validating generated example config is current Signed-off-by: strawberry --- engage.toml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/engage.toml b/engage.toml index 633cb95d..9a6ef8ca 100644 --- a/engage.toml +++ b/engage.toml @@ -188,6 +188,16 @@ cargo test \ --color=always """ +# Checks if the generated example config differs from the checked in repo's +# example config. +[[task]] +name = "example-config" +group = "tests" +depends = ["cargo/default"] +script = """ +git diff --exit-code conduwuit-example.toml +""" + # Ensure that the flake's default output can build and run without crashing # # This is a dynamically-linked jemalloc build, which is a case not covered by From 2675033aac4caf9c77a1dc5024468cae71cf92bc Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 24 Nov 2024 19:19:08 -0500 Subject: [PATCH 0282/1248] send plain txt admin room error responses fixes bracketed arguments not showing up on missing args Signed-off-by: strawberry --- src/admin/processor.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 3c1895ff..caaa7d2d 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -157,10 +157,7 @@ fn parse<'a>( let message = error .to_string() .replace("server.name", services.globals.server_name().as_str()); - Err(reply( - RoomMessageEventContent::notice_markdown(message), - input.reply_id.as_deref(), - )) + Err(reply(RoomMessageEventContent::notice_plain(message), input.reply_id.as_deref())) }, } } From 29c715a45fe1c6c177b1ea88452a94feb26d5174 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 24 Nov 2024 19:30:54 -0500 Subject: [PATCH 0283/1248] ci: remove some old/unnecessary paths-ignore Signed-off-by: strawberry --- .github/workflows/ci.yml | 5 ----- 1 file 
changed, 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b4b2c1f1..1441dd44 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,15 +3,10 @@ name: CI and Artifacts on: pull_request: push: - # documentation workflow deals with this or is not relevant for this workflow paths-ignore: - - '*.md' - - 'conduwuit-example.toml' - - 'book.toml' - '.gitlab-ci.yml' - '.gitignore' - 'renovate.json' - - 'docs/**' - 'debian/**' - 'docker/**' branches: From 6f1d50dda31b4c5d55e2297c701520553ba8327e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 25 Nov 2024 01:14:12 +0000 Subject: [PATCH 0284/1248] panic on otherwise ignored errors in debug mode Signed-off-by: Jason Volk --- src/core/utils/stream/ignore.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/core/utils/stream/ignore.rs b/src/core/utils/stream/ignore.rs index 997aa4ba..4e426557 100644 --- a/src/core/utils/stream/ignore.rs +++ b/src/core/utils/stream/ignore.rs @@ -13,6 +13,15 @@ where T: Stream> + TryStream + Send + 'a, Item: Send + 'a, { + #[cfg(debug_assertions)] + #[inline] + fn ignore_err(self: T) -> impl Stream + Send + 'a { + use super::TryExpect; + + self.expect_ok() + } + + #[cfg(not(debug_assertions))] #[inline] fn ignore_err(self: T) -> impl Stream + Send + 'a { self.filter_map(|res| ready(res.ok())) } From 343ec59a8b6b766b913538811c1d7b04327e8382 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 25 Nov 2024 01:01:39 +0000 Subject: [PATCH 0285/1248] use arrayvec for integer deserialization buffer Signed-off-by: Jason Volk --- src/core/error/mod.rs | 2 ++ src/database/de.rs | 29 ++++++++++++++++++++++++----- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 35bf9800..b84f1b46 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -35,6 +35,8 @@ pub enum Error { // third-party #[error(transparent)] + CapacityError(#[from] arrayvec::CapacityError), + 
#[error(transparent)] CargoToml(#[from] cargo_toml::Error), #[error(transparent)] Clap(#[from] clap::error::Error), diff --git a/src/database/de.rs b/src/database/de.rs index f8a038ef..d303eab2 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,3 +1,4 @@ +use arrayvec::ArrayVec; use conduit::{checked, debug::DebugInspect, err, utils::string, Error, Result}; use serde::{ de, @@ -52,7 +53,7 @@ impl<'de> Deserializer<'de> { let len = self.buf.len(); let parsed = &self.buf[0..pos]; let unparsed = &self.buf[pos..]; - let remain = checked!(len - pos)?; + let remain = self.remaining()?; let trailing_sep = remain == 1 && unparsed[0] == Self::SEP; (remain == 0 || trailing_sep) .then_some(()) @@ -139,6 +140,14 @@ impl<'de> Deserializer<'de> { self.pos = self.pos.saturating_add(n); debug_assert!(self.pos <= self.buf.len(), "pos out of range"); } + + /// Unconsumed input bytes. + #[inline] + fn remaining(&self) -> Result { + let pos = self.pos; + let len = self.buf.len(); + checked!(len - pos) + } } impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { @@ -240,8 +249,13 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn deserialize_i64>(self, visitor: V) -> Result { - let bytes: [u8; size_of::()] = self.buf[self.pos..].try_into()?; - self.inc_pos(size_of::()); + const BYTES: usize = size_of::(); + + let end = self.pos.saturating_add(BYTES); + let bytes: ArrayVec = self.buf[self.pos..end].try_into()?; + let bytes = bytes.into_inner().expect("array size matches i64"); + + self.inc_pos(BYTES); visitor.visit_i64(i64::from_be_bytes(bytes)) } @@ -258,8 +272,13 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn deserialize_u64>(self, visitor: V) -> Result { - let bytes: [u8; size_of::()] = self.buf[self.pos..].try_into()?; - self.inc_pos(size_of::()); + const BYTES: usize = size_of::(); + + let end = self.pos.saturating_add(BYTES); + let bytes: ArrayVec = self.buf[self.pos..end].try_into()?; + let 
bytes = bytes.into_inner().expect("array size matches u64"); + + self.inc_pos(BYTES); visitor.visit_u64(u64::from_be_bytes(bytes)) } From c903a718075ddef637529294ccd45554d11d767d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 25 Nov 2024 01:55:57 +0000 Subject: [PATCH 0286/1248] refactor and optimize receipt service data Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 6 ++-- src/api/client/sync/v4.rs | 8 ++--- src/service/rooms/read_receipt/data.rs | 43 ++++++++------------------ src/service/rooms/read_receipt/mod.rs | 8 ++--- src/service/sending/sender.rs | 6 ++-- 5 files changed, 26 insertions(+), 45 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 80aa8184..b69cbc87 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -559,8 +559,6 @@ async fn load_joined_room( .lazy_loading .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount); - // Database queries: - let current_shortstatehash = services .rooms .state @@ -983,9 +981,9 @@ async fn load_joined_room( .filter_map(|(read_user, _, edu)| async move { services .users - .user_is_ignored(&read_user, sender_user) + .user_is_ignored(read_user, sender_user) .await - .or_some((read_user, edu)) + .or_some((read_user.to_owned(), edu)) }) .collect() .await; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 78b0b277..0913336d 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -495,11 +495,11 @@ pub(crate) async fn sync_events_v4_route( .read_receipt .readreceipts_since(room_id, *roomsince) .filter_map(|(read_user, ts, v)| async move { - (!services + services .users - .user_is_ignored(&read_user, sender_user) - .await) - .then_some((read_user, ts, v)) + .user_is_ignored(read_user, sender_user) + .await + .or_some((read_user.to_owned(), ts, v)) }) .collect() .await; diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index 1194598d..34639e27 100644 
--- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -1,16 +1,15 @@ -use std::{mem::size_of, sync::Arc}; +use std::sync::Arc; use conduit::{ - utils, utils::{stream::TryIgnore, ReadyExt}, - Error, Result, + Result, }; use database::{Deserialized, Json, Map}; use futures::{Stream, StreamExt}; use ruma::{ events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent}, serde::Raw, - CanonicalJsonObject, OwnedUserId, RoomId, UserId, + CanonicalJsonObject, RoomId, UserId, }; use crate::{globals, Dep}; @@ -26,7 +25,7 @@ struct Services { globals: Dep, } -pub(super) type ReceiptItem = (OwnedUserId, u64, Raw); +pub(super) type ReceiptItem<'a> = (&'a UserId, u64, Raw); impl Data { pub(super) fn new(args: &crate::Args<'_>) -> Self { @@ -59,39 +58,23 @@ impl Data { pub(super) fn readreceipts_since<'a>( &'a self, room_id: &'a RoomId, since: u64, - ) -> impl Stream + Send + 'a { + ) -> impl Stream> + Send + 'a { + type Key<'a> = (&'a RoomId, u64, &'a UserId); + type KeyVal<'a> = (Key<'a>, CanonicalJsonObject); + let after_since = since.saturating_add(1); // +1 so we don't send the event at since let first_possible_edu = (room_id, after_since); - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xFF); - let prefix2 = prefix.clone(); - self.readreceiptid_readreceipt - .stream_from_raw(&first_possible_edu) + .stream_from(&first_possible_edu) .ignore_err() - .ready_take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count_offset = prefix.len().saturating_add(size_of::()); - let user_id_offset = count_offset.saturating_add(1); - - let count = utils::u64_from_bytes(&k[prefix.len()..count_offset]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - - let user_id_str = utils::string_from_bytes(&k[user_id_offset..]) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid bytes in db."))?; - - let user_id = UserId::parse(user_id_str) - .map_err(|_| Error::bad_database("Invalid 
readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(v) - .map_err(|_| Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json."))?; - + .ready_take_while(move |((r, ..), _): &KeyVal<'_>| *r == room_id) + .map(move |((_, count, user_id), mut json): KeyVal<'_>| { json.remove("room_id"); - let event = Raw::from_json(serde_json::value::to_raw_value(&json)?); + let event = serde_json::value::to_raw_value(&json)?; - Ok((user_id, count, event)) + Ok((user_id, count, Raw::from_json(event))) }) .ignore_err() } diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index ec34361e..e089d369 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -7,10 +7,10 @@ use futures::Stream; use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, - SyncEphemeralRoomEvent, + AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, }, serde::Raw, - RoomId, UserId, + OwnedUserId, RoomId, UserId, }; use self::data::{Data, ReceiptItem}; @@ -55,7 +55,7 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub fn readreceipts_since<'a>( &'a self, room_id: &'a RoomId, since: u64, - ) -> impl Stream + Send + 'a { + ) -> impl Stream> + Send + 'a { self.db.readreceipts_since(room_id, since) } @@ -83,7 +83,7 @@ impl Service { #[must_use] pub fn pack_receipts(receipts: I) -> Raw> where - I: Iterator, + I: Iterator)>, { let mut json = BTreeMap::new(); for (_, _, value) in receipts { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 0a0aae39..3c544725 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -376,7 +376,7 @@ impl Service { let mut read = BTreeMap::::new(); while let Some((user_id, count, read_receipt)) = receipts.next().await { *max_edu_count = cmp::max(count, *max_edu_count); - if !self.services.globals.user_is_local(&user_id) { + if !self.services.globals.user_is_local(user_id) { 
continue; } @@ -400,7 +400,7 @@ impl Service { let receipt = receipt .remove(&ReceiptType::Read) .expect("our read receipts always set this") - .remove(&user_id) + .remove(user_id) .expect("our read receipts always have the user here"); let receipt_data = ReceiptData { @@ -408,7 +408,7 @@ impl Service { event_ids: vec![event_id.clone()], }; - if read.insert(user_id, receipt_data).is_none() { + if read.insert(user_id.to_owned(), receipt_data).is_none() { *num = num.saturating_add(1); if *num >= SELECT_RECEIPT_LIMIT { break; From 2592f83b69de54e012a66652d43c09183074d77a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 25 Nov 2024 02:10:24 +0000 Subject: [PATCH 0287/1248] add migration fix for duplicate readreceipt entries Signed-off-by: Jason Volk --- src/service/migrations.rs | 61 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 126d3c7e..cded9bce 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -14,7 +14,7 @@ use itertools::Itertools; use ruma::{ events::{push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType}, push::Ruleset, - OwnedUserId, UserId, + OwnedUserId, RoomId, UserId, }; use crate::{media, Services}; @@ -69,6 +69,7 @@ async fn fresh(services: &Services) -> Result<()> { db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); db["global"].insert(b"fix_referencedevents_missing_sep", []); + db["global"].insert(b"fix_readreceiptid_readreceipt_duplicates", []); // Create the admin room and server user on first run crate::admin::create_admin_room(services).boxed().await?; @@ -130,6 +131,14 @@ async fn migrate(services: &Services) -> Result<()> { fix_referencedevents_missing_sep(services).await?; } + if db["global"] + .get(b"fix_readreceiptid_readreceipt_duplicates") + .await + .is_not_found() + { + 
fix_readreceiptid_readreceipt_duplicates(services).await?; + } + let version_match = services.globals.db.database_version().await == DATABASE_VERSION || services.globals.db.database_version().await == CONDUIT_DATABASE_VERSION; @@ -493,3 +502,53 @@ async fn fix_referencedevents_missing_sep(services: &Services) -> Result { db["global"].insert(b"fix_referencedevents_missing_sep", []); db.db.cleanup() } + +async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result { + use ruma::identifiers_validation::MAX_BYTES; + type ArrayId = arrayvec::ArrayString; + type Key<'a> = (&'a RoomId, u64, &'a UserId); + + warn!("Fixing undeleted entries in readreceiptid_readreceipt..."); + + let db = &services.db; + let cork = db.cork_and_sync(); + let readreceiptid_readreceipt = db["readreceiptid_readreceipt"].clone(); + + let mut cur_room: Option = None; + let mut cur_user: Option = None; + let (mut total, mut fixed): (usize, usize) = (0, 0); + readreceiptid_readreceipt + .keys() + .expect_ok() + .ready_for_each(|key: Key<'_>| { + let (room_id, _, user_id) = key; + let last_room = cur_room.replace( + room_id + .as_str() + .try_into() + .expect("invalid room_id in database"), + ); + + let last_user = cur_user.replace( + user_id + .as_str() + .try_into() + .expect("invalid user_id in database"), + ); + + let is_dup = cur_room == last_room && cur_user == last_user; + if is_dup { + readreceiptid_readreceipt.del(key); + } + + fixed = fixed.saturating_add(is_dup.into()); + total = total.saturating_add(1); + }) + .await; + + drop(cork); + info!(?total, ?fixed, "Fixed undeleted entries in readreceiptid_readreceipt."); + + db["global"].insert(b"fix_readreceiptid_readreceipt_duplicates", []); + db.db.cleanup() +} From 8611cc0ee9afb4691f6f55fbe2fd583d4337cca2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 24 Nov 2024 22:30:14 -0500 Subject: [PATCH 0288/1248] fix ignored_filter check, exclude dummy events over sync Signed-off-by: strawberry --- src/api/client/message.rs 
| 18 +++++++++--------- src/api/client/sync/v3.rs | 25 +++---------------------- src/api/client/sync/v4.rs | 25 +++---------------------- 3 files changed, 15 insertions(+), 53 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index f1a10aa2..d8043855 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -25,8 +25,8 @@ use crate::Ruma; pub(crate) type LazySet = HashSet; -/// list of safe and common non-state events to ignore -const IGNORED_MESSAGE_TYPES: &[TimelineEventType] = &[ +/// list of safe and common non-state events to ignore if the user is ignored +const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 16] = &[ RoomMessage, Sticker, CallInvite, @@ -206,19 +206,19 @@ pub(crate) async fn update_lazy( pub(crate) async fn ignored_filter(services: &Services, item: PdusIterItem, user_id: &UserId) -> Option { let (_, pdu) = &item; + // exclude Synapse's dummy events from bloating up response bodies. clients + // don't need to see this. if pdu.kind.to_cow_str() == "org.matrix.dummy_event" { return None; } - if !IGNORED_MESSAGE_TYPES.iter().any(is_equal_to!(&pdu.kind)) { - return Some(item); + if IGNORED_MESSAGE_TYPES.iter().any(is_equal_to!(&pdu.kind)) + && services.users.user_is_ignored(&pdu.sender, user_id).await + { + return None; } - if !services.users.user_is_ignored(&pdu.sender, user_id).await { - return Some(item); - } - - None + Some(item) } pub(crate) async fn visibility_filter( diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index b69cbc87..0cb22317 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -38,6 +38,7 @@ use tracing::{Instrument as _, Span}; use super::{load_timeline, share_encrypted_room}; use crate::{ + client::ignored_filter, service::{pdu::EventHash, Services}, utils, Error, PduEvent, Result, Ruma, RumaResponse, }; @@ -949,28 +950,8 @@ async fn load_joined_room( let room_events: Vec<_> = timeline_pdus .iter() .stream() - .filter_map(|(_, pdu)| async move 
{ - // list of safe and common non-state events to ignore - if matches!( - &pdu.kind, - RoomMessage - | Sticker | CallInvite - | CallNotify | RoomEncrypted - | Image | File | Audio - | Voice | Video | UnstablePollStart - | PollStart | KeyVerificationStart - | Reaction | Emote - | Location - ) && services - .users - .user_is_ignored(&pdu.sender, sender_user) - .await - { - return None; - } - - Some(pdu.to_sync_room_event()) - }) + .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) + .map(|(_, pdu)| pdu.to_sync_room_event()) .collect() .await; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 0913336d..62c313e2 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -35,7 +35,7 @@ use ruma::{ use service::{rooms::read_receipt::pack_receipts, Services}; use super::{load_timeline, share_encrypted_room}; -use crate::Ruma; +use crate::{client::ignored_filter, Ruma}; const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = @@ -539,27 +539,8 @@ pub(crate) async fn sync_events_v4_route( let room_events: Vec<_> = timeline_pdus .iter() .stream() - .filter_map(|(_, pdu)| async move { - // list of safe and common non-state events to ignore - if matches!( - &pdu.kind, - RoomMessage - | Sticker | CallInvite - | CallNotify | RoomEncrypted - | Image | File | Audio - | Voice | Video | UnstablePollStart - | PollStart | KeyVerificationStart - | Reaction | Emote | Location - ) && services - .users - .user_is_ignored(&pdu.sender, sender_user) - .await - { - return None; - } - - Some(pdu.to_sync_room_event()) - }) + .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) + .map(|(_, pdu)| pdu.to_sync_room_event()) .collect() .await; From e9fee04eef52c781455468e0fe68cbb13f60dad2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 24 Nov 2024 23:08:40 -0500 Subject: [PATCH 0289/1248] fix needlessly strict appservice user existence check Signed-off-by: 
strawberry --- src/api/router/auth.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 68abf5e2..56c16550 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -197,10 +197,6 @@ async fn auth_appservice(services: &Services, request: &Request, info: Box Date: Sun, 24 Nov 2024 23:11:13 -0500 Subject: [PATCH 0290/1248] slightly refactor appservice registration command Signed-off-by: strawberry --- src/admin/appservice/commands.rs | 18 ++++++++++++------ src/service/appservice/mod.rs | 12 ++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 7d6378f3..6d661308 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -11,19 +11,25 @@ pub(super) async fn register(&self) -> Result { )); } - let appservice_config = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config); + let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); + let parsed_config = serde_yaml::from_str::(&appservice_config_body); match parsed_config { - Ok(yaml) => match self.services.appservice.register_appservice(yaml).await { - Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {id}." 
+ Ok(registration) => match self + .services + .appservice + .register_appservice(®istration, &appservice_config_body) + .await + { + Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( + "Appservice registered with ID: {}", + registration.id ))), Err(e) => Ok(RoomMessageEventContent::text_plain(format!( "Failed to register appservice: {e}" ))), }, Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {e}" + "Could not parse appservice config as YAML: {e}" ))), } } diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 4a20b130..a55d6534 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -61,18 +61,18 @@ impl crate::Service for Service { impl Service { /// Registers an appservice and returns the ID to the caller - pub async fn register_appservice(&self, yaml: Registration) -> Result { + pub async fn register_appservice(&self, registration: &Registration, appservice_config_body: &str) -> Result { //TODO: Check for collisions between exclusive appservice namespaces self.registration_info .write() .await - .insert(yaml.id.clone(), yaml.clone().try_into()?); + .insert(registration.id.clone(), registration.clone().try_into()?); - let id = yaml.id.as_str(); - let yaml = serde_yaml::to_string(&yaml)?; - self.db.id_appserviceregistrations.insert(id, yaml); + self.db + .id_appserviceregistrations + .insert(®istration.id, appservice_config_body); - Ok(id.to_owned()) + Ok(()) } /// Remove an appservice registration From 6c663919880f49738b502e051686601af7cb5079 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 25 Nov 2024 06:47:20 +0000 Subject: [PATCH 0291/1248] fix unnecessary serialization of sender query keys Signed-off-by: Jason Volk --- src/service/sending/data.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index ca7ca19a..118dab91 100644 --- a/src/service/sending/data.rs +++ 
b/src/service/sending/data.rs @@ -103,8 +103,9 @@ impl Data { pub fn active_requests_for(&self, destination: &Destination) -> impl Stream + Send + '_ { let prefix = destination.get_prefix(); self.servercurrentevent_data - .stream_prefix_raw(&prefix) + .raw_stream_from(&prefix) .ignore_err() + .ready_take_while(move |(key, _)| key.starts_with(&prefix)) .map(|(key, val)| { let (_, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); @@ -152,8 +153,9 @@ impl Data { pub fn queued_requests(&self, destination: &Destination) -> impl Stream + Send + '_ { let prefix = destination.get_prefix(); self.servernameevent_data - .stream_prefix_raw(&prefix) + .raw_stream_from(&prefix) .ignore_err() + .ready_take_while(move |(key, _)| key.starts_with(&prefix)) .map(|(key, val)| { let (_, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); From 62d560e2fbd866f6d1b9f33d5ea0d5753acecfbb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 25 Nov 2024 06:26:34 +0000 Subject: [PATCH 0292/1248] improve tracing instruments on database::map Signed-off-by: Jason Volk --- src/database/map/get.rs | 2 +- src/database/map/insert.rs | 1 + src/database/map/keys.rs | 1 - src/database/map/keys_from.rs | 6 ++---- src/database/map/keys_prefix.rs | 5 +---- src/database/map/remove.rs | 2 +- src/database/map/rev_keys.rs | 1 - src/database/map/rev_keys_from.rs | 6 ++---- src/database/map/rev_keys_prefix.rs | 5 +---- src/database/map/rev_stream.rs | 1 - src/database/map/rev_stream_from.rs | 6 ++---- src/database/map/rev_stream_prefix.rs | 5 +---- src/database/map/stream.rs | 1 - src/database/map/stream_from.rs | 6 ++---- src/database/map/stream_prefix.rs | 5 +---- 15 files changed, 15 insertions(+), 38 deletions(-) diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 3ee2a194..24649175 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -38,7 +38,7 @@ where /// Fetch a value from the database into cache, returning a 
reference-handle /// asynchronously. The key is serialized into a user-supplied Writer. #[implement(super::Map)] -#[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] +#[tracing::instrument(skip(self, buf), level = "trace")] pub fn bqry(&self, key: &K, buf: &mut B) -> impl Future>> + Send where K: Serialize + ?Sized + Debug, diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index b8b08b34..9bebe7cf 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -149,6 +149,7 @@ where /// - Key is serialized to supplied buffer /// - Val is raw #[implement(super::Map)] +#[tracing::instrument(skip(self, val, buf), level = "trace")] pub fn bput_raw(&self, key: K, val: V, mut buf: Bk) where K: Serialize + Debug, diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs index 2396494c..9c4d66e4 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -5,7 +5,6 @@ use serde::Deserialize; use crate::{keyval, keyval::Key, stream}; #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn keys<'a, K>(&'a self) -> impl Stream>> + Send where K: Deserialize<'a> + Send, diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 4eb3b12e..630bf3fb 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -7,7 +7,6 @@ use serde::{Deserialize, Serialize}; use crate::{keyval, keyval::Key, ser, stream}; #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn keys_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -18,7 +17,7 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self), level = "trace")] pub fn keys_from_raw

    (&self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -28,7 +27,6 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn keys_raw_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, @@ -39,7 +37,7 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self, from), fields(%self), level = "trace")] pub fn raw_keys_from

    (&self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 0ff755f3..df214af4 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -11,7 +11,6 @@ use serde::{Deserialize, Serialize}; use crate::{keyval, keyval::Key, ser}; #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn keys_prefix<'a, K, P>(&'a self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -22,7 +21,7 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self), level = "trace")] pub fn keys_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -33,7 +32,6 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn keys_raw_prefix<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, @@ -44,7 +42,6 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn raw_keys_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index 42eaa477..949817a0 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -25,7 +25,7 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] +#[tracing::instrument(skip(self, buf), level = "trace")] pub fn bdel(&self, key: K, buf: &mut B) where K: Serialize + Debug, diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 449ccfff..e10a199c 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -5,7 +5,6 @@ use serde::Deserialize; use crate::{keyval, keyval::Key, stream}; #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_keys<'a, K>(&'a self) -> impl Stream>> + Send where K: Deserialize<'a> + Send, diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index b142718c..c1c6f3da 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -7,7 +7,6 @@ use serde::{Deserialize, Serialize}; use crate::{keyval, keyval::Key, ser, stream}; #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_keys_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -18,7 +17,7 @@ where } 
#[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self), level = "trace")] pub fn rev_keys_from_raw

    (&self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -28,7 +27,6 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_keys_raw_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, @@ -39,7 +37,7 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self, from), fields(%self), level = "trace")] pub fn rev_raw_keys_from

    (&self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index 5297cecf..957b974e 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -11,7 +11,6 @@ use serde::{Deserialize, Serialize}; use crate::{keyval, keyval::Key, ser}; #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_keys_prefix<'a, K, P>(&'a self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -22,7 +21,7 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self), level = "trace")] pub fn rev_keys_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -33,7 +32,6 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_keys_raw_prefix<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, @@ -44,7 +42,6 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_raw_keys_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index de22fd5c..f4be69fd 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -8,7 +8,6 @@ use crate::{keyval, keyval::KeyVal, stream}; /// /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_stream<'a, K, V>(&'a self) -> impl Stream>> + Send where K: Deserialize<'a> + Send, diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 78318a7f..7ef25ee0 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -11,7 +11,6 @@ use crate::{keyval, keyval::KeyVal, ser, stream}; /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_stream_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -27,7 +26,7 @@ where /// - Query is serialized /// - Result is raw #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self), level = "trace")] pub fn rev_stream_from_raw

    (&self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -41,7 +40,6 @@ where /// - Query is raw /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_stream_raw_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, @@ -57,7 +55,7 @@ where /// - Query is raw /// - Result is raw #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self, from), fields(%self), level = "trace")] pub fn rev_raw_stream_from

    (&self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 601c3298..286cedca 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -15,7 +15,6 @@ use crate::{keyval, keyval::KeyVal, ser}; /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_stream_prefix<'a, K, V, P>(&'a self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -31,7 +30,7 @@ where /// - Query is serialized /// - Result is raw #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self), level = "trace")] pub fn rev_stream_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -46,7 +45,6 @@ where /// - Query is raw /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_stream_raw_prefix<'a, K, V, P>( &'a self, prefix: &'a P, ) -> impl Stream>> + Send + 'a @@ -64,7 +62,6 @@ where /// - Query is raw /// - Result is raw #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_raw_stream_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index dfbea072..143b0d0c 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -8,7 +8,6 @@ use crate::{keyval, keyval::KeyVal, stream}; /// /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn stream<'a, K, V>(&'a self) -> impl Stream>> + Send where K: Deserialize<'a> + Send, diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 0d3bb1e1..fe89afe1 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -11,7 +11,6 @@ use crate::{keyval, keyval::KeyVal, ser, stream}; /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn stream_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -27,7 +26,7 @@ where /// - Query is serialized /// - Result is raw #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self), level = "trace")] pub fn stream_from_raw

    (&self, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -41,7 +40,6 @@ where /// - Query is raw /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn stream_raw_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, @@ -57,7 +55,7 @@ where /// - Query is raw /// - Result is raw #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self, from), fields(%self), level = "trace")] pub fn raw_stream_from

    (&self, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index cab3dd09..ca4cfeaa 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -15,7 +15,6 @@ use crate::{keyval, keyval::KeyVal, ser}; /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn stream_prefix<'a, K, V, P>(&'a self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -31,7 +30,7 @@ where /// - Query is serialized /// - Result is raw #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] +#[tracing::instrument(skip(self), level = "trace")] pub fn stream_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -46,7 +45,6 @@ where /// - Query is raw /// - Result is deserialized #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn stream_raw_prefix<'a, K, V, P>( &'a self, prefix: &'a P, ) -> impl Stream>> + Send + 'a @@ -64,7 +62,6 @@ where /// - Query is raw /// - Result is raw #[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn raw_stream_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, From b20bd65d383129a4cb59c6e9a2262e512ea7aeff Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 24 Nov 2024 23:46:34 -0500 Subject: [PATCH 0293/1248] fix matrix-appservice-irc workaround Signed-off-by: strawberry --- src/api/client/account.rs | 44 +++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 5ed4b312..47f6fec8 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -55,8 +55,15 @@ pub(crate) async fn get_register_available_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + // don't force the username lowercase if it's from matrix-appservice-irc + let body_username = if is_matrix_appservice_irc { + body.username.clone() + } else { + body.username.to_lowercase() + }; + // Validate user id - let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), services.globals.server_name()) + let user_id = UserId::parse_with_server_name(body_username, services.globals.server_name()) .ok() .filter(|user_id| { (!user_id.is_historical() || is_matrix_appservice_irc) && services.globals.user_is_local(user_id) @@ -143,23 +150,28 @@ pub(crate) async fn register_route( return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration temporarily disabled.")); } - // workaround for 
https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue - let is_matrix_appservice_irc = body.appservice_info.as_ref().is_some_and(|appservice| { - appservice.registration.id == "irc" - || appservice.registration.id.contains("matrix-appservice-irc") - || appservice.registration.id.contains("matrix_appservice_irc") - }); - let user_id = match (&body.username, is_guest) { (Some(username), false) => { - let proposed_user_id = - UserId::parse_with_server_name(username.to_lowercase(), services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue + let is_matrix_appservice_irc = body.appservice_info.as_ref().is_some_and(|appservice| { + appservice.registration.id == "irc" + || appservice.registration.id.contains("matrix-appservice-irc") + || appservice.registration.id.contains("matrix_appservice_irc") + }); + + // don't force the username lowercase if it's from matrix-appservice-irc + let body_username = if is_matrix_appservice_irc { + username.clone() + } else { + username.to_lowercase() + }; + + let proposed_user_id = UserId::parse_with_server_name(body_username, services.globals.server_name()) + .ok() + .filter(|user_id| { + (!user_id.is_historical() || is_matrix_appservice_irc) && services.globals.user_is_local(user_id) + }) + .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; if services.users.exists(&proposed_user_id).await { return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); From 63d1fcf213ada2841169c652869b72bc70f9f8d6 Mon Sep 17 00:00:00 2001 From: morguldir Date: Mon, 25 Nov 2024 16:08:30 -0500 Subject: [PATCH 0294/1248] add queued transactions 
rocksdb cf cache Signed-off-by: strawberry --- conduwuit-example.toml | 4 ++++ src/core/config/mod.rs | 6 ++++++ src/database/opts.rs | 9 +++++++++ 3 files changed, 19 insertions(+) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 78136efb..30da80e6 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -170,6 +170,10 @@ # #statekeyshort_cache_capacity = varies by system +# This item is undocumented. Please contribute documentation for it. +# +#servernameevent_data_cache_capacity = varies by system + # This item is undocumented. Please contribute documentation for it. # #server_visibility_cache_capacity = varies by system diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 1754581d..fafd3396 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -211,6 +211,10 @@ pub struct Config { #[serde(default = "default_statekeyshort_cache_capacity")] pub statekeyshort_cache_capacity: u32, + /// default: varies by system + #[serde(default = "default_servernameevent_data_cache_capacity")] + pub servernameevent_data_cache_capacity: u32, + /// default: varies by system #[serde(default = "default_server_visibility_cache_capacity")] pub server_visibility_cache_capacity: u32, @@ -2059,6 +2063,8 @@ fn default_shortstatekey_cache_capacity() -> u32 { parallelism_scaled_u32(10_000 fn default_statekeyshort_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } +fn default_servernameevent_data_cache_capacity() -> u32 { parallelism_scaled_u32(100_000).saturating_add(500_000) } + fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(500) } fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } diff --git a/src/database/opts.rs b/src/database/opts.rs index 732f571f..28a39cca 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -170,6 +170,15 @@ pub(crate) fn cf_options( cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024)?, ), + 
"servernameevent_data" => set_table_with_new_cache( + &mut opts, + cfg, + cache, + name, + cache_size(cfg, cfg.servernameevent_data_cache_capacity, 128)?, /* Raw average value size = 102, key + * size = 34 */ + ), + "eventid_outlierpdu" => { set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.pdu_cache_capacity, 1536)?); }, From c5c74febb5bf0a300a61c905e0e614f2b7b806c2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 25 Nov 2024 16:32:09 -0500 Subject: [PATCH 0295/1248] bump rust-rocksdb to 4bce1bb97d8be6f0d47245c99d465ca9cef33aad Signed-off-by: strawberry --- Cargo.lock | 6 +++--- deps/rust-rocksdb/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e3af8ae0..28f0edad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2151,7 +2151,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -3342,7 +3342,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.29.0+9.7.4" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2bc5495a9f8f75073390c326b47ee5928ab7c7f0#2bc5495a9f8f75073390c326b47ee5928ab7c7f0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=4bce1bb97d8be6f0d47245c99d465ca9cef33aad#4bce1bb97d8be6f0d47245c99d465ca9cef33aad" dependencies = [ "bindgen", "bzip2-sys", @@ -3359,7 +3359,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.33.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2bc5495a9f8f75073390c326b47ee5928ab7c7f0#2bc5495a9f8f75073390c326b47ee5928ab7c7f0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=4bce1bb97d8be6f0d47245c99d465ca9cef33aad#4bce1bb97d8be6f0d47245c99d465ca9cef33aad" dependencies = [ "libc", "rust-librocksdb-sys", diff --git 
a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 908a2911..64a8dcb6 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2bc5495a9f8f75073390c326b47ee5928ab7c7f0" +rev = "4bce1bb97d8be6f0d47245c99d465ca9cef33aad" #branch = "master" default-features = false From 238523f17701e711a47b072646fa25043f62d4b6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 25 Nov 2024 21:01:15 +0000 Subject: [PATCH 0296/1248] cleanup: reuse api rather than querying db Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/data.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 80046d77..62e1f550 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -19,7 +19,6 @@ use crate::{ }; pub(super) struct Data { - eventid_shorteventid: Arc, shorteventid_shortstatehash: Arc, services: Services, } @@ -35,7 +34,6 @@ impl Data { pub(super) fn new(args: &crate::Args<'_>) -> Self { let db = &args.db; Self { - eventid_shorteventid: db["eventid_shorteventid"].clone(), shorteventid_shortstatehash: db["shorteventid_shortstatehash"].clone(), services: Services { short: args.depend::("rooms::short"), @@ -167,9 +165,15 @@ impl Data { /// Returns the state hash for this pdu. 
pub(super) async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - self.eventid_shorteventid - .get(event_id) - .and_then(|shorteventid| self.shorteventid_shortstatehash.get(&shorteventid)) + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.shorteventid_shortstatehash + .aqry::(&shorteventid) + }) .await .deserialized() } From f69c596f5607e596ee5e79ff8347dcbba676144f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 26 Nov 2024 00:35:03 +0000 Subject: [PATCH 0297/1248] generalize return value wrapping to not require Arc Signed-off-by: Jason Volk --- src/service/rooms/short/mod.rs | 13 ++++++++++--- src/service/rooms/state/mod.rs | 8 ++++++-- src/service/rooms/state_accessor/data.rs | 4 ++-- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index e4ff2975..0f100348 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,10 +1,11 @@ -use std::{fmt::Debug, mem::size_of_val, sync::Arc}; +use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; pub use conduit::pdu::{ShortEventId, ShortId, ShortRoomId}; use conduit::{err, implement, utils, utils::stream::ReadyExt, Result}; use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; +use serde::Deserialize; use crate::{globals, Dep}; @@ -136,7 +137,11 @@ pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &s } #[implement(Service)] -pub async fn get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result> { +pub async fn get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + ::Owned: Borrow, +{ const BUFSIZE: usize = size_of::(); self.db @@ -148,8 +153,10 @@ pub async fn get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result 
} #[implement(Service)] -pub async fn multi_get_eventid_from_short(&self, shorteventid: I) -> Vec>> +pub async fn multi_get_eventid_from_short(&self, shorteventid: I) -> Vec> where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + ::Owned: Borrow, I: Iterator + Send, { const BUFSIZE: usize = size_of::(); diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 4429e912..3227b935 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -102,7 +102,11 @@ impl Service { .iter() .stream() .map(|&new| parse_compressed_state_event(new).1) - .then(|shorteventid| self.services.short.get_eventid_from_short(shorteventid)) + .then(|shorteventid| { + self.services + .short + .get_eventid_from_short::>(shorteventid) + }) .ignore_err(); pin_mut!(event_ids); @@ -433,7 +437,7 @@ impl Service { .await .into_iter() .stream() - .and_then(|event_id| async move { self.services.timeline.get_pdu(&event_id).await }) + .and_then(|event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await }) .collect() .await; diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 62e1f550..1ef91221 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -7,7 +7,7 @@ use conduit::{ }; use database::{Deserialized, Map}; use futures::{StreamExt, TryFutureExt}; -use ruma::{events::StateEventType, EventId, RoomId}; +use ruma::{events::StateEventType, EventId, OwnedEventId, RoomId}; use crate::{ rooms, @@ -74,7 +74,7 @@ impl Data { .into_iter() .stream() .ready_filter_map(Result::ok) - .filter_map(|event_id| async move { self.services.timeline.get_pdu(&event_id).await.ok() }) + .filter_map(|event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await.ok() }) .collect() .await; From 4f97ff98d66cbd5b3dc9c5ca9a27232959b70ee7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 27 Nov 2024 02:55:38 +0000 
Subject: [PATCH 0298/1248] enter the tokio runtime for the scope of main init Signed-off-by: Jason Volk --- src/main/server.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main/server.rs b/src/main/server.rs index 4813d586..27fd0673 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -24,6 +24,8 @@ pub(crate) struct Server { impl Server { pub(crate) fn build(args: &Args, runtime: Option<&runtime::Handle>) -> Result, Error> { + let _runtime_guard = runtime.map(runtime::Handle::enter); + let raw_config = Config::load(args.config.as_deref())?; let raw_config = crate::clap::update(raw_config, args)?; let config = Config::new(&raw_config)?; From e83fa124512cfa79172fc8eefc4bbe0eee388902 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 26 Nov 2024 01:01:08 +0000 Subject: [PATCH 0299/1248] tweak dev profile Signed-off-by: Jason Volk --- Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 02c3b5ab..12163f6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -633,7 +633,6 @@ opt-level = 0 panic = "unwind" debug-assertions = true incremental = true -codegen-units = 64 #rustflags = [ # '--cfg', 'conduit_mods', # '-Ztime-passes', @@ -675,7 +674,6 @@ incremental = false [profile.dev.package.conduit] inherits = "dev" -incremental = false #rustflags = [ # '--cfg', 'conduit_mods', # '-Ztime-passes', From 527494a34b5f55cb18f329d7f91c20df71def623 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 27 Nov 2024 04:48:40 +0000 Subject: [PATCH 0300/1248] fix oversized tracing span arguments; lints Signed-off-by: Jason Volk --- src/database/engine.rs | 2 +- src/service/rooms/auth_chain/mod.rs | 13 ++++++++----- src/service/rooms/state/mod.rs | 2 +- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index 1fa53b01..9da20803 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -296,7 +296,7 @@ pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> 
Result<()> { Ok(()) } -#[tracing::instrument(skip_all, name = "rocksdb")] +#[tracing::instrument(skip_all, name = "rocksdb", level = "debug")] pub(crate) fn handle_log(level: LogLevel, msg: &str) { let msg = msg.trim(); if msg.starts_with("Options") { diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 1d0490c2..5face0b5 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -209,20 +209,23 @@ impl Service { self.db.get_cached_eventid_authchain(key).await } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip_all, level = "debug")] pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { - let val = auth_chain.iter().copied().collect::>(); + let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); + self.db.cache_auth_chain(key, val); } - #[tracing::instrument(skip(self), level = "debug")] - pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &Vec) { - let val = auth_chain.iter().copied().collect::>(); + #[tracing::instrument(skip_all, level = "debug")] + pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &[ShortEventId]) { + let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); + self.db.cache_auth_chain(key, val); } pub fn get_cache_usage(&self) -> (usize, usize) { let cache = self.db.auth_chain_cache.lock().expect("locked"); + (cache.len(), cache.capacity()) } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 3227b935..8bd5f7eb 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -393,7 +393,7 @@ impl Service { } /// This fetches auth events from the current state. 
- #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self, content), level = "debug")] pub async fn get_auth_events( &self, room_id: &RoomId, kind: &TimelineEventType, sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, From dd8c646b633ff05e9f8e49a274712d6bf435e83a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 25 Nov 2024 23:27:16 +0000 Subject: [PATCH 0301/1248] optimize state compressor I/O w/ batch operation Signed-off-by: Jason Volk --- .../rooms/event_handler/resolve_state.rs | 39 +++++++++------- .../event_handler/upgrade_outlier_pdu.rs | 20 +++++---- src/service/rooms/state_compressor/mod.rs | 45 ++++++++++++++----- 3 files changed, 69 insertions(+), 35 deletions(-) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 4863e340..dc0edd13 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -79,23 +79,30 @@ pub async fn resolve_state( drop(lock); - debug!("State resolution done. 
Compressing state"); - let mut new_room_state = HashSet::new(); - for ((event_type, state_key), event_id) in state { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key) - .await; + debug!("State resolution done."); + let state_events: Vec<_> = state + .iter() + .stream() + .then(|((event_type, state_key), event_id)| { + self.services + .short + .get_or_create_shortstatekey(event_type, state_key) + .map(move |shortstatekey| (shortstatekey, event_id)) + }) + .collect() + .await; - let compressed = self - .services - .state_compressor - .compress_state_event(shortstatekey, &event_id) - .await; - - new_room_state.insert(compressed); - } + debug!("Compressing state..."); + let new_room_state: HashSet<_> = self + .services + .state_compressor + .compress_state_events( + state_events + .iter() + .map(|(ref ssk, eid)| (ssk, (*eid).borrow())), + ) + .collect() + .await; Ok(Arc::new(new_room_state)) } diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 2a1e4662..13e2b281 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,4 +1,5 @@ use std::{ + borrow::Borrow, collections::{BTreeMap, HashSet}, sync::Arc, time::Instant, @@ -193,15 +194,16 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( extremities.retain(|id| retained.contains(id)); debug!("Retained {} extremities. 
Compressing state", extremities.len()); - let mut state_ids_compressed = HashSet::new(); - for (shortstatekey, id) in &state_at_incoming_event { - state_ids_compressed.insert( - self.services - .state_compressor - .compress_state_event(*shortstatekey, id) - .await, - ); - } + let state_ids_compressed: HashSet<_> = self + .services + .state_compressor + .compress_state_events( + state_at_incoming_event + .iter() + .map(|(ssk, eid)| (ssk, eid.borrow())), + ) + .collect() + .await; let state_ids_compressed = Arc::new(state_ids_compressed); diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 52ad5437..8c6eccbe 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,6 +1,6 @@ use std::{ collections::{HashMap, HashSet}, - fmt::Write, + fmt::{Debug, Write}, mem::size_of, sync::{Arc, Mutex}, }; @@ -8,10 +8,11 @@ use std::{ use arrayvec::ArrayVec; use conduit::{ at, checked, debug, err, expected, utils, - utils::{bytes, math::usize_from_f64}, + utils::{bytes, math::usize_from_f64, stream::IterStream}, Result, }; use database::Map; +use futures::{Stream, StreamExt}; use lru_cache::LruCache; use ruma::{EventId, RoomId}; @@ -179,21 +180,32 @@ impl Service { Ok(stack) } - pub async fn compress_state_event(&self, shortstatekey: ShortStateKey, event_id: &EventId) -> CompressedStateEvent { - const SIZE: usize = size_of::(); + pub fn compress_state_events<'a, I>(&'a self, state: I) -> impl Stream + Send + 'a + where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, + { + let event_ids = state.clone().map(at!(1)); + let short_event_ids = self + .services + .short + .multi_get_or_create_shorteventid(event_ids); + + state + .stream() + .map(at!(0)) + .zip(short_event_ids) + .map(|(shortstatekey, shorteventid)| compress_state_event(*shortstatekey, shorteventid)) + } + + pub async fn compress_state_event(&self, shortstatekey: ShortStateKey, event_id: &EventId) -> 
CompressedStateEvent { let shorteventid = self .services .short .get_or_create_shorteventid(event_id) .await; - let mut v = ArrayVec::::new(); - v.extend(shortstatekey.to_be_bytes()); - v.extend(shorteventid.to_be_bytes()); - v.as_ref() - .try_into() - .expect("failed to create CompressedStateEvent") + compress_state_event(shortstatekey, shorteventid) } /// Creates a new shortstatehash that often is just a diff to an already @@ -470,6 +482,19 @@ impl Service { } } +#[inline] +#[must_use] +fn compress_state_event(shortstatekey: ShortStateKey, shorteventid: ShortEventId) -> CompressedStateEvent { + const SIZE: usize = size_of::(); + + let mut v = ArrayVec::::new(); + v.extend(shortstatekey.to_be_bytes()); + v.extend(shorteventid.to_be_bytes()); + v.as_ref() + .try_into() + .expect("failed to create CompressedStateEvent") +} + #[inline] #[must_use] pub fn parse_compressed_state_event(compressed_event: CompressedStateEvent) -> (ShortStateKey, ShortEventId) { From 2aeee4f5095993c40463193dc491d8a0279c48dc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 27 Nov 2024 05:57:20 +0000 Subject: [PATCH 0302/1248] parallel query for outlier/non-outlier pdu data Signed-off-by: Jason Volk --- src/service/rooms/timeline/data.rs | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 22a6c1d0..c15d8e7f 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -12,7 +12,7 @@ use conduit::{ Err, PduCount, PduEvent, Result, }; use database::{Database, Deserialized, Json, KeyVal, Map}; -use futures::{Stream, StreamExt}; +use futures::{future::select_ok, FutureExt, Stream, StreamExt}; use ruma::{api::Direction, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use tokio::sync::Mutex; @@ -82,11 +82,14 @@ impl Data { /// Returns the json of a pdu. 
pub(super) async fn get_pdu_json(&self, event_id: &EventId) -> Result { - if let Ok(pdu) = self.get_non_outlier_pdu_json(event_id).await { - return Ok(pdu); - } + let accepted = self.get_non_outlier_pdu_json(event_id).boxed(); + let outlier = self + .eventid_outlierpdu + .get(event_id) + .map(Deserialized::deserialized) + .boxed(); - self.eventid_outlierpdu.get(event_id).await.deserialized() + select_ok([accepted, outlier]).await.map(at!(0)) } /// Returns the json of a pdu. @@ -131,11 +134,14 @@ impl Data { /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub(super) async fn get_pdu_owned(&self, event_id: &EventId) -> Result { - if let Ok(pdu) = self.get_non_outlier_pdu(event_id).await { - return Ok(pdu); - } + let accepted = self.get_non_outlier_pdu(event_id).boxed(); + let outlier = self + .eventid_outlierpdu + .get(event_id) + .map(Deserialized::deserialized) + .boxed(); - self.eventid_outlierpdu.get(event_id).await.deserialized() + select_ok([accepted, outlier]).await.map(at!(0)) } /// Like get_non_outlier_pdu(), but without the expense of fetching and From 94d7b21cf01378ca8d8a0451d00c8eb4374c39c7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 27 Nov 2024 06:25:13 +0000 Subject: [PATCH 0303/1248] use stricter timeout for fetching state Signed-off-by: Jason Volk --- src/service/rooms/event_handler/fetch_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 74b0bb32..228b7d0c 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -22,7 +22,7 @@ pub(super) async fn fetch_state( let res = self .services .sending - .send_synapse_request( + .send_federation_request( origin, get_room_state_ids::v1::Request { room_id: room_id.to_owned(), From c7ae9516767dff6767a725be3d144164c0d1ad56 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 27 Nov 2024 06:28:32 
+0000 Subject: [PATCH 0304/1248] add frontend threadpool to database Signed-off-by: Jason Volk --- Cargo.lock | 51 +++++++++- Cargo.toml | 6 +- src/database/Cargo.toml | 1 + src/database/engine.rs | 9 +- src/database/map.rs | 11 ++- src/database/map/contains.rs | 49 +++++---- src/database/map/get.rs | 100 ++++++++++++++----- src/database/mod.rs | 1 + src/database/pool.rs | 186 +++++++++++++++++++++++++++++++++++ src/database/util.rs | 5 +- 10 files changed, 362 insertions(+), 57 deletions(-) create mode 100644 src/database/pool.rs diff --git a/Cargo.lock b/Cargo.lock index 28f0edad..5c79b4b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -92,6 +92,18 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" version = "0.4.18" @@ -597,6 +609,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "conduit" version = "0.5.0" @@ -736,6 +757,7 @@ name = "conduit_database" version = "0.5.0" dependencies = [ "arrayvec", + "async-channel", "conduit_core", "const-str", "futures", @@ -1219,6 +1241,27 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "event-listener" +version = "5.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener", + "pin-project-lite", +] + [[package]] name = "fdeflate" version = "0.3.6" @@ -2151,7 +2194,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -2589,6 +2632,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + [[package]] name = "parking_lot" version = "0.12.3" diff --git a/Cargo.toml b/Cargo.toml index 12163f6a..283c5a95 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -307,10 +307,14 @@ version = "0.13.0" [workspace.dependencies.cyborgtime] version = "2.1.1" -# used to replace the channels of the tokio runtime +# used for MPSC channels [workspace.dependencies.loole] version = "0.4.0" +# used for MPMC channels +[workspace.dependencies.async-channel] +version = "2.3.1" + [workspace.dependencies.async-trait] version = "0.1.83" diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 0e718aa7..c45931a5 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -36,6 +36,7 @@ zstd_compression = [ [dependencies] arrayvec.workspace = true +async-channel.workspace = true conduit-core.workspace = true 
const-str.workspace = true futures.workspace = true diff --git a/src/database/engine.rs b/src/database/engine.rs index 9da20803..837c7259 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -16,7 +16,9 @@ use rocksdb::{ use crate::{ opts::{cf_options, db_options}, - or_else, result, + or_else, pool, + pool::Pool, + result, util::map_err, }; @@ -31,6 +33,7 @@ pub struct Engine { corks: AtomicU32, pub(super) read_only: bool, pub(super) secondary: bool, + pub(crate) pool: Arc, } pub(crate) type Db = DBWithThreadMode; @@ -111,6 +114,7 @@ impl Engine { corks: AtomicU32::new(0), read_only: config.rocksdb_read_only, secondary: config.rocksdb_secondary, + pool: Pool::new(&pool::Opts::default())?, })) } @@ -316,6 +320,9 @@ impl Drop for Engine { fn drop(&mut self) { const BLOCKING: bool = true; + debug!("Joining request threads..."); + self.pool.close(); + debug!("Waiting for background tasks to finish..."); self.db.cancel_all_background_work(BLOCKING); diff --git a/src/database/map.rs b/src/database/map.rs index d6b8bf38..4b55fa54 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -27,7 +27,7 @@ use std::{ }; use conduit::Result; -use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteOptions}; +use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, ReadTier, WriteOptions}; use crate::{watchers::Watchers, Engine}; @@ -38,6 +38,7 @@ pub struct Map { watchers: Watchers, write_options: WriteOptions, read_options: ReadOptions, + cache_read_options: ReadOptions, } impl Map { @@ -49,6 +50,7 @@ impl Map { watchers: Watchers::default(), write_options: write_options_default(), read_options: read_options_default(), + cache_read_options: cache_read_options_default(), })) } @@ -112,6 +114,13 @@ fn open(db: &Arc, name: &str) -> Result> { }) } +#[inline] +fn cache_read_options_default() -> ReadOptions { + let mut read_options = read_options_default(); + read_options.set_read_tier(ReadTier::BlockCache); + read_options +} + #[inline] fn 
read_options_default() -> ReadOptions { let mut read_options = ReadOptions::default(); diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index a98fe7c5..7acef2f6 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,17 +1,21 @@ -use std::{convert::AsRef, fmt::Debug, future::Future, io::Write}; +use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; use arrayvec::ArrayVec; -use conduit::{implement, utils::TryFutureExtExt, Err, Result}; -use futures::future::ready; +use conduit::{ + err, implement, + utils::{future::TryExtExt, result::FlatOk}, + Result, +}; +use futures::FutureExt; use serde::Serialize; -use crate::{ser, util}; +use crate::ser; /// Returns true if the map contains the key. /// - key is serialized into allocated buffer /// - harder errors may not be reported #[implement(super::Map)] -pub fn contains(&self, key: &K) -> impl Future + Send +pub fn contains(self: &Arc, key: &K) -> impl Future + Send + '_ where K: Serialize + ?Sized + Debug, { @@ -23,7 +27,7 @@ where /// - key is serialized into stack-buffer /// - harder errors will panic #[implement(super::Map)] -pub fn acontains(&self, key: &K) -> impl Future + Send +pub fn acontains(self: &Arc, key: &K) -> impl Future + Send + '_ where K: Serialize + ?Sized + Debug, { @@ -36,7 +40,7 @@ where /// - harder errors will panic #[implement(super::Map)] #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] -pub fn bcontains(&self, key: &K, buf: &mut B) -> impl Future + Send +pub fn bcontains(self: &Arc, key: &K, buf: &mut B) -> impl Future + Send + '_ where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, @@ -48,41 +52,36 @@ where /// Returns Ok if the map contains the key. 
/// - key is raw #[implement(super::Map)] -pub fn exists(&self, key: &K) -> impl Future> + Send +pub fn exists<'a, K>(self: &'a Arc, key: &K) -> impl Future + Send + 'a where - K: AsRef<[u8]> + ?Sized + Debug, + K: AsRef<[u8]> + ?Sized + Debug + 'a, { - ready(self.exists_blocking(key)) + self.get(key).map(|res| res.map(|_| ())) } /// Returns Ok if the map contains the key; NotFound otherwise. Harder errors /// may not always be reported properly. #[implement(super::Map)] #[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn exists_blocking(&self, key: &K) -> Result<()> +pub fn exists_blocking(&self, key: &K) -> Result where K: AsRef<[u8]> + ?Sized + Debug, { - if self.maybe_exists_blocking(key) - && self - .db - .db - .get_pinned_cf_opt(&self.cf(), key, &self.read_options) - .map_err(util::map_err)? - .is_some() - { - Ok(()) - } else { - Err!(Request(NotFound("Not found in database"))) - } + self.maybe_exists(key) + .then(|| self.get_blocking(key)) + .flat_ok() + .map(|_| ()) + .ok_or_else(|| err!(Request(NotFound("Not found in database")))) } +/// Rocksdb limits this to kBlockCacheTier internally so this is not actually a +/// blocking call; in case that changes we set this as well in our read_options. 
#[implement(super::Map)] -fn maybe_exists_blocking(&self, key: &K) -> bool +pub(crate) fn maybe_exists(&self, key: &K) -> bool where K: AsRef<[u8]> + ?Sized, { self.db .db - .key_may_exist_cf_opt(&self.cf(), key, &self.read_options) + .key_may_exist_cf_opt(&self.cf(), key, &self.cache_read_options) } diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 24649175..a00eecfa 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,13 +1,16 @@ -use std::{convert::AsRef, fmt::Debug, future::Future, io::Write}; +use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; -use conduit::{err, implement, utils::IterStream, Result}; -use futures::{FutureExt, Stream}; +use conduit::{err, implement, utils::IterStream, Err, Result}; +use futures::{future, Future, FutureExt, Stream}; use rocksdb::DBPinnableSlice; use serde::Serialize; -use tokio::task; -use crate::{ser, util, Handle}; +use crate::{ + ser, + util::{is_incomplete, map_err, or_else}, + Handle, +}; type RocksdbResult<'a> = Result>, rocksdb::Error>; @@ -15,7 +18,7 @@ type RocksdbResult<'a> = Result>, rocksdb::Error>; /// asynchronously. The key is serialized into an allocated buffer to perform /// the query. #[implement(super::Map)] -pub fn qry(&self, key: &K) -> impl Future>> + Send +pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send where K: Serialize + ?Sized + Debug, { @@ -27,7 +30,7 @@ where /// asynchronously. The key is serialized into a fixed-sized buffer to perform /// the query. The maximum size is supplied as const generic parameter. #[implement(super::Map)] -pub fn aqry(&self, key: &K) -> impl Future>> + Send +pub fn aqry(self: &Arc, key: &K) -> impl Future>> + Send where K: Serialize + ?Sized + Debug, { @@ -39,7 +42,7 @@ where /// asynchronously. The key is serialized into a user-supplied Writer. 
#[implement(super::Map)] #[tracing::instrument(skip(self, buf), level = "trace")] -pub fn bqry(&self, key: &K, buf: &mut B) -> impl Future>> + Send +pub fn bqry(self: &Arc, key: &K, buf: &mut B) -> impl Future>> + Send where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, @@ -52,28 +55,28 @@ where /// asynchronously. The key is referenced directly to perform the query. #[implement(super::Map)] #[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn get(&self, key: &K) -> impl Future>> + Send +pub fn get(self: &Arc, key: &K) -> impl Future>> + Send where - K: AsRef<[u8]> + ?Sized + Debug, + K: AsRef<[u8]> + Debug + ?Sized, { - let result = self.get_blocking(key); - task::consume_budget().map(move |()| result) -} + use crate::pool::{Cmd, Get}; -/// Fetch a value from the database into cache, returning a reference-handle. -/// The key is referenced directly to perform the query. This is a thread- -/// blocking call. -#[implement(super::Map)] -pub fn get_blocking(&self, key: &K) -> Result> -where - K: AsRef<[u8]> + ?Sized, -{ - let res = self - .db - .db - .get_pinned_cf_opt(&self.cf(), key, &self.read_options); + let cached = self.get_cached(key); + if matches!(cached, Err(_) | Ok(Some(_))) { + return future::ready(cached.map(|res| res.expect("Option is Some"))).boxed(); + } - into_result_handle(res) + debug_assert!(matches!(cached, Ok(None)), "expected status Incomplete"); + let cmd = Cmd::Get(Get { + map: self.clone(), + res: None, + key: key + .as_ref() + .try_into() + .expect("failed to copy key into buffer"), + }); + + self.db.pool.execute(cmd).boxed() } #[implement(super::Map)] @@ -104,9 +107,52 @@ where .map(into_result_handle) } +/// Fetch a value from the database into cache, returning a reference-handle. +/// The key is referenced directly to perform the query. This is a thread- +/// blocking call. 
+#[implement(super::Map)] +pub fn get_blocking(&self, key: &K) -> Result> +where + K: AsRef<[u8]> + ?Sized, +{ + let res = self + .db + .db + .get_pinned_cf_opt(&self.cf(), key, &self.read_options); + + into_result_handle(res) +} + +/// Fetch a value from the cache without I/O. +#[implement(super::Map)] +#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] +pub(crate) fn get_cached(&self, key: &K) -> Result>> +where + K: AsRef<[u8]> + Debug + ?Sized, +{ + let res = self + .db + .db + .get_pinned_cf_opt(&self.cf(), key, &self.cache_read_options); + + match res { + // cache hit; not found + Ok(None) => Err!(Request(NotFound("Not found in database"))), + + // cache hit; value found + Ok(Some(res)) => Ok(Some(Handle::from(res))), + + // cache miss; unknown + Err(e) if is_incomplete(&e) => Ok(None), + + // some other error occurred + Err(e) => or_else(e), + } +} + fn into_result_handle(result: RocksdbResult<'_>) -> Result> { result - .map_err(util::map_err)? + .map_err(map_err)? 
.map(Handle::from) .ok_or(err!(Request(NotFound("Not found in database")))) } diff --git a/src/database/mod.rs b/src/database/mod.rs index f09c4a71..cd91fba2 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -8,6 +8,7 @@ pub mod keyval; mod map; pub mod maps; mod opts; +mod pool; mod ser; mod stream; mod tests; diff --git a/src/database/pool.rs b/src/database/pool.rs new file mode 100644 index 00000000..e2cb2605 --- /dev/null +++ b/src/database/pool.rs @@ -0,0 +1,186 @@ +use std::{ + convert::identity, + mem::take, + sync::{Arc, Mutex}, + thread::JoinHandle, +}; + +use arrayvec::ArrayVec; +use async_channel::{bounded, Receiver, Sender}; +use conduit::{debug, defer, err, implement, Result}; +use futures::channel::oneshot; + +use crate::{Handle, Map}; + +pub(crate) struct Pool { + workers: Mutex>>, + recv: Receiver, + send: Sender, +} + +#[derive(Default)] +pub(crate) struct Opts { + queue_size: Option, + worker_num: Option, +} + +const WORKER_THREAD_NAME: &str = "conduwuit:db"; +const DEFAULT_QUEUE_SIZE: usize = 1024; +const DEFAULT_WORKER_NUM: usize = 32; +const KEY_MAX_BYTES: usize = 384; + +#[derive(Debug)] +pub(crate) enum Cmd { + Get(Get), +} + +#[derive(Debug)] +pub(crate) struct Get { + pub(crate) map: Arc, + pub(crate) key: ArrayVec, + pub(crate) res: Option>>>, +} + +#[implement(Pool)] +pub(crate) fn new(opts: &Opts) -> Result> { + let queue_size = opts.queue_size.unwrap_or(DEFAULT_QUEUE_SIZE); + + let (send, recv) = bounded(queue_size); + let pool = Arc::new(Self { + workers: Vec::new().into(), + recv, + send, + }); + + let worker_num = opts.worker_num.unwrap_or(DEFAULT_WORKER_NUM); + pool.spawn_until(worker_num)?; + + Ok(pool) +} + +#[implement(Pool)] +fn spawn_until(self: &Arc, max: usize) -> Result { + let mut workers = self.workers.lock()?; + + while workers.len() < max { + self.clone().spawn_one(&mut workers)?; + } + + Ok(()) +} + +#[implement(Pool)] +fn spawn_one(self: Arc, workers: &mut Vec>) -> Result { + use std::thread::Builder; + + 
let id = workers.len(); + + debug!(?id, "spawning {WORKER_THREAD_NAME}..."); + let thread = Builder::new() + .name(WORKER_THREAD_NAME.into()) + .spawn(move || self.worker(id))?; + + workers.push(thread); + + Ok(id) +} + +#[implement(Pool)] +pub(crate) fn close(self: &Arc) { + debug!( + senders = %self.send.sender_count(), + receivers = %self.send.receiver_count(), + "Closing pool channel" + ); + + let closing = self.send.close(); + debug_assert!(closing, "channel is not closing"); + + debug!("Shutting down pool..."); + let mut workers = self.workers.lock().expect("locked"); + + debug!( + workers = %workers.len(), + "Waiting for workers to join..." + ); + take(&mut *workers) + .into_iter() + .map(JoinHandle::join) + .try_for_each(identity) + .expect("failed to join worker threads"); + + debug_assert!(self.send.is_empty(), "channel is not empty"); +} + +#[implement(Pool)] +#[tracing::instrument(skip(self, cmd), level = "trace")] +pub(crate) async fn execute(&self, mut cmd: Cmd) -> Result> { + let (send, recv) = oneshot::channel(); + match &mut cmd { + Cmd::Get(ref mut cmd) => { + _ = cmd.res.insert(send); + }, + }; + + self.send + .send(cmd) + .await + .map_err(|e| err!(error!("send failed {e:?}")))?; + + recv.await + .map(into_recv_result) + .map_err(|e| err!(error!("recv failed {e:?}")))? +} + +#[implement(Pool)] +#[tracing::instrument(skip(self))] +fn worker(self: Arc, id: usize) { + debug!(?id, "worker spawned"); + defer! 
{{ debug!(?id, "worker finished"); }} + self.worker_loop(id); +} + +#[implement(Pool)] +fn worker_loop(&self, id: usize) { + while let Ok(mut cmd) = self.recv.recv_blocking() { + self.handle(id, &mut cmd); + } +} + +#[implement(Pool)] +fn handle(&self, id: usize, cmd: &mut Cmd) { + match cmd { + Cmd::Get(get) => self.handle_get(id, get), + } +} + +#[implement(Pool)] +#[tracing::instrument(skip(self, cmd), fields(%cmd.map), level = "trace")] +fn handle_get(&self, id: usize, cmd: &mut Get) { + let chan = cmd.res.take().expect("missing result channel"); + + // If the future was dropped while the command was queued then we can bail + // without any query. This makes it more efficient to use select() variants and + // pessimistic parallel queries. + if chan.is_canceled() { + return; + } + + let result = cmd.map.get_blocking(&cmd.key); + let _sent = chan.send(into_send_result(result)).is_ok(); +} + +fn into_send_result(result: Result>) -> Result> { + // SAFETY: Necessary to send the Handle (rust_rocksdb::PinnableSlice) through + // the channel. The lifetime on the handle is a device by rust-rocksdb to + // associate a database lifetime with its assets, not a function of rocksdb or + // the asset. The Handle must be dropped before the database is dropped. The + // handle must pass through recv_handle() on the other end of the channel. + unsafe { std::mem::transmute(result) } +} + +fn into_recv_result(result: Result>) -> Result> { + // SAFETY: This is to receive the Handle from the channel. Previously it had + // passed through send_handle(). + unsafe { std::mem::transmute(result) } +} diff --git a/src/database/util.rs b/src/database/util.rs index ae076381..21764361 100644 --- a/src/database/util.rs +++ b/src/database/util.rs @@ -1,5 +1,5 @@ use conduit::{err, Result}; -use rocksdb::{Direction, IteratorMode}; +use rocksdb::{Direction, ErrorKind, IteratorMode}; //#[cfg(debug_assertions)] macro_rules! 
unhandled { @@ -45,6 +45,9 @@ pub(crate) fn and_then(t: T) -> Result { Ok(t) } pub(crate) fn or_else(e: rocksdb::Error) -> Result { Err(map_err(e)) } +#[inline] +pub(crate) fn is_incomplete(e: &rocksdb::Error) -> bool { e.kind() == ErrorKind::Incomplete } + pub(crate) fn map_err(e: rocksdb::Error) -> conduit::Error { let string = e.into_string(); err!(Database(error!("{string}"))) From 76c75cc05afd5a32bb8f07bdbb47d79a71c40a64 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 27 Nov 2024 20:58:04 -0500 Subject: [PATCH 0305/1248] bump tracing fork Signed-off-by: strawberry --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 14 +++++++------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c79b4b5..94a2b2bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4501,8 +4501,8 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" -source = "git+https://github.com/girlbossceo/tracing?rev=4d78a14a5e03f539b8c6b475aefa08bb14e4de91#4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +version = "0.1.41" +source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" dependencies = [ "log", "pin-project-lite", @@ -4512,8 +4512,8 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" -source = "git+https://github.com/girlbossceo/tracing?rev=4d78a14a5e03f539b8c6b475aefa08bb14e4de91#4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +version = "0.1.28" +source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" dependencies = [ "proc-macro2", "quote", @@ -4522,8 +4522,8 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.32" -source = "git+https://github.com/girlbossceo/tracing?rev=4d78a14a5e03f539b8c6b475aefa08bb14e4de91#4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +version = "0.1.33" 
+source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" dependencies = [ "once_cell", "valuable", @@ -4543,7 +4543,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://github.com/girlbossceo/tracing?rev=4d78a14a5e03f539b8c6b475aefa08bb14e4de91#4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" dependencies = [ "log", "once_cell", @@ -4571,7 +4571,7 @@ dependencies = [ [[package]] name = "tracing-subscriber" version = "0.3.18" -source = "git+https://github.com/girlbossceo/tracing?rev=4d78a14a5e03f539b8c6b475aefa08bb14e4de91#4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" dependencies = [ "matchers", "nu-ansi-term", diff --git a/Cargo.toml b/Cargo.toml index 283c5a95..9557ae08 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -184,17 +184,17 @@ features = [ # logging [workspace.dependencies.log] -version = "0.4.21" +version = "0.4.22" default-features = false [workspace.dependencies.tracing] -version = "0.1.40" +version = "0.1.41" default-features = false [workspace.dependencies.tracing-subscriber] version = "0.3.18" default-features = false features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] [workspace.dependencies.tracing-core] -version = "0.1.32" +version = "0.1.33" default-features = false # for URL previews @@ -493,16 +493,16 @@ version = "1.3.0" # https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] git = "https://github.com/girlbossceo/tracing" -rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" [patch.crates-io.tracing] git = 
"https://github.com/girlbossceo/tracing" -rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" [patch.crates-io.tracing-core] git = "https://github.com/girlbossceo/tracing" -rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" [patch.crates-io.tracing-log] git = "https://github.com/girlbossceo/tracing" -rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91" +rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" # adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b From 3ad6aa59f9032b6f43a2e6fb2a7265d8bf3a93a1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 28 Nov 2024 05:54:34 +0000 Subject: [PATCH 0306/1248] use smallvec for db query buffering Signed-off-by: Jason Volk --- Cargo.lock | 4 +++ Cargo.toml | 11 +++++- src/database/Cargo.toml | 1 + src/database/handle.rs | 1 + src/database/keyval.rs | 35 +++++++++++++++++-- src/database/map/contains.rs | 7 ++-- src/database/map/get.rs | 10 +++--- src/database/map/insert.rs | 26 ++++++++++---- src/database/map/keys_from.rs | 13 +++---- src/database/map/keys_prefix.rs | 8 ++--- src/database/map/remove.rs | 6 ++-- src/database/map/rev_keys_from.rs | 11 +++--- src/database/map/rev_keys_prefix.rs | 8 ++--- src/database/map/rev_stream_from.rs | 11 +++--- src/database/map/rev_stream_prefix.rs | 8 ++--- src/database/map/stream_from.rs | 13 +++---- src/database/map/stream_prefix.rs | 8 ++--- src/database/mod.rs | 4 +-- src/database/pool.rs | 50 +++++++++++++++++---------- src/database/ser.rs | 16 +++------ src/service/account_data/mod.rs | 2 +- src/service/media/data.rs | 4 +-- src/service/rooms/state_cache/mod.rs | 14 ++++---- 23 files changed, 173 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 94a2b2bc..49c4127e 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -765,6 +765,7 @@ dependencies = [ "rust-rocksdb-uwu", "serde", "serde_json", + "smallvec", "tokio", "tracing", ] @@ -3956,6 +3957,9 @@ name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +dependencies = [ + "serde", +] [[package]] name = "socket2" diff --git a/Cargo.toml b/Cargo.toml index 9557ae08..0e8596f7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,16 @@ name = "conduit" [workspace.dependencies.arrayvec] version = "0.7.4" -features = ["std", "serde"] +features = ["serde"] + +[workspace.dependencies.smallvec] +version = "1.13.2" +features = [ + "const_generics", + "const_new", + "serde", + "write", +] [workspace.dependencies.const-str] version = "0.5.7" diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index c45931a5..1deaf980 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -44,6 +44,7 @@ log.workspace = true rust-rocksdb.workspace = true serde.workspace = true serde_json.workspace = true +smallvec.workspace = true tokio.workspace = true tracing.workspace = true diff --git a/src/database/handle.rs b/src/database/handle.rs index daee224d..356bd859 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -58,6 +58,7 @@ impl<'a> Deserialized for Result<&'a Handle<'a>> { } impl<'a> Deserialized for &'a Handle<'a> { + #[inline] fn map_de(self, f: F) -> Result where F: FnOnce(T) -> U, diff --git a/src/database/keyval.rs b/src/database/keyval.rs index a288f184..d4568600 100644 --- a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,13 +1,42 @@ use conduit::Result; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; +use smallvec::SmallVec; -use crate::de; +use crate::{de, ser}; pub type KeyVal<'a, K = &'a Slice, V = &'a Slice> = (Key<'a, K>, Val<'a, V>); pub type Key<'a, T = &'a Slice> = T; pub type Val<'a, T = &'a Slice> = T; -pub type 
Slice = [u8]; +pub type KeyBuf = KeyBuffer; +pub type ValBuf = ValBuffer; + +pub type KeyBuffer = Buffer; +pub type ValBuffer = Buffer; +pub type Buffer = SmallVec<[Byte; CAP]>; + +pub type Slice = [Byte]; +pub type Byte = u8; + +pub const KEY_STACK_CAP: usize = 128; +pub const VAL_STACK_CAP: usize = 512; +pub const DEF_STACK_CAP: usize = KEY_STACK_CAP; + +#[inline] +pub fn serialize_key(val: T) -> Result +where + T: Serialize, +{ + ser::serialize_to::(val) +} + +#[inline] +pub fn serialize_val(val: T) -> Result +where + T: Serialize, +{ + ser::serialize_to::(val) +} #[inline] pub(crate) fn _expect_deserialize<'a, K, V>(kv: Result>) -> KeyVal<'a, K, V> diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index 7acef2f6..7016b744 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -9,23 +9,25 @@ use conduit::{ use futures::FutureExt; use serde::Serialize; -use crate::ser; +use crate::{keyval::KeyBuf, ser}; /// Returns true if the map contains the key. /// - key is serialized into allocated buffer /// - harder errors may not be reported +#[inline] #[implement(super::Map)] pub fn contains(self: &Arc, key: &K) -> impl Future + Send + '_ where K: Serialize + ?Sized + Debug, { - let mut buf = Vec::::with_capacity(64); + let mut buf = KeyBuf::new(); self.bcontains(key, &mut buf) } /// Returns true if the map contains the key. /// - key is serialized into stack-buffer /// - harder errors will panic +#[inline] #[implement(super::Map)] pub fn acontains(self: &Arc, key: &K) -> impl Future + Send + '_ where @@ -51,6 +53,7 @@ where /// Returns Ok if the map contains the key. 
/// - key is raw +#[inline] #[implement(super::Map)] pub fn exists<'a, K>(self: &'a Arc, key: &K) -> impl Future + Send + 'a where diff --git a/src/database/map/get.rs b/src/database/map/get.rs index a00eecfa..befc0b24 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -7,6 +7,7 @@ use rocksdb::DBPinnableSlice; use serde::Serialize; use crate::{ + keyval::KeyBuf, ser, util::{is_incomplete, map_err, or_else}, Handle, @@ -18,11 +19,12 @@ type RocksdbResult<'a> = Result>, rocksdb::Error>; /// asynchronously. The key is serialized into an allocated buffer to perform /// the query. #[implement(super::Map)] +#[inline] pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send where K: Serialize + ?Sized + Debug, { - let mut buf = Vec::::with_capacity(64); + let mut buf = KeyBuf::new(); self.bqry(key, &mut buf) } @@ -30,6 +32,7 @@ where /// asynchronously. The key is serialized into a fixed-sized buffer to perform /// the query. The maximum size is supplied as const generic parameter. 
#[implement(super::Map)] +#[inline] pub fn aqry(self: &Arc, key: &K) -> impl Future>> + Send where K: Serialize + ?Sized + Debug, @@ -69,11 +72,8 @@ where debug_assert!(matches!(cached, Ok(None)), "expected status Incomplete"); let cmd = Cmd::Get(Get { map: self.clone(), + key: key.as_ref().into(), res: None, - key: key - .as_ref() - .try_into() - .expect("failed to copy key into buffer"), }); self.db.pool.execute(cmd).boxed() diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 9bebe7cf..5b2e816c 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -10,20 +10,25 @@ use conduit::implement; use rocksdb::WriteBatchWithTransaction; use serde::Serialize; -use crate::{ser, util::or_else}; +use crate::{ + keyval::{KeyBuf, ValBuf}, + ser, + util::or_else, +}; /// Insert Key/Value /// /// - Key is serialized /// - Val is serialized #[implement(super::Map)] +#[inline] pub fn put(&self, key: K, val: V) where K: Serialize + Debug, V: Serialize, { - let mut key_buf = Vec::new(); - let mut val_buf = Vec::new(); + let mut key_buf = KeyBuf::new(); + let mut val_buf = ValBuf::new(); self.bput(key, val, (&mut key_buf, &mut val_buf)); } @@ -32,12 +37,13 @@ where /// - Key is serialized /// - Val is raw #[implement(super::Map)] +#[inline] pub fn put_raw(&self, key: K, val: V) where K: Serialize + Debug, V: AsRef<[u8]>, { - let mut key_buf = Vec::new(); + let mut key_buf = KeyBuf::new(); self.bput_raw(key, val, &mut key_buf); } @@ -46,12 +52,13 @@ where /// - Key is raw /// - Val is serialized #[implement(super::Map)] +#[inline] pub fn raw_put(&self, key: K, val: V) where K: AsRef<[u8]>, V: Serialize, { - let mut val_buf = Vec::new(); + let mut val_buf = ValBuf::new(); self.raw_bput(key, val, &mut val_buf); } @@ -60,12 +67,13 @@ where /// - Key is serialized /// - Val is serialized to stack-buffer #[implement(super::Map)] +#[inline] pub fn put_aput(&self, key: K, val: V) where K: Serialize + Debug, V: Serialize, { - let mut key_buf = 
Vec::new(); + let mut key_buf = KeyBuf::new(); let mut val_buf = ArrayVec::::new(); self.bput(key, val, (&mut key_buf, &mut val_buf)); } @@ -75,13 +83,14 @@ where /// - Key is serialized to stack-buffer /// - Val is serialized #[implement(super::Map)] +#[inline] pub fn aput_put(&self, key: K, val: V) where K: Serialize + Debug, V: Serialize, { let mut key_buf = ArrayVec::::new(); - let mut val_buf = Vec::new(); + let mut val_buf = ValBuf::new(); self.bput(key, val, (&mut key_buf, &mut val_buf)); } @@ -90,6 +99,7 @@ where /// - Key is serialized to stack-buffer /// - Val is serialized to stack-buffer #[implement(super::Map)] +#[inline] pub fn aput(&self, key: K, val: V) where K: Serialize + Debug, @@ -105,6 +115,7 @@ where /// - Key is serialized to stack-buffer /// - Val is raw #[implement(super::Map)] +#[inline] pub fn aput_raw(&self, key: K, val: V) where K: Serialize + Debug, @@ -119,6 +130,7 @@ where /// - Key is raw /// - Val is serialized to stack-buffer #[implement(super::Map)] +#[inline] pub fn raw_aput(&self, key: K, val: V) where K: AsRef<[u8]>, diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 630bf3fb..093f7fd6 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -4,7 +4,10 @@ use conduit::{implement, Result}; use futures::{Stream, StreamExt}; use serde::{Deserialize, Serialize}; -use crate::{keyval, keyval::Key, ser, stream}; +use crate::{ + keyval::{result_deserialize_key, serialize_key, Key}, + stream, +}; #[implement(super::Map)] pub fn keys_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send @@ -12,8 +15,7 @@ where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, { - self.keys_from_raw(from) - .map(keyval::result_deserialize_key::) + self.keys_from_raw(from).map(result_deserialize_key::) } #[implement(super::Map)] @@ -22,7 +24,7 @@ pub fn keys_from_raw

    (&self, from: &P) -> impl Stream>> where P: Serialize + ?Sized + Debug, { - let key = ser::serialize_to_vec(from).expect("failed to serialize query key"); + let key = serialize_key(from).expect("failed to serialize query key"); self.raw_keys_from(&key) } @@ -32,8 +34,7 @@ where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, { - self.raw_keys_from(from) - .map(keyval::result_deserialize_key::) + self.raw_keys_from(from).map(result_deserialize_key::) } #[implement(super::Map)] diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index df214af4..8963f002 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -8,7 +8,7 @@ use futures::{ }; use serde::{Deserialize, Serialize}; -use crate::{keyval, keyval::Key, ser}; +use crate::keyval::{result_deserialize_key, serialize_key, Key}; #[implement(super::Map)] pub fn keys_prefix<'a, K, P>(&'a self, prefix: &P) -> impl Stream>> + Send @@ -17,7 +17,7 @@ where K: Deserialize<'a> + Send, { self.keys_prefix_raw(prefix) - .map(keyval::result_deserialize_key::) + .map(result_deserialize_key::) } #[implement(super::Map)] @@ -26,7 +26,7 @@ pub fn keys_prefix_raw

    (&self, prefix: &P) -> impl Stream| future::ok(k.starts_with(&key))) } @@ -38,7 +38,7 @@ where K: Deserialize<'a> + Send + 'a, { self.raw_keys_prefix(prefix) - .map(keyval::result_deserialize_key::) + .map(result_deserialize_key::) } #[implement(super::Map)] diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index 949817a0..18080c64 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -4,18 +4,20 @@ use arrayvec::ArrayVec; use conduit::implement; use serde::Serialize; -use crate::{ser, util::or_else}; +use crate::{keyval::KeyBuf, ser, util::or_else}; #[implement(super::Map)] +#[inline] pub fn del(&self, key: K) where K: Serialize + Debug, { - let mut buf = Vec::::with_capacity(64); + let mut buf = KeyBuf::new(); self.bdel(key, &mut buf); } #[implement(super::Map)] +#[inline] pub fn adel(&self, key: K) where K: Serialize + Debug, diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index c1c6f3da..75d062b5 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -4,7 +4,10 @@ use conduit::{implement, Result}; use futures::{Stream, StreamExt}; use serde::{Deserialize, Serialize}; -use crate::{keyval, keyval::Key, ser, stream}; +use crate::{ + keyval::{result_deserialize_key, serialize_key, Key}, + stream, +}; #[implement(super::Map)] pub fn rev_keys_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send @@ -13,7 +16,7 @@ where K: Deserialize<'a> + Send, { self.rev_keys_from_raw(from) - .map(keyval::result_deserialize_key::) + .map(result_deserialize_key::) } #[implement(super::Map)] @@ -22,7 +25,7 @@ pub fn rev_keys_from_raw

    (&self, from: &P) -> impl Stream + Send, { self.rev_raw_keys_from(from) - .map(keyval::result_deserialize_key::) + .map(result_deserialize_key::) } #[implement(super::Map)] diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index 957b974e..c14909d4 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -8,7 +8,7 @@ use futures::{ }; use serde::{Deserialize, Serialize}; -use crate::{keyval, keyval::Key, ser}; +use crate::keyval::{result_deserialize_key, serialize_key, Key}; #[implement(super::Map)] pub fn rev_keys_prefix<'a, K, P>(&'a self, prefix: &P) -> impl Stream>> + Send @@ -17,7 +17,7 @@ where K: Deserialize<'a> + Send, { self.rev_keys_prefix_raw(prefix) - .map(keyval::result_deserialize_key::) + .map(result_deserialize_key::) } #[implement(super::Map)] @@ -26,7 +26,7 @@ pub fn rev_keys_prefix_raw

    (&self, prefix: &P) -> impl Stream| future::ok(k.starts_with(&key))) } @@ -38,7 +38,7 @@ where K: Deserialize<'a> + Send + 'a, { self.rev_raw_keys_prefix(prefix) - .map(keyval::result_deserialize_key::) + .map(result_deserialize_key::) } #[implement(super::Map)] diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 7ef25ee0..6ac1cd1a 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -4,7 +4,10 @@ use conduit::{implement, Result}; use futures::stream::{Stream, StreamExt}; use serde::{Deserialize, Serialize}; -use crate::{keyval, keyval::KeyVal, ser, stream}; +use crate::{ + keyval::{result_deserialize, serialize_key, KeyVal}, + stream, +}; /// Iterate key-value entries in the map starting from upper-bound. /// @@ -18,7 +21,7 @@ where V: Deserialize<'a> + Send, { self.rev_stream_from_raw(from) - .map(keyval::result_deserialize::) + .map(result_deserialize::) } /// Iterate key-value entries in the map starting from upper-bound. @@ -31,7 +34,7 @@ pub fn rev_stream_from_raw

    (&self, from: &P) -> impl Stream + Send, { self.rev_raw_stream_from(from) - .map(keyval::result_deserialize::) + .map(result_deserialize::) } /// Iterate key-value entries in the map starting from upper-bound. diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 286cedca..fd0d93ff 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -8,7 +8,7 @@ use futures::{ }; use serde::{Deserialize, Serialize}; -use crate::{keyval, keyval::KeyVal, ser}; +use crate::keyval::{result_deserialize, serialize_key, KeyVal}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -22,7 +22,7 @@ where V: Deserialize<'a> + Send, { self.rev_stream_prefix_raw(prefix) - .map(keyval::result_deserialize::) + .map(result_deserialize::) } /// Iterate key-value entries in the map where the key matches a prefix. @@ -35,7 +35,7 @@ pub fn rev_stream_prefix_raw

    (&self, prefix: &P) -> impl Stream| future::ok(k.starts_with(&key))) } @@ -54,7 +54,7 @@ where V: Deserialize<'a> + Send + 'a, { self.rev_raw_stream_prefix(prefix) - .map(keyval::result_deserialize::) + .map(result_deserialize::) } /// Iterate key-value entries in the map where the key matches a prefix. diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index fe89afe1..052a2e74 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -4,7 +4,10 @@ use conduit::{implement, Result}; use futures::stream::{Stream, StreamExt}; use serde::{Deserialize, Serialize}; -use crate::{keyval, keyval::KeyVal, ser, stream}; +use crate::{ + keyval::{result_deserialize, serialize_key, KeyVal}, + stream, +}; /// Iterate key-value entries in the map starting from lower-bound. /// @@ -17,8 +20,7 @@ where K: Deserialize<'a> + Send, V: Deserialize<'a> + Send, { - self.stream_from_raw(from) - .map(keyval::result_deserialize::) + self.stream_from_raw(from).map(result_deserialize::) } /// Iterate key-value entries in the map starting from lower-bound. @@ -31,7 +33,7 @@ pub fn stream_from_raw

    (&self, from: &P) -> impl Stream + Send, V: Deserialize<'a> + Send, { - self.raw_stream_from(from) - .map(keyval::result_deserialize::) + self.raw_stream_from(from).map(result_deserialize::) } /// Iterate key-value entries in the map starting from lower-bound. diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index ca4cfeaa..a08b1e2a 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -8,7 +8,7 @@ use futures::{ }; use serde::{Deserialize, Serialize}; -use crate::{keyval, keyval::KeyVal, ser}; +use crate::keyval::{result_deserialize, serialize_key, KeyVal}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -22,7 +22,7 @@ where V: Deserialize<'a> + Send, { self.stream_prefix_raw(prefix) - .map(keyval::result_deserialize::) + .map(result_deserialize::) } /// Iterate key-value entries in the map where the key matches a prefix. @@ -35,7 +35,7 @@ pub fn stream_prefix_raw

    (&self, prefix: &P) -> impl Stream| future::ok(k.starts_with(&key))) } @@ -54,7 +54,7 @@ where V: Deserialize<'a> + Send + 'a, { self.raw_stream_prefix(prefix) - .map(keyval::result_deserialize::) + .map(result_deserialize::) } /// Iterate key-value entries in the map where the key matches a prefix. diff --git a/src/database/mod.rs b/src/database/mod.rs index cd91fba2..de060b3a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -28,9 +28,9 @@ pub use self::{ de::{Ignore, IgnoreAll}, deserialized::Deserialized, handle::Handle, - keyval::{KeyVal, Slice}, + keyval::{serialize_key, serialize_val, KeyVal, Slice}, map::Map, - ser::{serialize, serialize_to_array, serialize_to_vec, Interfix, Json, Separator, SEP}, + ser::{serialize, serialize_to, serialize_to_vec, Interfix, Json, Separator, SEP}, }; conduit::mod_ctor! {} diff --git a/src/database/pool.rs b/src/database/pool.rs index e2cb2605..6e7a1e29 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -5,12 +5,11 @@ use std::{ thread::JoinHandle, }; -use arrayvec::ArrayVec; use async_channel::{bounded, Receiver, Sender}; use conduit::{debug, defer, err, implement, Result}; use futures::channel::oneshot; -use crate::{Handle, Map}; +use crate::{keyval::KeyBuf, Handle, Map}; pub(crate) struct Pool { workers: Mutex>>, @@ -27,7 +26,6 @@ pub(crate) struct Opts { const WORKER_THREAD_NAME: &str = "conduwuit:db"; const DEFAULT_QUEUE_SIZE: usize = 1024; const DEFAULT_WORKER_NUM: usize = 32; -const KEY_MAX_BYTES: usize = 384; #[derive(Debug)] pub(crate) enum Cmd { @@ -37,10 +35,12 @@ pub(crate) enum Cmd { #[derive(Debug)] pub(crate) struct Get { pub(crate) map: Arc, - pub(crate) key: ArrayVec, - pub(crate) res: Option>>>, + pub(crate) key: KeyBuf, + pub(crate) res: Option, } +type ResultSender = oneshot::Sender>>; + #[implement(Pool)] pub(crate) fn new(opts: &Opts) -> Result> { let queue_size = opts.queue_size.unwrap_or(DEFAULT_QUEUE_SIZE); @@ -92,7 +92,6 @@ pub(crate) fn close(self: &Arc) { receivers = 
%self.send.receiver_count(), "Closing pool channel" ); - let closing = self.send.close(); debug_assert!(closing, "channel is not closing"); @@ -116,11 +115,7 @@ pub(crate) fn close(self: &Arc) { #[tracing::instrument(skip(self, cmd), level = "trace")] pub(crate) async fn execute(&self, mut cmd: Cmd) -> Result> { let (send, recv) = oneshot::channel(); - match &mut cmd { - Cmd::Get(ref mut cmd) => { - _ = cmd.res.insert(send); - }, - }; + Self::prepare(&mut cmd, send); self.send .send(cmd) @@ -132,6 +127,15 @@ pub(crate) async fn execute(&self, mut cmd: Cmd) -> Result> { .map_err(|e| err!(error!("recv failed {e:?}")))? } +#[implement(Pool)] +fn prepare(cmd: &mut Cmd, send: ResultSender) { + match cmd { + Cmd::Get(ref mut cmd) => { + _ = cmd.res.insert(send); + }, + }; +} + #[implement(Pool)] #[tracing::instrument(skip(self))] fn worker(self: Arc, id: usize) { @@ -157,25 +161,35 @@ fn handle(&self, id: usize, cmd: &mut Cmd) { #[implement(Pool)] #[tracing::instrument(skip(self, cmd), fields(%cmd.map), level = "trace")] fn handle_get(&self, id: usize, cmd: &mut Get) { + debug_assert!(!cmd.key.is_empty(), "querying for empty key"); + + // Obtain the result channel. let chan = cmd.res.take().expect("missing result channel"); - // If the future was dropped while the command was queued then we can bail - // without any query. This makes it more efficient to use select() variants and - // pessimistic parallel queries. + // It is worth checking if the future was dropped while the command was queued + // so we can bail without paying for any query. if chan.is_canceled() { return; } + // Perform the actual database query. We reuse our database::Map interface but + // limited to the blocking calls, rather than creating another surface directly + // with rocksdb here. let result = cmd.map.get_blocking(&cmd.key); - let _sent = chan.send(into_send_result(result)).is_ok(); + + // Send the result back to the submitter. 
+ let chan_result = chan.send(into_send_result(result)); + + // If the future was dropped during the query this will fail acceptably. + let _chan_sent = chan_result.is_ok(); } fn into_send_result(result: Result>) -> Result> { // SAFETY: Necessary to send the Handle (rust_rocksdb::PinnableSlice) through // the channel. The lifetime on the handle is a device by rust-rocksdb to - // associate a database lifetime with its assets, not a function of rocksdb or - // the asset. The Handle must be dropped before the database is dropped. The - // handle must pass through recv_handle() on the other end of the channel. + // associate a database lifetime with its assets. The Handle must be dropped + // before the database is dropped. The handle must pass through recv_handle() on + // the other end of the channel. unsafe { std::mem::transmute(result) } } diff --git a/src/database/ser.rs b/src/database/ser.rs index 961d2700..a60812aa 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,28 +1,20 @@ use std::io::Write; -use arrayvec::ArrayVec; use conduit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; use serde::{ser, Serialize}; use crate::util::unhandled; #[inline] -pub fn serialize_to_array(val: T) -> Result> -where - T: Serialize, -{ - let mut buf = ArrayVec::::new(); - serialize(&mut buf, val)?; - - Ok(buf) -} +pub fn serialize_to_vec(val: T) -> Result> { serialize_to::, T>(val) } #[inline] -pub fn serialize_to_vec(val: T) -> Result> +pub fn serialize_to(val: T) -> Result where + B: Default + Write + AsRef<[u8]>, T: Serialize, { - let mut buf = Vec::with_capacity(64); + let mut buf = B::default(); serialize(&mut buf, val)?; Ok(buf) diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index b752f9b8..5dc17640 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -116,7 +116,7 @@ pub fn changes_since<'a>( &'a self, room_id: Option<&'a RoomId>, user_id: &'a UserId, since: u64, ) 
-> impl Stream + Send + 'a { let prefix = (room_id, user_id, Interfix); - let prefix = database::serialize_to_vec(prefix).expect("failed to serialize prefix"); + let prefix = database::serialize_key(prefix).expect("failed to serialize prefix"); // Skip the data that's exactly at since, because we sent that last time let first_possible = (room_id, user_id, since.saturating_add(1)); diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 9afbd708..3922dec9 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -39,14 +39,14 @@ impl Data { ) -> Result> { let dim: &[u32] = &[dim.width, dim.height]; let key = (mxc, dim, content_disposition, content_type); - let key = database::serialize_to_vec(key)?; + let key = database::serialize_key(key)?; self.mediaid_file.insert(&key, []); if let Some(user) = user { let key = (mxc, user); self.mediaid_user.put_raw(key, user); } - Ok(key) + Ok(key.to_vec()) } pub(super) async fn delete_file_mxc(&self, mxc: &Mxc<'_>) { diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 156345fe..4a33224e 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -9,7 +9,7 @@ use conduit::{ utils::{stream::TryIgnore, ReadyExt, StreamTools}, warn, Result, }; -use database::{serialize_to_vec, Deserialized, Ignore, Interfix, Json, Map}; +use database::{serialize_key, Deserialized, Ignore, Interfix, Json, Map}; use futures::{future::join4, pin_mut, stream::iter, Stream, StreamExt}; use itertools::Itertools; use ruma::{ @@ -289,10 +289,10 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { let userroom_id = (user_id, room_id); - let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); let roomuser_id = (room_id, user_id); - let 
roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); self.db.userroomid_joined.insert(&userroom_id, []); self.db.roomuserid_joined.insert(&roomuser_id, []); @@ -312,10 +312,10 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { let userroom_id = (user_id, room_id); - let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); // (timo) TODO let leftstate = Vec::>::new(); @@ -716,10 +716,10 @@ impl Service { invite_via: Option>, ) { let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_to_vec(roomuser_id).expect("failed to serialize roomuser_id"); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); let userroom_id = (user_id, room_id); - let userroom_id = serialize_to_vec(userroom_id).expect("failed to serialize userroom_id"); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); self.db .userroomid_invitestate From 2a9bb1ce11f931b1ec2e02865d4bf30ceec11175 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 28 Nov 2024 06:52:23 +0000 Subject: [PATCH 0307/1248] add configurables for frontend pool options Signed-off-by: Jason Volk --- conduwuit-example.toml | 12 ++++++++++++ src/core/config/mod.rs | 18 ++++++++++++++++++ src/database/engine.rs | 7 ++++++- src/database/pool.rs | 17 ++++++++--------- 4 files changed, 44 insertions(+), 10 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 30da80e6..a0f05ebb 100644 
--- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1338,6 +1338,18 @@ # #admin_room_notices = true +# Sets the number of worker threads in the frontend-pool of the database. +# This number should reflect the I/O capabilities of the system, +# specifically the queue-depth or the number of simultaneous requests in +# flight. Defaults to 32 or number of CPU cores, whichever is greater. +# +#db_pool_workers = 32 + +# Size of the queue feeding the database's frontend-pool. Defaults to 256 +# or eight times the number of CPU cores, whichever is greater. +# +#db_pool_queue_size = 256 + [global.tls] # Path to a valid TLS certificate file. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index fafd3396..edbb7c26 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1500,6 +1500,20 @@ pub struct Config { #[serde(default = "true_fn")] pub admin_room_notices: bool, + /// Sets the number of worker threads in the frontend-pool of the database. + /// This number should reflect the I/O capabilities of the system, + /// specifically the queue-depth or the number of simultaneous requests in + /// flight. Defaults to 32 or number of CPU cores, whichever is greater. + /// default: 32 + #[serde(default = "default_db_pool_workers")] + pub db_pool_workers: usize, + + /// Size of the queue feeding the database's frontend-pool. Defaults to 256 + /// or eight times the number of CPU cores, whichever is greater. 
+ /// default: 256 + #[serde(default = "default_db_pool_queue_size")] + pub db_pool_queue_size: usize, + #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime catchall: BTreeMap, @@ -2265,3 +2279,7 @@ fn parallelism_scaled_u32(val: u32) -> u32 { fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) } fn default_trusted_server_batch_size() -> usize { 256 } + +fn default_db_pool_workers() -> usize { sys::available_parallelism().max(32) } + +fn default_db_pool_queue_size() -> usize { sys::available_parallelism().saturating_mul(8).max(256) } diff --git a/src/database/engine.rs b/src/database/engine.rs index 837c7259..e700be62 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -103,6 +103,11 @@ impl Engine { "Opened database." ); + let pool_opts = pool::Opts { + queue_size: config.db_pool_queue_size, + worker_num: config.db_pool_workers, + }; + Ok(Arc::new(Self { server: server.clone(), row_cache, @@ -114,7 +119,7 @@ impl Engine { corks: AtomicU32::new(0), read_only: config.rocksdb_read_only, secondary: config.rocksdb_secondary, - pool: Pool::new(&pool::Opts::default())?, + pool: Pool::new(&pool_opts)?, })) } diff --git a/src/database/pool.rs b/src/database/pool.rs index 6e7a1e29..ee3e67dd 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -17,15 +17,14 @@ pub(crate) struct Pool { send: Sender, } -#[derive(Default)] pub(crate) struct Opts { - queue_size: Option, - worker_num: Option, + pub(crate) queue_size: usize, + pub(crate) worker_num: usize, } +const QUEUE_LIMIT: (usize, usize) = (1, 8192); +const WORKER_LIMIT: (usize, usize) = (1, 512); const WORKER_THREAD_NAME: &str = "conduwuit:db"; -const DEFAULT_QUEUE_SIZE: usize = 1024; -const DEFAULT_WORKER_NUM: usize = 32; #[derive(Debug)] pub(crate) enum Cmd { @@ -43,7 +42,7 @@ type ResultSender = oneshot::Sender>>; #[implement(Pool)] pub(crate) fn new(opts: &Opts) -> Result> { - let 
queue_size = opts.queue_size.unwrap_or(DEFAULT_QUEUE_SIZE); + let queue_size = opts.queue_size.clamp(QUEUE_LIMIT.0, QUEUE_LIMIT.1); let (send, recv) = bounded(queue_size); let pool = Arc::new(Self { @@ -52,7 +51,7 @@ pub(crate) fn new(opts: &Opts) -> Result> { send, }); - let worker_num = opts.worker_num.unwrap_or(DEFAULT_WORKER_NUM); + let worker_num = opts.worker_num.clamp(WORKER_LIMIT.0, WORKER_LIMIT.1); pool.spawn_until(worker_num)?; Ok(pool) @@ -147,12 +146,12 @@ fn worker(self: Arc, id: usize) { #[implement(Pool)] fn worker_loop(&self, id: usize) { while let Ok(mut cmd) = self.recv.recv_blocking() { - self.handle(id, &mut cmd); + self.worker_handle(id, &mut cmd); } } #[implement(Pool)] -fn handle(&self, id: usize, cmd: &mut Cmd) { +fn worker_handle(&self, id: usize, cmd: &mut Cmd) { match cmd { Cmd::Get(get) => self.handle_get(id, get), } From 58be22e69557f7045ad3fd433a438b33150ecd83 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 28 Nov 2024 23:33:33 +0000 Subject: [PATCH 0308/1248] fix new lints; clippy::unnecessary-map-or Signed-off-by: Jason Volk --- src/core/log/reload.rs | 2 +- src/core/utils/sys.rs | 5 +---- src/service/media/migrations.rs | 2 +- src/service/migrations.rs | 4 ++-- src/service/rooms/spaces/mod.rs | 2 +- src/service/rooms/state_accessor/data.rs | 1 - src/service/rooms/timeline/mod.rs | 2 +- src/service/sending/dest.rs | 7 ++----- src/service/users/mod.rs | 2 +- 9 files changed, 10 insertions(+), 17 deletions(-) diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs index 6d651065..12d14f48 100644 --- a/src/core/log/reload.rs +++ b/src/core/log/reload.rs @@ -54,7 +54,7 @@ impl LogLevelReloadHandles { .lock() .expect("locked") .iter() - .filter(|(name, _)| names.map_or(false, |names| names.contains(&name.as_str()))) + .filter(|(name, _)| names.is_some_and(|names| names.contains(&name.as_str()))) .for_each(|(_, handle)| { _ = handle.reload(new_value.clone()).or_else(error::else_log); }); diff --git a/src/core/utils/sys.rs 
b/src/core/utils/sys.rs index af8bd70b..f3ff08b6 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -58,8 +58,5 @@ pub unsafe fn current_exe() -> Result { /// accurate on all platforms; defaults to false. #[must_use] pub fn current_exe_deleted() -> bool { - std::env::current_exe().map_or(false, |exe| { - exe.to_str() - .map_or(false, |exe| exe.ends_with(" (deleted)")) - }) + std::env::current_exe().is_ok_and(|exe| exe.to_str().is_some_and(|exe| exe.ends_with(" (deleted)"))) } diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index f1c6da7d..3d9c395e 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -112,7 +112,7 @@ async fn handle_media_check( let old_is_symlink = || async { tokio::fs::symlink_metadata(old_path) .await - .map_or(false, |md| md.is_symlink()) + .is_ok_and(|md| md.is_symlink()) }; if config.prune_missing_media && !old_exists && !new_exists { diff --git a/src/service/migrations.rs b/src/service/migrations.rs index cded9bce..f9057036 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -413,7 +413,7 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) .rooms .state_accessor .get_member(room_id, user_id) - .map(|member| member.map_or(false, |member| member.membership == MembershipState::Join)) + .map(|member| member.is_ok_and(|member| member.membership == MembershipState::Join)) }) .collect::>() .await; @@ -426,7 +426,7 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) .rooms .state_accessor .get_member(room_id, user_id) - .map(|member| member.map_or(false, |member| member.membership == MembershipState::Join)) + .map(|member| member.is_ok_and(|member| member.membership == MembershipState::Join)) }) .collect::>() .await; diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 0ef7ddf5..2b80e3dc 100644 --- a/src/service/rooms/spaces/mod.rs +++ 
b/src/service/rooms/spaces/mod.rs @@ -757,7 +757,7 @@ fn get_parent_children_via( fn next_room_to_traverse( stack: &mut Vec)>>, parents: &mut VecDeque, ) -> Option<(OwnedRoomId, Vec)> { - while stack.last().map_or(false, Vec::is_empty) { + while stack.last().is_some_and(Vec::is_empty) { stack.pop(); parents.pop_back(); } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 1ef91221..a6c2e429 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -179,7 +179,6 @@ impl Data { } /// Returns the full room state. - #[allow(unused_qualifications)] // async traits pub(super) async fn room_state_full( &self, room_id: &RoomId, ) -> Result>> { diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 5d5566cb..c0b48b9b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -616,7 +616,7 @@ impl Service { && pdu .state_key .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) + .is_some_and(|state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: NamespaceRegex| { self.services diff --git a/src/service/sending/dest.rs b/src/service/sending/dest.rs index 234a0b90..d35350c5 100644 --- a/src/service/sending/dest.rs +++ b/src/service/sending/dest.rs @@ -24,10 +24,7 @@ pub(super) fn get_prefix(&self) -> Vec { }, Self::Appservice(server) => { let sigil = b"+"; - let len = sigil - .len() - .saturating_add(server.as_bytes().len()) - .saturating_add(1); + let len = sigil.len().saturating_add(server.len()).saturating_add(1); let mut p = Vec::with_capacity(len); p.extend_from_slice(sigil); @@ -41,7 +38,7 @@ pub(super) fn get_prefix(&self) -> Vec { .len() .saturating_add(user.as_bytes().len()) .saturating_add(1) - .saturating_add(pushkey.as_bytes().len()) + .saturating_add(pushkey.len()) .saturating_add(1); let mut p = Vec::with_capacity(len); diff --git a/src/service/users/mod.rs 
b/src/service/users/mod.rs index 1f8c56df..2462dde3 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -101,7 +101,7 @@ impl Service { .account_data .get_global(recipient_user, GlobalAccountDataEventType::IgnoredUserList) .await - .map_or(false, |ignored: IgnoredUserListEvent| { + .is_ok_and(|ignored: IgnoredUserListEvent| { ignored .content .ignored_users From 6175e72f1c056bcaee5d318a75b014855b7956ee Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 29 Nov 2024 08:26:27 +0000 Subject: [PATCH 0309/1248] simplify get_pdu() interface; eliminate unconditional Arc Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 6 ++--- src/api/client/context.rs | 2 +- src/api/client/report.rs | 2 +- src/api/client/room/event.rs | 2 +- src/api/client/search.rs | 6 +---- src/api/client/state.rs | 4 ++-- src/api/client/sync/v3.rs | 2 +- .../fetch_and_handle_outliers.rs | 3 ++- .../event_handler/handle_incoming_pdu.rs | 4 +++- .../rooms/event_handler/handle_outlier_pdu.rs | 4 ++-- src/service/rooms/event_handler/mod.rs | 13 +++++++++-- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/state_accessor/data.rs | 23 +++++++++++-------- src/service/rooms/state_accessor/mod.rs | 10 ++++---- src/service/rooms/timeline/data.rs | 9 +------- src/service/rooms/timeline/mod.rs | 11 +++------ 16 files changed, 51 insertions(+), 52 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 89e47d4e..d4c9a57b 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -266,15 +266,15 @@ pub(super) async fn get_remote_pdu( #[admin_command] pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room).await?; - let room_state = self + let room_state: Vec<_> = self .services .rooms .state_accessor .room_state_full(&room_id) .await? 
.values() - .map(|pdu| pdu.to_state_event()) - .collect::>(); + .map(PduEvent::to_state_event) + .collect(); if room_state.is_empty() { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 5b6b516e..bf87f5e1 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -103,7 +103,7 @@ pub(crate) async fn get_context_route( .collect() .await; - let lazy = once(&(base_token, (*base_event).clone())) + let lazy = once(&(base_token, base_event.clone())) .chain(events_before.iter()) .chain(events_after.iter()) .stream() diff --git a/src/api/client/report.rs b/src/api/client/report.rs index a0133704..31667323 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -137,7 +137,7 @@ pub(crate) async fn report_event_route( /// check if reporting user is in the reporting room async fn is_event_report_valid( services: &Services, event_id: &EventId, room_id: &RoomId, sender_user: &UserId, reason: Option<&String>, - score: Option, pdu: &std::sync::Arc, + score: Option, pdu: &PduEvent, ) -> Result<()> { debug_info!("Checking if report from user {sender_user} for event {event_id} in room {room_id} is valid"); diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index 0f44f25d..090c70a7 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -18,7 +18,7 @@ pub(crate) async fn get_room_event_route( event: services .rooms .timeline - .get_pdu_owned(&body.event_id) + .get_pdu(&body.event_id) .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))) .and_then(|event| async move { services diff --git a/src/api/client/search.rs b/src/api/client/search.rs index 1e5384fe..38468abb 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -181,11 +181,7 @@ async fn procure_room_state(services: &Services, room_id: &RoomId) -> Result( // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { + if let Ok(local_pdu) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await { trace!("Found {id} in db"); events_with_auth_events.push((id, Some(local_pdu), vec![])); continue; diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 4d2d75d5..19367582 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -1,10 +1,11 @@ use std::{ collections::{hash_map, BTreeMap}, + sync::Arc, time::Instant, }; use conduit::{debug, err, implement, warn, Error, Result}; -use futures::FutureExt; +use futures::{FutureExt, TryFutureExt}; use ruma::{ api::client::error::ErrorKind, events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId, }; @@ -79,6 +80,7 @@ pub async fn handle_incoming_pdu<'a>( .services .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, "") + .map_ok(Arc::new) .await?; // Procure the room version diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 2d95ff63..21504b66 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -4,7 +4,7 @@ use std::{ }; use conduit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; -use futures::future::ready; +use futures::{future::ready, TryFutureExt}; use ruma::{ api::client::error::ErrorKind, events::StateEventType, @@ -94,7 +94,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // Build map of auth events let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); for id in &incoming_pdu.auth_events { - let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { + let Ok(auth_event) = 
self.services.timeline.get_pdu(id).map_ok(Arc::new).await else { warn!("Could not find auth event {id}"); continue; }; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index f6440fe9..3fb7d5c4 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -17,7 +17,11 @@ use std::{ time::Instant, }; -use conduit::{utils::MutexMap, Err, PduEvent, Result, Server}; +use conduit::{ + utils::{MutexMap, TryFutureExtExt}, + Err, PduEvent, Result, Server, +}; +use futures::TryFutureExt; use ruma::{ events::room::create::RoomCreateEventContent, state_res::RoomVersion, EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, @@ -94,7 +98,12 @@ impl Service { async fn event_exists(&self, event_id: Arc) -> bool { self.services.timeline.pdu_exists(&event_id).await } async fn event_fetch(&self, event_id: Arc) -> Option> { - self.services.timeline.get_pdu(&event_id).await.ok() + self.services + .timeline + .get_pdu(&event_id) + .map_ok(Arc::new) + .ok() + .await } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 8bd5f7eb..03e2d2e8 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -445,7 +445,7 @@ impl Service { .into_iter() .map(at!(0)) .zip(auth_pdus.into_iter()) - .filter_map(|((event_type, state_key), pdu)| Some(((event_type, state_key), pdu.ok()?))) + .filter_map(|((event_type, state_key), pdu)| Some(((event_type, state_key), pdu.ok()?.into()))) .collect(); Ok(auth_pdus) diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index a6c2e429..6c67b856 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -46,7 +46,7 @@ impl Data { pub(super) async fn state_full( &self, shortstatehash: ShortStateHash, - ) -> Result>> { + ) -> Result> { let state = self .state_full_pdus(shortstatehash) .await? 
@@ -57,24 +57,27 @@ impl Data { Ok(state) } - pub(super) async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result>> { + pub(super) async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result> { let short_ids = self .state_full_shortids(shortstatehash) .await? .into_iter() .map(at!(1)); - let event_ids = self + let event_ids: Vec = self .services .short .multi_get_eventid_from_short(short_ids) - .await; + .await + .into_iter() + .filter_map(Result::ok) + .collect(); let full_pdus = event_ids - .into_iter() + .iter() .stream() + .then(|event_id| self.services.timeline.get_pdu(event_id)) .ready_filter_map(Result::ok) - .filter_map(|event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await.ok() }) .collect() .await; @@ -157,7 +160,7 @@ impl Data { /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). pub(super) async fn state_get( &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result { self.state_get_id(shortstatehash, event_type, state_key) .and_then(|event_id| async move { self.services.timeline.get_pdu(&event_id).await }) .await @@ -181,7 +184,7 @@ impl Data { /// Returns the full room state. pub(super) async fn room_state_full( &self, room_id: &RoomId, - ) -> Result>> { + ) -> Result> { self.services .state .get_room_shortstatehash(room_id) @@ -192,7 +195,7 @@ impl Data { /// Returns the full room state's pdus. #[allow(unused_qualifications)] // async traits - pub(super) async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result>> { + pub(super) async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result> { self.services .state .get_room_shortstatehash(room_id) @@ -215,7 +218,7 @@ impl Data { /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
pub(super) async fn room_state_get( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result { self.services .state .get_room_shortstatehash(room_id) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index e08fac66..18f999b4 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -114,7 +114,7 @@ impl Service { pub async fn state_full( &self, shortstatehash: ShortStateHash, - ) -> Result>> { + ) -> Result> { self.db.state_full(shortstatehash).await } @@ -134,7 +134,7 @@ impl Service { #[inline] pub async fn state_get( &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result { self.db .state_get(shortstatehash, event_type, state_key) .await @@ -311,13 +311,13 @@ impl Service { /// Returns the full room state. #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_full(&self, room_id: &RoomId) -> Result>> { + pub async fn room_state_full(&self, room_id: &RoomId) -> Result> { self.db.room_state_full(room_id).await } /// Returns the full room state pdus #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result>> { + pub async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result> { self.db.room_state_full_pdus(room_id).await } @@ -337,7 +337,7 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub async fn room_state_get( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result { self.db.room_state_get(room_id, event_type, state_key).await } diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index c15d8e7f..c394dc3b 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -126,14 +126,7 @@ impl Data { /// Returns the pdu. 
/// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub(super) async fn get_pdu(&self, event_id: &EventId) -> Result> { - self.get_pdu_owned(event_id).await.map(Arc::new) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub(super) async fn get_pdu_owned(&self, event_id: &EventId) -> Result { + pub(super) async fn get_pdu(&self, event_id: &EventId) -> Result { let accepted = self.get_non_outlier_pdu(event_id).boxed(); let outlier = self .eventid_outlierpdu diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index c0b48b9b..07b406c4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -242,12 +242,7 @@ impl Service { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub async fn get_pdu(&self, event_id: &EventId) -> Result> { self.db.get_pdu(event_id).await } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
- pub async fn get_pdu_owned(&self, event_id: &EventId) -> Result { self.db.get_pdu_owned(event_id).await } + pub async fn get_pdu(&self, event_id: &EventId) -> Result { self.db.get_pdu(event_id).await } /// Checks if pdu exists /// @@ -327,11 +322,11 @@ impl Service { ); unsigned.insert( String::from("prev_sender"), - CanonicalJsonValue::String(prev_state.sender.clone().to_string()), + CanonicalJsonValue::String(prev_state.sender.to_string()), ); unsigned.insert( String::from("replaces_state"), - CanonicalJsonValue::String(prev_state.event_id.clone().to_string()), + CanonicalJsonValue::String(prev_state.event_id.to_string()), ); } } From b5266ad9f5b77f5fa04b305ebbb0ed29e862396f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 29 Nov 2024 03:25:29 +0000 Subject: [PATCH 0310/1248] parallelize sender edu selection Signed-off-by: Jason Volk --- src/service/sending/sender.rs | 109 +++++++++++++++++++++------------- 1 file changed, 68 insertions(+), 41 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 3c544725..b1e909c9 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -1,7 +1,7 @@ use std::{ - cmp, collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, + sync::atomic::{AtomicU64, AtomicUsize, Ordering}, time::{Duration, Instant}, }; @@ -13,7 +13,12 @@ use conduit::{ utils::{calculate_hash, math::continue_exponential_backoff_secs, ReadyExt}, warn, Error, Result, }; -use futures::{future::BoxFuture, pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; +use futures::{ + future::{BoxFuture, OptionFuture}, + join, pin_mut, + stream::FuturesUnordered, + FutureExt, StreamExt, +}; use ruma::{ api::{ appservice::event::push_events::v1::Edu as RumaEdu, @@ -261,33 +266,45 @@ impl Service { #[tracing::instrument(skip_all, level = "debug")] async fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { - // u64: count of last edu + // selection window let since = 
self.db.get_latest_educount(server_name).await; - let mut max_edu_count = since; - let mut events = Vec::new(); + let since_upper = self.services.globals.current_count()?; + let batch = (since, since_upper); + debug_assert!(batch.0 <= batch.1, "since range must not be negative"); - self.select_edus_device_changes(server_name, since, &mut max_edu_count, &mut events) - .await; + let events_len = AtomicUsize::default(); + let max_edu_count = AtomicU64::new(since); - if self.server.config.allow_outgoing_read_receipts { - self.select_edus_receipts(server_name, since, &mut max_edu_count, &mut events) - .await; - } + let device_changes = self.select_edus_device_changes(server_name, batch, &max_edu_count, &events_len); - if self.server.config.allow_outgoing_presence { - self.select_edus_presence(server_name, since, &mut max_edu_count, &mut events) - .await; - } + let receipts: OptionFuture<_> = self + .server + .config + .allow_outgoing_read_receipts + .then(|| self.select_edus_receipts(server_name, batch, &max_edu_count)) + .into(); - Ok((events, max_edu_count)) + let presence: OptionFuture<_> = self + .server + .config + .allow_outgoing_presence + .then(|| self.select_edus_presence(server_name, batch, &max_edu_count)) + .into(); + + let (device_changes, receipts, presence) = join!(device_changes, receipts, presence); + + let mut events = device_changes; + events.extend(presence.into_iter().flatten()); + events.extend(receipts.into_iter().flatten()); + + Ok((events, max_edu_count.load(Ordering::Acquire))) } /// Look for presence async fn select_edus_device_changes( - &self, server_name: &ServerName, since: u64, max_edu_count: &mut u64, events: &mut Vec>, - ) { - debug_assert!(events.len() < SELECT_EDU_LIMIT, "called when edu limit reached"); - + &self, server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, events_len: &AtomicUsize, + ) -> Vec> { + let mut events = Vec::new(); let server_rooms = self.services.state_cache.server_rooms(server_name); 
pin_mut!(server_rooms); @@ -296,12 +313,16 @@ impl Service { let keys_changed = self .services .users - .room_keys_changed(room_id, since, None) + .room_keys_changed(room_id, since.0, None) .ready_filter(|(user_id, _)| self.services.globals.user_is_local(user_id)); pin_mut!(keys_changed); while let Some((user_id, count)) = keys_changed.next().await { - *max_edu_count = cmp::max(count, *max_edu_count); + if count > since.1 { + break; + } + + max_edu_count.fetch_max(count, Ordering::Relaxed); if !device_list_changes.insert(user_id.into()) { continue; } @@ -321,19 +342,19 @@ impl Service { let edu = serde_json::to_vec(&edu).expect("failed to serialize device list update to JSON"); events.push(edu); - if events.len() >= SELECT_EDU_LIMIT { - return; + if events_len.fetch_add(1, Ordering::Relaxed) >= SELECT_EDU_LIMIT - 1 { + return events; } } } + + events } /// Look for read receipts in this room async fn select_edus_receipts( - &self, server_name: &ServerName, since: u64, max_edu_count: &mut u64, events: &mut Vec>, - ) { - debug_assert!(events.len() < EDU_LIMIT, "called when edu limit reached"); - + &self, server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, + ) -> Option> { let server_rooms = self.services.state_cache.server_rooms(server_name); pin_mut!(server_rooms); @@ -350,7 +371,7 @@ impl Service { } if receipts.is_empty() { - return; + return None; } let receipt_content = Edu::Receipt(ReceiptContent { @@ -360,22 +381,26 @@ impl Service { let receipt_content = serde_json::to_vec(&receipt_content).expect("Failed to serialize Receipt EDU to JSON vec"); - events.push(receipt_content); + Some(receipt_content) } /// Look for read receipts in this room async fn select_edus_receipts_room( - &self, room_id: &RoomId, since: u64, max_edu_count: &mut u64, num: &mut usize, + &self, room_id: &RoomId, since: (u64, u64), max_edu_count: &AtomicU64, num: &mut usize, ) -> ReceiptMap { let receipts = self .services .read_receipt - .readreceipts_since(room_id, 
since); + .readreceipts_since(room_id, since.0); pin_mut!(receipts); let mut read = BTreeMap::::new(); while let Some((user_id, count, read_receipt)) = receipts.next().await { - *max_edu_count = cmp::max(count, *max_edu_count); + if count > since.1 { + break; + } + + max_edu_count.fetch_max(count, Ordering::Relaxed); if !self.services.globals.user_is_local(user_id) { continue; } @@ -423,16 +448,18 @@ impl Service { /// Look for presence async fn select_edus_presence( - &self, server_name: &ServerName, since: u64, max_edu_count: &mut u64, events: &mut Vec>, - ) { - debug_assert!(events.len() < EDU_LIMIT, "called when edu limit reached"); - - let presence_since = self.services.presence.presence_since(since); + &self, server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, + ) -> Option> { + let presence_since = self.services.presence.presence_since(since.0); pin_mut!(presence_since); let mut presence_updates = HashMap::::new(); while let Some((user_id, count, presence_bytes)) = presence_since.next().await { - *max_edu_count = cmp::max(count, *max_edu_count); + if count > since.1 { + break; + } + + max_edu_count.fetch_max(count, Ordering::Relaxed); if !self.services.globals.user_is_local(user_id) { continue; } @@ -474,7 +501,7 @@ impl Service { } if presence_updates.is_empty() { - return; + return None; } let presence_content = Edu::Presence(PresenceContent { @@ -483,7 +510,7 @@ impl Service { let presence_content = serde_json::to_vec(&presence_content).expect("failed to serialize Presence EDU to JSON"); - events.push(presence_content); + Some(presence_content) } async fn send_events(&self, dest: Destination, events: Vec) -> SendingResult { From 4a3cc9fffa62636ba1c3e76494142da226f9aefd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 30 Nov 2024 08:09:51 +0000 Subject: [PATCH 0311/1248] de-arc state_full_ids Signed-off-by: Jason Volk --- src/api/client/context.rs | 6 ++--- src/api/client/sync/v3.rs | 8 +++--- src/api/client/sync/v4.rs | 6 ++--- 
src/api/server/send_join.rs | 6 ++--- src/api/server/state.rs | 10 ++++--- src/api/server/state_ids.rs | 5 ++-- src/service/rooms/spaces/mod.rs | 4 +-- src/service/rooms/state_accessor/data.rs | 34 +++++++++++++++++------- src/service/rooms/state_accessor/mod.rs | 29 ++++++++++++++------ 9 files changed, 69 insertions(+), 39 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index bf87f5e1..652e17f4 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,4 +1,4 @@ -use std::iter::once; +use std::{collections::HashMap, iter::once}; use axum::extract::State; use conduit::{ @@ -10,7 +10,7 @@ use futures::{future::try_join, StreamExt, TryFutureExt}; use ruma::{ api::client::{context::get_context, filter::LazyLoadOptions}, events::StateEventType, - UserId, + OwnedEventId, UserId, }; use crate::{ @@ -124,7 +124,7 @@ pub(crate) async fn get_context_route( .await .map_err(|e| err!(Database("State hash not found: {e}")))?; - let state_ids = services + let state_ids: HashMap<_, OwnedEventId> = services .rooms .state_accessor .state_full_ids(shortstatehash) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 9c1cefdb..5578077f 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -32,7 +32,7 @@ use ruma::{ TimelineEventType::*, }, serde::Raw, - uint, DeviceId, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId, + uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, }; use tracing::{Instrument as _, Span}; @@ -398,7 +398,7 @@ async fn handle_left_room( Err(_) => HashMap::new(), }; - let Ok(left_event_id) = services + let Ok(left_event_id): Result = services .rooms .state_accessor .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) @@ -666,7 +666,7 @@ async fn load_joined_room( let (joined_member_count, invited_member_count, heroes) = calculate_counts().await?; - let current_state_ids = services + let current_state_ids: HashMap<_, 
OwnedEventId> = services .rooms .state_accessor .state_full_ids(current_shortstatehash) @@ -736,7 +736,7 @@ async fn load_joined_room( let mut delta_state_events = Vec::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = services + let current_state_ids: HashMap<_, OwnedEventId> = services .rooms .state_accessor .state_full_ids(current_shortstatehash) diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 62c313e2..14d79c19 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -1,6 +1,6 @@ use std::{ cmp::{self, Ordering}, - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, time::Duration, }; @@ -30,7 +30,7 @@ use ruma::{ TimelineEventType::{self, *}, }, state_res::Event, - uint, MilliSecondsSinceUnixEpoch, OwnedRoomId, UInt, UserId, + uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, UInt, UserId, }; use service::{rooms::read_receipt::pack_receipts, Services}; @@ -211,7 +211,7 @@ pub(crate) async fn sync_events_v4_route( let new_encrypted_room = encrypted_room && since_encryption.is_err(); if encrypted_room { - let current_state_ids = services + let current_state_ids: HashMap<_, OwnedEventId> = services .rooms .state_accessor .state_full_ids(current_shortstatehash) diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 0ad07b1e..92ab3b50 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -1,6 +1,6 @@ #![allow(deprecated)] -use std::borrow::Borrow; +use std::{borrow::Borrow, collections::HashMap}; use axum::extract::State; use conduit::{err, pdu::gen_event_id_canonical_json, utils::IterStream, warn, Error, Result}; @@ -11,7 +11,7 @@ use ruma::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, }, - CanonicalJsonValue, OwnedServerName, OwnedUserId, RoomId, ServerName, + CanonicalJsonValue, OwnedEventId, OwnedServerName, OwnedUserId, RoomId, ServerName, }; use 
serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use service::Services; @@ -165,7 +165,7 @@ async fn create_join_event( drop(mutex_lock); - let state_ids = services + let state_ids: HashMap<_, OwnedEventId> = services .rooms .state_accessor .state_full_ids(shortstatehash) diff --git a/src/api/server/state.rs b/src/api/server/state.rs index b21fce68..400b9237 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -3,7 +3,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; use conduit::{err, result::LogErr, utils::IterStream, Result}; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::api::federation::event::get_room_state; +use ruma::{api::federation::event::get_room_state, OwnedEventId}; use super::AccessCheck; use crate::Ruma; @@ -30,14 +30,18 @@ pub(crate) async fn get_room_state_route( .await .map_err(|_| err!(Request(NotFound("PDU state not found."))))?; - let pdus = services + let state_ids: Vec = services .rooms .state_accessor .state_full_ids(shortstatehash) .await .log_err() .map_err(|_| err!(Request(NotFound("PDU state IDs not found."))))? 
- .values() + .into_values() + .collect(); + + let pdus = state_ids + .iter() .try_stream() .and_then(|id| services.rooms.timeline.get_pdu_json(id)) .and_then(|pdu| { diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 0c023bf0..55662a40 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -3,7 +3,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; use conduit::{err, Result}; use futures::StreamExt; -use ruma::api::federation::event::get_room_state_ids; +use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; use super::AccessCheck; use crate::Ruma; @@ -31,14 +31,13 @@ pub(crate) async fn get_room_state_ids_route( .await .map_err(|_| err!(Request(NotFound("Pdu state not found."))))?; - let pdu_ids = services + let pdu_ids: Vec = services .rooms .state_accessor .state_full_ids(shortstatehash) .await .map_err(|_| err!(Request(NotFound("State ids not found"))))? .into_values() - .map(|id| (*id).to_owned()) .collect(); let auth_chain_ids = services diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 2b80e3dc..3e972ca6 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,7 +1,7 @@ mod tests; use std::{ - collections::VecDeque, + collections::{HashMap, VecDeque}, fmt::{Display, Formatter}, str::FromStr, sync::Arc, @@ -572,7 +572,7 @@ impl Service { return Ok(None); }; - let state = self + let state: HashMap<_, Arc<_>> = self .services .state_accessor .state_full_ids(current_shortstatehash) diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 6c67b856..7760d5b6 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{borrow::Borrow, collections::HashMap, sync::Arc}; use conduit::{ at, err, @@ -8,6 +8,7 @@ use conduit::{ use database::{Deserialized, Map}; use 
futures::{StreamExt, TryFutureExt}; use ruma::{events::StateEventType, EventId, OwnedEventId, RoomId}; +use serde::Deserialize; use crate::{ rooms, @@ -84,7 +85,11 @@ impl Data { Ok(full_pdus) } - pub(super) async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result>> { + pub(super) async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result> + where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + ::Owned: Borrow, + { let short_ids = self.state_full_shortids(shortstatehash).await?; let event_ids = self @@ -123,11 +128,15 @@ impl Data { Ok(shortids) } - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - #[allow(clippy::unused_self)] - pub(super) async fn state_get_id( + /// Returns a single EventId from `room_id` with key + /// (`event_type`,`state_key`). + pub(super) async fn state_get_id( &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result + where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + ::Owned: Borrow, + { let shortstatekey = self .services .short @@ -162,7 +171,7 @@ impl Data { &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result { self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id| async move { self.services.timeline.get_pdu(&event_id).await }) + .and_then(|event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await }) .await } @@ -204,10 +213,15 @@ impl Data { .await } - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub(super) async fn room_state_get_id( + /// Returns a single EventId from `room_id` with key + /// (`event_type`,`state_key`). 
+ pub(super) async fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result + where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + ::Owned: Borrow, + { self.services .state .get_room_shortstatehash(room_id) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 18f999b4..e42d3764 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,6 +1,7 @@ mod data; use std::{ + borrow::Borrow, collections::HashMap, fmt::Write, sync::{Arc, Mutex as StdMutex, Mutex}, @@ -101,8 +102,12 @@ impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result>> { - self.db.state_full_ids(shortstatehash).await + pub async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result> + where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + ::Owned: Borrow, + { + self.db.state_full_ids::(shortstatehash).await } #[inline] @@ -118,12 +123,16 @@ impl Service { self.db.state_full(shortstatehash).await } - /// Returns a single PDU from `room_id` with key (`event_type`, + /// Returns a single EventId from `room_id` with key (`event_type`, /// `state_key`). 
#[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_id( + pub async fn state_get_id( &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result + where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + ::Owned: Borrow, + { self.db .state_get_id(shortstatehash, event_type, state_key) .await @@ -321,12 +330,16 @@ impl Service { self.db.room_state_full_pdus(room_id).await } - /// Returns a single PDU from `room_id` with key (`event_type`, + /// Returns a single EventId from `room_id` with key (`event_type`, /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get_id( + pub async fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result + where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + ::Owned: Borrow, + { self.db .room_state_get_id(room_id, event_type, state_key) .await From 9263439af8e7409bc2e6cf0ada82e44f5989374e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 1 Dec 2024 10:49:46 +0000 Subject: [PATCH 0312/1248] fix is_matching macro argument designator Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 18c2dd6f..1ab7d717 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -118,8 +118,8 @@ macro_rules! 
is_matching { |x| matches!(x, $val) }; - ($val:expr) => { - |x| matches!(x, $val) + ($($val:tt)+) => { + |x| matches!(x, $($val)+) }; } From 3fbd74310f866c8f6292c17a89007f396aa748e1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 1 Dec 2024 09:18:35 +0000 Subject: [PATCH 0313/1248] impl transposed form of MapExpect Signed-off-by: Jason Volk --- src/core/utils/result/map_expect.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/core/utils/result/map_expect.rs b/src/core/utils/result/map_expect.rs index 9cd498f7..b315ad3d 100644 --- a/src/core/utils/result/map_expect.rs +++ b/src/core/utils/result/map_expect.rs @@ -13,3 +13,8 @@ impl<'a, T, E: Debug> MapExpect<'a, Option> for Option> { #[inline] fn map_expect(self, msg: &'a str) -> Option { self.map(|result| result.expect(msg)) } } + +impl<'a, T, E: Debug> MapExpect<'a, Result> for Result, E> { + #[inline] + fn map_expect(self, msg: &'a str) -> Result { self.map(|result| result.expect(msg)) } +} From 3b30bd3580af5082c8878ef490818411217648e3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 2 Dec 2024 10:49:08 +0000 Subject: [PATCH 0314/1248] add try_filter_map to TryReadyExt Signed-off-by: Jason Volk --- src/core/utils/stream/try_ready.rs | 35 +++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 0daed26e..b0a2b6c5 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -3,7 +3,7 @@ use futures::{ future::{ready, Ready}, - stream::{AndThen, TryFold, TryForEach, TryStream, TryStreamExt}, + stream::{AndThen, TryFilterMap, TryFold, TryForEach, TryStream, TryStreamExt}, }; use crate::Result; @@ -20,11 +20,11 @@ where where F: Fn(S::Ok) -> Result; - fn ready_try_for_each( + fn ready_try_filter_map( self, f: F, - ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> + ) -> TryFilterMap, E>>, impl FnMut(S::Ok) -> Ready, E>>> where - F: FnMut(S::Ok) -> 
Result<(), E>; + F: Fn(S::Ok) -> Result, E>; fn ready_try_fold( self, init: U, f: F, @@ -38,6 +38,12 @@ where where F: Fn(U, S::Ok) -> Result, U: Default; + + fn ready_try_for_each( + self, f: F, + ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> + where + F: FnMut(S::Ok) -> Result<(), E>; } impl TryReadyExt for S @@ -53,14 +59,13 @@ where self.and_then(move |t| ready(f(t))) } - #[inline] - fn ready_try_for_each( - self, mut f: F, - ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> + fn ready_try_filter_map( + self, f: F, + ) -> TryFilterMap, E>>, impl FnMut(S::Ok) -> Ready, E>>> where - F: FnMut(S::Ok) -> Result<(), E>, + F: Fn(S::Ok) -> Result, E>, { - self.try_for_each(move |t| ready(f(t))) + self.try_filter_map(move |t| ready(f(t))) } #[inline] @@ -83,4 +88,14 @@ where { self.ready_try_fold(U::default(), f) } + + #[inline] + fn ready_try_for_each( + self, mut f: F, + ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> + where + F: FnMut(S::Ok) -> Result<(), E>, + { + self.try_for_each(move |t| ready(f(t))) + } } From 61d9ac66fa367128fc539faf965d5422ef154f39 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 2 Dec 2024 11:54:03 +0000 Subject: [PATCH 0315/1248] add ref_at util macro Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 1ab7d717..f9310243 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -79,6 +79,13 @@ macro_rules! at { }; } +#[macro_export] +macro_rules! ref_at { + ($idx:tt) => { + |ref t| &t.$idx + }; +} + /// Functor for equality i.e. .is_some_and(is_equal!(2)) #[macro_export] macro_rules! 
is_equal_to { From 7d6710c03346f7157c74538b61e407c1b8536f64 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 30 Nov 2024 08:27:51 +0000 Subject: [PATCH 0316/1248] add broadband stream extensions Signed-off-by: Jason Volk --- src/core/utils/stream/broadband.rs | 84 ++++++++++++++++++++++++++++++ src/core/utils/stream/mod.rs | 4 ++ src/core/utils/stream/wideband.rs | 84 ++++++++++++++++++++++++++++++ 3 files changed, 172 insertions(+) create mode 100644 src/core/utils/stream/broadband.rs create mode 100644 src/core/utils/stream/wideband.rs diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs new file mode 100644 index 00000000..ce17830c --- /dev/null +++ b/src/core/utils/stream/broadband.rs @@ -0,0 +1,84 @@ +//! Broadband stream combinator extensions to futures::Stream +#![allow(clippy::type_complexity)] + +use std::convert::identity; + +use futures::{ + stream::{Stream, StreamExt}, + Future, +}; + +use super::ReadyExt; + +const WIDTH: usize = 32; + +/// Concurrency extensions to augment futures::StreamExt. 
broad_ combinators +/// produce out-of-order +pub trait BroadbandExt +where + Self: Stream + Send + Sized, +{ + /// Concurrent filter_map(); unordered results + fn broadn_filter_map(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future> + Send, + U: Send; + + fn broadn_then(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + U: Send; + + #[inline] + fn broad_filter_map(self, f: F) -> impl Stream + Send + where + F: Fn(Item) -> Fut + Send, + Fut: Future> + Send, + U: Send, + { + self.broadn_filter_map(None, f) + } + + #[inline] + fn broad_then(self, f: F) -> impl Stream + Send + where + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + U: Send, + { + self.broadn_then(None, f) + } +} + +impl BroadbandExt for S +where + S: Stream + Send + Sized, +{ + #[inline] + fn broadn_filter_map(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future> + Send, + U: Send, + { + self.map(f) + .buffer_unordered(n.into().unwrap_or(WIDTH)) + .ready_filter_map(identity) + } + + #[inline] + fn broadn_then(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + U: Send, + { + self.map(f).buffer_unordered(n.into().unwrap_or(WIDTH)) + } +} diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 1111915b..45c2110d 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -1,3 +1,4 @@ +mod broadband; mod cloned; mod expect; mod ignore; @@ -5,7 +6,9 @@ mod iter_stream; mod ready; mod tools; mod try_ready; +mod wideband; +pub use broadband::BroadbandExt; pub use cloned::Cloned; pub use expect::TryExpect; pub use ignore::TryIgnore; @@ -13,3 +16,4 @@ pub use iter_stream::IterStream; pub use ready::ReadyExt; pub use tools::Tools; pub use try_ready::TryReadyExt; +pub use wideband::WidebandExt; diff --git a/src/core/utils/stream/wideband.rs 
b/src/core/utils/stream/wideband.rs new file mode 100644 index 00000000..100990b8 --- /dev/null +++ b/src/core/utils/stream/wideband.rs @@ -0,0 +1,84 @@ +//! Wideband stream combinator extensions to futures::Stream +#![allow(clippy::type_complexity)] + +use std::convert::identity; + +use futures::{ + stream::{Stream, StreamExt}, + Future, +}; + +use super::ReadyExt; + +const WIDTH: usize = 32; + +/// Concurrency extensions to augment futures::StreamExt. wideband_ combinators +/// produce in-order. +pub trait WidebandExt +where + Self: Stream + Send + Sized, +{ + /// Concurrent filter_map(); ordered results + fn widen_filter_map(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future> + Send, + U: Send; + + fn widen_then(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + U: Send; + + #[inline] + fn wide_filter_map(self, f: F) -> impl Stream + Send + where + F: Fn(Item) -> Fut + Send, + Fut: Future> + Send, + U: Send, + { + self.widen_filter_map(None, f) + } + + #[inline] + fn wide_then(self, f: F) -> impl Stream + Send + where + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + U: Send, + { + self.widen_then(None, f) + } +} + +impl WidebandExt for S +where + S: Stream + Send + Sized, +{ + #[inline] + fn widen_filter_map(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future> + Send, + U: Send, + { + self.map(f) + .buffered(n.into().unwrap_or(WIDTH)) + .ready_filter_map(identity) + } + + #[inline] + fn widen_then(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + U: Send, + { + self.map(f).buffered(n.into().unwrap_or(WIDTH)) + } +} From 89a158ab0b7ce47849b6b281e91fce584f3d4e93 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 30 Nov 2024 11:54:54 +0000 Subject: [PATCH 0317/1248] add delay before starting updates check Signed-off-by: Jason Volk --- 
src/service/updates/mod.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index adc85fe6..82decd26 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduit::{debug, info, warn, Result}; +use conduit::{debug, info, warn, Result, Server}; use database::{Deserialized, Map}; use ruma::events::room::message::RoomMessageEventContent; use serde::Deserialize; @@ -23,6 +23,7 @@ struct Services { admin: Dep, client: Dep, globals: Dep, + server: Arc, } #[derive(Debug, Deserialize)] @@ -52,11 +53,12 @@ impl crate::Service for Service { globals: args.depend::("globals"), admin: args.depend::("admin"), client: args.depend::("client"), + server: args.server.clone(), }, })) } - #[tracing::instrument(skip_all, name = "updates", level = "trace")] + #[tracing::instrument(skip_all, name = "updates", level = "debug")] async fn worker(self: Arc) -> Result<()> { if !self.services.globals.allow_check_for_updates() { debug!("Disabling update check"); @@ -65,6 +67,7 @@ impl crate::Service for Service { let mut i = interval(self.interval); i.set_missed_tick_behavior(MissedTickBehavior::Delay); + i.reset_after(self.interval); loop { tokio::select! 
{ () = self.interrupt.notified() => break, @@ -85,8 +88,10 @@ impl crate::Service for Service { } impl Service { - #[tracing::instrument(skip_all, level = "trace")] + #[tracing::instrument(skip_all)] async fn check(&self) -> Result<()> { + debug_assert!(self.services.server.running(), "server must not be shutting down"); + let response = self .services .client @@ -108,6 +113,7 @@ impl Service { Ok(()) } + #[tracing::instrument(skip_all)] async fn handle(&self, update: &CheckForUpdatesResponseEntry) { info!("{} {:#}", update.date, update.message); self.services From 9a9c071e8204604d617b213c6a977f5649ba2fc0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 30 Nov 2024 03:16:57 +0000 Subject: [PATCH 0318/1248] use tokio for threadpool mgmt Signed-off-by: Jason Volk --- src/database/database.rs | 2 +- src/database/engine.rs | 10 +- src/database/map/get.rs | 30 +++--- src/database/pool.rs | 204 +++++++++++++++++++++++++-------------- 4 files changed, 155 insertions(+), 91 deletions(-) diff --git a/src/database/database.rs b/src/database/database.rs index 40aec312..3df95dce 100644 --- a/src/database/database.rs +++ b/src/database/database.rs @@ -16,7 +16,7 @@ pub struct Database { impl Database { /// Load an existing database or create a new one. 
pub async fn open(server: &Arc) -> Result> { - let db = Engine::open(server)?; + let db = Engine::open(server).await?; Ok(Arc::new(Self { db: db.clone(), maps: maps::open(&db)?, diff --git a/src/database/engine.rs b/src/database/engine.rs index e700be62..a6ed7d86 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -23,7 +23,7 @@ use crate::{ }; pub struct Engine { - server: Arc, + pub(crate) server: Arc, row_cache: Cache, col_cache: RwLock>, opts: Options, @@ -40,7 +40,7 @@ pub(crate) type Db = DBWithThreadMode; impl Engine { #[tracing::instrument(skip_all)] - pub(crate) fn open(server: &Arc) -> Result> { + pub(crate) async fn open(server: &Arc) -> Result> { let config = &server.config; let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0; @@ -119,7 +119,7 @@ impl Engine { corks: AtomicU32::new(0), read_only: config.rocksdb_read_only, secondary: config.rocksdb_secondary, - pool: Pool::new(&pool_opts)?, + pool: Pool::new(server, &pool_opts).await?, })) } @@ -305,7 +305,7 @@ pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result<()> { Ok(()) } -#[tracing::instrument(skip_all, name = "rocksdb", level = "debug")] +#[tracing::instrument(skip(msg), name = "rocksdb", level = "trace")] pub(crate) fn handle_log(level: LogLevel, msg: &str) { let msg = msg.trim(); if msg.starts_with("Options") { @@ -325,7 +325,7 @@ impl Drop for Engine { fn drop(&mut self) { const BLOCKING: bool = true; - debug!("Joining request threads..."); + debug!("Shutting down request pool..."); self.pool.close(); debug!("Waiting for background tasks to finish..."); diff --git a/src/database/map/get.rs b/src/database/map/get.rs index befc0b24..4699fec4 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -2,7 +2,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; use conduit::{err, implement, utils::IterStream, Err, Result}; -use futures::{future, Future, FutureExt, Stream}; +use futures::{future, 
Future, FutureExt, Stream, StreamExt}; use rocksdb::DBPinnableSlice; use serde::Serialize; @@ -54,6 +54,18 @@ where self.get(key) } +#[implement(super::Map)] +#[tracing::instrument(skip(self, keys), fields(%self), level = "trace")] +pub fn get_batch<'a, I, K>(self: &'a Arc, keys: I) -> impl Stream>> + Send + 'a +where + I: Iterator + ExactSizeIterator + Debug + Send + 'a, + K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, +{ + keys.stream() + .map(move |key| self.get(key)) + .buffered(self.db.server.config.db_pool_workers.saturating_mul(2)) +} + /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. #[implement(super::Map)] @@ -80,17 +92,8 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self, keys), fields(%self), level = "trace")] -pub fn get_batch<'a, I, K>(&self, keys: I) -> impl Stream>> -where - I: Iterator + ExactSizeIterator + Debug + Send, - K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, -{ - self.get_batch_blocking(keys).stream() -} - -#[implement(super::Map)] -pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator>> +#[tracing::instrument(skip(self, keys), name = "batch_blocking", level = "trace")] +pub(crate) fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator>> + Send where I: Iterator + ExactSizeIterator + Debug + Send, K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, @@ -111,6 +114,7 @@ where /// The key is referenced directly to perform the query. This is a thread- /// blocking call. #[implement(super::Map)] +#[tracing::instrument(skip(self, key), name = "blocking", level = "trace")] pub fn get_blocking(&self, key: &K) -> Result> where K: AsRef<[u8]> + ?Sized, @@ -125,7 +129,7 @@ where /// Fetch a value from the cache without I/O. 
#[implement(super::Map)] -#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] +#[tracing::instrument(skip(self, key), name = "cache", level = "trace")] pub(crate) fn get_cached(&self, key: &K) -> Result>> where K: AsRef<[u8]> + Debug + ?Sized, diff --git a/src/database/pool.rs b/src/database/pool.rs index ee3e67dd..a9697625 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -1,20 +1,25 @@ use std::{ - convert::identity, mem::take, - sync::{Arc, Mutex}, - thread::JoinHandle, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; -use async_channel::{bounded, Receiver, Sender}; -use conduit::{debug, defer, err, implement, Result}; +use async_channel::{bounded, Receiver, RecvError, Sender}; +use conduit::{debug, debug_warn, defer, err, implement, result::DebugInspect, Result, Server}; use futures::channel::oneshot; +use tokio::{sync::Mutex, task::JoinSet}; use crate::{keyval::KeyBuf, Handle, Map}; pub(crate) struct Pool { - workers: Mutex>>, - recv: Receiver, - send: Sender, + server: Arc, + workers: Mutex>, + queue: Sender, + busy: AtomicUsize, + busy_max: AtomicUsize, + queued_max: AtomicUsize, } pub(crate) struct Opts { @@ -22,10 +27,6 @@ pub(crate) struct Opts { pub(crate) worker_num: usize, } -const QUEUE_LIMIT: (usize, usize) = (1, 8192); -const WORKER_LIMIT: (usize, usize) = (1, 512); -const WORKER_THREAD_NAME: &str = "conduwuit:db"; - #[derive(Debug)] pub(crate) enum Cmd { Get(Get), @@ -40,83 +41,111 @@ pub(crate) struct Get { type ResultSender = oneshot::Sender>>; -#[implement(Pool)] -pub(crate) fn new(opts: &Opts) -> Result> { - let queue_size = opts.queue_size.clamp(QUEUE_LIMIT.0, QUEUE_LIMIT.1); +const QUEUE_LIMIT: (usize, usize) = (1, 3072); +const WORKER_LIMIT: (usize, usize) = (1, 512); +impl Drop for Pool { + fn drop(&mut self) { + debug_assert!(self.queue.is_empty(), "channel must be empty on drop"); + debug_assert!(self.queue.is_closed(), "channel should be closed on drop"); + } +} + +#[implement(Pool)] 
+pub(crate) async fn new(server: &Arc, opts: &Opts) -> Result> { + let queue_size = opts.queue_size.clamp(QUEUE_LIMIT.0, QUEUE_LIMIT.1); let (send, recv) = bounded(queue_size); let pool = Arc::new(Self { - workers: Vec::new().into(), - recv, - send, + server: server.clone(), + workers: JoinSet::new().into(), + queue: send, + busy: AtomicUsize::default(), + busy_max: AtomicUsize::default(), + queued_max: AtomicUsize::default(), }); let worker_num = opts.worker_num.clamp(WORKER_LIMIT.0, WORKER_LIMIT.1); - pool.spawn_until(worker_num)?; + pool.spawn_until(recv, worker_num).await?; Ok(pool) } #[implement(Pool)] -fn spawn_until(self: &Arc, max: usize) -> Result { - let mut workers = self.workers.lock()?; +pub(crate) async fn _shutdown(self: &Arc) { + if !self.queue.is_closed() { + self.close(); + } + let workers = take(&mut *self.workers.lock().await); + debug!(workers = workers.len(), "Waiting for workers to join..."); + + workers.join_all().await; + debug_assert!(self.queue.is_empty(), "channel is not empty"); +} + +#[implement(Pool)] +pub(crate) fn close(&self) { + debug_assert!(!self.queue.is_closed(), "channel already closed"); + debug!( + senders = self.queue.sender_count(), + receivers = self.queue.receiver_count(), + "Closing pool channel" + ); + + let closing = self.queue.close(); + debug_assert!(closing, "channel is not closing"); +} + +#[implement(Pool)] +async fn spawn_until(self: &Arc, recv: Receiver, max: usize) -> Result { + let mut workers = self.workers.lock().await; while workers.len() < max { - self.clone().spawn_one(&mut workers)?; + self.spawn_one(&mut workers, recv.clone())?; } Ok(()) } #[implement(Pool)] -fn spawn_one(self: Arc, workers: &mut Vec>) -> Result { - use std::thread::Builder; - +fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: Receiver) -> Result { let id = workers.len(); - debug!(?id, "spawning {WORKER_THREAD_NAME}..."); - let thread = Builder::new() - .name(WORKER_THREAD_NAME.into()) - .spawn(move || self.worker(id))?; + 
debug!(?id, "spawning"); + let self_ = self.clone(); + let _abort = workers.spawn_blocking_on(move || self_.worker(id, recv), self.server.runtime()); - workers.push(thread); - - Ok(id) + Ok(()) } #[implement(Pool)] -pub(crate) fn close(self: &Arc) { - debug!( - senders = %self.send.sender_count(), - receivers = %self.send.receiver_count(), - "Closing pool channel" - ); - let closing = self.send.close(); - debug_assert!(closing, "channel is not closing"); - - debug!("Shutting down pool..."); - let mut workers = self.workers.lock().expect("locked"); - - debug!( - workers = %workers.len(), - "Waiting for workers to join..." - ); - take(&mut *workers) - .into_iter() - .map(JoinHandle::join) - .try_for_each(identity) - .expect("failed to join worker threads"); - - debug_assert!(self.send.is_empty(), "channel is not empty"); -} - -#[implement(Pool)] -#[tracing::instrument(skip(self, cmd), level = "trace")] +#[tracing::instrument( + level = "trace" + skip(self, cmd), + fields( + task = ?tokio::task::try_id(), + receivers = self.queue.receiver_count(), + senders = self.queue.sender_count(), + queued = self.queue.len(), + queued_max = self.queued_max.load(Ordering::Relaxed), + ), +)] pub(crate) async fn execute(&self, mut cmd: Cmd) -> Result> { let (send, recv) = oneshot::channel(); Self::prepare(&mut cmd, send); - self.send + if cfg!(debug_assertions) { + self.queued_max + .fetch_max(self.queue.len(), Ordering::Relaxed); + } + + if self.queue.is_full() { + debug_warn!( + capacity = ?self.queue.capacity(), + "pool queue is full" + ); + } + + self.queue .send(cmd) .await .map_err(|e| err!(error!("send failed {e:?}")))?; @@ -136,30 +165,61 @@ fn prepare(cmd: &mut Cmd, send: ResultSender) { } #[implement(Pool)] -#[tracing::instrument(skip(self))] -fn worker(self: Arc, id: usize) { - debug!(?id, "worker spawned"); - defer! 
{{ debug!(?id, "worker finished"); }} - self.worker_loop(id); +#[tracing::instrument(skip(self, recv))] +fn worker(self: Arc, id: usize, recv: Receiver) { + debug!("worker spawned"); + defer! {{ debug!("worker finished"); }} + + self.worker_loop(&recv); } #[implement(Pool)] -fn worker_loop(&self, id: usize) { - while let Ok(mut cmd) = self.recv.recv_blocking() { - self.worker_handle(id, &mut cmd); +fn worker_loop(&self, recv: &Receiver) { + // initial +1 needed prior to entering wait + self.busy.fetch_add(1, Ordering::Relaxed); + + while let Ok(mut cmd) = self.worker_wait(recv) { + self.worker_handle(&mut cmd); } } #[implement(Pool)] -fn worker_handle(&self, id: usize, cmd: &mut Cmd) { +#[tracing::instrument( + name = "wait", + level = "trace", + skip_all, + fields( + receivers = recv.receiver_count(), + senders = recv.sender_count(), + queued = recv.len(), + busy = self.busy.load(Ordering::Relaxed), + busy_max = self.busy_max.fetch_max( + self.busy.fetch_sub(1, Ordering::Relaxed), + Ordering::Relaxed + ), + ), +)] +fn worker_wait(&self, recv: &Receiver) -> Result { + recv.recv_blocking().debug_inspect(|_| { + self.busy.fetch_add(1, Ordering::Relaxed); + }) +} + +#[implement(Pool)] +fn worker_handle(&self, cmd: &mut Cmd) { match cmd { - Cmd::Get(get) => self.handle_get(id, get), + Cmd::Get(cmd) => self.handle_get(cmd), } } #[implement(Pool)] -#[tracing::instrument(skip(self, cmd), fields(%cmd.map), level = "trace")] -fn handle_get(&self, id: usize, cmd: &mut Get) { +#[tracing::instrument( + name = "get", + level = "trace", + skip_all, + fields(%cmd.map), +)] +fn handle_get(&self, cmd: &mut Get) { debug_assert!(!cmd.key.is_empty(), "querying for empty key"); // Obtain the result channel. 
From ed8c21ac9aa1be607195985249ec0f9915b41afd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 30 Nov 2024 15:13:35 +0000 Subject: [PATCH 0319/1248] modernize async srv lookup Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 41 +++++++++++++--------------------- src/service/resolver/mod.rs | 7 ------ 2 files changed, 16 insertions(+), 32 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index fec29133..6589a58b 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -1,11 +1,10 @@ use std::{ fmt::Debug, net::{IpAddr, SocketAddr}, - sync::Arc, }; use conduit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; -use hickory_resolver::{error::ResolveError, lookup::SrvLookup}; +use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; @@ -258,7 +257,7 @@ impl super::Service { #[tracing::instrument(skip_all, name = "ip")] async fn query_and_cache_override(&self, overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> { - match self.raw().lookup_ip(hostname.to_owned()).await { + match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { Err(e) => Self::handle_resolve_error(&e), Ok(override_ip) => { if hostname != overname { @@ -281,32 +280,24 @@ impl super::Service { #[tracing::instrument(skip_all, name = "srv")] async fn query_srv_record(&self, hostname: &'_ str) -> Result> { - fn handle_successful_srv(srv: &SrvLookup) -> Option { - srv.iter().next().map(|result| { - FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()) - .as_str() - .try_into() - .unwrap_or_else(|_| FedDest::default_port()), - ) - }) - } - - async fn lookup_srv( - resolver: Arc, hostname: &str, - ) -> Result { - debug!("querying SRV for {hostname:?}"); - let hostname = hostname.trim_end_matches('.'); - resolver.srv_lookup(hostname.to_owned()).await - } - let hostnames = 
[format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; for hostname in hostnames { - match lookup_srv(self.raw(), &hostname).await { - Ok(result) => return Ok(handle_successful_srv(&result)), + debug!("querying SRV for {hostname:?}"); + let hostname = hostname.trim_end_matches('.'); + match self.resolver.resolver.srv_lookup(hostname).await { Err(e) => Self::handle_resolve_error(&e)?, + Ok(result) => { + return Ok(result.iter().next().map(|result| { + FedDest::Named( + result.target().to_string().trim_end_matches('.').to_owned(), + format!(":{}", result.port()) + .as_str() + .try_into() + .unwrap_or_else(|_| FedDest::default_port()), + ) + })) + }, } } diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 457ea9cc..28b7063d 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -7,7 +7,6 @@ mod tests; use std::{fmt::Write, sync::Arc}; use conduit::{Result, Server}; -use hickory_resolver::TokioAsyncResolver; use self::{cache::Cache, dns::Resolver}; use crate::{client, globals, Dep}; @@ -71,9 +70,3 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } - -impl Service { - #[inline] - #[must_use] - pub fn raw(&self) -> Arc { self.resolver.resolver.clone() } -} From 320b0680bdbbb2f08790a178ea2a75c5a0dee11f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 30 Nov 2024 08:31:28 +0000 Subject: [PATCH 0320/1248] pipeline various loops Signed-off-by: Jason Volk --- src/api/client/context.rs | 39 ++++++++++++------------ src/api/client/message.rs | 7 +++-- src/service/rooms/search/mod.rs | 12 +++++--- src/service/rooms/state/mod.rs | 29 +++++++++++------- src/service/rooms/state_accessor/data.rs | 14 ++++----- 5 files changed, 56 insertions(+), 45 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 652e17f4..af4e26f0 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,9 +1,13 @@ -use 
std::{collections::HashMap, iter::once}; +use std::iter::once; use axum::extract::State; use conduit::{ - at, err, error, - utils::{future::TryExtExt, stream::ReadyExt, IterStream}, + at, err, + utils::{ + future::TryExtExt, + stream::{BroadbandExt, ReadyExt, WidebandExt}, + IterStream, + }, Err, Result, }; use futures::{future::try_join, StreamExt, TryFutureExt}; @@ -85,8 +89,8 @@ pub(crate) async fn get_context_route( .pdus_rev(Some(sender_user), room_id, Some(base_token)) .await? .ready_filter_map(|item| event_filter(item, filter)) - .filter_map(|item| ignored_filter(&services, item, sender_user)) - .filter_map(|item| visibility_filter(&services, item, sender_user)) + .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) + .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) .take(limit / 2) .collect() .await; @@ -97,8 +101,8 @@ pub(crate) async fn get_context_route( .pdus(Some(sender_user), room_id, Some(base_token)) .await? .ready_filter_map(|item| event_filter(item, filter)) - .filter_map(|item| ignored_filter(&services, item, sender_user)) - .filter_map(|item| visibility_filter(&services, item, sender_user)) + .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) + .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) .take(limit / 2) .collect() .await; @@ -124,7 +128,7 @@ pub(crate) async fn get_context_route( .await .map_err(|e| err!(Database("State hash not found: {e}")))?; - let state_ids: HashMap<_, OwnedEventId> = services + let state_ids = services .rooms .state_accessor .state_full_ids(shortstatehash) @@ -133,17 +137,17 @@ pub(crate) async fn get_context_route( let lazy = &lazy; let state: Vec<_> = state_ids - .iter() + .into_iter() .stream() - .filter_map(|(shortstatekey, event_id)| { + .broad_filter_map(|(shortstatekey, event_id)| { services .rooms .short - .get_statekey_from_short(*shortstatekey) + .get_statekey_from_short(shortstatekey) .map_ok(move |(event_type, state_key)| 
(event_type, state_key, event_id)) .ok() }) - .filter_map(|(event_type, state_key, event_id)| async move { + .ready_filter_map(|(event_type, state_key, event_id)| { if lazy_load_enabled && event_type == StateEventType::RoomMember { let user_id: &UserId = state_key.as_str().try_into().ok()?; if !lazy.contains(user_id) { @@ -151,15 +155,10 @@ pub(crate) async fn get_context_route( } } - services - .rooms - .timeline - .get_pdu(event_id) - .await - .inspect_err(|_| error!("Pdu in state not found: {event_id}")) - .map(|pdu| pdu.to_state_event()) - .ok() + Some(event_id) }) + .broad_filter_map(|event_id: OwnedEventId| async move { services.rooms.timeline.get_pdu(&event_id).await.ok() }) + .map(|pdu| pdu.to_state_event()) .collect() .await; diff --git a/src/api/client/message.rs b/src/api/client/message.rs index d8043855..242c1681 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -5,6 +5,7 @@ use conduit::{ at, is_equal_to, utils::{ result::{FlatOk, LogErr}, + stream::{BroadbandExt, WidebandExt}, IterStream, ReadyExt, }, Event, PduCount, Result, @@ -115,8 +116,8 @@ pub(crate) async fn get_message_events_route( let events: Vec<_> = it .ready_take_while(|(count, _)| Some(*count) != to) .ready_filter_map(|item| event_filter(item, filter)) - .filter_map(|item| ignored_filter(&services, item, sender_user)) - .filter_map(|item| visibility_filter(&services, item, sender_user)) + .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) + .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) .take(limit) .collect() .await; @@ -132,7 +133,7 @@ pub(crate) async fn get_message_events_route( let state = lazy .iter() .stream() - .filter_map(|user_id| get_member_event(&services, room_id, user_id)) + .broad_filter_map(|user_id| get_member_event(&services, room_id, user_id)) .collect() .await; diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index d59d1d11..ae3567ce 100644 --- 
a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -3,7 +3,11 @@ use std::sync::Arc; use arrayvec::ArrayVec; use conduit::{ implement, - utils::{set, stream::TryIgnore, ArrayVecExt, IterStream, ReadyExt}, + utils::{ + set, + stream::{TryIgnore, WidebandExt}, + ArrayVecExt, IterStream, ReadyExt, + }, PduCount, PduEvent, Result, }; use database::{keyval::Val, Map}; @@ -107,7 +111,7 @@ pub async fn search_pdus<'a>( let pdus = pdu_ids .into_iter() .stream() - .filter_map(move |result_pdu_id: RawPduId| async move { + .wide_filter_map(move |result_pdu_id: RawPduId| async move { self.services .timeline .get_pdu_from_id(&result_pdu_id) @@ -116,7 +120,7 @@ pub async fn search_pdus<'a>( }) .ready_filter(|pdu| !pdu.is_redacted()) .ready_filter(|pdu| pdu.matches(&query.criteria.filter)) - .filter_map(move |pdu| async move { + .wide_filter_map(move |pdu| async move { self.services .state_accessor .user_can_see_event(query.user_id?, &pdu.room_id, &pdu.event_id) @@ -146,7 +150,7 @@ pub async fn search_pdu_ids(&self, query: &RoomQuery<'_>) -> Result, shortroomid: ShortRoomId) -> Vec> { tokenize(&query.criteria.search_term) .stream() - .then(|word| async move { + .wide_then(|word| async move { self.search_pdu_ids_query_words(shortroomid, &word) .collect::>() .await diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 03e2d2e8..838deacd 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -8,7 +8,11 @@ use std::{ use conduit::{ at, err, result::FlatOk, - utils::{calculate_hash, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, + utils::{ + calculate_hash, + stream::{BroadbandExt, TryIgnore}, + IterStream, MutexMap, MutexMapGuard, ReadyExt, + }, warn, PduEvent, Result, }; use database::{Deserialized, Ignore, Interfix, Map}; @@ -405,7 +409,7 @@ impl Service { let mut sauthevents: HashMap<_, _> = state_res::auth_types_for_event(kind, sender, state_key, content)? 
.iter() .stream() - .filter_map(|(event_type, state_key)| { + .broad_filter_map(|(event_type, state_key)| { self.services .short .get_shortstatekey(event_type, state_key) @@ -430,24 +434,27 @@ impl Service { }) .collect(); - let auth_pdus: Vec<_> = self + let auth_pdus = self .services .short .multi_get_eventid_from_short(auth_state.iter().map(at!(1))) .await .into_iter() .stream() - .and_then(|event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await }) + .zip(auth_state.into_iter().stream().map(at!(0))) + .ready_filter_map(|(event_id, tsk)| Some((tsk, event_id.ok()?))) + .broad_filter_map(|(tsk, event_id): (_, OwnedEventId)| async move { + self.services + .timeline + .get_pdu(&event_id) + .await + .map(Arc::new) + .map(move |pdu| (tsk, pdu)) + .ok() + }) .collect() .await; - let auth_pdus = auth_state - .into_iter() - .map(at!(0)) - .zip(auth_pdus.into_iter()) - .filter_map(|((event_type, state_key), pdu)| Some(((event_type, state_key), pdu.ok()?.into()))) - .collect(); - Ok(auth_pdus) } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 7760d5b6..2a670066 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -2,7 +2,7 @@ use std::{borrow::Borrow, collections::HashMap, sync::Arc}; use conduit::{ at, err, - utils::stream::{IterStream, ReadyExt}, + utils::stream::{BroadbandExt, IterStream}, PduEvent, Result, }; use database::{Deserialized, Map}; @@ -65,20 +65,20 @@ impl Data { .into_iter() .map(at!(1)); - let event_ids: Vec = self + let event_ids = self .services .short .multi_get_eventid_from_short(short_ids) .await .into_iter() - .filter_map(Result::ok) - .collect(); + .filter_map(Result::ok); let full_pdus = event_ids - .iter() + .into_iter() .stream() - .then(|event_id| self.services.timeline.get_pdu(event_id)) - .ready_filter_map(Result::ok) + .broad_filter_map( + |event_id: OwnedEventId| async move { 
self.services.timeline.get_pdu(&event_id).await.ok() }, + ) .collect() .await; From b5006a4c4128fc779add9cd6c35b93f0d8d6ba7e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 2 Dec 2024 08:51:59 +0000 Subject: [PATCH 0321/1248] offload initial iterator seeks to threadpool Signed-off-by: Jason Volk --- src/database/map/count.rs | 10 +- src/database/map/get.rs | 16 ++- src/database/map/keys.rs | 4 +- src/database/map/keys_from.rs | 32 ++++-- src/database/map/keys_prefix.rs | 12 +- src/database/map/rev_keys.rs | 4 +- src/database/map/rev_keys_from.rs | 32 ++++-- src/database/map/rev_keys_prefix.rs | 12 +- src/database/map/rev_stream.rs | 4 +- src/database/map/rev_stream_from.rs | 39 +++++-- src/database/map/rev_stream_prefix.rs | 14 ++- src/database/map/stream.rs | 4 +- src/database/map/stream_from.rs | 37 ++++-- src/database/map/stream_prefix.rs | 14 ++- src/database/pool.rs | 158 ++++++++++++++++++-------- src/database/stream.rs | 16 +-- src/database/stream/items.rs | 21 +++- src/database/stream/items_rev.rs | 21 +++- src/database/stream/keys.rs | 21 +++- src/database/stream/keys_rev.rs | 21 +++- 20 files changed, 361 insertions(+), 131 deletions(-) diff --git a/src/database/map/count.rs b/src/database/map/count.rs index 3e92279c..894fe12e 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -1,4 +1,4 @@ -use std::{fmt::Debug, future::Future}; +use std::{fmt::Debug, future::Future, sync::Arc}; use conduit::implement; use futures::stream::StreamExt; @@ -14,7 +14,7 @@ pub fn count(&self) -> impl Future + Send + '_ { self.raw_keys() /// - From is a structured key #[implement(super::Map)] #[inline] -pub fn count_from<'a, P>(&'a self, from: &P) -> impl Future + Send + 'a +pub fn count_from<'a, P>(self: &'a Arc, from: &P) -> impl Future + Send + 'a where P: Serialize + ?Sized + Debug + 'a, { @@ -26,7 +26,7 @@ where /// - From is a raw #[implement(super::Map)] #[inline] -pub fn raw_count_from<'a, P>(&'a self, from: &'a P) -> impl Future + Send + 'a 
+pub fn raw_count_from<'a, P>(self: &'a Arc, from: &'a P) -> impl Future + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { @@ -38,7 +38,7 @@ where /// - Prefix is structured key #[implement(super::Map)] #[inline] -pub fn count_prefix<'a, P>(&'a self, prefix: &P) -> impl Future + Send + 'a +pub fn count_prefix<'a, P>(self: &'a Arc, prefix: &P) -> impl Future + Send + 'a where P: Serialize + ?Sized + Debug + 'a, { @@ -50,7 +50,7 @@ where /// - Prefix is raw #[implement(super::Map)] #[inline] -pub fn raw_count_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Future + Send + 'a +pub fn raw_count_prefix<'a, P>(self: &'a Arc, prefix: &'a P) -> impl Future + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 4699fec4..ef552177 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,7 +1,11 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; -use conduit::{err, implement, utils::IterStream, Err, Result}; +use conduit::{ + err, implement, + utils::{result::MapExpect, IterStream}, + Err, Result, +}; use futures::{future, Future, FutureExt, Stream, StreamExt}; use rocksdb::DBPinnableSlice; use serde::Serialize; @@ -74,21 +78,21 @@ pub fn get(self: &Arc, key: &K) -> impl Future + Debug + ?Sized, { - use crate::pool::{Cmd, Get}; + use crate::pool::Get; let cached = self.get_cached(key); if matches!(cached, Err(_) | Ok(Some(_))) { - return future::ready(cached.map(|res| res.expect("Option is Some"))).boxed(); + return future::ready(cached.map_expect("data found in cache")).boxed(); } debug_assert!(matches!(cached, Ok(None)), "expected status Incomplete"); - let cmd = Cmd::Get(Get { + let cmd = Get { map: self.clone(), key: key.as_ref().into(), res: None, - }); + }; - self.db.pool.execute(cmd).boxed() + self.db.pool.execute_get(cmd).boxed() } #[implement(super::Map)] diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs 
index 9c4d66e4..80cf1e15 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -2,7 +2,7 @@ use conduit::{implement, Result}; use futures::{Stream, StreamExt}; use serde::Deserialize; -use crate::{keyval, keyval::Key, stream}; +use crate::{keyval, keyval::Key, stream, stream::Cursor}; #[implement(super::Map)] pub fn keys<'a, K>(&'a self) -> impl Stream>> + Send @@ -16,5 +16,5 @@ where #[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn raw_keys(&self) -> impl Stream>> + Send { let opts = super::read_options_default(); - stream::Keys::new(&self.db, &self.cf, opts, None) + stream::Keys::new(&self.db, &self.cf, opts).init(None) } diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 093f7fd6..7be3dd1d 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -1,7 +1,8 @@ -use std::{convert::AsRef, fmt::Debug}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduit::{implement, Result}; -use futures::{Stream, StreamExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use rocksdb::Direction; use serde::{Deserialize, Serialize}; use crate::{ @@ -10,7 +11,7 @@ use crate::{ }; #[implement(super::Map)] -pub fn keys_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn keys_from<'a, K, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -20,7 +21,7 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn keys_from_raw

    (&self, from: &P) -> impl Stream>> + Send +pub fn keys_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -29,7 +30,7 @@ where } #[implement(super::Map)] -pub fn keys_raw_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn keys_raw_from<'a, K, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -39,10 +40,27 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_keys_from

    (&self, from: &P) -> impl Stream>> + Send +pub fn raw_keys_from

    (self: &Arc, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, { + use crate::pool::Seek; + let opts = super::read_options_default(); - stream::Keys::new(&self.db, &self.cf, opts, Some(from.as_ref())) + let state = stream::State::new(&self.db, &self.cf, opts); + let seek = Seek { + map: self.clone(), + dir: Direction::Forward, + key: Some(from.as_ref().into()), + state: crate::pool::into_send_seek(state), + res: None, + }; + + self.db + .pool + .execute_iter(seek) + .ok_into::>() + .into_stream() + .try_flatten() + .boxed() } diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 8963f002..9122d78e 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -1,4 +1,4 @@ -use std::{convert::AsRef, fmt::Debug}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduit::{implement, Result}; use futures::{ @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use crate::keyval::{result_deserialize_key, serialize_key, Key}; #[implement(super::Map)] -pub fn keys_prefix<'a, K, P>(&'a self, prefix: &P) -> impl Stream>> + Send +pub fn keys_prefix<'a, K, P>(self: &'a Arc, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -22,7 +22,7 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn keys_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send +pub fn keys_prefix_raw

    (self: &Arc, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -32,7 +32,9 @@ where } #[implement(super::Map)] -pub fn keys_raw_prefix<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn keys_raw_prefix<'a, K, P>( + self: &'a Arc, prefix: &'a P, +) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, K: Deserialize<'a> + Send + 'a, @@ -42,7 +44,7 @@ where } #[implement(super::Map)] -pub fn raw_keys_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn raw_keys_prefix<'a, P>(self: &'a Arc, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index e10a199c..45a0203f 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -2,7 +2,7 @@ use conduit::{implement, Result}; use futures::{Stream, StreamExt}; use serde::Deserialize; -use crate::{keyval, keyval::Key, stream}; +use crate::{keyval, keyval::Key, stream, stream::Cursor}; #[implement(super::Map)] pub fn rev_keys<'a, K>(&'a self) -> impl Stream>> + Send @@ -16,5 +16,5 @@ where #[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_raw_keys(&self) -> impl Stream>> + Send { let opts = super::read_options_default(); - stream::KeysRev::new(&self.db, &self.cf, opts, None) + stream::KeysRev::new(&self.db, &self.cf, opts).init(None) } diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index 75d062b5..2b59a5d7 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -1,7 +1,8 @@ -use std::{convert::AsRef, fmt::Debug}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduit::{implement, Result}; -use futures::{Stream, StreamExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use rocksdb::Direction; use serde::{Deserialize, Serialize}; use crate::{ @@ -10,7 +11,7 @@ 
use crate::{ }; #[implement(super::Map)] -pub fn rev_keys_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn rev_keys_from<'a, K, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -21,7 +22,7 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn rev_keys_from_raw

    (&self, from: &P) -> impl Stream>> + Send +pub fn rev_keys_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -30,7 +31,7 @@ where } #[implement(super::Map)] -pub fn rev_keys_raw_from<'a, K, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn rev_keys_raw_from<'a, K, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -41,10 +42,27 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn rev_raw_keys_from

    (&self, from: &P) -> impl Stream>> + Send +pub fn rev_raw_keys_from

    (self: &Arc, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, { + use crate::pool::Seek; + let opts = super::read_options_default(); - stream::KeysRev::new(&self.db, &self.cf, opts, Some(from.as_ref())) + let state = stream::State::new(&self.db, &self.cf, opts); + let seek = Seek { + map: self.clone(), + dir: Direction::Reverse, + key: Some(from.as_ref().into()), + state: crate::pool::into_send_seek(state), + res: None, + }; + + self.db + .pool + .execute_iter(seek) + .ok_into::>() + .into_stream() + .try_flatten() + .boxed() } diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index c14909d4..69dc54f2 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -1,4 +1,4 @@ -use std::{convert::AsRef, fmt::Debug}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduit::{implement, Result}; use futures::{ @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use crate::keyval::{result_deserialize_key, serialize_key, Key}; #[implement(super::Map)] -pub fn rev_keys_prefix<'a, K, P>(&'a self, prefix: &P) -> impl Stream>> + Send +pub fn rev_keys_prefix<'a, K, P>(self: &'a Arc, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -22,7 +22,7 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn rev_keys_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send +pub fn rev_keys_prefix_raw

    (self: &Arc, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -32,7 +32,9 @@ where } #[implement(super::Map)] -pub fn rev_keys_raw_prefix<'a, K, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn rev_keys_raw_prefix<'a, K, P>( + self: &'a Arc, prefix: &'a P, +) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, K: Deserialize<'a> + Send + 'a, @@ -42,7 +44,7 @@ where } #[implement(super::Map)] -pub fn rev_raw_keys_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn rev_raw_keys_prefix<'a, P>(self: &'a Arc, prefix: &'a P) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index f4be69fd..37b0d3b3 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -2,7 +2,7 @@ use conduit::{implement, Result}; use futures::stream::{Stream, StreamExt}; use serde::Deserialize; -use crate::{keyval, keyval::KeyVal, stream}; +use crate::{keyval, keyval::KeyVal, stream, stream::Cursor}; /// Iterate key-value entries in the map from the end. 
/// @@ -24,5 +24,5 @@ where #[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_raw_stream(&self) -> impl Stream>> + Send { let opts = super::read_options_default(); - stream::ItemsRev::new(&self.db, &self.cf, opts, None) + stream::ItemsRev::new(&self.db, &self.cf, opts).init(None) } diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 6ac1cd1a..9811d106 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -1,7 +1,11 @@ -use std::{convert::AsRef, fmt::Debug}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduit::{implement, Result}; -use futures::stream::{Stream, StreamExt}; +use futures::{ + stream::{Stream, StreamExt}, + FutureExt, TryFutureExt, TryStreamExt, +}; +use rocksdb::Direction; use serde::{Deserialize, Serialize}; use crate::{ @@ -14,7 +18,9 @@ use crate::{ /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -pub fn rev_stream_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn rev_stream_from<'a, K, V, P>( + self: &'a Arc, from: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -30,7 +36,7 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn rev_stream_from_raw

    (&self, from: &P) -> impl Stream>> + Send +pub fn rev_stream_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -43,7 +49,9 @@ where /// - Query is raw /// - Result is deserialized #[implement(super::Map)] -pub fn rev_stream_raw_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn rev_stream_raw_from<'a, K, V, P>( + self: &'a Arc, from: &P, +) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -59,10 +67,27 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn rev_raw_stream_from

    (&self, from: &P) -> impl Stream>> + Send +pub fn rev_raw_stream_from

    (self: &Arc, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, { + use crate::pool::Seek; + let opts = super::read_options_default(); - stream::ItemsRev::new(&self.db, &self.cf, opts, Some(from.as_ref())) + let state = stream::State::new(&self.db, &self.cf, opts); + let seek = Seek { + map: self.clone(), + dir: Direction::Reverse, + key: Some(from.as_ref().into()), + state: crate::pool::into_send_seek(state), + res: None, + }; + + self.db + .pool + .execute_iter(seek) + .ok_into::>() + .into_stream() + .try_flatten() + .boxed() } diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index fd0d93ff..e5c2fbea 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -1,4 +1,4 @@ -use std::{convert::AsRef, fmt::Debug}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduit::{implement, Result}; use futures::{ @@ -15,7 +15,9 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -pub fn rev_stream_prefix<'a, K, V, P>(&'a self, prefix: &P) -> impl Stream>> + Send +pub fn rev_stream_prefix<'a, K, V, P>( + self: &'a Arc, prefix: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -31,7 +33,7 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn rev_stream_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send +pub fn rev_stream_prefix_raw

    (self: &Arc, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -46,7 +48,7 @@ where /// - Result is deserialized #[implement(super::Map)] pub fn rev_stream_raw_prefix<'a, K, V, P>( - &'a self, prefix: &'a P, + self: &'a Arc, prefix: &'a P, ) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, @@ -62,7 +64,9 @@ where /// - Query is raw /// - Result is raw #[implement(super::Map)] -pub fn rev_raw_stream_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn rev_raw_stream_prefix<'a, P>( + self: &'a Arc, prefix: &'a P, +) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index 143b0d0c..4f4fbd08 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -2,7 +2,7 @@ use conduit::{implement, Result}; use futures::stream::{Stream, StreamExt}; use serde::Deserialize; -use crate::{keyval, keyval::KeyVal, stream}; +use crate::{keyval, keyval::KeyVal, stream, stream::Cursor}; /// Iterate key-value entries in the map from the beginning. 
/// @@ -23,5 +23,5 @@ where #[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn raw_stream(&self) -> impl Stream>> + Send { let opts = super::read_options_default(); - stream::Items::new(&self.db, &self.cf, opts, None) + stream::Items::new(&self.db, &self.cf, opts).init(None) } diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 052a2e74..6468846f 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -1,7 +1,11 @@ -use std::{convert::AsRef, fmt::Debug}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduit::{implement, Result}; -use futures::stream::{Stream, StreamExt}; +use futures::{ + stream::{Stream, StreamExt}, + FutureExt, TryFutureExt, TryStreamExt, +}; +use rocksdb::Direction; use serde::{Deserialize, Serialize}; use crate::{ @@ -14,7 +18,7 @@ use crate::{ /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -pub fn stream_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn stream_from<'a, K, V, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -29,7 +33,7 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn stream_from_raw

    (&self, from: &P) -> impl Stream>> + Send +pub fn stream_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -42,7 +46,9 @@ where /// - Query is raw /// - Result is deserialized #[implement(super::Map)] -pub fn stream_raw_from<'a, K, V, P>(&'a self, from: &P) -> impl Stream>> + Send +pub fn stream_raw_from<'a, K, V, P>( + self: &'a Arc, from: &P, +) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -57,10 +63,27 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_stream_from

    (&self, from: &P) -> impl Stream>> + Send +pub fn raw_stream_from

    (self: &Arc, from: &P) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, { + use crate::pool::Seek; + let opts = super::read_options_default(); - stream::Items::new(&self.db, &self.cf, opts, Some(from.as_ref())) + let state = stream::State::new(&self.db, &self.cf, opts); + let seek = Seek { + map: self.clone(), + dir: Direction::Forward, + key: Some(from.as_ref().into()), + state: crate::pool::into_send_seek(state), + res: None, + }; + + self.db + .pool + .execute_iter(seek) + .ok_into::>() + .into_stream() + .try_flatten() + .boxed() } diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index a08b1e2a..3c7bce2e 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -1,4 +1,4 @@ -use std::{convert::AsRef, fmt::Debug}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduit::{implement, Result}; use futures::{ @@ -15,7 +15,9 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -pub fn stream_prefix<'a, K, V, P>(&'a self, prefix: &P) -> impl Stream>> + Send +pub fn stream_prefix<'a, K, V, P>( + self: &'a Arc, prefix: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -31,7 +33,7 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn stream_prefix_raw

    (&self, prefix: &P) -> impl Stream>> + Send +pub fn stream_prefix_raw

    (self: &Arc, prefix: &P) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -46,7 +48,7 @@ where /// - Result is deserialized #[implement(super::Map)] pub fn stream_raw_prefix<'a, K, V, P>( - &'a self, prefix: &'a P, + self: &'a Arc, prefix: &'a P, ) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, @@ -62,7 +64,9 @@ where /// - Query is raw /// - Result is raw #[implement(super::Map)] -pub fn raw_stream_prefix<'a, P>(&'a self, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn raw_stream_prefix<'a, P>( + self: &'a Arc, prefix: &'a P, +) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { diff --git a/src/database/pool.rs b/src/database/pool.rs index a9697625..136de67d 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -8,17 +8,18 @@ use std::{ use async_channel::{bounded, Receiver, RecvError, Sender}; use conduit::{debug, debug_warn, defer, err, implement, result::DebugInspect, Result, Server}; -use futures::channel::oneshot; +use futures::{channel::oneshot, TryFutureExt}; +use oneshot::Sender as ResultSender; +use rocksdb::Direction; use tokio::{sync::Mutex, task::JoinSet}; -use crate::{keyval::KeyBuf, Handle, Map}; +use crate::{keyval::KeyBuf, stream, Handle, Map}; pub(crate) struct Pool { server: Arc, workers: Mutex>, queue: Sender, busy: AtomicUsize, - busy_max: AtomicUsize, queued_max: AtomicUsize, } @@ -27,19 +28,24 @@ pub(crate) struct Opts { pub(crate) worker_num: usize, } -#[derive(Debug)] pub(crate) enum Cmd { Get(Get), + Iter(Seek), } -#[derive(Debug)] pub(crate) struct Get { pub(crate) map: Arc, pub(crate) key: KeyBuf, - pub(crate) res: Option, + pub(crate) res: Option>>>, } -type ResultSender = oneshot::Sender>>; +pub(crate) struct Seek { + pub(crate) map: Arc, + pub(crate) state: stream::State<'static>, + pub(crate) dir: Direction, + pub(crate) key: Option, + pub(crate) res: Option>>, +} const QUEUE_LIMIT: (usize, usize) = (1, 3072); const WORKER_LIMIT: 
(usize, usize) = (1, 512); @@ -60,7 +66,6 @@ pub(crate) async fn new(server: &Arc, opts: &Opts) -> Result> workers: JoinSet::new().into(), queue: send, busy: AtomicUsize::default(), - busy_max: AtomicUsize::default(), queued_max: AtomicUsize::default(), }); @@ -94,6 +99,8 @@ pub(crate) fn close(&self) { let closing = self.queue.close(); debug_assert!(closing, "channel is not closing"); + + std::thread::yield_now(); } #[implement(Pool)] @@ -117,22 +124,45 @@ fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: Receiver) - Ok(()) } +#[implement(Pool)] +#[tracing::instrument(level = "trace", name = "get", skip(self, cmd))] +pub(crate) async fn execute_get(&self, mut cmd: Get) -> Result> { + let (send, recv) = oneshot::channel(); + _ = cmd.res.insert(send); + self.execute(Cmd::Get(cmd)) + .and_then(|()| { + recv.map_ok(into_recv_get_result) + .map_err(|e| err!(error!("recv failed {e:?}"))) + }) + .await? +} + +#[implement(Pool)] +#[tracing::instrument(level = "trace", name = "iter", skip(self, cmd))] +pub(crate) async fn execute_iter(&self, mut cmd: Seek) -> Result> { + let (send, recv) = oneshot::channel(); + _ = cmd.res.insert(send); + self.execute(Cmd::Iter(cmd)) + .and_then(|()| { + recv.map_ok(into_recv_seek) + .map_err(|e| err!(error!("recv failed {e:?}"))) + }) + .await +} + #[implement(Pool)] #[tracing::instrument( - level = "trace" + level = "trace", + name = "execute", skip(self, cmd), fields( task = ?tokio::task::try_id(), receivers = self.queue.receiver_count(), - senders = self.queue.sender_count(), queued = self.queue.len(), queued_max = self.queued_max.load(Ordering::Relaxed), ), )] -pub(crate) async fn execute(&self, mut cmd: Cmd) -> Result> { - let (send, recv) = oneshot::channel(); - Self::prepare(&mut cmd, send); - +async fn execute(&self, cmd: Cmd) -> Result { if cfg!(debug_assertions) { self.queued_max .fetch_max(self.queue.len(), Ordering::Relaxed); @@ -148,20 +178,7 @@ pub(crate) async fn execute(&self, mut cmd: Cmd) -> Result> { self.queue 
.send(cmd) .await - .map_err(|e| err!(error!("send failed {e:?}")))?; - - recv.await - .map(into_recv_result) - .map_err(|e| err!(error!("recv failed {e:?}")))? -} - -#[implement(Pool)] -fn prepare(cmd: &mut Cmd, send: ResultSender) { - match cmd { - Cmd::Get(ref mut cmd) => { - _ = cmd.res.insert(send); - }, - }; + .map_err(|e| err!(error!("send failed {e:?}"))) } #[implement(Pool)] @@ -178,8 +195,8 @@ fn worker_loop(&self, recv: &Receiver) { // initial +1 needed prior to entering wait self.busy.fetch_add(1, Ordering::Relaxed); - while let Ok(mut cmd) = self.worker_wait(recv) { - self.worker_handle(&mut cmd); + while let Ok(cmd) = self.worker_wait(recv) { + self.worker_handle(cmd); } } @@ -190,13 +207,8 @@ fn worker_loop(&self, recv: &Receiver) { skip_all, fields( receivers = recv.receiver_count(), - senders = recv.sender_count(), queued = recv.len(), - busy = self.busy.load(Ordering::Relaxed), - busy_max = self.busy_max.fetch_max( - self.busy.fetch_sub(1, Ordering::Relaxed), - Ordering::Relaxed - ), + busy = self.busy.fetch_sub(1, Ordering::Relaxed) - 1, ), )] fn worker_wait(&self, recv: &Receiver) -> Result { @@ -206,12 +218,60 @@ fn worker_wait(&self, recv: &Receiver) -> Result { } #[implement(Pool)] -fn worker_handle(&self, cmd: &mut Cmd) { +fn worker_handle(&self, cmd: Cmd) { match cmd { Cmd::Get(cmd) => self.handle_get(cmd), + Cmd::Iter(cmd) => self.handle_iter(cmd), } } +#[implement(Pool)] +#[tracing::instrument( + name = "iter", + level = "trace", + skip_all, + fields(%cmd.map), +)] +fn handle_iter(&self, mut cmd: Seek) { + let chan = cmd.res.take().expect("missing result channel"); + + if chan.is_canceled() { + return; + } + + let from = cmd.key.as_deref().map(Into::into); + let result = match cmd.dir { + Direction::Forward => cmd.state.init_fwd(from), + Direction::Reverse => cmd.state.init_rev(from), + }; + + let chan_result = chan.send(into_send_seek(result)); + let _chan_sent = chan_result.is_ok(); +} + +#[implement(Pool)] +#[tracing::instrument( + 
name = "seek", + level = "trace", + skip_all, + fields(%cmd.map), +)] +fn _handle_seek(&self, mut cmd: Seek) { + let chan = cmd.res.take().expect("missing result channel"); + + if chan.is_canceled() { + return; + } + + match cmd.dir { + Direction::Forward => cmd.state.seek_fwd(), + Direction::Reverse => cmd.state.seek_rev(), + }; + + let chan_result = chan.send(into_send_seek(cmd.state)); + let _chan_sent = chan_result.is_ok(); +} + #[implement(Pool)] #[tracing::instrument( name = "get", @@ -219,7 +279,7 @@ fn worker_handle(&self, cmd: &mut Cmd) { skip_all, fields(%cmd.map), )] -fn handle_get(&self, cmd: &mut Get) { +fn handle_get(&self, mut cmd: Get) { debug_assert!(!cmd.key.is_empty(), "querying for empty key"); // Obtain the result channel. @@ -237,23 +297,31 @@ fn handle_get(&self, cmd: &mut Get) { let result = cmd.map.get_blocking(&cmd.key); // Send the result back to the submitter. - let chan_result = chan.send(into_send_result(result)); + let chan_result = chan.send(into_send_get_result(result)); // If the future was dropped during the query this will fail acceptably. let _chan_sent = chan_result.is_ok(); } -fn into_send_result(result: Result>) -> Result> { +fn into_send_get_result(result: Result>) -> Result> { // SAFETY: Necessary to send the Handle (rust_rocksdb::PinnableSlice) through // the channel. The lifetime on the handle is a device by rust-rocksdb to // associate a database lifetime with its assets. The Handle must be dropped - // before the database is dropped. The handle must pass through recv_handle() on - // the other end of the channel. + // before the database is dropped. unsafe { std::mem::transmute(result) } } -fn into_recv_result(result: Result>) -> Result> { - // SAFETY: This is to receive the Handle from the channel. Previously it had - // passed through send_handle(). +fn into_recv_get_result(result: Result>) -> Result> { + // SAFETY: This is to receive the Handle from the channel. 
+ unsafe { std::mem::transmute(result) } +} + +pub(crate) fn into_send_seek(result: stream::State<'_>) -> stream::State<'static> { + // SAFETY: Necessary to send the State through the channel; see above. + unsafe { std::mem::transmute(result) } +} + +fn into_recv_seek(result: stream::State<'static>) -> stream::State<'_> { + // SAFETY: This is to receive the State from the channel; see above. unsafe { std::mem::transmute(result) } } diff --git a/src/database/stream.rs b/src/database/stream.rs index a2a72e44..38c46596 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -16,19 +16,21 @@ use crate::{ Engine, Slice, }; -struct State<'a> { +pub(crate) struct State<'a> { inner: Inner<'a>, seek: bool, init: bool, } -trait Cursor<'a, T> { +pub(crate) trait Cursor<'a, T> { fn state(&self) -> &State<'a>; fn fetch(&self) -> Option; fn seek(&mut self); + fn init(self, from: From<'a>) -> Self; + fn get(&self) -> Option> { self.fetch() .map(Ok) @@ -45,7 +47,7 @@ type Inner<'a> = DBRawIteratorWithThreadMode<'a, Db>; type From<'a> = Option>; impl<'a> State<'a> { - fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { + pub(super) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { Self { inner: db.db.raw_iterator_cf_opt(&**cf, opts), init: true, @@ -53,7 +55,7 @@ impl<'a> State<'a> { } } - fn init_fwd(mut self, from: From<'_>) -> Self { + pub(super) fn init_fwd(mut self, from: From<'_>) -> Self { if let Some(key) = from { self.inner.seek(key); self.seek = true; @@ -62,7 +64,7 @@ impl<'a> State<'a> { self } - fn init_rev(mut self, from: From<'_>) -> Self { + pub(super) fn init_rev(mut self, from: From<'_>) -> Self { if let Some(key) = from { self.inner.seek_for_prev(key); self.seek = true; @@ -72,7 +74,7 @@ impl<'a> State<'a> { } #[inline] - fn seek_fwd(&mut self) { + pub(super) fn seek_fwd(&mut self) { if !exchange(&mut self.init, false) { self.inner.next(); } else if !self.seek { @@ -81,7 +83,7 @@ impl<'a> State<'a> { } #[inline] - fn 
seek_rev(&mut self) { + pub(super) fn seek_rev(&mut self) { if !exchange(&mut self.init, false) { self.inner.prev(); } else if !self.seek { diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index 54f8bc5c..77b08a0b 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -1,4 +1,4 @@ -use std::{pin::Pin, sync::Arc}; +use std::{convert, pin::Pin, sync::Arc}; use conduit::Result; use futures::{ @@ -16,9 +16,17 @@ pub(crate) struct Items<'a> { } impl<'a> Items<'a> { - pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions, from: From<'_>) -> Self { + pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { Self { - state: State::new(db, cf, opts).init_fwd(from), + state: State::new(db, cf, opts), + } + } +} + +impl<'a> convert::From> for Items<'a> { + fn from(state: State<'a>) -> Self { + Self { + state, } } } @@ -30,6 +38,13 @@ impl<'a> Cursor<'a, KeyVal<'a>> for Items<'a> { #[inline] fn seek(&mut self) { self.state.seek_fwd(); } + + #[inline] + fn init(self, from: From<'a>) -> Self { + Self { + state: self.state.init_fwd(from), + } + } } impl<'a> Stream for Items<'a> { diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index 26492db8..dfd3a107 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -1,4 +1,4 @@ -use std::{pin::Pin, sync::Arc}; +use std::{convert, pin::Pin, sync::Arc}; use conduit::Result; use futures::{ @@ -16,9 +16,17 @@ pub(crate) struct ItemsRev<'a> { } impl<'a> ItemsRev<'a> { - pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions, from: From<'_>) -> Self { + pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { Self { - state: State::new(db, cf, opts).init_rev(from), + state: State::new(db, cf, opts), + } + } +} + +impl<'a> convert::From> for ItemsRev<'a> { + fn from(state: State<'a>) -> Self { + Self { + state, } } } @@ -30,6 +38,13 @@ impl<'a> Cursor<'a, KeyVal<'a>> for ItemsRev<'a> { 
#[inline] fn seek(&mut self) { self.state.seek_rev(); } + + #[inline] + fn init(self, from: From<'a>) -> Self { + Self { + state: self.state.init_rev(from), + } + } } impl<'a> Stream for ItemsRev<'a> { diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index 91884c8d..2ce88959 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -1,4 +1,4 @@ -use std::{pin::Pin, sync::Arc}; +use std::{convert, pin::Pin, sync::Arc}; use conduit::Result; use futures::{ @@ -16,9 +16,17 @@ pub(crate) struct Keys<'a> { } impl<'a> Keys<'a> { - pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions, from: From<'_>) -> Self { + pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { Self { - state: State::new(db, cf, opts).init_fwd(from), + state: State::new(db, cf, opts), + } + } +} + +impl<'a> convert::From> for Keys<'a> { + fn from(state: State<'a>) -> Self { + Self { + state, } } } @@ -31,6 +39,13 @@ impl<'a> Cursor<'a, Key<'a>> for Keys<'a> { #[inline] fn seek(&mut self) { self.state.seek_fwd(); } + + #[inline] + fn init(self, from: From<'a>) -> Self { + Self { + state: self.state.init_fwd(from), + } + } } impl<'a> Stream for Keys<'a> { diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index 59f66c2e..12dae759 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -1,4 +1,4 @@ -use std::{pin::Pin, sync::Arc}; +use std::{convert, pin::Pin, sync::Arc}; use conduit::Result; use futures::{ @@ -16,9 +16,17 @@ pub(crate) struct KeysRev<'a> { } impl<'a> KeysRev<'a> { - pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions, from: From<'_>) -> Self { + pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { Self { - state: State::new(db, cf, opts).init_rev(from), + state: State::new(db, cf, opts), + } + } +} + +impl<'a> convert::From> for KeysRev<'a> { + fn from(state: State<'a>) -> Self { + Self { + state, } } } @@ -31,6 +39,13 @@ impl<'a> 
Cursor<'a, Key<'a>> for KeysRev<'a> { #[inline] fn seek(&mut self) { self.state.seek_rev(); } + + #[inline] + fn init(self, from: From<'a>) -> Self { + Self { + state: self.state.init_rev(from), + } + } } impl<'a> Stream for KeysRev<'a> { From b7df0a14c6dd0b56348beab220637129c81942d2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 2 Dec 2024 11:56:02 +0000 Subject: [PATCH 0322/1248] parallelize events_before and events_after in api/client/context Signed-off-by: Jason Volk --- src/api/client/context.rs | 109 ++++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 52 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index af4e26f0..acd7d80b 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -2,7 +2,7 @@ use std::iter::once; use axum::extract::State; use conduit::{ - at, err, + at, err, ref_at, utils::{ future::TryExtExt, stream::{BroadbandExt, ReadyExt, WidebandExt}, @@ -10,7 +10,7 @@ use conduit::{ }, Err, Result, }; -use futures::{future::try_join, StreamExt, TryFutureExt}; +use futures::{join, try_join, FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::client::{context::get_context, filter::LazyLoadOptions}, events::StateEventType, @@ -37,6 +37,7 @@ pub(crate) async fn get_context_route( let filter = &body.filter; let sender = body.sender(); let (sender_user, _) = sender; + let room_id = &body.room_id; // Use limit or else 10, with maximum 100 let limit: usize = body @@ -70,42 +71,63 @@ pub(crate) async fn get_context_route( .get_pdu(&body.event_id) .map_err(|_| err!(Request(NotFound("Base event not found.")))); - let (base_token, base_event) = try_join(base_token, base_event).await?; - - let room_id = &base_event.room_id; - - if !services + let visible = services .rooms .state_accessor - .user_can_see_event(sender_user, room_id, &body.event_id) - .await - { + .user_can_see_event(sender_user, &body.room_id, &body.event_id) + .map(Ok); + + let (base_token, base_event, visible) = 
try_join!(base_token, base_event, visible)?; + + if base_event.room_id != body.room_id { + return Err!(Request(NotFound("Base event not found."))); + } + + if !visible { return Err!(Request(Forbidden("You don't have permission to view this event."))); } - let events_before: Vec<_> = services + let events_before = services .rooms .timeline - .pdus_rev(Some(sender_user), room_id, Some(base_token)) - .await? - .ready_filter_map(|item| event_filter(item, filter)) - .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) - .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) - .take(limit / 2) - .collect() - .await; + .pdus_rev(Some(sender_user), room_id, Some(base_token)); - let events_after: Vec<_> = services + let events_after = services .rooms .timeline - .pdus(Some(sender_user), room_id, Some(base_token)) - .await? + .pdus(Some(sender_user), room_id, Some(base_token)); + + let (events_before, events_after) = try_join!(events_before, events_after)?; + + let events_before = events_before .ready_filter_map(|item| event_filter(item, filter)) .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) .take(limit / 2) - .collect() - .await; + .collect(); + + let events_after = events_after + .ready_filter_map(|item| event_filter(item, filter)) + .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) + .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) + .take(limit / 2) + .collect(); + + let (events_before, events_after): (Vec<_>, Vec<_>) = join!(events_before, events_after); + + let state_at = events_after + .last() + .map(ref_at!(1)) + .map_or(body.event_id.as_ref(), |e| e.event_id.as_ref()); + + let state_ids = services + .rooms + .state_accessor + .pdu_shortstatehash(state_at) + .or_else(|_| services.rooms.state.get_room_shortstatehash(room_id)) + .and_then(|shortstatehash| 
services.rooms.state_accessor.state_full_ids(shortstatehash)) + .map_err(|e| err!(Database("State not found: {e}"))) + .await?; let lazy = once(&(base_token, base_event.clone())) .chain(events_before.iter()) @@ -116,48 +138,31 @@ pub(crate) async fn get_context_route( }) .await; - let state_id = events_after - .last() - .map_or(body.event_id.as_ref(), |(_, e)| e.event_id.as_ref()); - - let shortstatehash = services - .rooms - .state_accessor - .pdu_shortstatehash(state_id) - .or_else(|_| services.rooms.state.get_room_shortstatehash(room_id)) - .await - .map_err(|e| err!(Database("State hash not found: {e}")))?; - - let state_ids = services - .rooms - .state_accessor - .state_full_ids(shortstatehash) - .await - .map_err(|e| err!(Database("State not found: {e}")))?; - let lazy = &lazy; let state: Vec<_> = state_ids - .into_iter() + .iter() .stream() .broad_filter_map(|(shortstatekey, event_id)| { services .rooms .short - .get_statekey_from_short(shortstatekey) + .get_statekey_from_short(*shortstatekey) .map_ok(move |(event_type, state_key)| (event_type, state_key, event_id)) .ok() }) .ready_filter_map(|(event_type, state_key, event_id)| { - if lazy_load_enabled && event_type == StateEventType::RoomMember { - let user_id: &UserId = state_key.as_str().try_into().ok()?; - if !lazy.contains(user_id) { - return None; - } + if !lazy_load_enabled || event_type != StateEventType::RoomMember { + return Some(event_id); } - Some(event_id) + state_key + .as_str() + .try_into() + .ok() + .filter(|&user_id: &&UserId| lazy.contains(user_id)) + .map(|_| event_id) }) - .broad_filter_map(|event_id: OwnedEventId| async move { services.rooms.timeline.get_pdu(&event_id).await.ok() }) + .broad_filter_map(|event_id: &OwnedEventId| services.rooms.timeline.get_pdu(event_id).ok()) .map(|pdu| pdu.to_state_event()) .collect() .await; From ef9b1c63036868f336894b86f00b29cad0f48673 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 2 Dec 2024 13:50:09 +0000 Subject: [PATCH 0323/1248] simplify 
sender shutdown; prevent launching any retries Signed-off-by: Jason Volk --- src/service/sending/sender.rs | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index b1e909c9..5c00915b 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -7,7 +7,7 @@ use std::{ use base64::{engine::general_purpose, Engine as _}; use conduit::{ - debug, debug_warn, err, error, + debug, err, error, result::LogErr, trace, utils::{calculate_hash, math::continue_exponential_backoff_secs, ReadyExt}, @@ -35,7 +35,6 @@ use ruma::{ RoomVersionId, ServerName, UInt, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use tokio::time::sleep_until; use super::{appservice, data::QueueItem, Destination, Msg, SendingEvent, Service}; @@ -81,7 +80,7 @@ impl Service { }, } } - self.finish_responses(&mut futures, &mut statuses).await; + self.finish_responses(&mut futures).await; Ok(()) } @@ -146,22 +145,26 @@ impl Service { } } - async fn finish_responses<'a>(&'a self, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus) { + async fn finish_responses<'a>(&'a self, futures: &mut SendingFutures<'a>) { + use tokio::{ + select, + time::{sleep_until, Instant}, + }; + let now = Instant::now(); let timeout = Duration::from_millis(CLEANUP_TIMEOUT_MS); let deadline = now.checked_add(timeout).unwrap_or(now); loop { trace!("Waiting for {} requests to complete...", futures.len()); - tokio::select! { - () = sleep_until(deadline.into()) => break, + select! 
{ + () = sleep_until(deadline) => return, response = futures.next() => match response { - Some(response) => self.handle_response(response, futures, statuses).await, + Some(Ok(dest)) => self.db.delete_all_active_requests_for(&dest).await, + Some(_) => continue, None => return, - } + }, } } - - debug_warn!("Leaving with {} unfinished requests...", futures.len()); } #[allow(clippy::needless_pass_by_ref_mut)] From 3109c0daba1115a4c58f46c9192d6ade8f078885 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 3 Dec 2024 06:34:56 +0000 Subject: [PATCH 0324/1248] perform async shutdown for database pool after services stop Signed-off-by: Jason Volk --- src/database/engine.rs | 5 +++-- src/database/pool.rs | 21 ++++++++++----------- src/router/run.rs | 12 +++++++++++- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index a6ed7d86..fe6602ae 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -148,6 +148,8 @@ impl Engine { .expect("column was created and exists") } + pub async fn shutdown_pool(&self) { self.pool.shutdown().await; } + pub fn flush(&self) -> Result<()> { result(DBCommon::flush_wal(&self.db, false)) } pub fn sync(&self) -> Result<()> { result(DBCommon::flush_wal(&self.db, true)) } @@ -325,8 +327,7 @@ impl Drop for Engine { fn drop(&mut self) { const BLOCKING: bool = true; - debug!("Shutting down request pool..."); - self.pool.close(); + debug_assert!(!self.pool.close(), "request pool was not closed"); debug!("Waiting for background tasks to finish..."); self.db.cancel_all_background_work(BLOCKING); diff --git a/src/database/pool.rs b/src/database/pool.rs index 136de67d..e7ffc807 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -76,10 +76,8 @@ pub(crate) async fn new(server: &Arc, opts: &Opts) -> Result> } #[implement(Pool)] -pub(crate) async fn _shutdown(self: &Arc) { - if !self.queue.is_closed() { - self.close(); - } +pub(crate) async fn shutdown(self: &Arc) { + 
self.close(); let workers = take(&mut *self.workers.lock().await); debug!(workers = workers.len(), "Waiting for workers to join..."); @@ -89,18 +87,19 @@ pub(crate) async fn _shutdown(self: &Arc) { } #[implement(Pool)] -pub(crate) fn close(&self) { - debug_assert!(!self.queue.is_closed(), "channel already closed"); +pub(crate) fn close(&self) -> bool { + if !self.queue.close() { + return false; + } + + std::thread::yield_now(); debug!( senders = self.queue.sender_count(), receivers = self.queue.receiver_count(), - "Closing pool channel" + "Closed pool channel" ); - let closing = self.queue.close(); - debug_assert!(closing, "channel is not closing"); - - std::thread::yield_now(); + true } #[implement(Pool)] diff --git a/src/router/run.rs b/src/router/run.rs index 395aa8c4..93b1339b 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -78,6 +78,11 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { // unload and explode. services.stop().await; + // Check that Services and Database will drop as expected, The complex of Arc's + // used for various components can easily lead to references being held + // somewhere improperly; this can hang shutdowns. + debug!("Cleaning up..."); + let db = Arc::downgrade(&services.db); if let Err(services) = Arc::try_unwrap(services) { debug_error!( "{} dangling references to Services after shutdown", @@ -85,7 +90,12 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { ); } - debug!("Cleaning up..."); + // The db threadpool requires async join if we use tokio/spawn_blocking to + // manage the threads. Without async-drop we have to wait here; for symmetry + // with Services construction it can't be done in services.stop(). 
+ if let Some(db) = db.upgrade() { + db.db.shutdown_pool().await; + } #[cfg(feature = "systemd")] sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]).expect("failed to notify systemd of stopping state"); From 9d9f403ad5f5656f8b1084a71fcba47a5ef03e5b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 3 Dec 2024 07:51:29 +0000 Subject: [PATCH 0325/1248] prevent adding presence timer for server's own user Signed-off-by: Jason Volk --- src/service/presence/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index b2106f3f..8e1521c5 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -137,7 +137,9 @@ impl Service { .set_presence(user_id, presence_state, currently_active, last_active_ago, status_msg) .await?; - if self.timeout_remote_users || self.services.globals.user_is_local(user_id) { + if (self.timeout_remote_users || self.services.globals.user_is_local(user_id)) + && user_id != self.services.globals.server_user + { let timeout = match presence_state { PresenceState::Online => self.services.server.config.presence_idle_timeout_s, _ => self.services.server.config.presence_offline_timeout_s, From c01b049910c15e751b8782d33048fc8210983a8e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 3 Dec 2024 07:35:48 +0000 Subject: [PATCH 0326/1248] move cidr_range_denylist from globals to client service Signed-off-by: Jason Volk --- src/service/client/mod.rs | 22 +++++++++++++++++++++- src/service/globals/mod.rs | 24 +----------------------- src/service/media/preview.rs | 4 ++-- src/service/pusher/mod.rs | 4 ++-- src/service/resolver/actual.rs | 2 +- src/service/resolver/mod.rs | 4 +--- 6 files changed, 28 insertions(+), 32 deletions(-) diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index f9a89e99..2794efc1 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Duration}; -use 
conduit::{Config, Result}; +use conduit::{err, implement, trace, Config, Result}; +use ipaddress::IPAddress; use reqwest::redirect; use crate::{resolver, service}; @@ -15,6 +16,8 @@ pub struct Service { pub sender: reqwest::Client, pub appservice: reqwest::Client, pub pusher: reqwest::Client, + + pub cidr_range_denylist: Vec, } impl crate::Service for Service { @@ -86,6 +89,14 @@ impl crate::Service for Service { .pool_idle_timeout(Duration::from_secs(config.pusher_idle_timeout)) .redirect(redirect::Policy::limited(2)) .build()?, + + cidr_range_denylist: config + .ip_range_denylist + .iter() + .map(IPAddress::parse) + .inspect(|cidr| trace!("Denied CIDR range: {cidr:?}")) + .collect::>() + .map_err(|e| err!(Config("ip_range_denylist", e)))?, })) } @@ -152,3 +163,12 @@ fn base(config: &Config) -> Result { Ok(builder) } } + +#[inline] +#[must_use] +#[implement(Service)] +pub fn valid_cidr_range(&self, ip: &IPAddress) -> bool { + self.cidr_range_denylist + .iter() + .all(|cidr| !cidr.includes(ip)) +} diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 3eefe4b7..4fb1ce2d 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,9 +7,8 @@ use std::{ time::Instant, }; -use conduit::{err, error, trace, Config, Result}; +use conduit::{error, Config, Result}; use data::Data; -use ipaddress::IPAddress; use regex::RegexSet; use ruma::{ OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, RoomVersionId, ServerName, UserId, @@ -22,7 +21,6 @@ pub struct Service { pub db: Data, pub config: Config, - pub cidr_range_denylist: Vec, jwt_decoding_key: Option, pub stable_room_versions: Vec, pub unstable_room_versions: Vec, @@ -59,14 +57,6 @@ impl crate::Service for Service { // Experimental, partially supported room versions let unstable_room_versions = vec![RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; - let cidr_range_denylist: Vec<_> = config - .ip_range_denylist - .iter() - 
.map(IPAddress::parse) - .inspect(|cidr| trace!("Denied CIDR range: {cidr:?}")) - .collect::>() - .map_err(|e| err!(Config("ip_range_denylist", e)))?; - let turn_secret = config .turn_secret_file .as_ref() @@ -95,7 +85,6 @@ impl crate::Service for Service { let mut s = Self { db, config: config.clone(), - cidr_range_denylist, jwt_decoding_key, stable_room_versions, unstable_room_versions, @@ -255,17 +244,6 @@ impl Service { } } - #[inline] - pub fn valid_cidr_range(&self, ip: &IPAddress) -> bool { - for cidr in &self.cidr_range_denylist { - if cidr.includes(ip) { - return false; - } - } - - true - } - /// checks if `user_id` is local to us via server_name comparison #[inline] pub fn user_is_local(&self, user_id: &UserId) -> bool { self.server_is_ours(user_id.server_name()) } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index acc9d8ed..eb9be560 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -87,7 +87,7 @@ pub async fn get_url_preview(&self, url: &Url) -> Result { #[implement(Service)] async fn request_url_preview(&self, url: &Url) -> Result { if let Ok(ip) = IPAddress::parse(url.host_str().expect("URL previously validated")) { - if !self.services.globals.valid_cidr_range(&ip) { + if !self.services.client.valid_cidr_range(&ip) { return Err!(BadServerResponse("Requesting from this address is forbidden")); } } @@ -97,7 +97,7 @@ async fn request_url_preview(&self, url: &Url) -> Result { if let Some(remote_addr) = response.remote_addr() { if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { - if !self.services.globals.valid_cidr_range(&ip) { + if !self.services.client.valid_cidr_range(&ip) { return Err!(BadServerResponse("Requesting from this address is forbidden")); } } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 6b02c7f8..df5852c6 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -151,7 +151,7 @@ impl Service { if let Some(url_host) = 
reqwest_request.url().host_str() { trace!("Checking request URL for IP"); if let Ok(ip) = IPAddress::parse(url_host) { - if !self.services.globals.valid_cidr_range(&ip) { + if !self.services.client.valid_cidr_range(&ip) { return Err!(BadServerResponse("Not allowed to send requests to this IP")); } } @@ -166,7 +166,7 @@ impl Service { trace!("Checking response destination's IP"); if let Some(remote_addr) = response.remote_addr() { if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { - if !self.services.globals.valid_cidr_range(&ip) { + if !self.services.client.valid_cidr_range(&ip) { return Err!(BadServerResponse("Not allowed to send requests to this IP")); } } diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 6589a58b..8553e8bb 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -358,7 +358,7 @@ impl super::Service { } pub(crate) fn validate_ip(&self, ip: &IPAddress) -> Result<()> { - if !self.services.globals.valid_cidr_range(ip) { + if !self.services.client.valid_cidr_range(ip) { return Err!(BadServerResponse("Not allowed to send requests to this IP")); } diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 28b7063d..111de292 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -9,7 +9,7 @@ use std::{fmt::Write, sync::Arc}; use conduit::{Result, Server}; use self::{cache::Cache, dns::Resolver}; -use crate::{client, globals, Dep}; +use crate::{client, Dep}; pub struct Service { pub cache: Arc, @@ -20,7 +20,6 @@ pub struct Service { struct Services { server: Arc, client: Dep, - globals: Dep, } impl crate::Service for Service { @@ -33,7 +32,6 @@ impl crate::Service for Service { services: Services { server: args.server.clone(), client: args.depend::("client"), - globals: args.depend::("globals"), }, })) } From 48703173bc2db590bfd9aeaf79b1248bc2a8327f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 3 Dec 2024 10:42:52 +0000 Subject: 
[PATCH 0327/1248] split get_batch from get.rs; add aqry_batch Signed-off-by: Jason Volk --- src/database/map.rs | 1 + src/database/map/get.rs | 59 +++++------------------------------ src/database/map/get_batch.rs | 57 +++++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 52 deletions(-) create mode 100644 src/database/map/get_batch.rs diff --git a/src/database/map.rs b/src/database/map.rs index 4b55fa54..a15d5e9d 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,6 +1,7 @@ mod contains; mod count; mod get; +mod get_batch; mod insert; mod keys; mod keys_from; diff --git a/src/database/map/get.rs b/src/database/map/get.rs index ef552177..04f5d0ae 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,13 +1,8 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; -use conduit::{ - err, implement, - utils::{result::MapExpect, IterStream}, - Err, Result, -}; -use futures::{future, Future, FutureExt, Stream, StreamExt}; -use rocksdb::DBPinnableSlice; +use conduit::{err, implement, utils::result::MapExpect, Err, Result}; +use futures::{future, Future, FutureExt}; use serde::Serialize; use crate::{ @@ -17,8 +12,6 @@ use crate::{ Handle, }; -type RocksdbResult<'a> = Result>, rocksdb::Error>; - /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is serialized into an allocated buffer to perform /// the query. 
@@ -58,18 +51,6 @@ where self.get(key) } -#[implement(super::Map)] -#[tracing::instrument(skip(self, keys), fields(%self), level = "trace")] -pub fn get_batch<'a, I, K>(self: &'a Arc, keys: I) -> impl Stream>> + Send + 'a -where - I: Iterator + ExactSizeIterator + Debug + Send + 'a, - K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, -{ - keys.stream() - .map(move |key| self.get(key)) - .buffered(self.db.server.config.db_pool_workers.saturating_mul(2)) -} - /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. #[implement(super::Map)] @@ -95,25 +76,6 @@ where self.db.pool.execute_get(cmd).boxed() } -#[implement(super::Map)] -#[tracing::instrument(skip(self, keys), name = "batch_blocking", level = "trace")] -pub(crate) fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator>> + Send -where - I: Iterator + ExactSizeIterator + Debug + Send, - K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, -{ - // Optimization can be `true` if key vector is pre-sorted **by the column - // comparator**. - const SORTED: bool = false; - - let read_options = &self.read_options; - self.db - .db - .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) - .into_iter() - .map(into_result_handle) -} - /// Fetch a value from the database into cache, returning a reference-handle. /// The key is referenced directly to perform the query. This is a thread- /// blocking call. @@ -123,12 +85,12 @@ pub fn get_blocking(&self, key: &K) -> Result> where K: AsRef<[u8]> + ?Sized, { - let res = self + self.db .db - .db - .get_pinned_cf_opt(&self.cf(), key, &self.read_options); - - into_result_handle(res) + .get_pinned_cf_opt(&self.cf(), key, &self.read_options) + .map_err(map_err)? + .map(Handle::from) + .ok_or(err!(Request(NotFound("Not found in database")))) } /// Fetch a value from the cache without I/O. 
@@ -157,10 +119,3 @@ where Err(e) => or_else(e), } } - -fn into_result_handle(result: RocksdbResult<'_>) -> Result> { - result - .map_err(map_err)? - .map(Handle::from) - .ok_or(err!(Request(NotFound("Not found in database")))) -} diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs new file mode 100644 index 00000000..0f1fdea7 --- /dev/null +++ b/src/database/map/get_batch.rs @@ -0,0 +1,57 @@ +use std::{convert::AsRef, fmt::Debug, sync::Arc}; + +use conduit::{err, implement, utils::IterStream, Result}; +use futures::{Stream, StreamExt}; +use serde::Serialize; + +use crate::{util::map_err, Handle}; + +#[implement(super::Map)] +#[tracing::instrument(skip(self, keys), level = "trace")] +pub fn aqry_batch<'b, 'a: 'b, const MAX: usize, I, K>( + self: &'a Arc, keys: I, +) -> impl Stream>> + Send + 'a +where + I: Iterator + Send + 'a, + K: Serialize + ?Sized + Debug + 'b, +{ + keys.stream() + .map(move |key| self.aqry::(&key)) + .buffered(self.db.server.config.db_pool_workers.saturating_mul(2)) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, keys), level = "trace")] +pub fn get_batch<'a, I, K>(self: &'a Arc, keys: I) -> impl Stream>> + Send + 'a +where + I: Iterator + Debug + Send + 'a, + K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, +{ + keys.stream() + .map(move |key| self.get(key)) + .buffered(self.db.server.config.db_pool_workers.saturating_mul(2)) +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, keys), name = "batch_blocking", level = "trace")] +pub(crate) fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator>> + Send +where + I: Iterator + ExactSizeIterator + Debug + Send, + K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, +{ + // Optimization can be `true` if key vector is pre-sorted **by the column + // comparator**. 
+ const SORTED: bool = false; + + let read_options = &self.read_options; + self.db + .db + .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) + .into_iter() + .map(|result| { + result + .map_err(map_err)? + .map(Handle::from) + .ok_or(err!(Request(NotFound("Not found in database")))) + }) +} From 784ccd6bad25f845532bc3a6e82c5031c50c6444 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 3 Dec 2024 09:42:26 +0000 Subject: [PATCH 0328/1248] return stream from multi_get_eventid_from_short Signed-off-by: Jason Volk --- src/service/rooms/auth_chain/mod.rs | 15 +++++---- src/service/rooms/short/mod.rs | 14 +++----- src/service/rooms/state/mod.rs | 13 +++---- src/service/rooms/state_accessor/data.rs | 43 +++++++++--------------- src/service/rooms/state_accessor/mod.rs | 10 +++--- 5 files changed, 39 insertions(+), 56 deletions(-) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 5face0b5..e7e5edf4 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -6,7 +6,11 @@ use std::{ sync::Arc, }; -use conduit::{debug, debug_error, trace, utils::IterStream, validated, warn, Err, Result}; +use conduit::{ + debug, debug_error, trace, + utils::{stream::ReadyExt, IterStream}, + validated, warn, Err, Result, +}; use futures::{Stream, StreamExt}; use ruma::{EventId, RoomId}; @@ -61,11 +65,10 @@ impl Service { let event_ids = self .services .short - .multi_get_eventid_from_short(chain.into_iter()) - .await - .into_iter() - .filter_map(Result::ok) - .collect(); + .multi_get_eventid_from_short(chain.iter()) + .ready_filter_map(Result::ok) + .collect() + .await; Ok(event_ids) } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 0f100348..a7c32856 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -139,7 +139,7 @@ pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &s #[implement(Service)] pub async fn 
get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { const BUFSIZE: usize = size_of::(); @@ -153,22 +153,18 @@ where } #[implement(Service)] -pub async fn multi_get_eventid_from_short(&self, shorteventid: I) -> Vec> +pub fn multi_get_eventid_from_short<'a, Id, I>(&'a self, shorteventid: I) -> impl Stream> + Send + 'a where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + I: Iterator + Send + 'a, + Id: for<'de> Deserialize<'de> + Sized + ToOwned + 'a, ::Owned: Borrow, - I: Iterator + Send, { const BUFSIZE: usize = size_of::(); - let keys: Vec<[u8; BUFSIZE]> = shorteventid.map(u64::to_be_bytes).collect(); - self.db .shorteventid_eventid - .get_batch(keys.iter()) + .aqry_batch::(shorteventid) .map(Deserialized::deserialized) - .collect() - .await } #[implement(Service)] diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 838deacd..d0d21fa8 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use conduit::{ - at, err, + err, result::FlatOk, utils::{ calculate_hash, @@ -420,7 +420,7 @@ impl Service { .collect() .await; - let auth_state: Vec<_> = self + let (state_keys, event_ids): (Vec<_>, Vec<_>) = self .services .state_accessor .state_full_shortids(shortstatehash) @@ -432,16 +432,13 @@ impl Service { .remove(&shortstatekey) .map(|(event_type, state_key)| ((event_type, state_key), shorteventid)) }) - .collect(); + .unzip(); let auth_pdus = self .services .short - .multi_get_eventid_from_short(auth_state.iter().map(at!(1))) - .await - .into_iter() - .stream() - .zip(auth_state.into_iter().stream().map(at!(0))) + .multi_get_eventid_from_short(event_ids.iter()) + .zip(state_keys.into_iter().stream()) .ready_filter_map(|(event_id, tsk)| Some((tsk, event_id.ok()?))) .broad_filter_map(|(tsk, event_id): (_, OwnedEventId)| async 
move { self.services diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 2a670066..bca54069 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,12 +1,12 @@ use std::{borrow::Borrow, collections::HashMap, sync::Arc}; use conduit::{ - at, err, - utils::stream::{BroadbandExt, IterStream}, + at, err, ref_at, + utils::stream::{BroadbandExt, IterStream, ReadyExt}, PduEvent, Result, }; use database::{Deserialized, Map}; -use futures::{StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{events::StateEventType, EventId, OwnedEventId, RoomId}; use serde::Deserialize; @@ -59,23 +59,13 @@ impl Data { } pub(super) async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result> { - let short_ids = self - .state_full_shortids(shortstatehash) - .await? - .into_iter() - .map(at!(1)); + let short_ids = self.state_full_shortids(shortstatehash).await?; - let event_ids = self + let full_pdus = self .services .short - .multi_get_eventid_from_short(short_ids) - .await - .into_iter() - .filter_map(Result::ok); - - let full_pdus = event_ids - .into_iter() - .stream() + .multi_get_eventid_from_short(short_ids.iter().map(ref_at!(1))) + .ready_filter_map(Result::ok) .broad_filter_map( |event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await.ok() }, ) @@ -92,19 +82,16 @@ impl Data { { let short_ids = self.state_full_shortids(shortstatehash).await?; - let event_ids = self + let full_ids = self .services .short - .multi_get_eventid_from_short(short_ids.iter().map(at!(1))) + .multi_get_eventid_from_short(short_ids.iter().map(ref_at!(1))) + .zip(short_ids.iter().stream().map(at!(0))) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) + .collect() + .boxed() .await; - let full_ids = short_ids - .into_iter() - .map(at!(0)) - .zip(event_ids.into_iter()) - .filter_map(|(shortstatekey, 
event_id)| Some((shortstatekey, event_id.ok()?))) - .collect(); - Ok(full_ids) } @@ -134,7 +121,7 @@ impl Data { &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { let shortstatekey = self @@ -219,7 +206,7 @@ impl Data { &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, ) -> Result where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { self.services diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index e42d3764..ef1b63f5 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -102,7 +102,7 @@ impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
#[tracing::instrument(skip(self), level = "debug")] - pub async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result> + pub async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result> where Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, ::Owned: Borrow, @@ -130,7 +130,7 @@ impl Service { &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { self.db @@ -154,7 +154,7 @@ impl Service { &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result where - T: for<'de> Deserialize<'de> + Send, + T: for<'de> Deserialize<'de>, { self.state_get(shortstatehash, event_type, state_key) .await @@ -337,7 +337,7 @@ impl Service { &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, ) -> Result where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { self.db @@ -359,7 +359,7 @@ impl Service { &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, ) -> Result where - T: for<'de> Deserialize<'de> + Send, + T: for<'de> Deserialize<'de>, { self.room_state_get(room_id, event_type, state_key) .await From e0494c1538526771695e63aa9dc410207e748704 Mon Sep 17 00:00:00 2001 From: morguldir Date: Sat, 30 Nov 2024 14:18:46 -0500 Subject: [PATCH 0329/1248] add /bin/conduit to OCI image contents Signed-off-by: strawberry --- nix/pkgs/oci-image/default.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 9b641310..152e00d1 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -14,6 +14,7 @@ dockerTools.buildLayeredImage { created = "@${toString inputs.self.lastModified}"; contents = [ dockerTools.caCertificates + main ]; config = { Entrypoint = 
if !stdenv.hostPlatform.isDarwin From 9db0325b42bb5afe4f32c5c9f47a2b7342c844a5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 30 Nov 2024 14:43:14 -0500 Subject: [PATCH 0330/1248] bump rust to 1.83.0 Signed-off-by: strawberry --- Cargo.toml | 2 +- flake.nix | 2 +- rust-toolchain.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0e8596f7..efb06a5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.82.0" +rust-version = "1.83.0" version = "0.5.0" [workspace.metadata.crane] diff --git a/flake.nix b/flake.nix index 113757a7..281052a8 100644 --- a/flake.nix +++ b/flake.nix @@ -27,7 +27,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-yMuSb5eQPO/bHv+Bcf/US8LVMbf/G/0MSfiPwBhiPpk="; + sha256 = "sha256-s1RPtyvDGJaX/BisLT+ifVfuhDT1nZkZ1NcK8sbwELM="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 086f7ba0..ddd952a2 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.82.0" +channel = "1.83.0" profile = "minimal" components = [ # For rust-analyzer From 513236b3cec834e3f38939a1ce04574cf16bcbe1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 4 Dec 2024 00:00:00 +0000 Subject: [PATCH 0331/1248] bump ruma for async state-res optimizations Signed-off-by: Jason Volk --- Cargo.lock | 30 +++++++++---------- Cargo.toml | 2 +- .../rooms/event_handler/resolve_state.rs | 2 +- .../rooms/event_handler/state_at_incoming.rs | 1 + 4 files changed, 17 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 49c4127e..d787739e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -3171,7 +3171,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "assign", "js_int", @@ -3193,7 +3193,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "js_int", "ruma-common", @@ -3205,7 +3205,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "as_variant", "assign", @@ -3228,7 +3228,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "as_variant", "base64 0.22.1", @@ -3258,7 +3258,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -3282,7 +3282,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "bytes", "http", @@ -3300,7 +3300,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "js_int", "thiserror 2.0.3", @@ -3309,7 +3309,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "js_int", "ruma-common", @@ -3319,10 +3319,9 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "cfg-if", - "once_cell", "proc-macro-crate", "proc-macro2", "quote", @@ -3335,7 +3334,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "js_int", "ruma-common", @@ -3347,7 +3346,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "headers", "http", @@ -3360,7 +3359,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ 
"base64 0.22.1", "ed25519-dalek", @@ -3376,10 +3375,9 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=97e2fb6df13f65532d33fc2f0f097ad5a449dd70#97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" dependencies = [ "futures-util", - "itertools 0.13.0", "js_int", "ruma-common", "ruma-events", diff --git a/Cargo.toml b/Cargo.toml index efb06a5b..673cab09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -334,7 +334,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "97e2fb6df13f65532d33fc2f0f097ad5a449dd70" +rev = "1a550585bf025cce48ef8b734339245092bc986e" features = [ "compat", "rand", diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index dc0edd13..81cf7733 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -65,7 +65,6 @@ pub async fn resolve_state( .collect() }) .collect() - .boxed() .await; debug!("Resolving state"); @@ -74,6 +73,7 @@ pub async fn resolve_state( let event_fetch = |event_id| self.event_fetch(event_id); let event_exists = |event_id| self.event_exists(event_id); let state = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) + .boxed() .await .map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 05a9d8ca..96ee9907 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -153,6 +153,7 @@ pub(super) async fn state_at_incoming_resolved( let event_fetch = |event_id| 
self.event_fetch(event_id); let event_exists = |event_id| self.event_exists(event_id); let result = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) + .boxed() .await .map_err(|e| err!(Database(warn!(?e, "State resolution on prev events failed.")))); From c2d97aaa5ea4d304cb76d9d0ec31f3b9e1529043 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 4 Dec 2024 01:23:36 +0000 Subject: [PATCH 0332/1248] increase default db pool worker count for large systems Signed-off-by: Jason Volk --- conduwuit-example.toml | 3 ++- src/core/config/mod.rs | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index a0f05ebb..aee29f92 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1341,7 +1341,8 @@ # Sets the number of worker threads in the frontend-pool of the database. # This number should reflect the I/O capabilities of the system, # specifically the queue-depth or the number of simultaneous requests in -# flight. Defaults to 32 or number of CPU cores, whichever is greater. +# flight. Defaults to 32 or four times the number of CPU cores, whichever +# is greater. # #db_pool_workers = 32 diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index edbb7c26..cb8f3b87 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1503,7 +1503,8 @@ pub struct Config { /// Sets the number of worker threads in the frontend-pool of the database. /// This number should reflect the I/O capabilities of the system, /// specifically the queue-depth or the number of simultaneous requests in - /// flight. Defaults to 32 or number of CPU cores, whichever is greater. + /// flight. Defaults to 32 or four times the number of CPU cores, whichever + /// is greater. 
/// default: 32 #[serde(default = "default_db_pool_workers")] pub db_pool_workers: usize, @@ -2280,6 +2281,6 @@ fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_p fn default_trusted_server_batch_size() -> usize { 256 } -fn default_db_pool_workers() -> usize { sys::available_parallelism().max(32) } +fn default_db_pool_workers() -> usize { sys::available_parallelism().saturating_mul(4).max(32) } fn default_db_pool_queue_size() -> usize { sys::available_parallelism().saturating_mul(8).max(256) } From 59d5e3ebf1790e4dece9089bb3bce90cb976ffc3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 4 Dec 2024 02:11:43 +0000 Subject: [PATCH 0333/1248] additional stream extensions for any/all additional stream extension TryBroadbandExt Signed-off-by: Jason Volk --- src/core/utils/stream/broadband.rs | 55 +++++++++++++++++++++++++- src/core/utils/stream/iter_stream.rs | 11 +++++- src/core/utils/stream/mod.rs | 2 + src/core/utils/stream/ready.rs | 14 ++++++- src/core/utils/stream/try_broadband.rs | 43 ++++++++++++++++++++ src/core/utils/stream/wideband.rs | 1 - 6 files changed, 121 insertions(+), 5 deletions(-) create mode 100644 src/core/utils/stream/try_broadband.rs diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs index ce17830c..37416d63 100644 --- a/src/core/utils/stream/broadband.rs +++ b/src/core/utils/stream/broadband.rs @@ -1,5 +1,4 @@ //! 
Broadband stream combinator extensions to futures::Stream -#![allow(clippy::type_complexity)] use std::convert::identity; @@ -18,6 +17,18 @@ pub trait BroadbandExt where Self: Stream + Send + Sized, { + fn broadn_all(self, n: N, f: F) -> impl Future + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future + Send; + + fn broadn_any(self, n: N, f: F) -> impl Future + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future + Send; + /// Concurrent filter_map(); unordered results fn broadn_filter_map(self, n: N, f: F) -> impl Stream + Send where @@ -33,6 +44,24 @@ where Fut: Future + Send, U: Send; + #[inline] + fn broad_all(self, f: F) -> impl Future + Send + where + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + { + self.broadn_all(None, f) + } + + #[inline] + fn broad_any(self, f: F) -> impl Future + Send + where + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + { + self.broadn_any(None, f) + } + #[inline] fn broad_filter_map(self, f: F) -> impl Stream + Send where @@ -58,6 +87,30 @@ impl BroadbandExt for S where S: Stream + Send + Sized, { + #[inline] + fn broadn_all(self, n: N, f: F) -> impl Future + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + { + self.map(f) + .buffer_unordered(n.into().unwrap_or(WIDTH)) + .ready_all(identity) + } + + #[inline] + fn broadn_any(self, n: N, f: F) -> impl Future + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Future + Send, + { + self.map(f) + .buffer_unordered(n.into().unwrap_or(WIDTH)) + .ready_any(identity) + } + #[inline] fn broadn_filter_map(self, n: N, f: F) -> impl Stream + Send where diff --git a/src/core/utils/stream/iter_stream.rs b/src/core/utils/stream/iter_stream.rs index 69edf64f..2face4b0 100644 --- a/src/core/utils/stream/iter_stream.rs +++ b/src/core/utils/stream/iter_stream.rs @@ -4,12 +4,16 @@ use futures::{ StreamExt, }; +use crate::{Error, Result}; + pub trait IterStream { /// Convert an Iterator into a Stream fn stream(self) -> 
impl Stream::Item> + Send; /// Convert an Iterator into a TryStream - fn try_stream(self) -> impl TryStream::Item, Error = crate::Error> + Send; + fn try_stream( + self, + ) -> impl TryStream::Item, Error = Error, Item = Result<::Item, Error>> + Send; } impl IterStream for I @@ -21,7 +25,10 @@ where fn stream(self) -> impl Stream::Item> + Send { stream::iter(self) } #[inline] - fn try_stream(self) -> impl TryStream::Item, Error = crate::Error> + Send { + fn try_stream( + self, + ) -> impl TryStream::Item, Error = Error, Item = Result<::Item, Error>> + Send + { self.stream().map(Ok) } } diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 45c2110d..c9138116 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -5,6 +5,7 @@ mod ignore; mod iter_stream; mod ready; mod tools; +mod try_broadband; mod try_ready; mod wideband; @@ -15,5 +16,6 @@ pub use ignore::TryIgnore; pub use iter_stream::IterStream; pub use ready::ReadyExt; pub use tools::Tools; +pub use try_broadband::TryBroadbandExt; pub use try_ready::TryReadyExt; pub use wideband::WidebandExt; diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index f4eec7d1..9bba589e 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -3,7 +3,7 @@ use futures::{ future::{ready, Ready}, - stream::{Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile}, + stream::{All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile}, }; /// Synchronous combinators to augment futures::StreamExt. 
Most Stream @@ -16,6 +16,10 @@ pub trait ReadyExt where Self: Stream + Send + Sized, { + fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> + where + F: Fn(Item) -> bool; + fn ready_any(self, f: F) -> Any, impl FnMut(Item) -> Ready> where F: Fn(Item) -> bool; @@ -66,6 +70,14 @@ impl ReadyExt for S where S: Stream + Send + Sized, { + #[inline] + fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> + where + F: Fn(Item) -> bool, + { + self.all(move |t| ready(f(t))) + } + #[inline] fn ready_any(self, f: F) -> Any, impl FnMut(Item) -> Ready> where diff --git a/src/core/utils/stream/try_broadband.rs b/src/core/utils/stream/try_broadband.rs new file mode 100644 index 00000000..59c488e0 --- /dev/null +++ b/src/core/utils/stream/try_broadband.rs @@ -0,0 +1,43 @@ +//! Synchronous combinator extensions to futures::TryStream + +use futures::{TryFuture, TryStream, TryStreamExt}; + +use crate::Result; + +const WIDTH: usize = 32; + +/// Concurrency extensions to augment futures::TryStreamExt. broad_ combinators +/// produce out-of-order +pub trait TryBroadbandExt +where + Self: TryStream> + Send + Sized, +{ + fn broadn_and_then(self, n: N, f: F) -> impl TryStream> + Send + where + N: Into>, + F: Fn(Self::Ok) -> Fut + Send + Sync, + Fut: TryFuture> + Send; + + fn broad_and_then(self, f: F) -> impl TryStream> + Send + where + F: Fn(Self::Ok) -> Fut + Send + Sync, + Fut: TryFuture> + Send, + { + self.broadn_and_then(None, f) + } +} + +impl TryBroadbandExt for S +where + S: TryStream> + Send + Sized, +{ + fn broadn_and_then(self, n: N, f: F) -> impl TryStream> + Send + where + N: Into>, + F: Fn(Self::Ok) -> Fut + Send + Sync, + Fut: TryFuture> + Send, + { + self.map_ok(f) + .try_buffer_unordered(n.into().unwrap_or(WIDTH)) + } +} diff --git a/src/core/utils/stream/wideband.rs b/src/core/utils/stream/wideband.rs index 100990b8..053a351f 100644 --- a/src/core/utils/stream/wideband.rs +++ b/src/core/utils/stream/wideband.rs @@ -1,5 +1,4 @@ //! 
Wideband stream combinator extensions to futures::Stream -#![allow(clippy::type_complexity)] use std::convert::identity; From 1d0285102825c903f90ceddfe3cb8763d8707dc4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 4 Dec 2024 00:00:40 +0000 Subject: [PATCH 0334/1248] implement several broadband loops Signed-off-by: Jason Volk --- src/api/client/relations.rs | 7 +++---- src/api/client/sync/mod.rs | 7 +++++-- src/api/server/send_join.rs | 20 +++++++++++++------ .../rooms/event_handler/resolve_state.rs | 12 +++++++---- .../rooms/event_handler/state_at_incoming.rs | 9 +++++++-- src/service/rooms/pdu_metadata/data.rs | 7 +++++-- src/service/rooms/threads/mod.rs | 7 +++++-- 7 files changed, 47 insertions(+), 22 deletions(-) diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 902e6be6..de54c4e4 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,10 +1,10 @@ use axum::extract::State; use conduit::{ at, - utils::{result::FlatOk, IterStream, ReadyExt}, + utils::{result::FlatOk, stream::WidebandExt, IterStream, ReadyExt}, PduCount, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::StreamExt; use ruma::{ api::{ client::relations::{ @@ -138,11 +138,10 @@ async fn paginate_relations_with_filter( .is_none_or(|rel_type| pdu.relation_type_equal(rel_type)) }) .stream() - .filter_map(|item| visibility_filter(services, sender_user, item)) .ready_take_while(|(count, _)| Some(*count) != to) + .wide_filter_map(|item| visibility_filter(services, sender_user, item)) .take(limit) .collect() - .boxed() .await; let next_batch = match dir { diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index ba50d77c..6f791860 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -1,7 +1,10 @@ mod v3; mod v4; -use conduit::{utils::ReadyExt, PduCount}; +use conduit::{ + utils::stream::{BroadbandExt, ReadyExt}, + PduCount, +}; use futures::StreamExt; use ruma::{RoomId, UserId}; @@ -55,7 +58,7 @@ 
async fn share_encrypted_room( .state_cache .get_shared_rooms(sender_user, user_id) .ready_filter(|&room_id| Some(room_id) != ignore_room) - .any(|other_room_id| { + .broad_any(|other_room_id| { services .rooms .state_accessor diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 92ab3b50..d1574e62 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -3,7 +3,12 @@ use std::{borrow::Borrow, collections::HashMap}; use axum::extract::State; -use conduit::{err, pdu::gen_event_id_canonical_json, utils::IterStream, warn, Error, Result}; +use conduit::{ + err, + pdu::gen_event_id_canonical_json, + utils::stream::{IterStream, TryBroadbandExt}, + warn, Error, Result, +}; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ api::{client::error::ErrorKind, federation::membership::create_join_event}, @@ -160,6 +165,7 @@ async fn create_join_event( .rooms .event_handler .handle_incoming_pdu(&origin, room_id, &event_id, value.clone(), true) + .boxed() .await? .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; @@ -172,16 +178,17 @@ async fn create_join_event( .await?; let state = state_ids - .iter() + .values() .try_stream() - .and_then(|(_, event_id)| services.rooms.timeline.get_pdu_json(event_id)) - .and_then(|pdu| { + .broad_and_then(|event_id| services.rooms.timeline.get_pdu_json(event_id)) + .broad_and_then(|pdu| { services .sending .convert_to_outgoing_federation_event(pdu) .map(Ok) }) .try_collect() + .boxed() .await?; let starting_events = state_ids.values().map(Borrow::borrow); @@ -191,14 +198,15 @@ async fn create_join_event( .event_ids_iter(room_id, starting_events) .await? 
.map(Ok) - .and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) - .and_then(|pdu| { + .broad_and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) + .broad_and_then(|pdu| { services .sending .convert_to_outgoing_federation_event(pdu) .map(Ok) }) .try_collect() + .boxed() .await?; services.sending.send_pdu_room(room_id, &pdu_id).await?; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 81cf7733..3329a146 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -4,7 +4,11 @@ use std::{ sync::Arc, }; -use conduit::{debug, err, implement, utils::IterStream, Result}; +use conduit::{ + debug, err, implement, + utils::stream::{IterStream, WidebandExt}, + Result, +}; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ state_res::{self, StateMap}, @@ -52,11 +56,11 @@ pub async fn resolve_state( let fork_states: Vec>> = fork_states .into_iter() .stream() - .then(|fork_state| { + .wide_then(|fork_state| { fork_state .into_iter() .stream() - .filter_map(|(k, id)| { + .wide_filter_map(|(k, id)| { self.services .short .get_statekey_from_short(k) @@ -83,7 +87,7 @@ pub async fn resolve_state( let state_events: Vec<_> = state .iter() .stream() - .then(|((event_type, state_key), event_id)| { + .wide_then(|((event_type, state_key), event_id)| { self.services .short .get_or_create_shortstatekey(event_type, state_key) diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 96ee9907..9b30a830 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -4,7 +4,12 @@ use std::{ sync::Arc, }; -use conduit::{debug, err, implement, result::LogErr, utils::IterStream, PduEvent, Result}; +use conduit::{ + debug, err, implement, + result::LogErr, 
+ utils::stream::{BroadbandExt, IterStream}, + PduEvent, Result, +}; use futures::{FutureExt, StreamExt}; use ruma::{ state_res::{self, StateMap}, @@ -166,7 +171,7 @@ pub(super) async fn state_at_incoming_resolved( new_state .iter() .stream() - .then(|((event_type, state_key), event_id)| { + .broad_then(|((event_type, state_key), event_id)| { self.services .short .get_or_create_shortstatekey(event_type, state_key) diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index b06e988e..3d05a1c8 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -3,7 +3,10 @@ use std::{mem::size_of, sync::Arc}; use arrayvec::ArrayVec; use conduit::{ result::LogErr, - utils::{stream::TryIgnore, u64_from_u8, ReadyExt}, + utils::{ + stream::{TryIgnore, WidebandExt}, + u64_from_u8, ReadyExt, + }, PduCount, PduEvent, }; use database::Map; @@ -67,7 +70,7 @@ impl Data { .ready_take_while(move |key| key.starts_with(&target.to_be_bytes())) .map(|to_from| u64_from_u8(&to_from[8..16])) .map(PduCount::from_unsigned) - .filter_map(move |shorteventid| async move { + .wide_filter_map(move |shorteventid| async move { let pdu_id: RawPduId = PduId { shortroomid, shorteventid, diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index 5821f279..a304e482 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -2,7 +2,10 @@ use std::{collections::BTreeMap, sync::Arc}; use conduit::{ err, - utils::{stream::TryIgnore, ReadyExt}, + utils::{ + stream::{TryIgnore, WidebandExt}, + ReadyExt, + }, PduCount, PduEvent, PduId, RawPduId, Result, }; use database::{Deserialized, Map}; @@ -143,7 +146,7 @@ impl Service { .ignore_err() .map(RawPduId::from) .ready_take_while(move |pdu_id| pdu_id.shortroomid() == shortroomid.to_be_bytes()) - .filter_map(move |pdu_id| async move { + .wide_filter_map(move |pdu_id| async move { let mut pdu = 
self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; let pdu_id: PduId = pdu_id.into(); From 68afdb22c7e4b0a7baeeba405c8257d9866263ff Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 4 Dec 2024 21:54:40 +0000 Subject: [PATCH 0335/1248] force Cargo.lock version to 3 Signed-off-by: Jason Volk --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index d787739e..52110f91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 4 +version = 3 [[package]] name = "addr2line" From ad0c5ceda409f9d5df9e49e335c82f5657beb1bf Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 4 Dec 2024 17:13:39 -0500 Subject: [PATCH 0336/1248] add origin to tracing instrument logs on /send Signed-off-by: strawberry --- src/api/server/send.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 2da99c93..3d861ad7 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -36,7 +36,7 @@ type ResolvedMap = BTreeMap>; /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. 
-#[tracing::instrument(skip_all, fields(%client), name = "send")] +#[tracing::instrument(skip_all, fields(%client, origin = body.origin().as_str()), name = "send")] pub(crate) async fn send_transaction_message_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, From fe1ce521aa77f07bb6102c1305c440ca904938d5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 4 Dec 2024 18:33:12 -0500 Subject: [PATCH 0337/1248] add ignored user checks on /context and /event, misc cleanup Signed-off-by: strawberry --- src/api/client/context.rs | 8 +++-- src/api/client/room/event.rs | 65 ++++++++++++++++++++++-------------- 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index acd7d80b..45c04eb6 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -79,11 +79,15 @@ pub(crate) async fn get_context_route( let (base_token, base_event, visible) = try_join!(base_token, base_event, visible)?; - if base_event.room_id != body.room_id { + if base_event.room_id != body.room_id || base_event.event_id != body.event_id { return Err!(Request(NotFound("Base event not found."))); } - if !visible { + if !visible + || ignored_filter(&services, (base_token, base_event.clone()), sender_user) + .await + .is_none() + { return Err!(Request(Forbidden("You don't have permission to view this event."))); } diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index 090c70a7..6deb567f 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,38 +1,53 @@ use axum::extract::State; -use conduit::{err, Result}; -use futures::TryFutureExt; +use conduit::{err, Err, Event, Result}; +use futures::{try_join, FutureExt, TryFutureExt}; use ruma::api::client::room::get_room_event; -use crate::Ruma; +use crate::{client::ignored_filter, Ruma}; /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` /// /// Gets a single event. 
-/// -/// - You have to currently be joined to the room (TODO: Respect history -/// visibility) pub(crate) async fn get_room_event_route( State(services): State, ref body: Ruma, ) -> Result { + let event = services + .rooms + .timeline + .get_pdu(&body.event_id) + .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))); + + let token = services + .rooms + .timeline + .get_pdu_count(&body.event_id) + .map_err(|_| err!(Request(NotFound("Event not found.")))); + + let visible = services + .rooms + .state_accessor + .user_can_see_event(body.sender_user(), &body.room_id, &body.event_id) + .map(Ok); + + let (token, mut event, visible) = try_join!(token, event, visible)?; + + if !visible + || ignored_filter(&services, (token, event.clone()), body.sender_user()) + .await + .is_none() + { + return Err!(Request(Forbidden("You don't have permission to view this event."))); + } + + if event.event_id() != &body.event_id || event.room_id() != body.room_id { + return Err!(Request(NotFound("Event not found"))); + } + + event.add_age().ok(); + + let event = event.to_room_event(); + Ok(get_room_event::v3::Response { - event: services - .rooms - .timeline - .get_pdu(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))) - .and_then(|event| async move { - services - .rooms - .state_accessor - .user_can_see_event(body.sender_user(), &event.room_id, &body.event_id) - .await - .then_some(event) - .ok_or_else(|| err!(Request(Forbidden("You don't have permission to view this event.")))) - }) - .map_ok(|mut event| { - event.add_age().ok(); - event.to_room_event() - }) - .await?, + event, }) } From 52cee6574862fc68040a4831a91d0a32b2cb6a96 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 7 Dec 2024 23:27:56 +0000 Subject: [PATCH 0338/1248] add support for binding to a specific interface for url previews This is helpful to, for example, bind to an interface that can only access the public internet. 
The resulting setup is less maintenance-heavy / error-prone than manually maintaining a deny/ allowlist to protect internal resources. Signed-off-by: Jade Ellis --- conduwuit-example.toml | 10 ++++++++++ src/core/config/mod.rs | 19 +++++++++++++++++++ src/service/client/mod.rs | 16 ++++++++++++---- 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index aee29f92..b4bce140 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1117,6 +1117,16 @@ # #ip_range_denylist = ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", +# Optional interface to bind to with SO_BINDTODEVICE for URL previews. +# If not set, it will not bind to a specific interface. +# This uses [`reqwest::ClientBuilder::interface`] under the hood. +# +# To list the interfaces on your system, use the command `ip link show` +# +# Example: `"eth0"` +# +#url_preview_bound_interface = + # Vector list of domains allowed to send requests to for URL previews. # Defaults to none. Note: this is a *contains* match, not an explicit # match. Putting "google.com" will match "https://google.com" and diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index cb8f3b87..c613c819 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1250,6 +1250,16 @@ pub struct Config { #[serde(default = "default_ip_range_denylist")] pub ip_range_denylist: Vec, + /// Optional interface to bind to with SO_BINDTODEVICE for URL previews. + /// If not set, it will not bind to a specific interface. + /// This uses [`reqwest::ClientBuilder::interface`] under the hood. + /// + /// To list the interfaces on your system, use the command `ip link show` + /// + /// Example: `"eth0"` + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + pub url_preview_bound_interface: Option, + /// Vector list of domains allowed to send requests to for URL previews. /// Defaults to none. 
Note: this is a *contains* match, not an explicit /// match. Putting "google.com" will match "https://google.com" and @@ -1960,6 +1970,15 @@ impl fmt::Display for Config { line("Forbidden room aliases", { &self.forbidden_alias_names.patterns().iter().join(", ") }); + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + line( + "URL preview bound interface", + if let Some(interface) = &self.url_preview_bound_interface { + interface + } else { + "not set" + }, + ); line( "URL preview domain contains allowlist", &self.url_preview_domain_contains_allowlist.join(", "), diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index 2794efc1..bcd88158 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -25,15 +25,23 @@ impl crate::Service for Service { let config = &args.server.config; let resolver = args.require::("resolver"); + let url_preview_builder = base(config)? + .dns_resolver(resolver.resolver.clone()) + .redirect(redirect::Policy::limited(3)); + + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + let url_preview_builder = if let Some(interface) = &config.url_preview_bound_interface { + url_preview_builder.interface(interface) + } else { + url_preview_builder + }; + Ok(Arc::new(Self { default: base(config)? .dns_resolver(resolver.resolver.clone()) .build()?, - url_preview: base(config)? - .dns_resolver(resolver.resolver.clone()) - .redirect(redirect::Policy::limited(3)) - .build()?, + url_preview: url_preview_builder.build()?, extern_media: base(config)? 
.dns_resolver(resolver.resolver.clone()) From f0a1aaf7bcddbcec45cfa6c8040fada967a32cca Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 02:39:40 +0000 Subject: [PATCH 0339/1248] extend x-platform support for binding URL previews to interfaces via address Signed-off-by: Jason Volk --- Cargo.lock | 1 + conduwuit-example.toml | 12 ++++++----- src/core/config/check.rs | 12 +++++++++++ src/core/config/mod.rs | 31 ++++++++++++++++----------- src/service/Cargo.toml | 1 + src/service/client/mod.rs | 45 ++++++++++++++++++++++++++++++--------- 6 files changed, 74 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 52110f91..e6659e03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -824,6 +824,7 @@ dependencies = [ "conduit_core", "conduit_database", "const-str", + "either", "futures", "hickory-resolver", "http", diff --git a/conduwuit-example.toml b/conduwuit-example.toml index b4bce140..bdccdb99 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1117,13 +1117,15 @@ # #ip_range_denylist = ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", -# Optional interface to bind to with SO_BINDTODEVICE for URL previews. -# If not set, it will not bind to a specific interface. -# This uses [`reqwest::ClientBuilder::interface`] under the hood. +# Optional IP address or network interface-name to bind as the source of +# URL preview requests. If not set, it will not bind to a specific +# address or interface. # -# To list the interfaces on your system, use the command `ip link show` +# Interface names only supported on Linux, Android, and Fuchsia platforms; +# all other platforms can specify the IP address. To list the interfaces +# on your system, use the command `ip link show`. 
# -# Example: `"eth0"` +# example: `"eth0"` or `"1.2.3.4"` # #url_preview_bound_interface = diff --git a/src/core/config/check.rs b/src/core/config/check.rs index c0d05533..c75fb31e 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -1,3 +1,6 @@ +use std::env::consts::OS; + +use either::Either; use figment::Figment; use super::DEPRECATED_KEYS; @@ -191,6 +194,15 @@ For security and safety reasons, conduwuit will shut down. If you are extra sure ); } + if let Some(Either::Right(_)) = config.url_preview_bound_interface.as_ref() { + if !matches!(OS, "android" | "fuchsia" | "linux") { + return Err!(Config( + "url_preview_bound_interface", + "Not a valid IP address. Interface names not supported on {OS}." + )); + } + } + Ok(()) } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index c613c819..117b4da5 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1250,15 +1250,19 @@ pub struct Config { #[serde(default = "default_ip_range_denylist")] pub ip_range_denylist: Vec, - /// Optional interface to bind to with SO_BINDTODEVICE for URL previews. - /// If not set, it will not bind to a specific interface. - /// This uses [`reqwest::ClientBuilder::interface`] under the hood. + /// Optional IP address or network interface-name to bind as the source of + /// URL preview requests. If not set, it will not bind to a specific + /// address or interface. /// - /// To list the interfaces on your system, use the command `ip link show` + /// Interface names only supported on Linux, Android, and Fuchsia platforms; + /// all other platforms can specify the IP address. To list the interfaces + /// on your system, use the command `ip link show`. 
/// - /// Example: `"eth0"` - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - pub url_preview_bound_interface: Option, + /// example: `"eth0"` or `"1.2.3.4"` + /// + /// default: + #[serde(default, with = "either::serde_untagged_optional")] + pub url_preview_bound_interface: Option>, /// Vector list of domains allowed to send requests to for URL previews. /// Defaults to none. Note: this is a *contains* match, not an explicit @@ -1970,14 +1974,15 @@ impl fmt::Display for Config { line("Forbidden room aliases", { &self.forbidden_alias_names.patterns().iter().join(", ") }); - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] line( "URL preview bound interface", - if let Some(interface) = &self.url_preview_bound_interface { - interface - } else { - "not set" - }, + self.url_preview_bound_interface + .as_ref() + .map(Either::as_ref) + .map(|either| either.map_left(ToString::to_string)) + .map(Either::either_into::) + .unwrap_or_default() + .as_str(), ); line( "URL preview domain contains allowlist", diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 7578ef64..19747847 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -47,6 +47,7 @@ bytes.workspace = true conduit-core.workspace = true conduit-database.workspace = true const-str.workspace = true +either.workspace = true futures.workspace = true hickory-resolver.workspace = true http.workspace = true diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index bcd88158..71545541 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Duration}; use conduit::{err, implement, trace, Config, Result}; +use either::Either; use ipaddress::IPAddress; use reqwest::redirect; @@ -25,23 +26,27 @@ impl crate::Service for Service { let config = &args.server.config; let resolver = args.require::("resolver"); - let url_preview_builder = base(config)? 
- .dns_resolver(resolver.resolver.clone()) - .redirect(redirect::Policy::limited(3)); + let url_preview_bind_addr = config + .url_preview_bound_interface + .clone() + .and_then(Either::left); - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - let url_preview_builder = if let Some(interface) = &config.url_preview_bound_interface { - url_preview_builder.interface(interface) - } else { - url_preview_builder - }; + let url_preview_bind_iface = config + .url_preview_bound_interface + .clone() + .and_then(Either::right); Ok(Arc::new(Self { default: base(config)? .dns_resolver(resolver.resolver.clone()) .build()?, - url_preview: url_preview_builder.build()?, + url_preview: base(config) + .and_then(|builder| builder_interface(builder, url_preview_bind_iface.as_deref()))? + .local_address(url_preview_bind_addr) + .dns_resolver(resolver.resolver.clone()) + .redirect(redirect::Policy::limited(3)) + .build()?, extern_media: base(config)? .dns_resolver(resolver.resolver.clone()) @@ -172,6 +177,26 @@ fn base(config: &Config) -> Result { } } +#[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] +fn builder_interface(builder: reqwest::ClientBuilder, config: Option<&str>) -> Result { + if let Some(iface) = config { + Ok(builder.interface(iface)) + } else { + Ok(builder) + } +} + +#[cfg(not(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))] +fn builder_interface(builder: reqwest::ClientBuilder, config: Option<&str>) -> Result { + use conduit::Err; + + if let Some(iface) = config { + Err!("Binding to network-interface {iface:?} by name is not supported on this platform.") + } else { + Ok(builder) + } +} + #[inline] #[must_use] #[implement(Service)] From 8e8c6bfe07633eea75149ed067a04bc393433c15 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 5 Dec 2024 07:23:51 +0000 Subject: [PATCH 0340/1248] abstract supported room versions apis Signed-off-by: Jason Volk --- src/api/client/capabilities.rs | 16 
++-------- src/api/client/membership.rs | 20 +++--------- src/api/client/room/create.rs | 8 ++--- src/api/client/room/upgrade.rs | 6 +--- src/api/server/invite.rs | 6 +--- src/core/info/mod.rs | 1 + src/core/info/room_version.rs | 58 ++++++++++++++++++++++++++++++++++ src/service/admin/create.rs | 5 ++- src/service/globals/mod.rs | 43 +++---------------------- 9 files changed, 75 insertions(+), 88 deletions(-) create mode 100644 src/core/info/room_version.rs diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 89157e47..20f9cb58 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -19,23 +19,11 @@ use crate::{Result, Ruma}; pub(crate) async fn get_capabilities_route( State(services): State, _body: Ruma, ) -> Result { - let available: BTreeMap = services - .globals - .unstable_room_versions - .iter() - .map(|unstable_room_version| (unstable_room_version.clone(), RoomVersionStability::Unstable)) - .chain( - services - .globals - .stable_room_versions - .iter() - .map(|stable_room_version| (stable_room_version.clone(), RoomVersionStability::Stable)), - ) - .collect(); + let available: BTreeMap = services.server.available_room_versions().collect(); let mut capabilities = Capabilities::default(); capabilities.room_versions = RoomVersionsCapability { - default: services.globals.default_room_version(), + default: services.server.config.default_room_version.clone(), available, }; diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 9478e383..1cdf25a2 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -706,11 +706,7 @@ async fn join_room_by_id_helper_remote( return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); }; - if !services - .globals - .supported_room_versions() - .contains(&room_version_id) - { + if !services.server.supported_room_version(&room_version_id) { return Err!(BadServerResponse( "Remote room version 
{room_version_id} is not supported by conduwuit" )); @@ -1122,11 +1118,7 @@ async fn join_room_by_id_helper_local( return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); }; - if !services - .globals - .supported_room_versions() - .contains(&room_version_id) - { + if !services.server.supported_room_version(&room_version_id) { return Err!(BadServerResponse( "Remote room version {room_version_id} is not supported by conduwuit" )); @@ -1260,7 +1252,7 @@ async fn make_join_request( federation::membership::prepare_join_event::v1::Request { room_id: room_id.to_owned(), user_id: sender_user.to_owned(), - ver: services.globals.supported_room_versions(), + ver: services.server.supported_room_versions().collect(), }, ) .await; @@ -1616,11 +1608,7 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); }; - if !services - .globals - .supported_room_versions() - .contains(&room_version_id) - { + if !services.server.supported_room_version(&room_version_id) { return Err!(BadServerResponse( "Remote room version {room_version_id} is not supported by conduwuit" )); diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 2ccb1c87..2e9852f8 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -116,11 +116,7 @@ pub(crate) async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if services - .globals - .supported_room_versions() - .contains(&room_version) - { + if services.server.supported_room_version(&room_version) { room_version } else { return Err(Error::BadRequest( @@ -129,7 +125,7 @@ pub(crate) async fn create_room_route( )); } }, - None => services.globals.default_room_version(), + None => services.server.config.default_room_version.clone(), }; let create_content = match &body.creation_content { diff --git 
a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index ad5c356e..fafce2d1 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -47,11 +47,7 @@ pub(crate) async fn upgrade_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services - .globals - .supported_room_versions() - .contains(&body.new_version) - { + if !services.server.supported_room_version(&body.new_version) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 0ceb914f..f7919bb3 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -26,11 +26,7 @@ pub(crate) async fn create_invite_route( .acl_check(body.origin(), &body.room_id) .await?; - if !services - .globals - .supported_room_versions() - .contains(&body.room_version) - { + if !services.server.supported_room_version(&body.room_version) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), diff --git a/src/core/info/mod.rs b/src/core/info/mod.rs index e4a33b4e..22ea7d3d 100644 --- a/src/core/info/mod.rs +++ b/src/core/info/mod.rs @@ -2,6 +2,7 @@ //! etc information which can be queried by admins or used by developers. pub mod cargo; +pub mod room_version; pub mod rustc; pub mod version; diff --git a/src/core/info/room_version.rs b/src/core/info/room_version.rs new file mode 100644 index 00000000..59685b5f --- /dev/null +++ b/src/core/info/room_version.rs @@ -0,0 +1,58 @@ +//! 
Room version support + +use std::iter::once; + +use ruma::{api::client::discovery::get_capabilities::RoomVersionStability, RoomVersionId}; + +use crate::{at, is_equal_to}; + +/// Supported and stable room versions +pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[ + RoomVersionId::V6, + RoomVersionId::V7, + RoomVersionId::V8, + RoomVersionId::V9, + RoomVersionId::V10, + RoomVersionId::V11, +]; + +/// Experimental, partially supported room versions +pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] = + &[RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; + +impl crate::Server { + #[inline] + pub fn supported_room_version(&self, version: &RoomVersionId) -> bool { + self.supported_room_versions().any(is_equal_to!(*version)) + } + + #[inline] + pub fn supported_room_versions(&self) -> impl Iterator + '_ { + self.available_room_versions() + .filter(|(_, stability)| self.supported_stability(stability)) + .map(at!(0)) + } + + #[inline] + pub fn available_room_versions(&self) -> impl Iterator { + available_room_versions() + } + + #[inline] + fn supported_stability(&self, stability: &RoomVersionStability) -> bool { + self.config.allow_unstable_room_versions || *stability == RoomVersionStability::Stable + } +} + +pub fn available_room_versions() -> impl Iterator { + let unstable_room_versions = UNSTABLE_ROOM_VERSIONS + .iter() + .cloned() + .zip(once(RoomVersionStability::Unstable).cycle()); + + STABLE_ROOM_VERSIONS + .iter() + .cloned() + .zip(once(RoomVersionStability::Stable).cycle()) + .chain(unstable_room_versions) +} diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 1631f1cb..897d412d 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -25,6 +25,7 @@ use crate::Services; /// used to issue admin commands by talking to the server user inside it. 
pub async fn create_admin_room(services: &Services) -> Result<()> { let room_id = RoomId::new(services.globals.server_name()); + let room_version = &services.server.config.default_room_version; let _short_id = services .rooms @@ -38,8 +39,6 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { let server_user = &services.globals.server_user; services.users.create(server_user, None)?; - let room_version = services.globals.default_room_version(); - let create_content = { use RoomVersionId::*; match room_version { @@ -58,7 +57,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &RoomCreateEventContent { federate: true, predecessor: None, - room_version, + room_version: room_version.clone(), ..create_content }, ), diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 4fb1ce2d..fefff3b5 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -10,9 +10,7 @@ use std::{ use conduit::{error, Config, Result}; use data::Data; use regex::RegexSet; -use ruma::{ - OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, RoomVersionId, ServerName, UserId, -}; +use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, ServerName, UserId}; use tokio::sync::Mutex; use crate::service; @@ -22,8 +20,6 @@ pub struct Service { pub config: Config, jwt_decoding_key: Option, - pub stable_room_versions: Vec, - pub unstable_room_versions: Vec, pub bad_event_ratelimiter: Arc>>, pub bad_query_ratelimiter: Arc>>, pub stateres_mutex: Arc>, @@ -45,18 +41,6 @@ impl crate::Service for Service { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); - // Supported and stable room versions - let stable_room_versions = vec![ - RoomVersionId::V6, - RoomVersionId::V7, - RoomVersionId::V8, - RoomVersionId::V9, - RoomVersionId::V10, - RoomVersionId::V11, - ]; - // Experimental, partially supported room versions - let unstable_room_versions = 
vec![RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; - let turn_secret = config .turn_secret_file .as_ref() @@ -86,8 +70,6 @@ impl crate::Service for Service { db, config: config.clone(), jwt_decoding_key, - stable_room_versions, - unstable_room_versions, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())), stateres_mutex: Arc::new(Mutex::new(())), @@ -99,9 +81,9 @@ impl crate::Service for Service { registration_token, }; - if !s - .supported_room_versions() - .contains(&s.config.default_room_version) + if !args + .server + .supported_room_version(&config.default_room_version) { error!(config=?s.config.default_room_version, fallback=?conduit::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); s.config.default_room_version = conduit::config::default_default_room_version(); @@ -173,11 +155,6 @@ impl Service { pub fn allow_room_creation(&self) -> bool { self.config.allow_room_creation } - pub fn allow_unstable_room_versions(&self) -> bool { self.config.allow_unstable_room_versions } - - #[inline] - pub fn default_room_version(&self) -> RoomVersionId { self.config.default_room_version.clone() } - pub fn new_user_displayname_suffix(&self) -> &String { &self.config.new_user_displayname_suffix } pub fn allow_check_for_updates(&self) -> bool { self.config.allow_check_for_updates } @@ -232,18 +209,6 @@ impl Service { pub fn block_non_admin_invites(&self) -> bool { self.config.block_non_admin_invites } - pub fn supported_room_versions(&self) -> Vec { - if self.config.allow_unstable_room_versions { - self.stable_room_versions - .clone() - .into_iter() - .chain(self.unstable_room_versions.clone()) - .collect() - } else { - self.stable_room_versions.clone() - } - } - /// checks if `user_id` is local to us via server_name comparison #[inline] pub fn user_is_local(&self, user_id: &UserId) -> bool { 
self.server_is_ours(user_id.server_name()) } From de3b137df801f4e362957235884b11e34a7c6626 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 6 Dec 2024 03:09:08 +0000 Subject: [PATCH 0341/1248] eliminate future wrapping stream for all_pdus() Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 1 - src/service/rooms/timeline/data.rs | 2 +- src/service/rooms/timeline/mod.rs | 17 ++++++++++------- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 5578077f..3a78c9ad 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -609,7 +609,6 @@ async fn load_joined_room( .rooms .timeline .all_pdus(sender_user, room_id) - .await? .ready_filter(|(_, pdu)| pdu.kind == RoomMember) .filter_map(|(_, pdu)| async move { let content: RoomMemberEventContent = pdu.get_content().ok()?; diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index c394dc3b..94621385 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -220,7 +220,7 @@ impl Data { pub(super) async fn pdus<'a>( &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, from: PduCount, - ) -> Result + Send + 'a> { + ) -> Result + Send + Unpin + 'a> { let current = self.count_to_id(room_id, from, Direction::Forward).await?; let prefix = current.shortroomid(); let stream = self diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 07b406c4..0a96322b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -15,7 +15,7 @@ use conduit::{ validated, warn, Err, Error, Result, Server, }; pub use conduit::{PduId, RawPduId}; -use futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryStreamExt}; +use futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ api::federation, canonical_json::to_canonical_value, @@ -168,7 +168,6 @@ 
impl Service { #[tracing::instrument(skip(self), level = "debug")] pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result> { self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id) - .await? .next() .await .map(|(_, p)| Arc::new(p)) @@ -968,12 +967,17 @@ impl Service { Ok(Some(pdu_id)) } - /// Returns an iterator over all PDUs in a room. + /// Returns an iterator over all PDUs in a room. Unknown rooms produce no + /// items. #[inline] - pub async fn all_pdus<'a>( + pub fn all_pdus<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, - ) -> Result + Send + 'a> { - self.pdus(Some(user_id), room_id, None).await + ) -> impl Stream + Send + Unpin + 'a { + self.pdus(Some(user_id), room_id, None) + .map_ok(|stream| stream.map(Ok)) + .try_flatten_stream() + .ignore_err() + .boxed() } /// Reverse iteration starting at from. @@ -1048,7 +1052,6 @@ impl Service { let first_pdu = self .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id) - .await? .next() .await .expect("Room is not empty"); From 23cf2b2236405c64f990671e8610fa1a4471acd5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 6 Dec 2024 12:42:14 +0000 Subject: [PATCH 0342/1248] add is_err() to TryFuture extension add fold_default to Future tools extension Signed-off-by: Jason Volk --- src/core/utils/future/try_ext_ext.rs | 21 +++++++++++++++------ src/core/utils/stream/tools.rs | 16 ++++++++++++++++ 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index f97ae885..81c7aac0 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -1,5 +1,8 @@ //! Extended external extensions to futures::TryFutureExt #![allow(clippy::type_complexity)] +// is_ok() has to consume *self rather than borrow. This extension is for a +// caller only ever caring about result status while discarding all contents. 
+#![allow(clippy::wrong_self_convention)] use futures::{ future::{MapOkOrElse, UnwrapOrElse}, @@ -11,12 +14,10 @@ pub trait TryExtExt where Self: TryFuture + Send, { - /// Resolves to a bool for whether the TryFuture (Future of a Result) - /// resolved to Ok or Err. - /// - /// is_ok() has to consume *self rather than borrow. The intent of this - /// extension is therefor for a caller only ever caring about result status - /// while discarding all contents. + fn is_err(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> + where + Self: Sized; + #[allow(clippy::wrong_self_convention)] fn is_ok(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> where @@ -44,6 +45,14 @@ impl TryExtExt for Fut where Fut: TryFuture + Send, { + #[inline] + fn is_err(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> + where + Self: Sized, + { + self.map_ok_or(true, |_| false) + } + #[inline] fn is_ok(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> where diff --git a/src/core/utils/stream/tools.rs b/src/core/utils/stream/tools.rs index cc6b7ca9..b5b036cc 100644 --- a/src/core/utils/stream/tools.rs +++ b/src/core/utils/stream/tools.rs @@ -32,6 +32,12 @@ where fn counts_with_cap(self) -> impl Future> + Send where ::Item: Eq + Hash; + + fn fold_default(self, f: F) -> impl Future + Send + where + F: Fn(T, Item) -> Fut + Send, + Fut: Future + Send, + T: Default + Send; } impl Tools for S @@ -77,4 +83,14 @@ where counts }) } + + #[inline] + fn fold_default(self, f: F) -> impl Future + Send + where + F: Fn(T, Item) -> Fut + Send, + Fut: Future + Send, + T: Default + Send, + { + self.fold(T::default(), f) + } } From b77a1eb079b8d3e5144849a74805ec653cd722e6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 6 Dec 2024 12:44:10 +0000 Subject: [PATCH 0343/1248] move cork to remove_to_device_events fn add cork around write-heavy database routine called in loop; cleanup Signed-off-by: Jason Volk --- src/service/admin/console.rs | 2 +- 
src/service/rooms/user/mod.rs | 15 +++++++++------ src/service/users/mod.rs | 5 ++++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index 0f5016e1..37af7452 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -89,7 +89,7 @@ impl Console { } } - #[tracing::instrument(skip_all, name = "console")] + #[tracing::instrument(skip_all, name = "console", level = "trace")] async fn worker(self: Arc) { debug!("session starting"); while self.server.running() { diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 948baa5e..537fe69b 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use conduit::{implement, Result}; -use database::{Deserialized, Map}; +use database::{Database, Deserialized, Map}; use ruma::{RoomId, UserId}; use crate::{globals, rooms, rooms::short::ShortStateHash, Dep}; @@ -12,6 +12,7 @@ pub struct Service { } struct Data { + db: Arc, userroomid_notificationcount: Arc, userroomid_highlightcount: Arc, roomuserid_lastnotificationread: Arc, @@ -27,11 +28,12 @@ impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { db: Data { - userroomid_notificationcount: args.db["userroomid_notificationcount"].clone(), - userroomid_highlightcount: args.db["userroomid_highlightcount"].clone(), - roomuserid_lastnotificationread: args.db["userroomid_highlightcount"].clone(), //< NOTE: known bug from conduit - roomsynctoken_shortstatehash: args.db["roomsynctoken_shortstatehash"].clone(), - }, + db: args.db.clone(), + userroomid_notificationcount: args.db["userroomid_notificationcount"].clone(), + userroomid_highlightcount: args.db["userroomid_highlightcount"].clone(), + roomuserid_lastnotificationread: args.db["userroomid_highlightcount"].clone(), + roomsynctoken_shortstatehash: args.db["roomsynctoken_shortstatehash"].clone(), + }, services: 
Services { globals: args.depend::("globals"), @@ -98,6 +100,7 @@ pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, .await .expect("room exists"); + let _cork = self.db.db.cork(); let key: &[u64] = &[shortroomid, token]; self.db .roomsynctoken_shortstatehash diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 2462dde3..f17a6b9d 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -5,7 +5,7 @@ use conduit::{ utils::{stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; -use database::{Deserialized, Ignore, Interfix, Json, Map}; +use database::{Database, Deserialized, Ignore, Interfix, Json, Map}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, @@ -26,6 +26,7 @@ pub struct Service { struct Services { server: Arc, + db: Arc, account_data: Dep, admin: Dep, globals: Dep, @@ -60,6 +61,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { server: args.server.clone(), + db: args.db.clone(), account_data: args.depend::("account_data"), admin: args.depend::("admin"), globals: args.depend::("globals"), @@ -721,6 +723,7 @@ impl Service { let mut last = prefix.clone(); last.extend_from_slice(&until.to_be_bytes()); + let _cork = self.services.db.cork_and_flush(); self.db .todeviceid_events .rev_raw_keys_from(&last) // this includes last From b7a41f283f0010d71f6475c8533c2384e20d629f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 6 Dec 2024 12:45:20 +0000 Subject: [PATCH 0344/1248] optimize sync v3 asynchronicity Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 1448 ++++++++++++++++++++----------------- 1 file changed, 788 insertions(+), 660 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 3a78c9ad..61a0ea5c 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,12 +6,25 @@ use std::{ use 
axum::extract::State; use conduit::{ - at, err, error, extract_variant, is_equal_to, - result::FlatOk, - utils::{math::ruma_from_u64, BoolExt, IterStream, ReadyExt, TryFutureExtExt}, - PduCount, + at, err, error, extract_variant, is_equal_to, is_false, + pdu::EventHash, + result::{FlatOk, LogDebugErr}, + utils, + utils::{ + math::ruma_from_u64, + stream::{BroadbandExt, Tools}, + BoolExt, IterStream, ReadyExt, TryFutureExtExt, + }, + Error, PduCount, PduEvent, Result, +}; +use conduit_service::{ + rooms::short::{ShortStateHash, ShortStateKey}, + Services, +}; +use futures::{ + future::{join, join3, join5, try_join, OptionFuture}, + FutureExt, StreamExt, TryFutureExt, }; -use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt}; use ruma::{ api::client::{ filter::{FilterDefinition, LazyLoadOptions}, @@ -34,14 +47,20 @@ use ruma::{ serde::Raw, uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, }; -use tracing::{Instrument as _, Span}; use super::{load_timeline, share_encrypted_room}; -use crate::{ - client::ignored_filter, - service::{pdu::EventHash, Services}, - utils, Error, PduEvent, Result, Ruma, RumaResponse, -}; +use crate::{client::ignored_filter, Ruma, RumaResponse}; + +#[derive(Default)] +struct StateChanges { + heroes: Option>, + joined_member_count: Option, + invited_member_count: Option, + joined_since_last_sync: bool, + state_events: Vec, +} + +type PresenceUpdates = HashMap; /// # `GET /_matrix/client/r0/sync` /// @@ -81,32 +100,30 @@ use crate::{ pub(crate) async fn sync_events_route( State(services): State, body: Ruma, ) -> Result> { - let sender_user = body.sender_user.expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); - let body = body.body; + let (sender_user, sender_device) = body.sender(); // Presence update if services.globals.allow_local_presence() { services .presence - .ping_presence(&sender_user, &body.set_presence) + .ping_presence(sender_user, 
&body.body.set_presence) .await?; } // Setup watchers, so if there's no response, we can wait for them - let watcher = services.sync.watch(&sender_user, &sender_device); + let watcher = services.sync.watch(sender_user, sender_device); let next_batch = services.globals.current_count()?; let next_batchcount = PduCount::Normal(next_batch); let next_batch_string = next_batch.to_string(); // Load filter - let filter = match body.filter { + let filter = match body.body.filter.as_ref() { None => FilterDefinition::default(), - Some(Filter::FilterDefinition(filter)) => filter, - Some(Filter::FilterId(filter_id)) => services + Some(Filter::FilterDefinition(ref filter)) => filter.clone(), + Some(Filter::FilterId(ref filter_id)) => services .users - .get_filter(&sender_user, &filter_id) + .get_filter(sender_user, filter_id) .await .unwrap_or_default(), }; @@ -120,183 +137,190 @@ pub(crate) async fn sync_events_route( LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")), }; - let full_state = body.full_state; + let full_state = body.body.full_state; - let mut joined_rooms = BTreeMap::new(); let since = body + .body .since .as_ref() .and_then(|string| string.parse().ok()) .unwrap_or(0); let sincecount = PduCount::Normal(since); - let mut presence_updates = HashMap::new(); - let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in - let mut device_list_updates = HashSet::new(); - let mut device_list_left = HashSet::new(); - - // Look for device list updates of this account - device_list_updates.extend( - services - .users - .keys_changed(&sender_user, since, None) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - if services.globals.allow_local_presence() { - process_presence_updates(&services, &mut presence_updates, since, &sender_user).await?; - } - - let all_joined_rooms: Vec<_> = services + let joined_related = services .rooms .state_cache - .rooms_joined(&sender_user) + .rooms_joined(sender_user) 
.map(ToOwned::to_owned) - .collect() - .await; + .broad_filter_map(|room_id| { + load_joined_room( + &services, + sender_user, + sender_device, + room_id.clone(), + since, + sincecount, + next_batch, + next_batchcount, + lazy_load_enabled, + lazy_load_send_redundant, + full_state, + ) + .map_ok(move |(joined_room, dlu, jeu)| (room_id, joined_room, dlu, jeu)) + .ok() + }) + .ready_fold( + (BTreeMap::new(), HashSet::new(), HashSet::new()), + |(mut joined_rooms, mut device_list_updates, mut left_encrypted_users), + (room_id, joined_room, dlu, leu)| { + device_list_updates.extend(dlu); + left_encrypted_users.extend(leu); + if !joined_room.is_empty() { + joined_rooms.insert(room_id, joined_room); + } - // Coalesce database writes for the remainder of this scope. - let _cork = services.db.cork_and_flush(); - - for room_id in all_joined_rooms { - if let Ok(joined_room) = load_joined_room( - &services, - &sender_user, - &sender_device, - &room_id, - since, - sincecount, - next_batch, - next_batchcount, - lazy_load_enabled, - lazy_load_send_redundant, - full_state, - &mut device_list_updates, - &mut left_encrypted_users, - ) - .await - { - if !joined_room.is_empty() { - joined_rooms.insert(room_id.clone(), joined_room); - } - } - } - - let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = services - .rooms - .state_cache - .rooms_left(&sender_user) - .collect() - .await; - - for result in all_left_rooms { - handle_left_room( - &services, - since, - &result.0, - &sender_user, - &mut left_rooms, - &next_batch_string, - full_state, - lazy_load_enabled, - ) - .instrument(Span::current()) - .await?; - } - - let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = services - .rooms - .state_cache - .rooms_invited(&sender_user) - .collect() - .await; - - for (room_id, invite_state_events) in all_invited_rooms { - // Get and drop the lock to wait for remaining operations to finish - let insert_lock = 
services.rooms.timeline.mutex_insert.lock(&room_id).await; - drop(insert_lock); - - let invite_count = services - .rooms - .state_cache - .get_invite_count(&room_id, &sender_user) - .await - .ok(); - - // Invited before last sync - if Some(since) >= invite_count { - continue; - } - - invited_rooms.insert( - room_id.clone(), - InvitedRoom { - invite_state: InviteState { - events: invite_state_events, - }, + (joined_rooms, device_list_updates, left_encrypted_users) }, ); - } - for user_id in left_encrypted_users { - let dont_share_encrypted_room = !share_encrypted_room(&services, &sender_user, &user_id, None).await; + let left_rooms = services + .rooms + .state_cache + .rooms_left(sender_user) + .broad_filter_map(|(room_id, _)| { + handle_left_room( + &services, + since, + room_id.clone(), + sender_user, + &next_batch_string, + full_state, + lazy_load_enabled, + ) + .map_ok(move |left_room| (room_id, left_room)) + .ok() + }) + .ready_filter_map(|(room_id, left_room)| left_room.map(|left_room| (room_id, left_room))) + .collect(); - // If the user doesn't share an encrypted room with the target anymore, we need - // to tell them - if dont_share_encrypted_room { - device_list_left.insert(user_id); - } - } + let invited_rooms = services.rooms.state_cache.rooms_invited(sender_user).fold( + BTreeMap::new(), + |mut invited_rooms, (room_id, invite_state)| async move { + // Get and drop the lock to wait for remaining operations to finish + let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; + drop(insert_lock); + + let invite_count = services + .rooms + .state_cache + .get_invite_count(&room_id, sender_user) + .await + .ok(); + + // Invited before last sync + if Some(since) >= invite_count { + return invited_rooms; + } + + let invited_room = InvitedRoom { + invite_state: InviteState { + events: invite_state, + }, + }; + + invited_rooms.insert(room_id, invited_room); + invited_rooms + }, + ); + + let presence_updates: OptionFuture<_> = services + 
.globals + .allow_local_presence() + .then(|| process_presence_updates(&services, since, sender_user)) + .into(); + + let account_data = services + .account_data + .changes_since(None, sender_user, since) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) + .collect(); + + // Look for device list updates of this account + let keys_changed = services + .users + .keys_changed(sender_user, since, None) + .map(ToOwned::to_owned) + .collect::>(); + + let to_device_events = services + .users + .get_to_device_events(sender_user, sender_device) + .collect::>(); + + let device_one_time_keys_count = services + .users + .count_one_time_keys(sender_user, sender_device); // Remove all to-device events the device received *last time* - services + let remove_to_device_events = services .users - .remove_to_device_events(&sender_user, &sender_device, since) + .remove_to_device_events(sender_user, sender_device, since); + + let rooms = join3(joined_related, left_rooms, invited_rooms); + let ephemeral = join3(remove_to_device_events, to_device_events, presence_updates); + let top = join5(account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) + .boxed() + .await; + + let (account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) = top; + let ((), to_device_events, presence_updates) = ephemeral; + let (joined_related, left_rooms, invited_rooms) = rooms; + let (joined_rooms, mut device_list_updates, left_encrypted_users) = joined_related; + device_list_updates.extend(keys_changed); + + // If the user doesn't share an encrypted room with the target anymore, we need + // to tell them + let device_list_left = left_encrypted_users + .into_iter() + .stream() + .broad_filter_map(|user_id| async move { + let no_shared_encrypted_room = !share_encrypted_room(&services, sender_user, &user_id, None).await; + no_shared_encrypted_room.then_some(user_id) + }) + .ready_fold(HashSet::new(), |mut device_list_left, user_id| { + 
device_list_left.insert(user_id); + device_list_left + }) .await; let response = sync_events::v3::Response { + account_data: GlobalAccountData { + events: account_data, + }, + device_lists: DeviceLists { + changed: device_list_updates.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, next_batch: next_batch_string, + presence: Presence { + events: presence_updates + .unwrap_or_default() + .into_values() + .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) + .collect(), + }, rooms: Rooms { leave: left_rooms, join: joined_rooms, invite: invited_rooms, knock: BTreeMap::new(), // TODO }, - presence: Presence { - events: presence_updates - .into_values() - .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) - .collect(), - }, - account_data: GlobalAccountData { - events: services - .account_data - .changes_since(None, &sender_user, since) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) - .collect() - .await, - }, - device_lists: DeviceLists { - changed: device_list_updates.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - device_one_time_keys_count: services - .users - .count_one_time_keys(&sender_user, &sender_device) - .await, to_device: ToDevice { - events: services - .users - .get_to_device_events(&sender_user, &sender_device) - .collect() - .await, + events: to_device_events, }, - // Fallback keys are not yet supported - device_unused_fallback_key_types: None, }; // TODO: Retry the endpoint instead of returning @@ -310,37 +334,86 @@ pub(crate) async fn sync_events_route( // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives let default = Duration::from_secs(30); - let duration = cmp::min(body.timeout.unwrap_or(default), default); + let duration = 
cmp::min(body.body.timeout.unwrap_or(default), default); _ = tokio::time::timeout(duration, watcher).await; } Ok(response) } +async fn process_presence_updates(services: &Services, since: u64, syncing_user: &UserId) -> PresenceUpdates { + services + .presence + .presence_since(since) + .filter(|(user_id, ..)| { + services + .rooms + .state_cache + .user_sees_user(syncing_user, user_id) + }) + .filter_map(|(user_id, _, presence_bytes)| { + services + .presence + .from_json_bytes_to_event(presence_bytes, user_id) + .map_ok(move |event| (user_id, event)) + .ok() + }) + .ready_fold(PresenceUpdates::new(), |mut updates, (user_id, event)| { + match updates.entry(user_id.into()) { + Entry::Vacant(slot) => { + slot.insert(event); + }, + Entry::Occupied(mut slot) => { + let curr_event = slot.get_mut(); + let curr_content = &mut curr_event.content; + let new_content = event.content; + + // Update existing presence event with more info + curr_content.presence = new_content.presence; + curr_content.status_msg = new_content + .status_msg + .or_else(|| curr_content.status_msg.take()); + curr_content.last_active_ago = new_content.last_active_ago.or(curr_content.last_active_ago); + curr_content.displayname = new_content + .displayname + .or_else(|| curr_content.displayname.take()); + curr_content.avatar_url = new_content + .avatar_url + .or_else(|| curr_content.avatar_url.take()); + curr_content.currently_active = new_content + .currently_active + .or(curr_content.currently_active); + }, + }; + + updates + }) + .await +} + #[allow(clippy::too_many_arguments)] #[tracing::instrument(skip_all, fields(user_id = %sender_user, room_id = %room_id), name = "left_room")] async fn handle_left_room( - services: &Services, since: u64, room_id: &RoomId, sender_user: &UserId, - left_rooms: &mut BTreeMap, next_batch_string: &str, full_state: bool, - lazy_load_enabled: bool, -) -> Result<()> { + services: &Services, since: u64, room_id: OwnedRoomId, sender_user: &UserId, next_batch_string: &str, 
+ full_state: bool, lazy_load_enabled: bool, +) -> Result> { // Get and drop the lock to wait for remaining operations to finish - let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; + let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; drop(insert_lock); let left_count = services .rooms .state_cache - .get_left_count(room_id, sender_user) + .get_left_count(&room_id, sender_user) .await .ok(); // Left before last sync if Some(since) >= left_count { - return Ok(()); + return Ok(None); } - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(&room_id).await { // This is just a rejected invite, not a room we know // Insert a leave event anyways let event = PduEvent { @@ -355,7 +428,7 @@ async fn handle_left_room( state_key: Some(sender_user.to_string()), unsigned: None, // The following keys are dropped on conversion - room_id: room_id.to_owned(), + room_id: room_id.clone(), prev_events: vec![], depth: uint!(1), auth_events: vec![], @@ -366,23 +439,19 @@ async fn handle_left_room( signatures: None, }; - left_rooms.insert( - room_id.to_owned(), - LeftRoom { - account_data: RoomAccountData { - events: Vec::new(), - }, - timeline: Timeline { - limited: false, - prev_batch: Some(next_batch_string.to_owned()), - events: Vec::new(), - }, - state: RoomState { - events: vec![event.to_sync_state_event()], - }, + return Ok(Some(LeftRoom { + account_data: RoomAccountData { + events: Vec::new(), }, - ); - return Ok(()); + timeline: Timeline { + limited: false, + prev_batch: Some(next_batch_string.to_owned()), + events: Vec::new(), + }, + state: RoomState { + events: vec![event.to_sync_state_event()], + }, + })); } let mut left_state_events = Vec::new(); @@ -390,7 +459,7 @@ async fn handle_left_room( let since_shortstatehash = services .rooms .user - .get_token_shortstatehash(room_id, since) + .get_token_shortstatehash(&room_id, since) .await; let since_state_ids = match since_shortstatehash { @@ 
-401,11 +470,11 @@ async fn handle_left_room( let Ok(left_event_id): Result = services .rooms .state_accessor - .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) + .room_state_get_id(&room_id, &StateEventType::RoomMember, sender_user.as_str()) .await else { error!("Left room but no left state event"); - return Ok(()); + return Ok(None); }; let Ok(left_shortstatehash) = services @@ -415,7 +484,7 @@ async fn handle_left_room( .await else { error!(event_id = %left_event_id, "Leave event has no state"); - return Ok(()); + return Ok(None); }; let mut left_state_ids = services @@ -456,488 +525,102 @@ async fn handle_left_room( } } - left_rooms.insert( - room_id.to_owned(), - LeftRoom { - account_data: RoomAccountData { - events: Vec::new(), - }, - timeline: Timeline { - limited: false, - prev_batch: Some(next_batch_string.to_owned()), - events: Vec::new(), - }, - state: RoomState { - events: left_state_events, - }, + Ok(Some(LeftRoom { + account_data: RoomAccountData { + events: Vec::new(), }, - ); - Ok(()) -} - -async fn process_presence_updates( - services: &Services, presence_updates: &mut HashMap, since: u64, syncing_user: &UserId, -) -> Result<()> { - let presence_since = services.presence.presence_since(since); - - // Take presence updates - pin_mut!(presence_since); - while let Some((user_id, _, presence_bytes)) = presence_since.next().await { - if !services - .rooms - .state_cache - .user_sees_user(syncing_user, user_id) - .await - { - continue; - } - - let presence_event = services - .presence - .from_json_bytes_to_event(presence_bytes, user_id) - .await?; - - match presence_updates.entry(user_id.into()) { - Entry::Vacant(slot) => { - slot.insert(presence_event); - }, - Entry::Occupied(mut slot) => { - let curr_event = slot.get_mut(); - let curr_content = &mut curr_event.content; - let new_content = presence_event.content; - - // Update existing presence event with more info - curr_content.presence = new_content.presence; - 
curr_content.status_msg = new_content - .status_msg - .or_else(|| curr_content.status_msg.take()); - curr_content.last_active_ago = new_content.last_active_ago.or(curr_content.last_active_ago); - curr_content.displayname = new_content - .displayname - .or_else(|| curr_content.displayname.take()); - curr_content.avatar_url = new_content - .avatar_url - .or_else(|| curr_content.avatar_url.take()); - curr_content.currently_active = new_content - .currently_active - .or(curr_content.currently_active); - }, - }; - } - - Ok(()) + timeline: Timeline { + limited: false, + prev_batch: Some(next_batch_string.to_owned()), + events: Vec::new(), + }, + state: RoomState { + events: left_state_events, + }, + })) } #[allow(clippy::too_many_arguments)] async fn load_joined_room( - services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, since: u64, + services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: OwnedRoomId, since: u64, sincecount: PduCount, next_batch: u64, next_batchcount: PduCount, lazy_load_enabled: bool, - lazy_load_send_redundant: bool, full_state: bool, device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, -) -> Result { + lazy_load_send_redundant: bool, full_state: bool, +) -> Result<(JoinedRoom, HashSet, HashSet)> { + let mut device_list_updates = HashSet::::new(); + let mut left_encrypted_users = HashSet::::new(); + // Get and drop the lock to wait for remaining operations to finish // This will make sure the we have all events until next_batch - let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; + let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; drop(insert_lock); let (timeline_pdus, limited) = - load_timeline(services, sender_user, room_id, sincecount, Some(next_batchcount), 10_usize).await?; + load_timeline(services, sender_user, &room_id, sincecount, Some(next_batchcount), 10_usize).await?; let send_notification_counts = 
!timeline_pdus.is_empty() || services .rooms .user - .last_notification_read(sender_user, room_id) + .last_notification_read(sender_user, &room_id) .await > since; - let mut timeline_users = HashSet::new(); - for (_, event) in &timeline_pdus { - timeline_users.insert(event.sender.as_str().to_owned()); - } + let timeline_users = timeline_pdus + .iter() + .fold(HashSet::new(), |mut timeline_users, (_, event)| { + timeline_users.insert(event.sender.as_str().to_owned()); + timeline_users + }); services .rooms .lazy_loading - .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount); + .lazy_load_confirm_delivery(sender_user, sender_device, &room_id, sincecount); let current_shortstatehash = services .rooms .state - .get_room_shortstatehash(room_id) - .await - .map_err(|_| err!(Database(error!("Room {room_id} has no state"))))?; + .get_room_shortstatehash(&room_id) + .map_err(|_| err!(Database(error!("Room {room_id} has no state")))); let since_shortstatehash = services .rooms .user - .get_token_shortstatehash(room_id, since) - .await - .ok(); + .get_token_shortstatehash(&room_id, since) + .ok() + .map(Ok); - let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = if timeline_pdus - .is_empty() + let (current_shortstatehash, since_shortstatehash) = try_join(current_shortstatehash, since_shortstatehash).await?; + + let StateChanges { + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + } = if timeline_pdus.is_empty() && (since_shortstatehash.is_none() || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))) { // No state changes - (Vec::new(), None, None, false, Vec::new()) + StateChanges::default() } else { - // Calculates joined_member_count, invited_member_count and heroes - let calculate_counts = || async { - let joined_member_count = services - .rooms - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0); - - let 
invited_member_count = services - .rooms - .state_cache - .room_invited_count(room_id) - .await - .unwrap_or(0); - - if joined_member_count.saturating_add(invited_member_count) > 5 { - return Ok::<_, Error>((Some(joined_member_count), Some(invited_member_count), Vec::new())); - } - - // Go through all PDUs and for each member event, check if the user is still - // joined or invited until we have 5 or we reach the end - - // Recalculate heroes (first 5 members) - let heroes = services - .rooms - .timeline - .all_pdus(sender_user, room_id) - .ready_filter(|(_, pdu)| pdu.kind == RoomMember) - .filter_map(|(_, pdu)| async move { - let content: RoomMemberEventContent = pdu.get_content().ok()?; - let user_id: &UserId = pdu.state_key.as_deref().map(TryInto::try_into).flat_ok()?; - - if user_id == sender_user { - return None; - } - - // The membership was and still is invite or join - if !matches!(content.membership, MembershipState::Join | MembershipState::Invite) { - return None; - } - - let is_invited = services.rooms.state_cache.is_invited(user_id, room_id); - - let is_joined = services.rooms.state_cache.is_joined(user_id, room_id); - - if !is_joined.await && is_invited.await { - return None; - } - - Some(user_id.to_owned()) - }) - .collect::>() - .await; - - Ok::<_, Error>(( - Some(joined_member_count), - Some(invited_member_count), - heroes.into_iter().collect::>(), - )) - }; - - let get_sender_member_content = |short| { - services - .rooms - .state_accessor - .state_get_content(short, &StateEventType::RoomMember, sender_user.as_str()) - .ok() - }; - - let since_sender_member: OptionFuture<_> = since_shortstatehash.map(get_sender_member_content).into(); - - let joined_since_last_sync = since_sender_member - .await - .flatten() - .map_or(true, |content: RoomMemberEventContent| { - content.membership != MembershipState::Join - }); - - if since_shortstatehash.is_none() || joined_since_last_sync { - // Probably since = 0, we will do an initial sync - - let 
(joined_member_count, invited_member_count, heroes) = calculate_counts().await?; - - let current_state_ids: HashMap<_, OwnedEventId> = services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - for (shortstatekey, event_id) in current_state_ids { - let (event_type, state_key) = services - .rooms - .short - .get_statekey_from_short(shortstatekey) - .await?; - - if event_type != StateEventType::RoomMember { - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - continue; - }; - - state_events.push(pdu); - continue; - } - - // TODO: Delete "element_hacks" when this is resolved: https://github.com/vector-im/element-web/issues/22565 - if !lazy_load_enabled - || full_state || timeline_users.contains(&state_key) - || (cfg!(feature = "element_hacks") && *sender_user == state_key) - { - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - continue; - }; - - // This check is in case a bad user ID made it into the database - if let Ok(uid) = UserId::parse(&state_key) { - lazy_loaded.insert(uid); - } - - state_events.push(pdu); - } - } - - // Reset lazy loading because this is an initial sync - services - .rooms - .lazy_loading - .lazy_load_reset(sender_user, sender_device, room_id) - .await; - - // The state_events above should contain all timeline_users, let's mark them as - // lazy loaded. 
- services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ); - - (heroes, joined_member_count, invited_member_count, true, state_events) - } else { - // Incremental /sync - let since_shortstatehash = since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); - - let mut delta_state_events = Vec::new(); - - if since_shortstatehash != current_shortstatehash { - let current_state_ids: HashMap<_, OwnedEventId> = services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let since_state_ids = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, id) in current_state_ids { - if full_state || since_state_ids.get(&key) != Some(&id) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state not found: {id}"); - continue; - }; - - delta_state_events.push(pdu); - } - } - } - - let encrypted_room = services - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .await - .is_ok(); - - let since_encryption = services - .rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") - .await; - - // Calculations: - let new_encrypted_room = encrypted_room && since_encryption.is_err(); - - let send_member_count = delta_state_events - .iter() - .any(|event| event.kind == RoomMember); - - if encrypted_room { - for state_event in &delta_state_events { - if state_event.kind != RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - } - - let content: RoomMemberEventContent = state_event.get_content()?; - - match content.membership { - MembershipState::Join => { - // A new user joined an encrypted room - if 
!share_encrypted_room(services, sender_user, &user_id, Some(room_id)).await { - device_list_updates.insert(user_id); - } - }, - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - }, - _ => {}, - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( - services - .rooms - .state_cache - .room_members(room_id) - // Don't send key updates from the sender to the sender - .ready_filter(|user_id| sender_user != *user_id) - // Only send keys if the sender doesn't share an encrypted room with the target - // already - .filter_map(|user_id| { - share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .map(|res| res.or_some(user_id.to_owned())) - }) - .collect::>() - .await, - ); - } - - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts().await? - } else { - (None, None, Vec::new()) - }; - - let mut state_events = delta_state_events; - let mut lazy_loaded = HashSet::new(); - - // Mark all member events we're returning as lazy-loaded - for pdu in &state_events { - if pdu.kind == RoomMember { - match UserId::parse( - pdu.state_key - .as_ref() - .expect("State event has state key") - .clone(), - ) { - Ok(state_key_userid) => { - lazy_loaded.insert(state_key_userid); - }, - Err(e) => error!("Invalid state key for member event: {}", e), - } - } - } - - // Fetch contextual member state events for events from the timeline, and - // mark them as lazy-loaded as well. 
- for (_, event) in &timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } - - if !services - .rooms - .lazy_loading - .lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender) - .await || lazy_load_send_redundant - { - if let Ok(member_event) = services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, event.sender.as_str()) - .await - { - lazy_loaded.insert(event.sender.clone()); - state_events.push(member_event); - } - } - } - - services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ); - - ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) - } - }; - - // Look for device list updates in this room - device_list_updates.extend( - services - .users - .room_keys_changed(room_id, since, None) - .map(|(user_id, _)| user_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - let notification_count = if send_notification_counts { - Some( - services - .rooms - .user - .notification_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), + calculate_state_changes( + services, + sender_user, + sender_device, + &room_id, + next_batchcount, + lazy_load_enabled, + lazy_load_send_redundant, + full_state, + &mut device_list_updates, + &mut left_encrypted_users, + since_shortstatehash, + current_shortstatehash, + &timeline_pdus, + &timeline_users, ) - } else { - None - }; - - let highlight_count = if send_notification_counts { - Some( - services - .rooms - .user - .highlight_count(sender_user, room_id) - .await - .try_into() - .expect("highlight count can't go that high"), - ) - } else { - None + .boxed() + .await? 
}; let prev_batch = timeline_pdus @@ -946,18 +629,45 @@ async fn load_joined_room( .as_ref() .map(ToString::to_string); - let room_events: Vec<_> = timeline_pdus + let notification_count: OptionFuture<_> = send_notification_counts + .then(|| { + services + .rooms + .user + .notification_count(sender_user, &room_id) + .map(TryInto::try_into) + .unwrap_or(uint!(0)) + }) + .into(); + + let highlight_count: OptionFuture<_> = send_notification_counts + .then(|| { + services + .rooms + .user + .highlight_count(sender_user, &room_id) + .map(TryInto::try_into) + .unwrap_or(uint!(0)) + }) + .into(); + + let room_events = timeline_pdus .iter() .stream() .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect() - .await; + .collect(); - let edus: HashMap> = services + let account_data_events = services + .account_data + .changes_since(Some(&room_id), sender_user, since) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect(); + + let receipt_events = services .rooms .read_receipt - .readreceipts_since(room_id, since) + .readreceipts_since(&room_id, since) .filter_map(|(read_user, _, edu)| async move { services .users @@ -965,19 +675,35 @@ async fn load_joined_room( .await .or_some((read_user.to_owned(), edu)) }) - .collect() + .collect::>>(); + + // Look for device list updates in this room + let device_updates = services + .users + .room_keys_changed(&room_id, since, None) + .map(|(user_id, _)| user_id) + .map(ToOwned::to_owned) + .collect::>(); + + let events = join3(room_events, account_data_events, receipt_events); + let unread_notifications = join(notification_count, highlight_count); + let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) + .boxed() .await; - let mut edus: Vec> = edus.into_values().collect(); + let (room_events, account_data_events, receipt_events) = events; + let (notification_count, highlight_count) = 
unread_notifications; + device_list_updates.extend(device_updates); - if services.rooms.typing.last_typing_update(room_id).await? > since { + let mut edus: Vec> = receipt_events.into_values().collect(); + if services.rooms.typing.last_typing_update(&room_id).await? > since { edus.push( serde_json::from_str( &serde_json::to_string( &services .rooms .typing - .typings_all(room_id, sender_user) + .typings_all(&room_id, sender_user) .await?, ) .expect("event is valid, we just created it"), @@ -991,22 +717,22 @@ async fn load_joined_room( services .rooms .user - .associate_token_shortstatehash(room_id, next_batch, current_shortstatehash) + .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash) .await; - Ok(JoinedRoom { + let joined_room = JoinedRoom { account_data: RoomAccountData { - events: services - .account_data - .changes_since(Some(room_id), sender_user, since) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect() - .await, + events: account_data_events, }, summary: RoomSummary { - heroes, joined_member_count: joined_member_count.map(ruma_from_u64), invited_member_count: invited_member_count.map(ruma_from_u64), + heroes: heroes + .into_iter() + .flatten() + .map(TryInto::try_into) + .filter_map(Result::ok) + .collect(), }, unread_notifications: UnreadNotificationsCount { highlight_count, @@ -1027,5 +753,407 @@ async fn load_joined_room( events: edus, }, unread_thread_notifications: BTreeMap::new(), + }; + + Ok((joined_room, device_list_updates, left_encrypted_users)) +} + +#[allow(clippy::too_many_arguments)] +async fn calculate_state_changes( + services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, + lazy_load_enabled: bool, lazy_load_send_redundant: bool, full_state: bool, + device_list_updates: &mut HashSet, left_encrypted_users: &mut HashSet, + since_shortstatehash: Option, current_shortstatehash: ShortStateHash, + timeline_pdus: &Vec<(PduCount, 
PduEvent)>, timeline_users: &HashSet, +) -> Result { + let since_sender_member: OptionFuture<_> = since_shortstatehash + .map(|short| { + services + .rooms + .state_accessor + .state_get_content(short, &StateEventType::RoomMember, sender_user.as_str()) + .ok() + }) + .into(); + + let joined_since_last_sync = since_sender_member + .await + .flatten() + .map_or(true, |content: RoomMemberEventContent| { + content.membership != MembershipState::Join + }); + + if since_shortstatehash.is_none() || joined_since_last_sync { + calculate_state_initial( + services, + sender_user, + sender_device, + room_id, + next_batchcount, + lazy_load_enabled, + full_state, + current_shortstatehash, + timeline_users, + ) + .await + } else { + calculate_state_incremental( + services, + sender_user, + sender_device, + room_id, + next_batchcount, + lazy_load_send_redundant, + full_state, + device_list_updates, + left_encrypted_users, + since_shortstatehash, + current_shortstatehash, + timeline_pdus, + joined_since_last_sync, + ) + .await + } +} + +#[allow(clippy::too_many_arguments)] +async fn calculate_state_initial( + services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, + lazy_load_enabled: bool, full_state: bool, current_shortstatehash: ShortStateHash, + timeline_users: &HashSet, +) -> Result { + // Probably since = 0, we will do an initial sync + let state = services + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await? 
+ .into_iter() + .stream() + .filter_map(|(shortstatekey, event_id): (ShortStateKey, OwnedEventId)| { + services + .rooms + .short + .get_statekey_from_short(shortstatekey) + .map_ok(move |(event_type, state_key)| ((event_type, state_key), event_id)) + .ok() + }) + .fold((Vec::new(), HashSet::new()), |a, item| async move { + let (mut state_events, mut lazy_loaded) = a; + let ((event_type, state_key), event_id) = item; + + if event_type != StateEventType::RoomMember { + let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { + error!("Pdu in state not found: {event_id}"); + return (state_events, lazy_loaded); + }; + + state_events.push(pdu); + return (state_events, lazy_loaded); + } + + // TODO: Delete "element_hacks" when this is resolved: https://github.com/vector-im/element-web/issues/22565 + if !lazy_load_enabled + || full_state + || timeline_users.contains(&state_key) + || (cfg!(feature = "element_hacks") && *sender_user == state_key) + { + let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { + error!("Pdu in state not found: {event_id}"); + return (state_events, lazy_loaded); + }; + + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(&state_key) { + lazy_loaded.insert(uid); + } + + state_events.push(pdu); + } + + (state_events, lazy_loaded) + }) + .map(Ok); + + let counts = calculate_counts(services, room_id, sender_user); + let ((joined_member_count, invited_member_count, heroes), (state_events, lazy_loaded)) = + try_join(counts, state).boxed().await?; + + // Reset lazy loading because this is an initial sync + services + .rooms + .lazy_loading + .lazy_load_reset(sender_user, sender_device, room_id) + .await; + + // The state_events above should contain all timeline_users, let's mark them as + // lazy loaded. 
+ services + .rooms + .lazy_loading + .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount); + + Ok(StateChanges { + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync: true, + state_events, }) } + +#[allow(clippy::too_many_arguments)] +async fn calculate_state_incremental( + services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, + lazy_load_send_redundant: bool, full_state: bool, device_list_updates: &mut HashSet, + left_encrypted_users: &mut HashSet, since_shortstatehash: Option, + current_shortstatehash: ShortStateHash, timeline_pdus: &Vec<(PduCount, PduEvent)>, joined_since_last_sync: bool, +) -> Result { + // Incremental /sync + let since_shortstatehash = since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); + + let mut delta_state_events = Vec::new(); + + if since_shortstatehash != current_shortstatehash { + let current_state_ids = services + .rooms + .state_accessor + .state_full_ids(current_shortstatehash); + + let since_state_ids = services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash); + + let (current_state_ids, since_state_ids): (HashMap<_, OwnedEventId>, HashMap<_, OwnedEventId>) = + try_join(current_state_ids, since_state_ids).await?; + + current_state_ids + .iter() + .stream() + .ready_filter(|(key, id)| full_state || since_state_ids.get(key) != Some(id)) + .filter_map(|(_, id)| services.rooms.timeline.get_pdu(id).ok()) + .ready_for_each(|pdu| delta_state_events.push(pdu)) + .await; + } + + let encrypted_room = services + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .is_ok(); + + let since_encryption = services + .rooms + .state_accessor + .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") + .is_ok(); + + let (encrypted_room, since_encryption) = join(encrypted_room, since_encryption).await; + + // 
Calculations: + let new_encrypted_room = encrypted_room && !since_encryption; + + let send_member_count = delta_state_events + .iter() + .any(|event| event.kind == RoomMember); + + if encrypted_room { + for state_event in &delta_state_events { + if state_event.kind != RoomMember { + continue; + } + + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + if user_id == sender_user { + continue; + } + + let content: RoomMemberEventContent = state_event.get_content()?; + + match content.membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(services, sender_user, &user_id, Some(room_id)).await { + device_list_updates.insert(user_id); + } + }, + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + }, + _ => {}, + } + } + } + } + + if joined_since_last_sync && encrypted_room || new_encrypted_room { + let updates: Vec = services + .rooms + .state_cache + .room_members(room_id) + .ready_filter(|user_id| sender_user != *user_id) + .filter_map(|user_id| { + share_encrypted_room(services, sender_user, user_id, Some(room_id)) + .map(|res| res.or_some(user_id.to_owned())) + }) + .collect() + .await; + + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend(updates); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + calculate_counts(services, room_id, sender_user).await? 
+ } else { + (None, None, None) + }; + + let mut state_events = delta_state_events; + + // Mark all member events we're returning as lazy-loaded + let mut lazy_loaded = state_events + .iter() + .filter(|pdu| pdu.kind == RoomMember) + .filter_map(|pdu| { + pdu.state_key + .clone() + .map(TryInto::try_into) + .map(LogDebugErr::log_debug_err) + .flat_ok() + }) + .fold(HashSet::new(), |mut lazy_loaded, user_id| { + lazy_loaded.insert(user_id); + lazy_loaded + }); + + // Fetch contextual member state events for events from the timeline, and + // mark them as lazy-loaded as well. + for (_, event) in timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + let sent_before: OptionFuture<_> = (!lazy_load_send_redundant) + .then(|| { + services.rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + room_id, + &event.sender, + ) + }) + .into(); + + let member_event: OptionFuture<_> = sent_before + .await + .is_none_or(is_false!()) + .then(|| { + services.rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomMember, + event.sender.as_str(), + ) + }) + .into(); + + let Some(Ok(member_event)) = member_event.await else { + continue; + }; + + lazy_loaded.insert(event.sender.clone()); + state_events.push(member_event); + } + + services + .rooms + .lazy_loading + .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount); + + Ok(StateChanges { + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + }) +} + +async fn calculate_counts( + services: &Services, room_id: &RoomId, sender_user: &UserId, +) -> Result<(Option, Option, Option>)> { + let joined_member_count = services + .rooms + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let invited_member_count = services + .rooms + .state_cache + .room_invited_count(room_id) + .unwrap_or(0); + + let (joined_member_count, invited_member_count) = join(joined_member_count, 
invited_member_count).await; + + let small_room = joined_member_count.saturating_add(invited_member_count) > 5; + + let heroes: OptionFuture<_> = small_room + .then(|| calculate_heroes(services, room_id, sender_user)) + .into(); + + Ok((Some(joined_member_count), Some(invited_member_count), heroes.await)) +} + +async fn calculate_heroes(services: &Services, room_id: &RoomId, sender_user: &UserId) -> Vec { + services + .rooms + .timeline + .all_pdus(sender_user, room_id) + .ready_filter(|(_, pdu)| pdu.kind == RoomMember) + .fold_default(|heroes: Vec<_>, (_, pdu)| fold_hero(heroes, services, room_id, sender_user, pdu)) + .await +} + +async fn fold_hero( + mut heroes: Vec, services: &Services, room_id: &RoomId, sender_user: &UserId, pdu: PduEvent, +) -> Vec { + let Some(user_id): Option<&UserId> = pdu.state_key.as_deref().map(TryInto::try_into).flat_ok() else { + return heroes; + }; + + if user_id == sender_user { + return heroes; + } + + let Ok(content): Result = pdu.get_content() else { + return heroes; + }; + + // The membership was and still is invite or join + if !matches!(content.membership, MembershipState::Join | MembershipState::Invite) { + return heroes; + } + + if heroes.iter().any(is_equal_to!(user_id)) { + return heroes; + } + + let (is_invited, is_joined) = join( + services.rooms.state_cache.is_invited(user_id, room_id), + services.rooms.state_cache.is_joined(user_id, room_id), + ) + .await; + + if !is_joined && is_invited { + return heroes; + } + + heroes.push(user_id.to_owned()); + heroes +} From da984d49cf1b3c4942662d325e04b40d8ef6d932 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 7 Dec 2024 04:42:35 +0000 Subject: [PATCH 0345/1248] remove unnecessary tracing of pdu conversions Signed-off-by: Jason Volk --- src/core/pdu/redact.rs | 7 +++---- src/core/pdu/strip.rs | 22 +++++++++++----------- src/service/rooms/timeline/mod.rs | 2 +- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs 
index e116e563..01d9147c 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -11,7 +11,7 @@ use serde_json::{ value::{to_raw_value, RawValue as RawJsonValue}, }; -use crate::{implement, warn, Error, Result}; +use crate::{implement, Error, Result}; #[derive(Deserialize)] struct ExtractRedactedBecause { @@ -19,14 +19,13 @@ struct ExtractRedactedBecause { } #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> Result { +pub fn redact(&mut self, room_version_id: &RoomVersionId, reason: &Self) -> Result { self.unsigned = None; let mut content = serde_json::from_str(self.content.get()).map_err(|_| Error::bad_database("PDU in db has invalid content."))?; - redact_content_in_place(&mut content, &room_version_id, self.kind.to_string()) + redact_content_in_place(&mut content, room_version_id, self.kind.to_string()) .map_err(|e| Error::Redaction(self.sender.server_name().to_owned(), e))?; self.unsigned = Some( diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 30fee863..59457749 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -8,10 +8,10 @@ use ruma::{ }; use serde_json::{json, value::Value as JsonValue}; -use crate::{implement, warn}; +use crate::implement; +#[must_use] #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] pub fn to_sync_room_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); let mut json = json!({ @@ -36,8 +36,8 @@ pub fn to_sync_room_event(&self) -> Raw { } /// This only works for events that are also AnyRoomEvents. 
+#[must_use] #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] pub fn to_any_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); let mut json = json!({ @@ -62,8 +62,8 @@ pub fn to_any_event(&self) -> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } +#[must_use] #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] pub fn to_room_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); let mut json = json!({ @@ -88,8 +88,8 @@ pub fn to_room_event(&self) -> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } +#[must_use] #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] pub fn to_message_like_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); let mut json = json!({ @@ -114,8 +114,8 @@ pub fn to_message_like_event(&self) -> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } -#[implement(super::Pdu)] #[must_use] +#[implement(super::Pdu)] pub fn to_state_event_value(&self) -> JsonValue { let mut json = json!({ "content": self.content, @@ -134,14 +134,14 @@ pub fn to_state_event_value(&self) -> JsonValue { json } +#[must_use] #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] pub fn to_state_event(&self) -> Raw { serde_json::from_value(self.to_state_event_value()).expect("Raw::from_value always works") } +#[must_use] #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] pub fn to_sync_state_event(&self) -> Raw { let mut json = json!({ "content": self.content, @@ -159,8 +159,8 @@ pub fn to_sync_state_event(&self) -> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } +#[must_use] #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] pub fn to_stripped_state_event(&self) -> Raw { let json = json!({ "content": self.content, @@ -172,8 +172,8 @@ pub fn to_stripped_state_event(&self) 
-> Raw { serde_json::from_value(json).expect("Raw::from_value always works") } +#[must_use] #[implement(super::Pdu)] -#[tracing::instrument(skip(self), level = "debug")] pub fn to_stripped_spacechild_state_event(&self) -> Raw { let json = json!({ "content": self.content, @@ -186,8 +186,8 @@ pub fn to_stripped_spacechild_state_event(&self) -> Raw Raw> { let mut json = json!({ "content": self.content, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 0a96322b..b9fcdcd2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1024,7 +1024,7 @@ impl Service { let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; - pdu.redact(room_version_id, reason)?; + pdu.redact(&room_version_id, reason)?; let obj = utils::to_canonical_object(&pdu) .map_err(|e| err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON"))))?; From d921b823764330959179e3dd0fe9a0e56437a4a4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 7 Dec 2024 06:02:33 +0000 Subject: [PATCH 0346/1248] add options for console tracing EnvFilter details Signed-off-by: Jason Volk --- conduwuit-example.toml | 9 +++++++++ src/core/config/mod.rs | 13 +++++++++++++ src/main/tracing.rs | 8 ++++++-- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index bdccdb99..446375b8 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -604,6 +604,15 @@ # #log_span_events = "none" +# configures whether CONDUWUIT_LOG EnvFilter matches values using regular +# expressions. See the tracing_subscriber documentation on Directives. 
+# +#log_filter_regex = true + +# toggles the display of ThreadId in tracing log output +# +#log_thread_ids = false + # OpenID token expiration/TTL in seconds # # These are the OpenID tokens that are primarily used for Matrix account diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 117b4da5..0e25b197 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -698,6 +698,19 @@ pub struct Config { #[serde(default = "default_log_span_events")] pub log_span_events: String, + /// configures whether CONDUWUIT_LOG EnvFilter matches values using regular + /// expressions. See the tracing_subscriber documentation on Directives. + /// + /// default: true + #[serde(default = "true_fn")] + pub log_filter_regex: bool, + + /// toggles the display of ThreadId in tracing log output + /// + /// default: false + #[serde(default)] + pub log_thread_ids: bool, + /// OpenID token expiration/TTL in seconds /// /// These are the OpenID tokens that are primarily used for Matrix account diff --git a/src/main/tracing.rs b/src/main/tracing.rs index c28fef6b..efa47bab 100644 --- a/src/main/tracing.rs +++ b/src/main/tracing.rs @@ -18,11 +18,15 @@ pub(crate) type TracingFlameGuard = (); pub(crate) fn init(config: &Config) -> Result<(LogLevelReloadHandles, TracingFlameGuard, Arc)> { let reload_handles = LogLevelReloadHandles::default(); - let console_filter = EnvFilter::try_new(&config.log).map_err(|e| err!(Config("log", "{e}.")))?; let console_span_events = fmt_span::from_str(&config.log_span_events).unwrap_or_err(); + let console_filter = EnvFilter::builder() + .with_regex(config.log_filter_regex) + .parse(&config.log) + .map_err(|e| err!(Config("log", "{e}.")))?; let console_layer = tracing_subscriber::fmt::Layer::new() .with_ansi(config.log_colors) - .with_span_events(console_span_events); + .with_span_events(console_span_events) + .with_thread_ids(config.log_thread_ids); let (console_reload_filter, console_reload_handle) = 
reload::Layer::new(console_filter.clone()); reload_handles.add("console", Box::new(console_reload_handle)); From aa6d0fcaa7f5f5ca472b0c3eda09e3c7278b6fb6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 7 Dec 2024 06:02:59 +0000 Subject: [PATCH 0347/1248] add unwrap_or_default() to future TryExt extension start an OptionFuture extension Signed-off-by: Jason Volk --- src/core/utils/future/mod.rs | 2 ++ src/core/utils/future/option_ext.rs | 22 ++++++++++++++++++++++ src/core/utils/future/try_ext_ext.rs | 14 ++++++++++++++ 3 files changed, 38 insertions(+) create mode 100644 src/core/utils/future/option_ext.rs diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 6d45b656..3d8ec8f4 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -1,3 +1,5 @@ +mod option_ext; mod try_ext_ext; +pub use option_ext::OptionExt; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs new file mode 100644 index 00000000..ed61de56 --- /dev/null +++ b/src/core/utils/future/option_ext.rs @@ -0,0 +1,22 @@ +#![allow(clippy::wrong_self_convention)] + +use futures::{future::OptionFuture, Future, FutureExt}; + +pub trait OptionExt { + fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send; + + fn is_some_and(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send; +} + +impl OptionExt for OptionFuture +where + Fut: Future + Send, +{ + fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { + self.map(|o| o.as_ref().is_none_or(f)) + } + + fn is_some_and(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { + self.map(|o| o.as_ref().is_some_and(f)) + } +} diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index 81c7aac0..977f74d2 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -39,6 +39,11 @@ where fn unwrap_or(self, default: 
Self::Ok) -> UnwrapOrElse Self::Ok> where Self: Sized; + + fn unwrap_or_default(self) -> UnwrapOrElse Self::Ok> + where + Self: Sized, + Self::Ok: Default; } impl TryExtExt for Fut @@ -89,4 +94,13 @@ where { self.unwrap_or_else(move |_| default) } + + #[inline] + fn unwrap_or_default(self) -> UnwrapOrElse Self::Ok> + where + Self: Sized, + Self::Ok: Default, + { + self.unwrap_or(Default::default()) + } } From 65a370836c5d824734a7d8b6229215fdd7b414ad Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 03:02:28 +0000 Subject: [PATCH 0348/1248] derive specific ReadOptions for iterators Signed-off-by: Jason Volk --- src/database/map.rs | 12 +++++++++--- src/database/map/keys.rs | 2 +- src/database/map/keys_from.rs | 2 +- src/database/map/rev_keys.rs | 2 +- src/database/map/rev_keys_from.rs | 2 +- src/database/map/rev_stream.rs | 2 +- src/database/map/rev_stream_from.rs | 2 +- src/database/map/stream.rs | 2 +- 8 files changed, 16 insertions(+), 10 deletions(-) diff --git a/src/database/map.rs b/src/database/map.rs index a15d5e9d..d9601599 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -116,18 +116,24 @@ fn open(db: &Arc, name: &str) -> Result> { } #[inline] -fn cache_read_options_default() -> ReadOptions { +pub(crate) fn iter_options_default() -> ReadOptions { + let mut read_options = read_options_default(); + read_options +} + +#[inline] +pub(crate) fn cache_read_options_default() -> ReadOptions { let mut read_options = read_options_default(); read_options.set_read_tier(ReadTier::BlockCache); read_options } #[inline] -fn read_options_default() -> ReadOptions { +pub(crate) fn read_options_default() -> ReadOptions { let mut read_options = ReadOptions::default(); read_options.set_total_order_seek(true); read_options } #[inline] -fn write_options_default() -> WriteOptions { WriteOptions::default() } +pub(crate) fn write_options_default() -> WriteOptions { WriteOptions::default() } diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs 
index 80cf1e15..3c4d491b 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -15,6 +15,6 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn raw_keys(&self) -> impl Stream>> + Send { - let opts = super::read_options_default(); + let opts = super::iter_options_default(); stream::Keys::new(&self.db, &self.cf, opts).init(None) } diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 7be3dd1d..40c608f2 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -46,7 +46,7 @@ where { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); let seek = Seek { map: self.clone(), diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 45a0203f..1e000a47 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -15,6 +15,6 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn rev_raw_keys(&self) -> impl Stream>> + Send { - let opts = super::read_options_default(); + let opts = super::iter_options_default(); stream::KeysRev::new(&self.db, &self.cf, opts).init(None) } diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index 2b59a5d7..b5565aa4 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -48,7 +48,7 @@ where { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); let seek = Seek { map: self.clone(), diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index 37b0d3b3..5f61cb08 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -23,6 +23,6 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), 
fields(%self), level = "trace")] pub fn rev_raw_stream(&self) -> impl Stream>> + Send { - let opts = super::read_options_default(); + let opts = super::iter_options_default(); stream::ItemsRev::new(&self.db, &self.cf, opts).init(None) } diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 9811d106..542a5ba8 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -73,7 +73,7 @@ where { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index 4f4fbd08..67cfff1b 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -22,6 +22,6 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] pub fn raw_stream(&self) -> impl Stream>> + Send { - let opts = super::read_options_default(); + let opts = super::iter_options_default(); stream::Items::new(&self.db, &self.cf, opts).init(None) } From 139a1ac5048aa6e18dae75ae8ec035f7e6572f11 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 03:07:08 +0000 Subject: [PATCH 0349/1248] configure the tokio runtime tunables Signed-off-by: Jason Volk --- src/main/main.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/main/main.rs b/src/main/main.rs index 32d122f6..67fb643f 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -20,6 +20,9 @@ use tokio::runtime; const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; +const GLOBAL_QUEUE_INTERVAL: u32 = 192; +const SYSTEM_QUEUE_INTERVAL: u32 = 256; +const SYSTEM_EVENTS_PER_TICK: usize = 512; rustc_flags_capture! 
{} @@ -31,6 +34,9 @@ fn main() -> Result<(), Error> { .thread_name(WORKER_NAME) .worker_threads(args.worker_threads.max(WORKER_MIN)) .thread_keep_alive(Duration::from_secs(WORKER_KEEPALIVE)) + .global_queue_interval(GLOBAL_QUEUE_INTERVAL) + .event_interval(SYSTEM_QUEUE_INTERVAL) + .max_io_events_per_tick(SYSTEM_EVENTS_PER_TICK) .build() .expect("built runtime"); From 6aa35260e688a5b5cffa28c5c755db6bb9deab1c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 03:03:34 +0000 Subject: [PATCH 0350/1248] misc database options tweaks Signed-off-by: Jason Volk --- deps/rust-rocksdb/lib.rs | 1 + src/database/map.rs | 2 ++ src/database/opts.rs | 20 +++++++++++++++----- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/deps/rust-rocksdb/lib.rs b/deps/rust-rocksdb/lib.rs index 2d53d4b1..2e699efb 100644 --- a/deps/rust-rocksdb/lib.rs +++ b/deps/rust-rocksdb/lib.rs @@ -5,6 +5,7 @@ pub use rust_rocksdb::*; extern "C" { pub fn rocksdb_list_column_families(); pub fn rocksdb_logger_create_stderr_logger(); + pub fn rocksdb_logger_create_callback_logger(); pub fn rocksdb_options_set_info_log(); pub fn rocksdb_get_options_from_string(); pub fn rocksdb_writebatch_create(); diff --git a/src/database/map.rs b/src/database/map.rs index d9601599..0f4d740a 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -118,6 +118,8 @@ fn open(db: &Arc, name: &str) -> Result> { #[inline] pub(crate) fn iter_options_default() -> ReadOptions { let mut read_options = read_options_default(); + read_options.set_background_purge_on_iterator_cleanup(true); + //read_options.set_pin_data(true); read_options } diff --git a/src/database/opts.rs b/src/database/opts.rs index 28a39cca..f1b4d3a9 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -3,7 +3,7 @@ use std::{cmp, collections::HashMap, convert::TryFrom}; use conduit::{err, utils, Config, Result}; use rocksdb::{ statistics::StatsLevel, BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, 
DBRecoveryMode, Env, - LogLevel, Options, UniversalCompactOptions, UniversalCompactionStopStyle, + LogLevel, LruCacheOptions, Options, UniversalCompactOptions, UniversalCompactionStopStyle, }; /// Create database-wide options suitable for opening the database. This also @@ -24,9 +24,9 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ set_logging_defaults(&mut opts, config); // Processing - opts.set_enable_pipelined_write(true); opts.set_max_background_jobs(num_threads::(config)?); opts.set_max_subcompactions(num_threads::(config)?); + opts.set_avoid_unnecessary_blocking_io(true); opts.set_max_file_opening_threads(0); if config.rocksdb_compaction_prio_idle { env.lower_thread_pool_cpu_priority(); @@ -34,6 +34,7 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ // IO opts.set_manual_wal_flush(true); + opts.set_enable_pipelined_write(true); if config.rocksdb_direct_io { opts.set_use_direct_reads(true); opts.set_use_direct_io_for_flush_and_compaction(true); @@ -60,6 +61,7 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ opts.set_min_write_buffer_number(1); // Files + opts.set_table_cache_num_shard_bits(7); opts.set_max_total_wal_size(96 * 1024 * 1024); set_level_defaults(&mut opts, config); @@ -328,10 +330,16 @@ fn uc_options(_config: &Config) -> UniversalCompactOptions { } fn set_table_with_new_cache( - opts: &mut Options, config: &Config, cache: &mut HashMap, name: &str, size: usize, + opts: &mut Options, config: &Config, caches: &mut HashMap, name: &str, size: usize, ) { - cache.insert(name.to_owned(), Cache::new_lru_cache(size)); - set_table_with_shared_cache(opts, config, cache, name, name); + let mut cache_opts = LruCacheOptions::default(); + cache_opts.set_capacity(size); + cache_opts.set_num_shard_bits(7); + + let cache = Cache::new_lru_cache_opts(&cache_opts); + caches.insert(name.into(), cache); + + set_table_with_shared_cache(opts, config, caches, name, 
name); } fn set_table_with_shared_cache( @@ -343,6 +351,7 @@ fn set_table_with_shared_cache( .get(cache_name) .expect("existing cache to share with this column"), ); + opts.set_block_based_table_factory(&table); } @@ -361,6 +370,7 @@ fn table_options(_config: &Config) -> BlockBasedOptions { opts.set_block_size(4 * 1024); opts.set_metadata_block_size(4 * 1024); + opts.set_use_delta_encoding(false); opts.set_optimize_filters_for_memory(true); opts.set_cache_index_and_filter_blocks(true); opts.set_pin_top_level_index_and_filter(true); From ff56ee7413f973e4f70feaf71a027d926ee42341 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 05:45:15 +0000 Subject: [PATCH 0351/1248] add back the consume_budget on cache hits Signed-off-by: Jason Volk --- src/database/map/get.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 04f5d0ae..79556656 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -2,8 +2,9 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; use conduit::{err, implement, utils::result::MapExpect, Err, Result}; -use futures::{future, Future, FutureExt}; +use futures::{Future, FutureExt}; use serde::Serialize; +use tokio::task; use crate::{ keyval::KeyBuf, @@ -63,7 +64,9 @@ where let cached = self.get_cached(key); if matches!(cached, Err(_) | Ok(Some(_))) { - return future::ready(cached.map_expect("data found in cache")).boxed(); + return task::consume_budget() + .map(move |()| cached.map_expect("data found in cache")) + .boxed(); } debug_assert!(matches!(cached, Ok(None)), "expected status Incomplete"); From 82133ee2ea91723c60f46cbd2ec538c3c11e92e7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 05:49:51 +0000 Subject: [PATCH 0352/1248] bump rust-rocksdb Signed-off-by: Jason Volk --- Cargo.lock | 4 ++-- deps/rust-rocksdb/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index e6659e03..fc8dc26d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3391,7 +3391,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.29.0+9.7.4" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=4bce1bb97d8be6f0d47245c99d465ca9cef33aad#4bce1bb97d8be6f0d47245c99d465ca9cef33aad" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=e2b8853ee8839dc886e88fd751fe50ed1c27b47e#e2b8853ee8839dc886e88fd751fe50ed1c27b47e" dependencies = [ "bindgen", "bzip2-sys", @@ -3408,7 +3408,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.33.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=4bce1bb97d8be6f0d47245c99d465ca9cef33aad#4bce1bb97d8be6f0d47245c99d465ca9cef33aad" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=e2b8853ee8839dc886e88fd751fe50ed1c27b47e#e2b8853ee8839dc886e88fd751fe50ed1c27b47e" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 64a8dcb6..c6c3fea5 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "4bce1bb97d8be6f0d47245c99d465ca9cef33aad" +rev = "e2b8853ee8839dc886e88fd751fe50ed1c27b47e" #branch = "master" default-features = false From f70fdca828d0c1e14601418fe337fb8444a76776 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 20:32:28 +0000 Subject: [PATCH 0353/1248] remove jemalloc_stats from main default features Signed-off-by: Jason Volk --- src/main/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index b9122942..ee434ceb 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -42,7 +42,6 @@ default = [ "gzip_compression", "io_uring", "jemalloc", - 
"jemalloc_stats", "release_max_log_level", "systemd", "zstd_compression", From a752fb99430b32e4cf7c3e2bd0a4cff9e8773980 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 13:47:02 +0000 Subject: [PATCH 0354/1248] rename mod with tracing inits due to macro issues Signed-off-by: Jason Volk --- src/main/{tracing.rs => logging.rs} | 0 src/main/main.rs | 2 +- src/main/server.rs | 4 ++-- 3 files changed, 3 insertions(+), 3 deletions(-) rename src/main/{tracing.rs => logging.rs} (100%) diff --git a/src/main/tracing.rs b/src/main/logging.rs similarity index 100% rename from src/main/tracing.rs rename to src/main/logging.rs diff --git a/src/main/main.rs b/src/main/main.rs index 67fb643f..c653444c 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -1,10 +1,10 @@ pub(crate) mod clap; +mod logging; mod mods; mod restart; mod sentry; mod server; mod signal; -mod tracing; extern crate conduit_core as conduit; diff --git a/src/main/server.rs b/src/main/server.rs index 27fd0673..179749b5 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use conduit::{config::Config, info, log::Log, utils::sys, Error, Result}; use tokio::{runtime, sync::Mutex}; -use crate::{clap::Args, tracing::TracingFlameGuard}; +use crate::{clap::Args, logging::TracingFlameGuard}; /// Server runtime state; complete pub(crate) struct Server { @@ -33,7 +33,7 @@ impl Server { #[cfg(feature = "sentry_telemetry")] let sentry_guard = crate::sentry::init(&config); - let (tracing_reload_handle, tracing_flame_guard, capture) = crate::tracing::init(&config)?; + let (tracing_reload_handle, tracing_flame_guard, capture) = crate::logging::init(&config)?; config.check()?; From 35e9d9b02e5bcb4562e84636ee5bd4d4e768d746 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 7 Dec 2024 00:44:51 -0500 Subject: [PATCH 0355/1248] fix duplicate timezone keys being sent on profile lookup requests Signed-off-by: strawberry --- src/api/client/profile.rs | 16 +++++++++++----- 
src/api/server/query.rs | 4 ++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 32f7a723..690d0ad7 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -273,16 +273,22 @@ pub(crate) async fn get_profile_route( return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); } + let mut custom_profile_fields: BTreeMap = services + .users + .all_profile_keys(&body.user_id) + .collect() + .await; + + // services.users.timezone will collect the MSC4175 timezone key if it exists + custom_profile_fields.remove("us.cloke.msc4175.tz"); + custom_profile_fields.remove("m.tz"); + Ok(get_profile::v3::Response { avatar_url: services.users.avatar_url(&body.user_id).await.ok(), blurhash: services.users.blurhash(&body.user_id).await.ok(), displayname: services.users.displayname(&body.user_id).await.ok(), tz: services.users.timezone(&body.user_id).await.ok(), - custom_profile_fields: services - .users - .all_profile_keys(&body.user_id) - .collect() - .await, + custom_profile_fields, }) } diff --git a/src/api/server/query.rs b/src/api/server/query.rs index bf515b3c..edbeee77 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -117,6 +117,10 @@ pub(crate) async fn get_profile_information_route( }, } + // services.users.timezone will collect the MSC4175 timezone key if it exists + custom_profile_fields.remove("us.cloke.msc4175.tz"); + custom_profile_fields.remove("m.tz"); + Ok(get_profile_information::v1::Response { displayname, avatar_url, From c070edc18954a4a8cf6cde6ebe6f1dd6d91e68fe Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 7 Dec 2024 00:46:22 -0500 Subject: [PATCH 0356/1248] fix profile updates reusing old membership content + small parallelise + remove unnecessary Result Signed-off-by: strawberry --- src/admin/user/commands.rs | 12 ++-- src/api/client/account.rs | 10 +-- src/api/client/profile.rs | 143 +++++++++++++++++++++---------------- 3 
files changed, 89 insertions(+), 76 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 444a7f37..61096625 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -213,8 +213,8 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> .await; full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?; - update_displayname(self.services, &user_id, None, &all_joined_rooms).await?; - update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await?; + update_displayname(self.services, &user_id, None, &all_joined_rooms).await; + update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await; leave_all_rooms(self.services, &user_id).await; } @@ -327,12 +327,8 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> .await; full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?; - update_displayname(self.services, &user_id, None, &all_joined_rooms) - .await - .ok(); - update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms) - .await - .ok(); + update_displayname(self.services, &user_id, None, &all_joined_rooms).await; + update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await; leave_all_rooms(self.services, &user_id).await; } }, diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 47f6fec8..3595f581 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -648,8 +648,8 @@ pub(crate) async fn deactivate_route( .collect() .await; - super::update_displayname(&services, sender_user, None, &all_joined_rooms).await?; - super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await?; + super::update_displayname(&services, sender_user, None, &all_joined_rooms).await; + super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await; full_user_deactivate(&services, sender_user, 
&all_joined_rooms).await?; @@ -744,9 +744,9 @@ pub(crate) async fn check_registration_token_validity( pub async fn full_user_deactivate( services: &Services, user_id: &UserId, all_joined_rooms: &[OwnedRoomId], ) -> Result<()> { - services.users.deactivate_account(user_id).await?; - super::update_displayname(services, user_id, None, all_joined_rooms).await?; - super::update_avatar_url(services, user_id, None, None, all_joined_rooms).await?; + services.users.deactivate_account(user_id).await.ok(); + super::update_displayname(services, user_id, None, all_joined_rooms).await; + super::update_avatar_url(services, user_id, None, None, all_joined_rooms).await; services .users diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 690d0ad7..97ea21ea 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -1,10 +1,12 @@ +use std::collections::BTreeMap; + use axum::extract::State; use conduit::{ pdu::PduBuilder, utils::{stream::TryIgnore, IterStream}, warn, Err, Error, Result, }; -use futures::{StreamExt, TryStreamExt}; +use futures::{future::join3, StreamExt, TryStreamExt}; use ruma::{ api::{ client::{ @@ -13,7 +15,7 @@ use ruma::{ }, federation, }, - events::{room::member::RoomMemberEventContent, StateEventType}, + events::room::member::{MembershipState, RoomMemberEventContent}, presence::PresenceState, OwnedMxcUri, OwnedRoomId, UserId, }; @@ -43,7 +45,7 @@ pub(crate) async fn set_displayname_route( .collect() .await; - update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms).await?; + update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms).await; if services.globals.allow_local_presence() { // Presence update @@ -138,7 +140,7 @@ pub(crate) async fn set_avatar_url_route( body.blurhash.clone(), &all_joined_rooms, ) - .await?; + .await; if services.globals.allow_local_presence() { // Presence update @@ -294,79 +296,43 @@ pub(crate) async fn get_profile_route( pub async fn 
update_displayname( services: &Services, user_id: &UserId, displayname: Option, all_joined_rooms: &[OwnedRoomId], -) -> Result<()> { - let current_display_name = services.users.displayname(user_id).await.ok(); +) { + let (current_avatar_url, current_blurhash, current_displayname) = join3( + services.users.avatar_url(user_id), + services.users.blurhash(user_id), + services.users.displayname(user_id), + ) + .await; - if displayname == current_display_name { - return Ok(()); + let current_avatar_url = current_avatar_url.ok(); + let current_blurhash = current_blurhash.ok(); + let current_displayname = current_displayname.ok(); + + if displayname == current_displayname { + return; } services.users.set_displayname(user_id, displayname.clone()); // Send a new join membership event into all joined rooms - let mut joined_rooms = Vec::new(); - for room_id in all_joined_rooms { - let Ok(content) = services - .rooms - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str()) - .await - else { - continue; - }; - - let pdu = PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent { - displayname: displayname.clone(), - join_authorized_via_users_server: None, - ..content - }, - ); - - joined_rooms.push((pdu, room_id)); - } - - update_all_rooms(services, joined_rooms, user_id).await; - - Ok(()) -} - -pub async fn update_avatar_url( - services: &Services, user_id: &UserId, avatar_url: Option, blurhash: Option, - all_joined_rooms: &[OwnedRoomId], -) -> Result<()> { - let current_avatar_url = services.users.avatar_url(user_id).await.ok(); - let current_blurhash = services.users.blurhash(user_id).await.ok(); - - if current_avatar_url == avatar_url && current_blurhash == blurhash { - return Ok(()); - } - - services.users.set_avatar_url(user_id, avatar_url.clone()); - - services.users.set_blurhash(user_id, blurhash.clone()); - - // Send a new join membership event into all joined rooms - let avatar_url = &avatar_url; - let blurhash = 
&blurhash; + let avatar_url = ¤t_avatar_url; + let blurhash = ¤t_blurhash; + let displayname = &displayname; let all_joined_rooms: Vec<_> = all_joined_rooms .iter() .try_stream() .and_then(|room_id: &OwnedRoomId| async move { - let content = services - .rooms - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str()) - .await?; - let pdu = PduBuilder::state( user_id.to_string(), &RoomMemberEventContent { + displayname: displayname.clone(), + membership: MembershipState::Join, avatar_url: avatar_url.clone(), blurhash: blurhash.clone(), join_authorized_via_users_server: None, - ..content + reason: None, + is_direct: None, + third_party_invite: None, }, ); @@ -377,8 +343,59 @@ pub async fn update_avatar_url( .await; update_all_rooms(services, all_joined_rooms, user_id).await; +} - Ok(()) +pub async fn update_avatar_url( + services: &Services, user_id: &UserId, avatar_url: Option, blurhash: Option, + all_joined_rooms: &[OwnedRoomId], +) { + let (current_avatar_url, current_blurhash, current_displayname) = join3( + services.users.avatar_url(user_id), + services.users.blurhash(user_id), + services.users.displayname(user_id), + ) + .await; + + let current_avatar_url = current_avatar_url.ok(); + let current_blurhash = current_blurhash.ok(); + let current_displayname = current_displayname.ok(); + + if current_avatar_url == avatar_url && current_blurhash == blurhash { + return; + } + + services.users.set_avatar_url(user_id, avatar_url.clone()); + services.users.set_blurhash(user_id, blurhash.clone()); + + // Send a new join membership event into all joined rooms + let avatar_url = &avatar_url; + let blurhash = &blurhash; + let displayname = ¤t_displayname; + let all_joined_rooms: Vec<_> = all_joined_rooms + .iter() + .try_stream() + .and_then(|room_id: &OwnedRoomId| async move { + let pdu = PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent { + avatar_url: avatar_url.clone(), + blurhash: blurhash.clone(), + 
membership: MembershipState::Join, + displayname: displayname.clone(), + join_authorized_via_users_server: None, + reason: None, + is_direct: None, + third_party_invite: None, + }, + ); + + Ok((pdu, room_id)) + }) + .ignore_err() + .collect() + .await; + + update_all_rooms(services, all_joined_rooms, user_id).await; } pub async fn update_all_rooms( @@ -392,7 +409,7 @@ pub async fn update_all_rooms( .build_and_append_pdu(pdu_builder, user_id, room_id, &state_lock) .await { - warn!(%user_id, %room_id, %e, "Failed to update/send new profile join membership update in room"); + warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}"); } } } From a1b03690334442733bfb8dd0ebb8ef77f3efd1d7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 7 Dec 2024 00:53:15 -0500 Subject: [PATCH 0357/1248] reduce line width on banned_room_check Signed-off-by: strawberry --- src/api/client/membership.rs | 146 ++++++++++++++++++----------------- 1 file changed, 74 insertions(+), 72 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1cdf25a2..fa671213 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -56,87 +56,89 @@ use crate::{client::full_user_deactivate, Ruma}; async fn banned_room_check( services: &Services, user_id: &UserId, room_id: Option<&RoomId>, server_name: Option<&ServerName>, client_ip: IpAddr, -) -> Result<()> { - if !services.users.is_admin(user_id).await { - if let Some(room_id) = room_id { - if services.rooms.metadata.is_banned(room_id).await - || services - .globals - .config - .forbidden_remote_server_names - .contains(&room_id.server_name().unwrap().to_owned()) - { - warn!( - "User {user_id} who is not an admin attempted to send an invite for or attempted to join a banned \ - room or banned room server name: {room_id}" - ); +) -> Result { + if services.users.is_admin(user_id).await { + return Ok(()); + } - if 
services.globals.config.auto_deactivate_banned_room_attempts { - warn!("Automatically deactivating user {user_id} due to attempted banned room join"); - - if services.globals.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Automatically deactivating user {user_id} due to attempted banned room join from IP \ - {client_ip}" - ))) - .await - .ok(); - } - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(user_id) - .map(Into::into) - .collect() - .await; - - full_user_deactivate(services, user_id, &all_joined_rooms).await?; - } - - return Err!(Request(Forbidden("This room is banned on this homeserver."))); - } - } else if let Some(server_name) = server_name { - if services + if let Some(room_id) = room_id { + if services.rooms.metadata.is_banned(room_id).await + || services .globals .config .forbidden_remote_server_names - .contains(&server_name.to_owned()) - { - warn!( - "User {user_id} who is not an admin tried joining a room which has the server name {server_name} \ - that is globally forbidden. 
Rejecting.", - ); + .contains(&room_id.server_name().unwrap().to_owned()) + { + warn!( + "User {user_id} who is not an admin attempted to send an invite for or attempted to join a banned \ + room or banned room server name: {room_id}" + ); - if services.globals.config.auto_deactivate_banned_room_attempts { - warn!("Automatically deactivating user {user_id} due to attempted banned room join"); + if services.globals.config.auto_deactivate_banned_room_attempts { + warn!("Automatically deactivating user {user_id} due to attempted banned room join"); - if services.globals.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Automatically deactivating user {user_id} due to attempted banned room join from IP \ - {client_ip}" - ))) - .await - .ok(); - } - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(user_id) - .map(Into::into) - .collect() - .await; - - full_user_deactivate(services, user_id, &all_joined_rooms).await?; + if services.globals.config.admin_room_notices { + services + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "Automatically deactivating user {user_id} due to attempted banned room join from IP \ + {client_ip}" + ))) + .await + .ok(); } - return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); + let all_joined_rooms: Vec = services + .rooms + .state_cache + .rooms_joined(user_id) + .map(Into::into) + .collect() + .await; + + full_user_deactivate(services, user_id, &all_joined_rooms).await?; } + + return Err!(Request(Forbidden("This room is banned on this homeserver."))); + } + } else if let Some(server_name) = server_name { + if services + .globals + .config + .forbidden_remote_server_names + .contains(&server_name.to_owned()) + { + warn!( + "User {user_id} who is not an admin tried joining a room which has the server name {server_name} that \ + is globally forbidden. 
Rejecting.", + ); + + if services.globals.config.auto_deactivate_banned_room_attempts { + warn!("Automatically deactivating user {user_id} due to attempted banned room join"); + + if services.globals.config.admin_room_notices { + services + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "Automatically deactivating user {user_id} due to attempted banned room join from IP \ + {client_ip}" + ))) + .await + .ok(); + } + + let all_joined_rooms: Vec = services + .rooms + .state_cache + .rooms_joined(user_id) + .map(Into::into) + .collect() + .await; + + full_user_deactivate(services, user_id, &all_joined_rooms).await?; + } + + return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); } } From cf71aeef0b46015c53f39cf766abd2106d2c0bb8 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 7 Dec 2024 01:02:52 -0500 Subject: [PATCH 0358/1248] fix+improve membership endpoint UX, dont reuse old membership reasons, small parallelisation Signed-off-by: strawberry --- src/api/client/membership.rs | 114 ++++++++++++++++++++--------------- src/api/client/unstable.rs | 8 +-- 2 files changed, 69 insertions(+), 53 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index fa671213..7c3a597e 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -14,7 +14,7 @@ use conduit::{ utils::{shuffle, IterStream, ReadyExt}, warn, Err, Error, PduEvent, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{join, FutureExt, StreamExt}; use ruma::{ api::{ client::{ @@ -158,7 +158,7 @@ pub(crate) async fn join_room_by_id_route( State(services): State, InsecureClientIp(client_ip): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); banned_room_check( &services, @@ -334,9 +334,7 @@ pub(crate) async fn join_room_by_id_or_alias_route( pub(crate) async fn leave_room_route( 
State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - leave_room(&services, sender_user, &body.room_id, body.reason.clone()).await?; + leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()).await?; Ok(leave_room::v3::Response::new()) } @@ -349,7 +347,7 @@ pub(crate) async fn invite_user_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { info!( @@ -368,9 +366,17 @@ pub(crate) async fn invite_user_route( user_id, } = &body.recipient { - if services.users.user_is_ignored(sender_user, user_id).await { + let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); + let recipient_ignored_by_sender = services.users.user_is_ignored(user_id, sender_user); + + let (sender_ignored_recipient, recipient_ignored_by_sender) = + join!(sender_ignored_recipient, recipient_ignored_by_sender); + + if sender_ignored_recipient { return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms."))); - } else if services.users.user_is_ignored(user_id, sender_user).await { + } + + if recipient_ignored_by_sender { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked return Ok(invite_user::v3::Response {}); @@ -379,6 +385,7 @@ pub(crate) async fn invite_user_route( invite_helper(&services, sender_user, user_id, &body.room_id, body.reason.clone(), false) .boxed() .await?; + Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) @@ -391,16 +398,18 @@ pub(crate) async fn invite_user_route( pub(crate) async fn kick_user_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - let event: RoomMemberEventContent = services + let Ok(event) = services .rooms .state_accessor - .room_state_get_content(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .get_member(&body.room_id, &body.user_id) .await - .map_err(|_| err!(Request(BadState("Cannot kick member that's not in the room."))))?; + else { + // copy synapse's behaviour of returning 200 without any change to the state + // instead of erroring on left users + return Ok(kick_user::v3::Response::new()); + }; services .rooms @@ -411,10 +420,13 @@ pub(crate) async fn kick_user_route( &RoomMemberEventContent { membership: MembershipState::Leave, reason: body.reason.clone(), + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, ..event }, ), - sender_user, + body.sender_user(), &body.room_id, &state_lock, ) @@ -431,39 +443,38 @@ pub(crate) async fn kick_user_route( pub(crate) async fn ban_user_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); + + if sender_user == body.user_id { + return Err!(Request(Forbidden("You cannot ban yourself."))); + } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - let blurhash = services.users.blurhash(&body.user_id).await.ok(); - - let event = services + let current_member_content = services .rooms .state_accessor - .room_state_get_content(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .get_member(&body.room_id, &body.user_id) .await - .map_or_else( - |_| RoomMemberEventContent { - blurhash: blurhash.clone(), - reason: body.reason.clone(), - ..RoomMemberEventContent::new(MembershipState::Ban) - }, - |event| RoomMemberEventContent { - membership: MembershipState::Ban, - displayname: None, - avatar_url: None, - 
blurhash: blurhash.clone(), - reason: body.reason.clone(), - join_authorized_via_users_server: None, - ..event - }, - ); + .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban)); services .rooms .timeline .build_and_append_pdu( - PduBuilder::state(body.user_id.to_string(), &event), + PduBuilder::state( + body.user_id.to_string(), + &RoomMemberEventContent { + membership: MembershipState::Ban, + reason: body.reason.clone(), + displayname: None, // display name may be offensive + avatar_url: None, // avatar may be offensive + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + ..current_member_content + }, + ), sender_user, &body.room_id, &state_lock, @@ -481,16 +492,21 @@ pub(crate) async fn ban_user_route( pub(crate) async fn unban_user_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - let event: RoomMemberEventContent = services + let current_member_content = services .rooms .state_accessor - .room_state_get_content(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref()) + .get_member(&body.room_id, &body.user_id) .await - .map_err(|_| err!(Request(BadState("Cannot unban a user who is not banned."))))?; + .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Leave)); + + if current_member_content.membership != MembershipState::Ban { + return Err!(Request(Forbidden( + "Cannot ban a user who is not banned (current membership: {})", + current_member_content.membership + ))); + } services .rooms @@ -502,10 +518,12 @@ pub(crate) async fn unban_user_route( membership: MembershipState::Leave, reason: body.reason.clone(), join_authorized_via_users_server: None, - ..event + third_party_invite: None, + is_direct: None, + ..current_member_content }, ), - sender_user, + body.sender_user(), &body.room_id, &state_lock, ) @@ -528,7 +546,7 @@ 
pub(crate) async fn unban_user_route( pub(crate) async fn forget_room_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if services .rooms @@ -553,13 +571,11 @@ pub(crate) async fn forget_room_route( pub(crate) async fn joined_rooms_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(joined_rooms::v3::Response { joined_rooms: services .rooms .state_cache - .rooms_joined(sender_user) + .rooms_joined(body.sender_user()) .map(ToOwned::to_owned) .collect() .await, @@ -575,7 +591,7 @@ pub(crate) async fn joined_rooms_route( pub(crate) async fn get_member_events_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services .rooms @@ -608,7 +624,7 @@ pub(crate) async fn get_member_events_route( pub(crate) async fn joined_members_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services .rooms diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 5de41f44..3660d674 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -294,7 +294,7 @@ pub(crate) async fn set_profile_key_route( .collect() .await; - update_displayname(&services, &body.user_id, Some(profile_key_value.to_string()), &all_joined_rooms).await?; + update_displayname(&services, &body.user_id, Some(profile_key_value.to_string()), &all_joined_rooms).await; } else if body.key == "avatar_url" { let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string()); @@ -306,7 +306,7 @@ pub(crate) async fn set_profile_key_route( .collect() .await; - update_avatar_url(&services, &body.user_id, Some(mxc), None, 
&all_joined_rooms).await?; + update_avatar_url(&services, &body.user_id, Some(mxc), None, &all_joined_rooms).await; } else { services .users @@ -352,7 +352,7 @@ pub(crate) async fn delete_profile_key_route( .collect() .await; - update_displayname(&services, &body.user_id, None, &all_joined_rooms).await?; + update_displayname(&services, &body.user_id, None, &all_joined_rooms).await; } else if body.key == "avatar_url" { let all_joined_rooms: Vec = services .rooms @@ -362,7 +362,7 @@ pub(crate) async fn delete_profile_key_route( .collect() .await; - update_avatar_url(&services, &body.user_id, None, None, &all_joined_rooms).await?; + update_avatar_url(&services, &body.user_id, None, None, &all_joined_rooms).await; } else { services .users From 1606441d090952a7806140f060a0d1d7b34f94b9 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 20 Nov 2024 20:23:13 -0500 Subject: [PATCH 0359/1248] disable admin_room_notices for complement, update welcome text Signed-off-by: strawberry --- nix/pkgs/complement/config.toml | 1 + src/service/admin/grant.rs | 24 +++++++++++++----------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 7aaed2f4..631100fd 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -17,6 +17,7 @@ media_compat_file_link = false media_startup_check = false rocksdb_direct_io = false log_colors = false +admin_room_notices = false [global.tls] certs = "/certificate.crt" diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 405da982..090c0294 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -77,18 +77,20 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { } } - let welcome_message = String::from("## Thank you for trying out conduwuit!\n\nconduwuit is a fork of upstream Conduit which is in Beta. 
This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Git and Documentation: https://github.com/girlbossceo/conduwuit\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command):\n\nconduwuit room (Ask questions and get notified on updates):\n`/join #conduwuit:puppygock.gay`"); + if self.services.server.config.admin_room_notices { + let welcome_message = String::from("## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. 
There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`"); - // Send welcome message - self.services - .timeline - .build_and_append_pdu( - PduBuilder::timeline(&RoomMessageEventContent::text_markdown(welcome_message)), - server_user, - &room_id, - &state_lock, - ) - .await?; + // Send welcome message + self.services + .timeline + .build_and_append_pdu( + PduBuilder::timeline(&RoomMessageEventContent::text_markdown(welcome_message)), + server_user, + &room_id, + &state_lock, + ) + .await?; + } Ok(()) } From 61670370edab2cc54445afd441891c7cd4d294fe Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 7 Dec 2024 20:46:31 -0500 Subject: [PATCH 0360/1248] add rocksdb paranoid_file_checks config option, add some more config checks Signed-off-by: strawberry --- conduwuit-example.toml | 41 +++++++++++++++++------------- src/core/config/check.rs | 54 ++++++++++++++++++++++++---------------- src/core/config/mod.rs | 44 ++++++++++++++++++++------------ src/database/opts.rs | 2 ++ 4 files changed, 87 insertions(+), 54 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 446375b8..e179ce30 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -105,7 +105,7 @@ # # This defaults to 128.0 + (64.0 * CPU core count) # -#db_cache_capacity_mb = +#db_cache_capacity_mb = varies by system # Option to control 
adding arbitrary text to the end of the user's # displayname upon registration with a space before the text. This was the @@ -114,9 +114,9 @@ # # The default is the trans pride flag. # -# example: "🏳️⚧️" +# example: "🏳️‍⚧️" # -#new_user_displayname_suffix = "🏳️⚧️" +#new_user_displayname_suffix = "🏳️‍⚧️" # If enabled, conduwuit will send a simple GET request periodically to # `https://pupbrain.dev/check-for-updates/stable` for any new @@ -132,8 +132,7 @@ # with such as "auth_chain_cache_capacity". # # May be useful if you have significant memory to spare to increase -# performance. This was previously called -# `conduit_cache_capacity_modifier`. +# performance. # # If you have low memory, reducing this may be viable. # @@ -204,9 +203,7 @@ # longer running Matrix). Only decrease this if you are using an external # DNS cache. # -# default_dns_min_ttl: 259200 -# -#dns_min_ttl = +#dns_min_ttl = 10800 # Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. # This value is critical for the server to federate efficiently. @@ -217,7 +214,7 @@ # #dns_min_ttl_nxdomain = 259200 -# Number of retries after a timeout. +# Number of DNS nameserver retries after a timeout or error. # #dns_attempts = 10 @@ -547,7 +544,7 @@ # Currently, conduwuit doesn't support inbound batched key requests, so # this list should only contain other Synapse servers # -# example: ["matrix.org", "constellatory.net", "tchncs.de"] +# example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] # #trusted_servers = ["matrix.org"] @@ -689,14 +686,14 @@ # room invites) are ignored here. # # Defaults to false as rooms can be banned for non-moderation-related -# reasons +# reasons and this performs a full user deactivation # #auto_deactivate_banned_room_attempts = false # RocksDB log level. This is not the same as conduwuit's log level. This # is the log level for the RocksDB engine/library which show up in your # database folder/path as `LOG` files. 
conduwuit will log RocksDB errors -# as normal through tracing. +# as normal through tracing or panics if severe for safety. # #rocksdb_log_level = "error" @@ -745,7 +742,7 @@ # operatons such as cleanup, sync, flush, compaction, etc. Set to 0 to use # all your logical threads. Defaults to your CPU logical thread count. # -#rocksdb_parallelism_threads = 0 +#rocksdb_parallelism_threads = varies by system # Maximum number of LOG files RocksDB will keep. This must *not* be set to # 0. It must be at least 1. Defaults to 3 as these are not very useful @@ -833,6 +830,14 @@ # #rocksdb_recovery_mode = 1 +# Enables or disables paranoid SST file checks. This can improve RocksDB +# database consistency at a potential performance impact due to further +# safety checks ran. +# +# See https://github.com/facebook/rocksdb/wiki/Online-Verification#columnfamilyoptionsparanoid_file_checks for more information. +# +#rocksdb_paranoid_file_checks = false + # Database repair mode (for RocksDB SST corruption) # # Use this option when the server reports corruption while running or @@ -1119,12 +1124,14 @@ # # To disable, set this to be an empty vector (`[]`). # +# Defaults to: +# ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", # "192.168.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "169.254.0.0/16", # "192.88.99.0/24", "198.18.0.0/15", "192.0.2.0/24", "198.51.100.0/24", # "203.0.113.0/24", "224.0.0.0/4", "::1/128", "fe80::/10", "fc00::/7", # "2001:db8::/32", "ff00::/8", "fec0::/10"] # -#ip_range_denylist = ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", +#ip_range_denylist = # Optional IP address or network interface-name to bind as the source of # URL preview requests. If not set, it will not bind to a specific @@ -1181,9 +1188,9 @@ #url_preview_url_contains_allowlist = [] # Maximum amount of bytes allowed in a URL preview body size when -# spidering. Defaults to 384KB in bytes. +# spidering. Defaults to 256KB in bytes. 
# -#url_preview_max_spider_size = 384000 +#url_preview_max_spider_size = 256000 # Option to decide whether you would like to run the domain allowlist # checks (contains and explicit) on the root domain or not. Does not apply @@ -1302,7 +1309,7 @@ # Sentry.io crash/panic reporting, performance monitoring/metrics, etc. # This is NOT enabled by default. conduwuit's default Sentry reporting -# endpoint is o4506996327251968.ingest.us.sentry.io +# endpoint domain is o4506996327251968.ingest.us.sentry.io # #sentry = false diff --git a/src/core/config/check.rs b/src/core/config/check.rs index c75fb31e..b8415281 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -20,10 +20,7 @@ pub fn check(config: &Config) -> Result<()> { } if cfg!(all(feature = "hardened_malloc", feature = "jemalloc")) { - warn!( - "hardened_malloc and jemalloc are both enabled, this causes jemalloc to be used. If using --all-features, \ - this is harmless." - ); + info!("hardened_malloc and jemalloc compile-time features are both enabled, this causes jemalloc to be used."); } if cfg!(not(unix)) && config.unix_socket_path.is_some() { @@ -34,7 +31,15 @@ pub fn check(config: &Config) -> Result<()> { )); } - if cfg!(unix) && config.unix_socket_path.is_none() { + if config.unix_socket_path.is_none() && config.get_bind_hosts().is_empty() { + return Err!(Config("address", "No TCP addresses were specified to listen on")); + } + + if config.unix_socket_path.is_none() && config.get_bind_ports().is_empty() { + return Err!(Config("port", "No ports were specified to listen on")); + } + + if config.unix_socket_path.is_none() { config.get_bind_addrs().iter().for_each(|addr| { use std::path::Path; @@ -50,18 +55,14 @@ pub fn check(config: &Config) -> Result<()> { host and guest, this will NOT work. Please change this to \"0.0.0.0\". 
If this is expected, \ you can ignore.", ); - } - - if Path::new("/.dockerenv").exists() { + } else if Path::new("/.dockerenv").exists() { error!( "You are detected using Docker with a loopback/localhost listening address of {addr}. If you \ are using a reverse proxy on the host and require communication to conduwuit in the Docker \ container via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". \ If this is expected, you can ignore.", ); - } - - if Path::new("/run/.containerenv").exists() { + } else if Path::new("/run/.containerenv").exists() { error!( "You are detected using Podman with a loopback/localhost listening address of {addr}. If you \ are using a reverse proxy on the host and require communication to conduwuit in the Podman \ @@ -89,6 +90,13 @@ pub fn check(config: &Config) -> Result<()> { )); } + if config.emergency_password == Some(String::from("F670$2CP@Hw8mG7RY1$%!#Ic7YA")) { + return Err!(Config( + "emergency_password", + "The public example emergency password is being used, this is insecure. Please change this." + )); + } + // check if the user specified a registration token as `""` if config.registration_token == Some(String::new()) { return Err!(Config( @@ -113,17 +121,20 @@ pub fn check(config: &Config) -> Result<()> { )); } - if config.max_request_size < 5_120_000 { + if config.max_request_size < 10_000_000 { return Err!(Config( "max_request_size", - "Max request size is less than 5MB. Please increase it." + "Max request size is less than 10MB. Please increase it as this is too low for operable federation." )); } // check if user specified valid IP CIDR ranges on startup for cidr in &config.ip_range_denylist { if let Err(e) = ipaddress::IPAddress::parse(cidr) { - return Err!(Config("ip_range_denylist", "Parsing specified IP CIDR range from string: {e}.")); + return Err!(Config( + "ip_range_denylist", + "Parsing specified IP CIDR range from string failed: {e}." 
+ )); } } @@ -135,10 +146,10 @@ pub fn check(config: &Config) -> Result<()> { return Err!(Config( "registration_token", "!! You have `allow_registration` enabled without a token configured in your config which means you are \ - allowing ANYONE to register on your conduwuit instance without any 2nd-step (e.g. registration token).\n -If this is not the intended behaviour, please set a registration token.\n -For security and safety reasons, conduwuit will shut down. If you are extra sure this is the desired behaviour you \ - want, please set the following config option to true: + allowing ANYONE to register on your conduwuit instance without any 2nd-step (e.g. registration token). \ + If this is not the intended behaviour, please set a registration token. For security and safety reasons, \ + conduwuit will shut down. If you are extra sure this is the desired behaviour you want, please set the \ + following config option to true: `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`" )); } @@ -151,15 +162,16 @@ For security and safety reasons, conduwuit will shut down. If you are extra sure warn!( "Open registration is enabled via setting \ `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` and `allow_registration` to \ - true without a registration token configured. You are expected to be aware of the risks now.\n - If this is not the desired behaviour, please set a registration token." + true without a registration token configured. You are expected to be aware of the risks now. If this is \ + not the desired behaviour, please set a registration token." ); } if config.allow_outgoing_presence && !config.allow_local_presence { return Err!(Config( "allow_local_presence", - "Outgoing presence requires allowing local presence. Please enable 'allow_local_presence'." + "Outgoing presence requires allowing local presence. Please enable 'allow_local_presence' or disable \ + outgoing presence." 
)); } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 0e25b197..413aa7f4 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -141,6 +141,8 @@ pub struct Config { /// core count. /// /// This defaults to 128.0 + (64.0 * CPU core count) + /// + /// default: varies by system #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, @@ -151,9 +153,9 @@ pub struct Config { /// /// The default is the trans pride flag. /// - /// example: "🏳️⚧️" + /// example: "🏳️‍⚧️" /// - /// default: "🏳️⚧️" + /// default: "🏳️‍⚧️" #[serde(default = "default_new_user_displayname_suffix")] pub new_user_displayname_suffix: String, @@ -164,15 +166,14 @@ pub struct Config { /// /// This is disabled by default as this is rarely used except for security /// updates or major updates. - #[serde(default)] + #[serde(default, alias = "allow_announcements_check")] pub allow_check_for_updates: bool, /// Set this to any float value to multiply conduwuit's in-memory LRU caches /// with such as "auth_chain_cache_capacity". /// /// May be useful if you have significant memory to spare to increase - /// performance. This was previously called - /// `conduit_cache_capacity_modifier`. + /// performance. /// /// If you have low memory, reducing this may be viable. /// @@ -247,7 +248,7 @@ pub struct Config { /// longer running Matrix). Only decrease this if you are using an external /// DNS cache. /// - /// default_dns_min_ttl: 259200 + /// default: 10800 #[serde(default = "default_dns_min_ttl")] pub dns_min_ttl: u64, @@ -262,7 +263,7 @@ pub struct Config { #[serde(default = "default_dns_min_ttl_nxdomain")] pub dns_min_ttl_nxdomain: u64, - /// Number of retries after a timeout. + /// Number of DNS nameserver retries after a timeout or error. 
/// /// default: 10 #[serde(default = "default_dns_attempts")] @@ -633,7 +634,7 @@ pub struct Config { /// Currently, conduwuit doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers /// - /// example: ["matrix.org", "constellatory.net", "tchncs.de"] + /// example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] @@ -794,14 +795,14 @@ pub struct Config { /// room invites) are ignored here. /// /// Defaults to false as rooms can be banned for non-moderation-related - /// reasons + /// reasons and this performs a full user deactivation #[serde(default)] pub auto_deactivate_banned_room_attempts: bool, /// RocksDB log level. This is not the same as conduwuit's log level. This /// is the log level for the RocksDB engine/library which show up in your /// database folder/path as `LOG` files. conduwuit will log RocksDB errors - /// as normal through tracing. + /// as normal through tracing or panics if severe for safety. /// /// default: "error" #[serde(default = "default_rocksdb_log_level")] @@ -855,7 +856,7 @@ pub struct Config { /// operatons such as cleanup, sync, flush, compaction, etc. Set to 0 to use /// all your logical threads. Defaults to your CPU logical thread count. /// - /// default: 0 + /// default: varies by system #[serde(default = "default_rocksdb_parallelism_threads")] pub rocksdb_parallelism_threads: usize, @@ -955,6 +956,14 @@ pub struct Config { #[serde(default = "default_rocksdb_recovery_mode")] pub rocksdb_recovery_mode: u8, + /// Enables or disables paranoid SST file checks. This can improve RocksDB + /// database consistency at a potential performance impact due to further + /// safety checks ran. + /// + /// See https://github.com/facebook/rocksdb/wiki/Online-Verification#columnfamilyoptionsparanoid_file_checks for more information. 
+ #[serde(default)] + pub rocksdb_paranoid_file_checks: bool, + /// Database repair mode (for RocksDB SST corruption) /// /// Use this option when the server reports corruption while running or @@ -1255,7 +1264,8 @@ pub struct Config { /// /// To disable, set this to be an empty vector (`[]`). /// - /// default: ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", + /// Defaults to: + /// ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", /// "192.168.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "169.254.0.0/16", /// "192.88.99.0/24", "198.18.0.0/15", "192.0.2.0/24", "198.51.100.0/24", /// "203.0.113.0/24", "224.0.0.0/4", "::1/128", "fe80::/10", "fc00::/7", @@ -1328,9 +1338,9 @@ pub struct Config { pub url_preview_url_contains_allowlist: Vec, /// Maximum amount of bytes allowed in a URL preview body size when - /// spidering. Defaults to 384KB in bytes. + /// spidering. Defaults to 256KB in bytes. /// - /// default: 384000 + /// default: 256000 #[serde(default = "default_url_preview_max_spider_size")] pub url_preview_max_spider_size: usize, @@ -1465,7 +1475,7 @@ pub struct Config { /// Sentry.io crash/panic reporting, performance monitoring/metrics, etc. /// This is NOT enabled by default. conduwuit's default Sentry reporting - /// endpoint is o4506996327251968.ingest.us.sentry.io + /// endpoint domain is o4506996327251968.ingest.us.sentry.io #[serde(default)] pub sentry: bool, @@ -1532,12 +1542,14 @@ pub struct Config { /// specifically the queue-depth or the number of simultaneous requests in /// flight. Defaults to 32 or four times the number of CPU cores, whichever /// is greater. + /// /// default: 32 #[serde(default = "default_db_pool_workers")] pub db_pool_workers: usize, /// Size of the queue feeding the database's frontend-pool. Defaults to 256 /// or eight times the number of CPU cores, whichever is greater. 
+ /// /// default: 256 #[serde(default = "default_db_pool_queue_size")] pub db_pool_queue_size: usize, @@ -2282,7 +2294,7 @@ fn default_ip_range_denylist() -> Vec { } fn default_url_preview_max_spider_size() -> usize { - 384_000 // 384KB + 256_000 // 256KB } fn default_new_user_displayname_suffix() -> String { "🏳️‍⚧️".to_owned() } diff --git a/src/database/opts.rs b/src/database/opts.rs index f1b4d3a9..d0ae618c 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -95,6 +95,8 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ 4_u8..=u8::MAX => unimplemented!(), }); + opts.set_paranoid_checks(config.rocksdb_paranoid_file_checks); + opts.set_env(env); Ok(opts) } From 9d59f777d27bff30f776edec0f5a9788cfdba534 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 7 Dec 2024 01:07:01 -0500 Subject: [PATCH 0361/1248] refactor fed membership endpoints, add missing checks, some cleanup, reduce line width Signed-off-by: strawberry --- src/api/client/membership.rs | 500 +++++++++++++-------------- src/api/client/user_directory.rs | 2 +- src/api/server/invite.rs | 34 +- src/api/server/make_join.rs | 138 ++++---- src/api/server/make_knock.rs | 7 +- src/api/server/make_leave.rs | 11 +- src/api/server/send_join.rs | 159 +++++---- src/api/server/send_knock.rs | 5 +- src/api/server/send_leave.rs | 79 ++--- src/service/pusher/mod.rs | 6 +- src/service/rooms/state_cache/mod.rs | 17 +- src/service/rooms/timeline/mod.rs | 14 +- 12 files changed, 474 insertions(+), 498 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 7c3a597e..a61011b0 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -7,12 +7,12 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduit::{ - debug, debug_info, debug_warn, err, error, info, pdu, - pdu::{gen_event_id_canonical_json, PduBuilder}, + debug, debug_info, debug_warn, err, error, info, + pdu::{self, 
gen_event_id_canonical_json, PduBuilder}, result::FlatOk, - trace, utils, - utils::{shuffle, IterStream, ReadyExt}, - warn, Err, Error, PduEvent, Result, + trace, + utils::{self, shuffle, IterStream, ReadyExt}, + warn, Err, PduEvent, Result, }; use futures::{join, FutureExt, StreamExt}; use ruma::{ @@ -153,21 +153,14 @@ async fn banned_room_check( /// rules locally /// - If the server does not know about the room: asks other servers over /// federation -#[tracing::instrument(skip_all, fields(%client_ip), name = "join")] +#[tracing::instrument(skip_all, fields(%client), name = "join")] pub(crate) async fn join_room_by_id_route( - State(services): State, InsecureClientIp(client_ip): InsecureClientIp, + State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user(); - banned_room_check( - &services, - sender_user, - Some(&body.room_id), - body.room_id.server_name(), - client_ip, - ) - .await?; + banned_room_check(&services, sender_user, Some(&body.room_id), body.room_id.server_name(), client).await?; // There is no body.server_name for /roomId/join let mut servers: Vec<_> = services @@ -354,10 +347,7 @@ pub(crate) async fn invite_user_route( "User {sender_user} is not an admin and attempted to send an invite to room {}", &body.room_id ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Invites are not allowed on this server.", - )); + return Err!(Request(Forbidden("Invites are not allowed on this server."))); } banned_room_check(&services, sender_user, Some(&body.room_id), body.room_id.server_name(), client).await?; @@ -388,7 +378,7 @@ pub(crate) async fn invite_user_route( Ok(invite_user::v3::Response {}) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) + Err!(Request(NotFound("User not found."))) } } @@ -686,6 +676,18 @@ pub async fn join_room_by_id_helper( }); } + if let Ok(membership) = services + .rooms + .state_accessor + .get_member(room_id, sender_user) + 
.await + { + if membership.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); + return Err!(Request(Forbidden("You are banned from the room."))); + } + } + let server_in_room = services .rooms .state_cache @@ -730,19 +732,29 @@ async fn join_room_by_id_helper_remote( )); } - let mut join_event_stub: CanonicalJsonObject = serde_json::from_str(make_join_response.event.get()) - .map_err(|e| err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")))?; + let mut join_event_stub: CanonicalJsonObject = + serde_json::from_str(make_join_response.event.get()).map_err(|e| { + err!(BadServerResponse(warn!( + "Invalid make_join event json received from server: {e:?}" + ))) + })?; - let join_authorized_via_users_server = join_event_stub - .get("content") - .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? - .as_str() - }) - .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + let join_authorized_via_users_server = { + use RoomVersionId::*; + if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { + join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()) + } else { + None + } + }; - // TODO: Is origin needed? join_event_stub.insert( "origin".to_owned(), CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), @@ -811,65 +823,46 @@ async fn join_room_by_id_helper_remote( info!("send_join finished"); if join_authorized_via_users_server.is_some() { - use RoomVersionId::*; - match &room_version_id { - V1 | V2 | V3 | V4 | V5 | V6 | V7 => { - warn!( - "Found `join_authorised_via_users_server` but room {} is version {}. 
Ignoring.", - room_id, &room_version_id - ); - }, - // only room versions 8 and above using `join_authorized_via_users_server` (restricted joins) need to - // validate and send signatures - _ => { - if let Some(signed_raw) = &send_join_response.room_state.event { - debug_info!( - "There is a signed event. This room is probably using restricted joins. Adding signature to \ - our event" + if let Some(signed_raw) = &send_join_response.room_state.event { + debug_info!( + "There is a signed event with join_authorized_via_users_server. This room is probably using \ + restricted joins. Adding signature to our event" + ); + + let (signed_event_id, signed_value) = gen_event_id_canonical_json(signed_raw, &room_version_id) + .map_err(|e| err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))))?; + + if signed_event_id != event_id { + return Err!(Request(BadJson( + warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") + ))); + } + + match signed_value["signatures"] + .as_object() + .ok_or_else(|| err!(BadServerResponse(warn!("Server {remote_server} sent invalid signatures type")))) + .and_then(|e| { + e.get(remote_server.as_str()).ok_or_else(|| { + err!(BadServerResponse(warn!( + "Server {remote_server} did not send its signature for a restricted room" + ))) + }) + }) { + Ok(signature) => { + join_event + .get_mut("signatures") + .expect("we created a valid pdu") + .as_object_mut() + .expect("we created a valid pdu") + .insert(remote_server.to_string(), signature.clone()); + }, + Err(e) => { + warn!( + "Server {remote_server} sent invalid signature in send_join signatures for event \ + {signed_value:?}: {e:?}", ); - let Ok((signed_event_id, signed_value)) = gen_event_id_canonical_json(signed_raw, &room_version_id) - else { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - }; - - if signed_event_id != 
event_id { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Server sent event with wrong event id", - )); - } - - match signed_value["signatures"] - .as_object() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Server sent invalid signatures type", - )) - .and_then(|e| { - e.get(remote_server.as_str()) - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Server did not send its signature")) - }) { - Ok(signature) => { - join_event - .get_mut("signatures") - .expect("we created a valid pdu") - .as_object_mut() - .expect("we created a valid pdu") - .insert(remote_server.to_string(), signature.clone()); - }, - Err(e) => { - warn!( - "Server {remote_server} sent invalid signature in sendjoin signatures for event \ - {signed_value:?}: {e:?}", - ); - }, - } - } - }, + }, + } } } @@ -1041,14 +1034,13 @@ async fn join_room_by_id_helper_local( services: &Services, sender_user: &UserId, room_id: &RoomId, reason: Option, servers: &[OwnedServerName], _third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard, ) -> Result { - debug!("We can join locally"); + debug_info!("We can join locally"); let join_rules_event_content = services .rooms .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map(|content: RoomJoinRulesEventContent| content); + .room_state_get_content::(room_id, &StateEventType::RoomJoinRules, "") + .await; let restriction_rooms = match join_rules_event_content { Ok(RoomJoinRulesEventContent { @@ -1064,40 +1056,36 @@ async fn join_room_by_id_helper_local( _ => Vec::new(), }; - let local_members: Vec<_> = services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|user| services.globals.user_is_local(user)) - .map(ToOwned::to_owned) - .collect() - .await; - - let mut join_authorized_via_users_server: Option = None; - - if restriction_rooms - .iter() - .stream() - .any(|restriction_room_id| { + let join_authorized_via_users_server: Option = { + if restriction_rooms + 
.iter() + .stream() + .any(|restriction_room_id| { + services + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + }) + .await + { services .rooms .state_cache - .is_joined(sender_user, restriction_room_id) - }) - .await - { - for user in local_members { - if services - .rooms - .state_accessor - .user_can_invite(room_id, &user, sender_user, &state_lock) + .local_users_in_room(room_id) + .filter(|user| { + services + .rooms + .state_accessor + .user_can_invite(room_id, user, sender_user, &state_lock) + }) + .boxed() + .next() .await - { - join_authorized_via_users_server = Some(user); - break; - } + .map(ToOwned::to_owned) + } else { + None } - } + }; let content = RoomMemberEventContent { displayname: services.users.displayname(sender_user).await.ok(), @@ -1109,7 +1097,7 @@ async fn join_room_by_id_helper_local( }; // Try normal join first - let error = match services + let Err(error) = services .rooms .timeline .build_and_append_pdu( @@ -1119,130 +1107,125 @@ async fn join_room_by_id_helper_local( &state_lock, ) .await - { - Ok(_) => return Ok(()), - Err(e) => e, + else { + return Ok(()); }; - if !restriction_rooms.is_empty() - && servers - .iter() - .any(|server_name| !services.globals.server_is_ours(server_name)) + if restriction_rooms.is_empty() + && (servers.is_empty() || servers.len() == 1 && services.globals.server_is_ours(&servers[0])) { - warn!("We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements"); - let (make_join_response, remote_server) = make_join_request(services, sender_user, room_id, servers).await?; + return Err(error); + } - let Some(room_version_id) = make_join_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); - }; + warn!("We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements"); + let Ok((make_join_response, remote_server)) = make_join_request(services, 
sender_user, room_id, servers).await + else { + return Err(error); + }; - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } + let Some(room_version_id) = make_join_response.room_version else { + return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + }; - let mut join_event_stub: CanonicalJsonObject = serde_json::from_str(make_join_response.event.get()) - .map_err(|e| err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")))?; - let join_authorized_via_users_server = join_event_stub - .get("content") - .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? - .as_str() - }) - .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); - // TODO: Is origin needed? - join_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - join_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - join_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - join_authorized_via_users_server, - ..RoomMemberEventContent::new(MembershipState::Join) - }) - .expect("event is valid, we just created it"), - ); + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } - // We keep the "event_id" in the pdu only in v1 or - // v2 rooms - match room_version_id { - RoomVersionId::V1 | RoomVersionId::V2 => {}, - _ => { - 
join_event_stub.remove("event_id"); + let mut join_event_stub: CanonicalJsonObject = serde_json::from_str(make_join_response.event.get()) + .map_err(|e| err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")))?; + + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + + join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + join_authorized_via_users_server, + ..RoomMemberEventContent::new(MembershipState::Join) + }) + .expect("event is valid, we just created it"), + ); + + // We keep the "event_id" in the pdu only in v1 or + // v2 rooms + match room_version_id { + RoomVersionId::V1 | RoomVersionId::V2 => {}, + _ => { + join_event_stub.remove("event_id"); + }, + }; + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; + + // Generate event id + let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; + + // Add event_id back + join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let join_event = join_event_stub; + + let send_join_response = 
services + .sending + .send_synapse_request( + &remote_server, + federation::membership::create_join_event::v2::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + omit_members: false, + pdu: services + .sending + .convert_to_outgoing_federation_event(join_event.clone()) + .await, }, - }; + ) + .await?; - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; + if let Some(signed_raw) = send_join_response.room_state.event { + let (signed_event_id, signed_value) = gen_event_id_canonical_json(&signed_raw, &room_version_id) + .map_err(|e| err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))))?; - // Generate event id - let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; - - // Add event_id back - join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let join_event = join_event_stub; - - let send_join_response = services - .sending - .send_synapse_request( - &remote_server, - federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - omit_members: false, - pdu: services - .sending - .convert_to_outgoing_federation_event(join_event.clone()) - .await, - }, - ) - .await?; - - if let Some(signed_raw) = send_join_response.room_state.event { - let Ok((signed_event_id, signed_value)) = gen_event_id_canonical_json(&signed_raw, &room_version_id) else { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - }; - - if signed_event_id != event_id { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Server sent event with wrong event id", - )); - } - - drop(state_lock); - services - .rooms - 
.event_handler - .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true) - .await?; - } else { - return Err(error); + if signed_event_id != event_id { + return Err!(Request(BadJson( + warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") + ))); } + + drop(state_lock); + services + .rooms + .event_handler + .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true) + .await?; } else { return Err(error); } @@ -1317,13 +1300,10 @@ async fn make_join_request( pub(crate) async fn invite_helper( services: &Services, sender_user: &UserId, user_id: &UserId, room_id: &RoomId, reason: Option, is_direct: bool, -) -> Result<()> { +) -> Result { if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { info!("User {sender_user} is not an admin and attempted to send an invite to room {room_id}"); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Invites are not allowed on this server.", - )); + return Err!(Request(Forbidden("Invites are not allowed on this server."))); } if !services.globals.user_is_local(user_id) { @@ -1382,30 +1362,24 @@ pub(crate) async fn invite_helper( // We do not add the event_id field to the pdu here because of signature and // hashes checks - let Ok((event_id, value)) = gen_event_id_canonical_json(&response.event, &room_version_id) else { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - }; + let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id) + .map_err(|e| err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))))?; - if *pdu.event_id != *event_id { - warn!( - "Server {} changed invite event, that's not allowed in the spec: ours: {pdu_json:?}, theirs: {value:?}", - user_id.server_name(), - ); + if pdu.event_id != event_id { + return 
Err!(Request(BadJson( + warn!(%pdu.event_id, %event_id, "Server {} sent event with wrong event ID", user_id.server_name()) + ))); } let origin: OwnedServerName = serde_json::from_value( serde_json::to_value( value .get("origin") - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event needs an origin field."))?, + .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, ) .expect("CanonicalJson is valid json value"), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + .map_err(|e| err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))))?; let pdu_id = services .rooms @@ -1414,8 +1388,7 @@ pub(crate) async fn invite_helper( .await? .ok_or_else(|| err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))))?; - services.sending.send_pdu_room(room_id, &pdu_id).await?; - return Ok(()); + return services.sending.send_pdu_room(room_id, &pdu_id).await; } if !services @@ -1424,10 +1397,9 @@ pub(crate) async fn invite_helper( .is_joined(sender_user, room_id) .await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); + return Err!(Request(Forbidden( + "You must be joined in the room you are trying to invite from." 
+ ))); } let state_lock = services.rooms.state.mutex.lock(room_id).await; @@ -1599,7 +1571,11 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room .map(|user| user.server_name().to_owned()), ); - debug!("servers in remote_leave_room: {servers:?}"); + if let Some(room_id_server_name) = room_id.server_name() { + servers.insert(room_id_server_name.to_owned()); + } + + debug_info!("servers in remote_leave_room: {servers:?}"); for remote_server in servers { let make_leave_response = services diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index f3fee8d1..feb48346 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -21,7 +21,7 @@ pub(crate) async fn search_users_route( State(services): State, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = usize::try_from(body.limit).unwrap_or(10); // default limit is 10 + let limit = usize::try_from(body.limit).map_or(10, usize::from).min(100); // default limit is 10 let users = services.users.stream().filter_map(|user_id| async { // Filter out buggy users (they should not exist, but you never know...) 
diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index f7919bb3..49263d52 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -145,24 +145,24 @@ pub(crate) async fn create_invite_route( true, ) .await?; - } - for appservice in services.appservice.read().await.values() { - if appservice.is_user_match(&invited_user) { - services - .sending - .send_appservice_request( - appservice.registration.clone(), - ruma::api::appservice::event::push_events::v1::Request { - events: vec![pdu.to_room_event()], - txn_id: general_purpose::URL_SAFE_NO_PAD - .encode(sha256::hash(pdu.event_id.as_bytes())) - .into(), - ephemeral: Vec::new(), - to_device: Vec::new(), - }, - ) - .await?; + for appservice in services.appservice.read().await.values() { + if appservice.is_user_match(&invited_user) { + services + .sending + .send_appservice_request( + appservice.registration.clone(), + ruma::api::appservice::event::push_events::v1::Request { + events: vec![pdu.to_room_event()], + txn_id: general_purpose::URL_SAFE_NO_PAD + .encode(sha256::hash(pdu.event_id.as_bytes())) + .into(), + ephemeral: Vec::new(), + to_device: Vec::new(), + }, + ) + .await?; + } } } diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index d5ea675e..05a4125d 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,8 +1,5 @@ use axum::extract::State; -use conduit::{ - utils::{IterStream, ReadyExt}, - warn, -}; +use conduit::{debug_info, utils::IterStream, warn, Err}; use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::membership::prepare_join_event}, @@ -13,7 +10,7 @@ use ruma::{ }, StateEventType, }, - CanonicalJsonObject, RoomId, RoomVersionId, UserId, + CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; @@ -29,14 +26,11 @@ pub(crate) async fn create_join_event_template_route( State(services): State, body: Ruma, ) -> Result { if 
!services.rooms.metadata.exists(&body.room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + return Err!(Request(NotFound("Room is unknown to this server."))); } if body.user_id.server_name() != body.origin() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to join on behalf of another server/user", - )); + return Err!(Request(BadJson("Not allowed to join on behalf of another server/user."))); } // ACL check origin server @@ -59,10 +53,7 @@ pub(crate) async fn create_join_event_template_route( &body.user_id, &body.room_id, ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { @@ -72,10 +63,9 @@ pub(crate) async fn create_join_event_template_route( .forbidden_remote_server_names .contains(&server.to_owned()) { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + return Err!(Request(Forbidden(warn!( + "Room ID server name {server} is banned on this homeserver." + )))); } } @@ -91,39 +81,35 @@ pub(crate) async fn create_join_event_template_route( let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - let join_authorized_via_users_server = if (services - .rooms - .state_cache - .is_left(&body.user_id, &body.room_id) - .await) - && user_can_perform_restricted_join(&services, &body.user_id, &body.room_id, &room_version_id).await? 
- { - let auth_user = services - .rooms - .state_cache - .room_members(&body.room_id) - .ready_filter(|user| user.server_name() == services.globals.server_name()) - .filter(|user| { - services - .rooms - .state_accessor - .user_can_invite(&body.room_id, user, &body.user_id, &state_lock) - }) - .boxed() - .next() - .await - .map(ToOwned::to_owned); - - if auth_user.is_some() { - auth_user + let join_authorized_via_users_server: Option = { + use RoomVersionId::*; + if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { + // room version does not support restricted join rules + None + } else if user_can_perform_restricted_join(&services, &body.user_id, &body.room_id, &room_version_id).await? { + let Some(auth_user) = services + .rooms + .state_cache + .local_users_in_room(&body.room_id) + .filter(|user| { + services + .rooms + .state_accessor + .user_can_invite(&body.room_id, user, &body.user_id, &state_lock) + }) + .boxed() + .next() + .await + .map(ToOwned::to_owned) + else { + return Err!(Request(UnableToGrantJoin( + "No user on this server is able to assist in joining." + ))); + }; + Some(auth_user) } else { - return Err(Error::BadRequest( - ErrorKind::UnableToGrantJoin, - "No user on this server is able to assist in joining.", - )); + None } - } else { - None }; let (_pdu, mut pdu_json) = services @@ -155,37 +141,39 @@ pub(crate) async fn create_join_event_template_route( } /// Checks whether the given user can join the given room via a restricted join. -/// This doesn't check the current user's membership. This should be done -/// externally, either by using the state cache or attempting to authorize the -/// event. 
pub(crate) async fn user_can_perform_restricted_join( services: &Services, user_id: &UserId, room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result { use RoomVersionId::*; - let join_rules_event = services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "") - .await; - - let Ok(Ok(join_rules_event_content)) = join_rules_event.as_ref().map(|join_rules_event| { - serde_json::from_str::(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event in database: {e}"); - Error::bad_database("Invalid join rules event in database") - }) - }) else { - return Ok(false); - }; - + // restricted rooms are not supported on <=v7 if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { return Ok(false); } + if services.rooms.state_cache.is_joined(user_id, room_id).await { + // joining user is already joined, there is nothing we need to do + return Ok(false); + } + + let Ok(join_rules_event_content) = services + .rooms + .state_accessor + .room_state_get_content::(room_id, &StateEventType::RoomJoinRules, "") + .await + else { + return Ok(false); + }; + let (JoinRule::Restricted(r) | JoinRule::KnockRestricted(r)) = join_rules_event_content.join_rule else { return Ok(false); }; + if r.allow.is_empty() { + debug_info!("{room_id} is restricted but the allow key is empty"); + return Ok(false); + } + if r.allow .iter() .filter_map(|rule| { @@ -201,22 +189,20 @@ pub(crate) async fn user_can_perform_restricted_join( { Ok(true) } else { - Err(Error::BadRequest( - ErrorKind::UnableToAuthorizeJoin, - "User is not known to be in any required room.", - )) + Err!(Request(UnableToAuthorizeJoin( + "Joining user is not known to be in any required room." 
+ ))) } } -pub(crate) fn maybe_strip_event_id(pdu_json: &mut CanonicalJsonObject, room_version_id: &RoomVersionId) -> Result<()> { +pub(crate) fn maybe_strip_event_id(pdu_json: &mut CanonicalJsonObject, room_version_id: &RoomVersionId) -> Result { use RoomVersionId::*; match room_version_id { - V1 | V2 => {}, + V1 | V2 => Ok(()), _ => { pdu_json.remove("event_id"); + Ok(()) }, - }; - - Ok(()) + } } diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index c1875a1f..34883355 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -18,14 +18,11 @@ pub(crate) async fn create_knock_event_template_route( State(services): State, body: Ruma, ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + return Err!(Request(NotFound("Room is unknown to this server."))); } if body.user_id.server_name() != body.origin() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to knock on behalf of another server/user", - )); + return Err!(Request(BadJson("Not allowed to knock on behalf of another server/user."))); } // ACL check origin server diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 33a94560..1b81aea5 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -1,7 +1,7 @@ use axum::extract::State; -use conduit::{Error, Result}; +use conduit::{Err, Result}; use ruma::{ - api::{client::error::ErrorKind, federation::membership::prepare_leave_event}, + api::federation::membership::prepare_leave_event, events::room::member::{MembershipState, RoomMemberEventContent}, }; use serde_json::value::to_raw_value; @@ -16,14 +16,11 @@ pub(crate) async fn create_leave_event_template_route( State(services): State, body: Ruma, ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this 
server.")); + return Err!(Request(NotFound("Room is unknown to this server."))); } if body.user_id.server_name() != body.origin() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to leave on behalf of another server/user", - )); + return Err!(Request(BadJson("Not allowed to leave on behalf of another server/user."))); } // ACL check origin diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index d1574e62..1e1e8fed 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -7,16 +7,16 @@ use conduit::{ err, pdu::gen_event_id_canonical_json, utils::stream::{IterStream, TryBroadbandExt}, - warn, Error, Result, + warn, Err, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::{client::error::ErrorKind, federation::membership::create_join_event}, + api::federation::membership::create_join_event, events::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, }, - CanonicalJsonValue, OwnedEventId, OwnedServerName, OwnedUserId, RoomId, ServerName, + CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use service::Services; @@ -28,7 +28,7 @@ async fn create_join_event( services: &Services, origin: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, ) -> Result { if !services.rooms.metadata.exists(room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + return Err!(Request(NotFound("Room is unknown to this server."))); } // ACL check origin server @@ -45,7 +45,7 @@ async fn create_join_event( .state .get_room_shortstatehash(room_id) .await - .map_err(|_| err!(Request(NotFound("Event state not found."))))?; + .map_err(|e| err!(Request(NotFound(error!("Room has no state: {e}")))))?; // We do not add the event_id field to the pdu here because of signature and // hashes checks @@ -53,53 +53,62 @@ async fn 
create_join_event( let Ok((event_id, mut value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); + return Err!(Request(BadJson("Could not convert event to canonical json."))); }; + let event_room_id: OwnedRoomId = serde_json::from_value( + serde_json::to_value( + value + .get("room_id") + .ok_or_else(|| err!(Request(BadJson("Event missing room_id property."))))?, + ) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|e| err!(Request(BadJson(warn!("room_id field is not a valid room ID: {e}")))))?; + + if event_room_id != room_id { + return Err!(Request(BadJson("Event room_id does not match request path room ID."))); + } + let event_type: StateEventType = serde_json::from_value( value .get("type") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing type property."))? + .ok_or_else(|| err!(Request(BadJson("Event missing type property."))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event has invalid event type."))?; + .map_err(|e| err!(Request(BadJson(warn!("Event has invalid state event type: {e}")))))?; if event_type != StateEventType::RoomMember { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to send non-membership state event to join endpoint.", - )); + return Err!(Request(BadJson( + "Not allowed to send non-membership state event to join endpoint." + ))); } let content: RoomMemberEventContent = serde_json::from_value( value .get("content") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing content property"))? + .ok_or_else(|| err!(Request(BadJson("Event missing content property"))))? 
.clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event content is empty or invalid"))?; + .map_err(|e| err!(Request(BadJson(warn!("Event content is empty or invalid: {e}")))))?; if content.membership != MembershipState::Join { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to send a non-join membership event to join endpoint.", - )); + return Err!(Request(BadJson( + "Not allowed to send a non-join membership event to join endpoint." + ))); } - // ACL check sender server name + // ACL check sender user server name let sender: OwnedUserId = serde_json::from_value( value .get("sender") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing sender property."))? + .ok_or_else(|| err!(Request(BadJson("Event missing sender property."))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "sender is not a valid user ID."))?; + .map_err(|e| err!(Request(BadJson(warn!("sender property is not a valid user ID: {e}")))))?; services .rooms @@ -109,50 +118,71 @@ async fn create_join_event( // check if origin server is trying to send for another server if sender.server_name() != origin { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to join on behalf of another server.", - )); + return Err!(Request(Forbidden("Not allowed to join on behalf of another server."))); } let state_key: OwnedUserId = serde_json::from_value( value .get("state_key") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing state_key property."))? + .ok_or_else(|| err!(Request(BadJson("Event missing state_key property."))))? 
.clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "state_key is invalid or not a user ID."))?; + .map_err(|e| err!(Request(BadJson(warn!("State key is not a valid user ID: {e}")))))?; if state_key != sender { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "State key does not match sender user", - )); + return Err!(Request(BadJson("State key does not match sender user."))); }; - if content - .join_authorized_via_users_server - .is_some_and(|user| services.globals.user_is_local(&user)) - && super::user_can_perform_restricted_join(services, &sender, room_id, &room_version_id) + if let Some(authorising_user) = content.join_authorized_via_users_server { + use ruma::RoomVersionId::*; + + if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { + return Err!(Request(InvalidParam( + "Room version {room_version_id} does not support restricted rooms but \ + join_authorised_via_users_server ({authorising_user}) was found in the event." + ))); + } + + if !services.globals.user_is_local(&authorising_user) { + return Err!(Request(InvalidParam( + "Cannot authorise membership event through {authorising_user} as they do not belong to this homeserver" + ))); + } + + if !services + .rooms + .state_cache + .is_joined(&authorising_user, room_id) .await - .unwrap_or_default() - { - services - .server_keys - .hash_and_sign_event(&mut value, &room_version_id) - .map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?; + { + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} is not in the room you are trying to join, they cannot authorise \ + your join." + ))); + } + + if !super::user_can_perform_restricted_join(services, &state_key, room_id, &room_version_id).await? { + return Err!(Request(UnableToAuthorizeJoin( + "Joining user did not pass restricted room's rules." 
+ ))); + } } + services + .server_keys + .hash_and_sign_event(&mut value, &room_version_id) + .map_err(|e| err!(Request(InvalidParam(warn!("Failed to sign send_join event: {e}")))))?; + let origin: OwnedServerName = serde_json::from_value( serde_json::to_value( value .get("origin") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing origin property."))?, + .ok_or_else(|| err!(Request(BadJson("Event missing origin property."))))?, ) .expect("CanonicalJson is valid json value"), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?; + .map_err(|e| err!(Request(BadJson(warn!("origin field is not a valid server name: {e}")))))?; let mutex_lock = services .rooms @@ -214,7 +244,6 @@ async fn create_join_event( Ok(create_join_event::v1::RoomState { auth_chain, state, - // Event field is required if the room version supports restricted join rules. event: to_raw_value(&CanonicalJsonValue::Object(value)).ok(), }) } @@ -232,14 +261,12 @@ pub(crate) async fn create_join_event_v1_route( .contains(body.origin()) { warn!( - "Server {} tried joining room ID {} who has a server name that is globally forbidden. Rejecting.", + "Server {} tried joining room ID {} through us who has a server name that is globally forbidden. \ + Rejecting.", body.origin(), &body.room_id, ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { @@ -250,14 +277,14 @@ pub(crate) async fn create_join_event_v1_route( .contains(&server.to_owned()) { warn!( - "Server {} tried joining room ID {} which has a server name that is globally forbidden. Rejecting.", + "Server {} tried joining room ID {} through us which has a server name that is globally forbidden. 
\ + Rejecting.", body.origin(), &body.room_id, ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + return Err!(Request(Forbidden(warn!( + "Room ID server name {server} is banned on this homeserver." + )))); } } @@ -282,10 +309,7 @@ pub(crate) async fn create_join_event_v2_route( .forbidden_remote_server_names .contains(body.origin()) { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { @@ -295,10 +319,15 @@ pub(crate) async fn create_join_event_v2_route( .forbidden_remote_server_names .contains(&server.to_owned()) { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server is banned on this homeserver.", - )); + warn!( + "Server {} tried joining room ID {} through us which has a server name that is globally forbidden. \ + Rejecting.", + body.origin(), + &body.room_id, + ); + return Err!(Request(Forbidden(warn!( + "Room ID server name {server} is banned on this homeserver." 
+ )))); } } diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index c57998ae..9738c2eb 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -121,10 +121,7 @@ pub(crate) async fn create_knock_event_v1_route( // check if origin server is trying to send for another server if sender.server_name() != body.origin() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to knock on behalf of another server.", - )); + return Err!(Request(BadJson("Not allowed to knock on behalf of another server/user."))); } let state_key: OwnedUserId = serde_json::from_value( diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index e4f41833..0de485e2 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,14 +1,14 @@ #![allow(deprecated)] use axum::extract::State; -use conduit::{err, utils::ReadyExt, Error, Result}; +use conduit::{err, Err, Result}; use ruma::{ - api::{client::error::ErrorKind, federation::membership::create_leave_event}, + api::federation::membership::create_leave_event, events::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, }, - OwnedUserId, RoomId, ServerName, + OwnedRoomId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; @@ -39,11 +39,9 @@ pub(crate) async fn create_leave_event_v2_route( Ok(create_leave_event::v2::Response::new()) } -async fn create_leave_event( - services: &Services, origin: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, -) -> Result<()> { +async fn create_leave_event(services: &Services, origin: &ServerName, room_id: &RoomId, pdu: &RawJsonValue) -> Result { if !services.rooms.metadata.exists(room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + return Err!(Request(NotFound("Room is unknown to this server."))); } // ACL check origin @@ -58,53 +56,62 @@ async fn create_leave_event( let room_version_id = 
services.rooms.state.get_room_version(room_id).await?; let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); + return Err!(Request(BadJson("Could not convert event to canonical json."))); }; + let event_room_id: OwnedRoomId = serde_json::from_value( + serde_json::to_value( + value + .get("room_id") + .ok_or_else(|| err!(Request(BadJson("Event missing room_id property."))))?, + ) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|e| err!(Request(BadJson(warn!("room_id field is not a valid room ID: {e}")))))?; + + if event_room_id != room_id { + return Err!(Request(BadJson("Event room_id does not match request path room ID."))); + } + let content: RoomMemberEventContent = serde_json::from_value( value .get("content") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing content property"))? + .ok_or_else(|| err!(Request(BadJson("Event missing content property."))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event content is empty or invalid"))?; + .map_err(|e| err!(Request(BadJson(warn!("Event content is empty or invalid: {e}")))))?; if content.membership != MembershipState::Leave { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to send a non-leave membership event to leave endpoint.", - )); + return Err!(Request(BadJson( + "Not allowed to send a non-leave membership event to leave endpoint." + ))); } let event_type: StateEventType = serde_json::from_value( value .get("type") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing type property."))? + .ok_or_else(|| err!(Request(BadJson("Event missing type property."))))? 
.clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Event does not have a valid state event type."))?; + .map_err(|e| err!(Request(BadJson(warn!("Event has invalid state event type: {e}")))))?; if event_type != StateEventType::RoomMember { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to send non-membership state event to leave endpoint.", - )); + return Err!(Request(BadJson( + "Not allowed to send non-membership state event to leave endpoint." + ))); } // ACL check sender server name let sender: OwnedUserId = serde_json::from_value( value .get("sender") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing sender property."))? + .ok_or_else(|| err!(Request(BadJson("Event missing sender property."))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "User ID in sender is invalid."))?; + .map_err(|e| err!(Request(BadJson(warn!("sender property is not a valid user ID: {e}")))))?; services .rooms @@ -113,26 +120,20 @@ async fn create_leave_event( .await?; if sender.server_name() != origin { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to leave on behalf of another server.", - )); + return Err!(Request(BadJson("Not allowed to leave on behalf of another server/user."))); } let state_key: OwnedUserId = serde_json::from_value( value .get("state_key") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing state_key property."))? + .ok_or_else(|| err!(Request(BadJson("Event missing state_key property."))))? 
.clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "state_key is invalid or not a user ID"))?; + .map_err(|e| err!(Request(BadJson(warn!("State key is not a valid user ID: {e}")))))?; if state_key != sender { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "state_key does not match sender user.", - )); + return Err!(Request(BadJson("State key does not match sender user."))); } let mutex_lock = services @@ -151,11 +152,5 @@ async fn create_leave_event( drop(mutex_lock); - let servers = services - .rooms - .state_cache - .room_servers(room_id) - .ready_filter(|server| !services.globals.server_is_ours(server)); - - services.sending.send_pdu_servers(servers, &pdu_id).await + services.sending.send_pdu_room(room_id, &pdu_id).await } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index df5852c6..ff1837db 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -227,7 +227,7 @@ impl Service { for action in self .get_actions(user, &ruleset, &power_levels, &pdu.to_sync_room_event(), &pdu.room_id) - .await? 
+ .await { let n = match action { Action::Notify => true, @@ -259,7 +259,7 @@ impl Service { pub async fn get_actions<'a>( &self, user: &UserId, ruleset: &'a Ruleset, power_levels: &RoomPowerLevelsEventContent, pdu: &Raw, room_id: &RoomId, - ) -> Result<&'a [Action]> { + ) -> &'a [Action] { let power_levels = PushConditionPowerLevelsCtx { users: power_levels.users.clone(), users_default: power_levels.users_default, @@ -290,7 +290,7 @@ impl Service { power_levels: Some(power_levels), }; - Ok(ruleset.get_actions(pdu, &ctx)) + ruleset.get_actions(pdu, &ctx) } #[tracing::instrument(skip(self, unread, pusher, tweaks, event))] diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 4a33224e..cbd72cdb 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -4,7 +4,7 @@ use std::{ }; use conduit::{ - err, is_not_empty, + is_not_empty, result::LogErr, utils::{stream::TryIgnore, ReadyExt, StreamTools}, warn, Result, @@ -600,11 +600,11 @@ impl Service { .map(|(_, servers): KeyVal<'_>| *servers.last().expect("at least one server")) } - /// Gets up to three servers that are likely to be in the room in the + /// Gets up to five servers that are likely to be in the room in the /// distant future. 
/// - /// See - #[tracing::instrument(skip(self))] + /// See + #[tracing::instrument(skip(self), level = "debug")] pub async fn servers_route_via(&self, room_id: &RoomId) -> Result> { let most_powerful_user_server = self .services @@ -618,8 +618,7 @@ impl Service { .max_by_key(|(_, power)| *power) .and_then(|x| (x.1 >= &int!(50)).then_some(x)) .map(|(user, _power)| user.server_name().to_owned()) - }) - .map_err(|e| err!(Database(error!(?e, "Invalid power levels event content in database."))))?; + }); let mut servers: Vec = self .room_members(room_id) @@ -629,12 +628,12 @@ impl Service { .sorted_by_key(|(_, users)| *users) .map(|(server, _)| server) .rev() - .take(3) + .take(5) .collect(); - if let Some(server) = most_powerful_user_server { + if let Ok(Some(server)) = most_powerful_user_server { servers.insert(0, server); - servers.truncate(3); + servers.truncate(5); } Ok(servers) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index b9fcdcd2..8603b7e7 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -417,7 +417,7 @@ impl Service { .services .pusher .get_actions(user, &rules_for_user, &power_levels, &sync_pdu, &pdu.room_id) - .await? 
+ .await { match action { Action::Notify => notify = true, @@ -769,10 +769,8 @@ impl Service { } // Hash and sign - let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| { - error!("Failed to convert PDU to canonical JSON: {e}"); - Error::bad_database("Failed to convert PDU to canonical JSON.") - })?; + let mut pdu_json = utils::to_canonical_object(&pdu) + .map_err(|e| err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}")))))?; // room v3 and above removed the "event_id" field from remote PDU format match room_version_id { @@ -794,8 +792,10 @@ impl Service { .hash_and_sign_event(&mut pdu_json, &room_version_id) { return match e { - Error::Signatures(ruma::signatures::Error::PduSize) => Err!(Request(TooLarge("Message is too long"))), - _ => Err!(Request(Unknown("Signing event failed"))), + Error::Signatures(ruma::signatures::Error::PduSize) => { + Err!(Request(TooLarge("Message/PDU is too long (exceeds 65535 bytes)"))) + }, + _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))), }; } From ab4a283870f6e0e986d818872d7c3c62efdefc17 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 8 Dec 2024 19:01:41 -0500 Subject: [PATCH 0362/1248] update complement config Signed-off-by: strawberry --- nix/pkgs/complement/config.toml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 631100fd..f20abee2 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -9,15 +9,27 @@ database_path = "/database" log = "trace,h2=warn,hyper=warn" port = [8008, 8448] trusted_servers = [] +only_query_trusted_key_servers = false query_trusted_key_servers_first = false +query_trusted_key_servers_first_on_join = false yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true ip_range_denylist = [] url_preview_domain_contains_allowlist = ["*"] +url_preview_domain_explicit_denylist = ["*"] 
media_compat_file_link = false media_startup_check = false -rocksdb_direct_io = false +prune_missing_media = false log_colors = false admin_room_notices = false +allow_check_for_updates = false +allow_unstable_room_versions = true +rocksdb_log_level = "debug" +rocksdb_max_log_files = 1 +rocksdb_recovery_mode = 0 +rocksdb_paranoid_file_checks = true +log_guest_registrations = false +allow_legacy_media = true +startup_netburst = false [global.tls] certs = "/certificate.crt" From faf48405aa3ce491e142196b121268fa76b7a325 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 8 Dec 2024 20:40:03 -0500 Subject: [PATCH 0363/1248] bump rocksdb to v9.8.4, enable track_and_verify_wals_in_manifest by default Signed-off-by: strawberry --- Cargo.lock | 8 ++++---- deps/rust-rocksdb/Cargo.toml | 2 +- flake.lock | 8 ++++---- flake.nix | 2 +- src/database/opts.rs | 6 ++++++ 5 files changed, 16 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc8dc26d..e766fe46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3390,8 +3390,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.29.0+9.7.4" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=e2b8853ee8839dc886e88fd751fe50ed1c27b47e#e2b8853ee8839dc886e88fd751fe50ed1c27b47e" +version = "0.30.0+9.8.4" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3a9640a6b9173d24819c22a49487e31d20a2e59e#3a9640a6b9173d24819c22a49487e31d20a2e59e" dependencies = [ "bindgen", "bzip2-sys", @@ -3407,8 +3407,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.33.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=e2b8853ee8839dc886e88fd751fe50ed1c27b47e#e2b8853ee8839dc886e88fd751fe50ed1c27b47e" +version = "0.34.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3a9640a6b9173d24819c22a49487e31d20a2e59e#3a9640a6b9173d24819c22a49487e31d20a2e59e" dependencies = [ "libc", "rust-librocksdb-sys", diff --git 
a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index c6c3fea5..b6895ad3 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "e2b8853ee8839dc886e88fd751fe50ed1c27b47e" +rev = "3a9640a6b9173d24819c22a49487e31d20a2e59e" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index 7740e925..6049fded 100644 --- a/flake.lock +++ b/flake.lock @@ -922,16 +922,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1731690620, - "narHash": "sha256-Xd4TJYqPERMJLXaGa6r6Ny1Wlw8Uy5Cyf/8q7nS58QM=", + "lastModified": 1733704887, + "narHash": "sha256-4ijNmXACyTJWKRcTdlgObdbOVm2oN3Zefg55/4UPIL0=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "292446aa2bc41699204d817a1e4b091679a886eb", + "rev": "9c656e58c9f969aea28f25b22bc52ea03109677a", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.7.4", + "ref": "v9.8.4", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 281052a8..4eb72731 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.7.4"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.8.4"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; diff --git a/src/database/opts.rs b/src/database/opts.rs index d0ae618c..b0b33927 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -95,6 +95,12 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ 4_u8..=u8::MAX => unimplemented!(), }); + // + // "We recommend to set track_and_verify_wals_in_manifest to true for + 
// production, it has been enabled in production for the entire database cluster + // serving the social graph for all Meta apps." + opts.set_track_and_verify_wals_in_manifest(true); + opts.set_paranoid_checks(config.rocksdb_paranoid_file_checks); opts.set_env(env); From e1bd6a7c2d28853c4f19cd32e283b8c76434e15e Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 9 Dec 2024 12:31:08 -0500 Subject: [PATCH 0364/1248] nix: try bumping to latest complement Signed-off-by: strawberry --- flake.lock | 6 +++--- flake.nix | 7 +++---- nix/pkgs/main/default.nix | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/flake.lock b/flake.lock index 6049fded..56675fb0 100644 --- a/flake.lock +++ b/flake.lock @@ -120,11 +120,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1724347376, - "narHash": "sha256-y0e/ULDJ92IhNQZsS/06g0s+AYZ82aJfrIO9qEse94c=", + "lastModified": 1732612157, + "narHash": "sha256-uqQfVmwCUrsw7eHs9przqnhMoyx4r7WUU8AWfT2Buo4=", "owner": "matrix-org", "repo": "complement", - "rev": "39733c1b2f8314800776748cc7164f9a34650686", + "rev": "fc63446512261a496b794a7082a6598b6f98e925", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 4eb72731..817f4e6d 100644 --- a/flake.nix +++ b/flake.nix @@ -18,7 +18,6 @@ let pkgsHost = import inputs.nixpkgs{ inherit system; - config.permittedInsecurePackages = [ "olm-3.2.16" ]; }; pkgsHostStatic = pkgsHost.pkgsStatic; @@ -118,9 +117,9 @@ # code. 
COMPLEMENT_SRC = inputs.complement.outPath; - # Needed for Complement - CGO_CFLAGS = "-I${scope.pkgs.olm}/include"; - CGO_LDFLAGS = "-L${scope.pkgs.olm}/lib"; + # Needed for Complement: + CGO_CFLAGS = "-Wl,--no-gc-sections"; + CGO_LDFLAGS = "-Wl,--no-gc-sections"; }; # Development tools diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index d11c36cc..c40af176 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -176,7 +176,7 @@ commonAttrs = { # # postInstall = with pkgsBuildHost; '' - find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${libgcc} -t ${llvm} -t ${libllvm} -t ${rustc.unwrapped} -t ${rustc} -t ${libidn2} -t ${libunistring} '{}' + + find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${llvm} -t ${rustc.unwrapped} -t ${rustc} '{}' + ''; }; in From b6ac3649eea5e7b9d335ecb47e902654875b16b0 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 9 Dec 2024 13:04:13 -0500 Subject: [PATCH 0365/1248] use fork of complement Signed-off-by: strawberry --- flake.lock | 10 +++++----- flake.nix | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index 56675fb0..ea067333 100644 --- a/flake.lock +++ b/flake.lock @@ -120,15 +120,15 @@ "complement": { "flake": false, "locked": { - "lastModified": 1732612157, - "narHash": "sha256-uqQfVmwCUrsw7eHs9przqnhMoyx4r7WUU8AWfT2Buo4=", - "owner": "matrix-org", + "lastModified": 1733767359, + "narHash": "sha256-3ZBFqvmTSE5Rtbb0Co89aut7q4ERBAeP7HntLN6sFik=", + "owner": "girlbossceo", "repo": "complement", - "rev": "fc63446512261a496b794a7082a6598b6f98e925", + "rev": "6ec9a3a28fb2baec9afb4fc8b9974a5f5d6525fc", "type": "github" }, "original": { - "owner": "matrix-org", + "owner": "girlbossceo", "ref": "main", "repo": "complement", "type": "github" diff --git a/flake.nix b/flake.nix index 817f4e6d..e4ba8e3d 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,7 @@ inputs = { attic.url = 
"github:zhaofengli/attic?ref=main"; cachix.url = "github:cachix/cachix?ref=master"; - complement = { url = "github:matrix-org/complement?ref=main"; flake = false; }; + complement = { url = "github:girlbossceo/complement?ref=main"; flake = false; }; crane = { url = "github:ipetkov/crane?ref=master"; }; fenix = { url = "github:nix-community/fenix?ref=main"; inputs.nixpkgs.follows = "nixpkgs"; }; flake-compat = { url = "github:edolstra/flake-compat?ref=master"; flake = false; }; From 0ff37b829cc04ad84c7769f8676078cc13f7c6ca Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 9 Dec 2024 13:33:24 -0500 Subject: [PATCH 0366/1248] update default systemd units to use network-online.target fully Signed-off-by: strawberry --- arch/conduwuit.service | 3 ++- debian/conduwuit.service | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index b343c91a..7c05c259 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -1,6 +1,7 @@ [Unit] Description=conduwuit Matrix homeserver -After=network.target +Wants=network-online.target +After=network-online.target Documentation=https://conduwuit.puppyirl.gay/ RequiresMountsFor=/var/lib/private/conduwuit diff --git a/debian/conduwuit.service b/debian/conduwuit.service index d1b0d5d6..3c2ec49d 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -1,7 +1,8 @@ [Unit] Description=conduwuit Matrix homeserver -Documentation=https://conduwuit.puppyirl.gay/ +Wants=network-online.target After=network-online.target +Documentation=https://conduwuit.puppyirl.gay/ [Service] DynamicUser=yes From 9cb71e212ab44d27de1cf84650d7e0f0c5c9e90f Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 9 Dec 2024 17:03:54 -0500 Subject: [PATCH 0367/1248] nix: bump flake.lock finally after 3 months Signed-off-by: strawberry --- flake.lock | 598 +++++------------------- nix/pkgs/main/cross-compilation-env.nix | 35 +- nix/pkgs/main/default.nix | 2 - 3 files changed, 121 
insertions(+), 514 deletions(-) diff --git a/flake.lock b/flake.lock index ea067333..25d6de81 100644 --- a/flake.lock +++ b/flake.lock @@ -5,15 +5,16 @@ "crane": "crane", "flake-compat": "flake-compat", "flake-parts": "flake-parts", + "nix-github-actions": "nix-github-actions", "nixpkgs": "nixpkgs", "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1729116596, - "narHash": "sha256-NnLMLIXGZtAscUF4dCShksuQ1nOGF6Y2dEeyj0rBbUg=", + "lastModified": 1731270564, + "narHash": "sha256-6KMC/NH/VWP5Eb+hA56hz0urel3jP6Y6cF2PX6xaTkk=", "owner": "zhaofengli", "repo": "attic", - "rev": "2b05b7d986cf6009b1c1ef7daa4961cd1a658782", + "rev": "47752427561f1c34debb16728a210d378f0ece36", "type": "github" }, "original": { @@ -26,16 +27,16 @@ "cachix": { "inputs": { "devenv": "devenv", - "flake-compat": "flake-compat_3", + "flake-compat": "flake-compat_2", "git-hooks": "git-hooks", "nixpkgs": "nixpkgs_4" }, "locked": { - "lastModified": 1728672398, - "narHash": "sha256-KxuGSoVUFnQLB2ZcYODW7AVPAh9JqRlD5BrfsC/Q4qs=", + "lastModified": 1733424942, + "narHash": "sha256-5t7Sl6EkOaoP4FvzLmH7HFDbdl9SizmLh53RjDQCbWQ=", "owner": "cachix", "repo": "cachix", - "rev": "aac51f698309fd0f381149214b7eee213c66ef0a", + "rev": "8b6b0e4694b9aa78b2ea4c93bff6e1a222dc7e4a", "type": "github" }, "original": { @@ -47,72 +48,31 @@ }, "cachix_2": { "inputs": { - "devenv": "devenv_2", + "devenv": [ + "cachix", + "devenv" + ], "flake-compat": [ "cachix", - "devenv", - "flake-compat" + "devenv" ], "git-hooks": [ "cachix", - "devenv", - "pre-commit-hooks" + "devenv" ], - "nixpkgs": [ - "cachix", - "devenv", - "nixpkgs" - ] + "nixpkgs": "nixpkgs_2" }, "locked": { - "lastModified": 1726520618, - "narHash": "sha256-jOsaBmJ/EtX5t/vbylCdS7pWYcKGmWOKg4QKUzKr6dA=", + "lastModified": 1728672398, + "narHash": "sha256-KxuGSoVUFnQLB2ZcYODW7AVPAh9JqRlD5BrfsC/Q4qs=", "owner": "cachix", "repo": "cachix", - "rev": "695525f9086542dfb09fde0871dbf4174abbf634", - "type": "github" - }, - "original": { - "owner": "cachix", 
- "repo": "cachix", - "type": "github" - } - }, - "cachix_3": { - "inputs": { - "devenv": "devenv_3", - "flake-compat": [ - "cachix", - "devenv", - "cachix", - "devenv", - "flake-compat" - ], - "nixpkgs": [ - "cachix", - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "pre-commit-hooks": [ - "cachix", - "devenv", - "cachix", - "devenv", - "pre-commit-hooks" - ] - }, - "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", - "owner": "cachix", - "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "rev": "aac51f698309fd0f381149214b7eee213c66ef0a", "type": "github" }, "original": { "owner": "cachix", + "ref": "latest", "repo": "cachix", "type": "github" } @@ -157,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1729741221, - "narHash": "sha256-8AHZZXs1lFkERfBY0C8cZGElSo33D/et7NKEpLRmvzo=", + "lastModified": 1733688869, + "narHash": "sha256-KrhxxFj1CjESDrL5+u/zsVH0K+Ik9tvoac/oFPoxSB8=", "owner": "ipetkov", "repo": "crane", - "rev": "f235b656ee5b2bfd6d94c3bfd67896a575d4a6ed", + "rev": "604637106e420ad99907cae401e13ab6b452e7d9", "type": "github" }, "original": { @@ -178,100 +138,26 @@ "cachix", "flake-compat" ], - "nix": "nix_3", - "nixpkgs": [ - "cachix", - "nixpkgs" - ], - "pre-commit-hooks": [ + "git-hooks": [ "cachix", "git-hooks" - ] - }, - "locked": { - "lastModified": 1727963652, - "narHash": "sha256-os0EDjn7QVXL6RtHNb9TrZLXVm2Tc5/nZKk3KpbTzd8=", - "owner": "cachix", - "repo": "devenv", - "rev": "cb0052e25dbcc8267b3026160dc73cddaac7d5fd", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "cachix": "cachix_3", - "flake-compat": [ - "cachix", - "devenv", - "cachix", - "flake-compat" - ], - "nix": "nix_2", - "nixpkgs": [ - "cachix", - "devenv", - "cachix", - "nixpkgs" - ], - "pre-commit-hooks": [ - "cachix", - "devenv", - "cachix", - "git-hooks" - ] - }, - "locked": { - "lastModified": 
1723156315, - "narHash": "sha256-0JrfahRMJ37Rf1i0iOOn+8Z4CLvbcGNwa2ChOAVrp/8=", - "owner": "cachix", - "repo": "devenv", - "rev": "ff5eb4f2accbcda963af67f1a1159e3f6c7f5f91", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_3": { - "inputs": { - "flake-compat": [ - "cachix", - "devenv", - "cachix", - "devenv", - "cachix", - "flake-compat" ], "nix": "nix", - "nixpkgs": "nixpkgs_2", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ + "nixpkgs": [ "cachix", - "devenv", - "cachix", - "devenv", - "cachix", - "pre-commit-hooks" + "nixpkgs" ] }, "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "lastModified": 1733323168, + "narHash": "sha256-d5DwB4MZvlaQpN6OQ4SLYxb5jA4UH5EtV5t5WOtjLPU=", "owner": "cachix", "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "rev": "efa9010b8b1cfd5dd3c7ed1e172a470c3b84a064", "type": "github" }, "original": { "owner": "cachix", - "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -284,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1729751566, - "narHash": "sha256-99u/hrgBdi8bxSXZc9ZbNkR5EL1htrkbd3lsbKzS60g=", + "lastModified": 1733726208, + "narHash": "sha256-Z6zL4AtpZWxkvTd3l0KkPZamu2wtTKl4nNiqNSlgsb4=", "owner": "nix-community", "repo": "fenix", - "rev": "f32a2d484091a6dc98220b1f4a2c2d60b7c97c64", + "rev": "d51a64e1d23e509f28a6955a6652cc62409dd4a8", "type": "github" }, "original": { @@ -317,11 +203,11 @@ "flake-compat_2": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1733328505, + "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", "type": "github" }, "original": { @@ 
-333,27 +219,11 @@ "flake-compat_3": { "flake": false, "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "lastModified": 1733328505, + "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", "owner": "edolstra", "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_4": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", "type": "github" }, "original": { @@ -412,44 +282,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", "owner": "numtide", "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_3": { - "inputs": { - "systems": "systems_2" - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", "type": "github" }, "original": { @@ 
-473,11 +310,11 @@ "nixpkgs-stable": "nixpkgs-stable_2" }, "locked": { - "lastModified": 1727854478, - "narHash": "sha256-/odH2nUMAwkMgOS2nG2z0exLQNJS4S2LfMW0teqU7co=", + "lastModified": 1733318908, + "narHash": "sha256-SVQVsbafSM1dJ4fpgyBqLZ+Lft+jcQuMtEL3lQWx2Sk=", "owner": "cachix", "repo": "git-hooks.nix", - "rev": "5f58871c9657b5fc0a7f65670fe2ba99c26c1d79", + "rev": "6f4e2a2112050951a314d2733a994fbab94864c6", "type": "github" }, "original": { @@ -527,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1725659644, - "narHash": "sha256-WjnpmopfvFoUbubIu9bki+Y6P4YXDfvnW4+72hniq3g=", + "lastModified": 1733603756, + "narHash": "sha256-eTKnZDZ1Ex++v+BI0DBcUBmCXAO/tE8hxK9MiyztZkU=", "owner": "axboe", "repo": "liburing", - "rev": "0fe5c09195c0918f89582dd6ff098a58a0bdf62a", + "rev": "c3d5d6270cd5ed48d817fc1e8e95f7c8b222f2ff", "type": "github" }, "original": { @@ -542,123 +379,26 @@ } }, "nix": { - "inputs": { - "flake-compat": "flake-compat_2", - "nixpkgs": [ - "cachix", - "devenv", - "cachix", - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-filter": { - "locked": { - "lastModified": 1710156097, - "narHash": "sha256-1Wvk8UP7PXdf8bCCaEoMnOT1qe5/Duqgj+rL8sRQsSM=", - "owner": "numtide", - "repo": "nix-filter", - "rev": "3342559a24e85fc164b295c3444e8a139924675b", - "type": "github" - }, - "original": { - "owner": "numtide", - "ref": "main", - "repo": "nix-filter", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "cachix", - "devenv", - "cachix", - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - 
"locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { "inputs": { "flake-compat": [ "cachix", - "devenv", - "cachix", - "devenv", - "flake-compat" - ], - "nixpkgs": [ - "cachix", - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression_2" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix_3": { - "inputs": { - "flake-compat": [ - "cachix", - "devenv", - "flake-compat" + "devenv" ], "flake-parts": "flake-parts_2", "libgit2": "libgit2", "nixpkgs": "nixpkgs_3", - "nixpkgs-23-11": "nixpkgs-23-11", - "nixpkgs-regression": "nixpkgs-regression_3", - "pre-commit-hooks": "pre-commit-hooks" + "nixpkgs-23-11": [ + "cachix", + "devenv" + ], + "nixpkgs-regression": [ + "cachix", + "devenv" + ], + "pre-commit-hooks": [ + "cachix", + "devenv" + ] }, "locked": { "lastModified": 1727438425, @@ -675,6 +415,43 @@ "type": "github" } }, + "nix-filter": { + "locked": { + "lastModified": 1731533336, + "narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=", + "owner": "numtide", + "repo": "nix-filter", + "rev": "f7653272fd234696ae94229839a99b73c9ab7de0", + "type": "github" + }, + "original": { + "owner": "numtide", + "ref": "main", + "repo": "nix-filter", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "attic", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1729742964, + "narHash": 
"sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "e04df33f62cdcf93d73e9a04142464753a16db67", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, "nixpkgs": { "locked": { "lastModified": 1726042813, @@ -691,70 +468,6 @@ "type": "github" } }, - "nixpkgs-23-11": { - "locked": { - "lastModified": 1717159533, - "narHash": "sha256-oamiKNfr2MS6yH64rUn99mIZjc45nGJlj9eGth/3Xuw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446", - "type": "github" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_3": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, "nixpkgs-stable": { "locked": { 
"lastModified": 1724316499, @@ -773,11 +486,11 @@ }, "nixpkgs-stable_2": { "locked": { - "lastModified": 1720386169, - "narHash": "sha256-NGKVY4PjzwAa4upkGtAMz1npHGoRzWotlSnVlqI40mo=", + "lastModified": 1730741070, + "narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "194846768975b7ad2c4988bdb82572c00222c0d7", + "rev": "d063c1dd113c91ab27959ba540c0d9753409edf3", "type": "github" }, "original": { @@ -789,16 +502,16 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "lastModified": 1730531603, + "narHash": "sha256-Dqg6si5CqIzm87sp57j5nTaeBbWhHFaVyG7V6L8k3lY=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", + "rev": "7ffd9ae656aec493492b44d0ddfb28e79a1ea25d", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixpkgs-unstable", + "ref": "nixos-unstable", "repo": "nixpkgs", "type": "github" } @@ -821,11 +534,11 @@ }, "nixpkgs_4": { "locked": { - "lastModified": 1727802920, - "narHash": "sha256-HP89HZOT0ReIbI7IJZJQoJgxvB2Tn28V6XS3MNKnfLs=", + "lastModified": 1733212471, + "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "27e30d177e57d912d614c88c622dcfdb2e6e6515", + "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776", "type": "github" }, "original": { @@ -837,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1725534445, - "narHash": "sha256-Yd0FK9SkWy+ZPuNqUgmVPXokxDgMJoGuNpMEtkfcf84=", + "lastModified": 1733656523, + "narHash": "sha256-w0FXPfpGhOihoJDiwMsyN1EzpsXi2F8VQ+NVZQSMtys=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9bb1e7571aadf31ddb4af77fc64b2d59580f9a39", + "rev": "93dc9803a1ee435e590b02cde9589038d5cc3a4e", "type": "github" }, "original": { @@ -851,74 +564,6 @@ "type": "github" } }, - "poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", 
- "nixpkgs": [ - "cachix", - "devenv", - "cachix", - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", - "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "poetry2nix", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "cachix", - "devenv", - "nix" - ], - "flake-utils": "flake-utils_2", - "gitignore": [ - "cachix", - "devenv", - "nix" - ], - "nixpkgs": [ - "cachix", - "devenv", - "nix", - "nixpkgs" - ], - "nixpkgs-stable": [ - "cachix", - "devenv", - "nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1712897695, - "narHash": "sha256-nMirxrGteNAl9sWiOhoN5tIHyjBbVi5e2tgZUgZlK3Y=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "40e6053ecb65fcbf12863338a6dcefb3f55f1bf8", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, "rocksdb": { "flake": false, "locked": { @@ -943,8 +588,8 @@ "complement": "complement", "crane": "crane_2", "fenix": "fenix", - "flake-compat": "flake-compat_4", - "flake-utils": "flake-utils_3", + "flake-compat": "flake-compat_3", + "flake-utils": "flake-utils", "liburing": "liburing", "nix-filter": "nix-filter", "nixpkgs": "nixpkgs_5", @@ -954,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1729715509, - "narHash": "sha256-jUDN4e1kObbksb4sc+57NEeujBEDRdLCOu9wiE3RZdM=", + "lastModified": 1733642337, + "narHash": "sha256-I1uc97f/cNhOpCemIbBAUS+CV0R7jts0NW9lc8jrpxc=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "40492e15d49b89cf409e2c5536444131fac49429", + "rev": "4c755e62a617eeeef3066994731ce1cdd16504ac", "type": "github" }, "original": { @@ -982,21 +627,6 @@ "repo": "default", "type": "github" } - }, - "systems_2": { - 
"locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } } }, "root": "root", diff --git a/nix/pkgs/main/cross-compilation-env.nix b/nix/pkgs/main/cross-compilation-env.nix index c189d57c..3fb94d41 100644 --- a/nix/pkgs/main/cross-compilation-env.nix +++ b/nix/pkgs/main/cross-compilation-env.nix @@ -1,6 +1,5 @@ { lib , pkgsBuildHost -, pkgsBuildTarget , rust , stdenv }: @@ -23,25 +22,13 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic { [ "-C" "relocation-model=static" ] ++ lib.optionals (stdenv.buildPlatform.config != stdenv.hostPlatform.config) - [ "-l" "c" ] - ++ lib.optionals - # This check has to match the one [here][0]. We only need to set - # these flags when using a different linker. Don't ask me why, - # though, because I don't know. All I know is it breaks otherwise. - # - # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40 - ( - # Nixpkgs doesn't check for x86_64 here but we do, because I - # observed a failure building statically for x86_64 without - # including it here. Linkers are weird. 
- (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64) - && stdenv.hostPlatform.isStatic - && !stdenv.hostPlatform.isDarwin - && !stdenv.cc.bintools.isLLVM - ) [ + "-l" + "c" + "-l" "stdc++" + "-L" "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" ] @@ -58,7 +45,7 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic { ( let inherit (rust.lib) envVars; - shouldUseLLD = platform: platform.isAarch64 && platform.isStatic && !stdenv.hostPlatform.isDarwin; + #shouldUseLLD = platform: platform.isAarch64 && platform.isStatic && !stdenv.hostPlatform.isDarwin; in lib.optionalAttrs (stdenv.targetPlatform.rust.rustcTarget @@ -66,30 +53,22 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic { ( let inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget; - linkerForTarget = if shouldUseLLD stdenv.targetPlatform - && !stdenv.cc.bintools.isLLVM # whether stdenv's linker is lld already - then "${pkgsBuildTarget.llvmPackages.bintools}/bin/${stdenv.cc.targetPrefix}ld.lld" - else envVars.ccForTarget; in { "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = linkerForTarget; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForTarget; } ) // ( let inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; - linkerForHost = if shouldUseLLD stdenv.targetPlatform - && !stdenv.cc.bintools.isLLVM - then "${pkgsBuildHost.llvmPackages.bintools}/bin/${stdenv.cc.targetPrefix}ld.lld" - else envVars.ccForHost; in { "CC_${cargoEnvVarTarget}" = envVars.ccForHost; "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = linkerForHost; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForHost; CARGO_BUILD_TARGET = rustcTarget; } ) diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index c40af176..abc85d82 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -6,7 +6,6 @@ , libiconv , liburing , 
pkgsBuildHost -, pkgsBuildTarget , rocksdb , removeReferencesTo , rust @@ -96,7 +95,6 @@ buildDepsOnlyEnv = inherit lib pkgsBuildHost - pkgsBuildTarget rust stdenv; }); From cd67ca6c57420f0c9550ce1861926dfd427b202a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 10 Dec 2024 04:15:24 +0000 Subject: [PATCH 0368/1248] fix .gitignore to allow /target to be a symlink Signed-off-by: Jason Volk --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index cf366522..b5fea66b 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,7 @@ modules.xml .nfs* # Rust -/target/ +/target ### vscode ### .vscode/* From 5fc8e90e0231214a5b52ed0a4320d071f07b559f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 10 Dec 2024 05:33:04 +0000 Subject: [PATCH 0369/1248] fix runtime metrics command Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index d4c9a57b..a17d0800 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -773,7 +773,14 @@ pub(super) async fn memory_stats(&self) -> Result { pub(super) async fn runtime_metrics(&self) -> Result { let out = self.services.server.metrics.runtime_metrics().map_or_else( || "Runtime metrics are not available.".to_owned(), - |metrics| format!("```rs\n{metrics:#?}\n```"), + |metrics| { + format!( + "```rs\nnum_workers: {}\nnum_alive_tasks: {}\nglobal_queue_depth: {}\n```", + metrics.num_workers(), + metrics.num_alive_tasks(), + metrics.global_queue_depth() + ) + }, ); Ok(RoomMessageEventContent::text_markdown(out)) From aba88ccead6afef9197683890bc2bb279295db19 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 20:31:16 +0000 Subject: [PATCH 0370/1248] misc tracing span tweaks Signed-off-by: Jason Volk --- src/api/client/media.rs | 35 +++++++++++++++++++++---- src/api/client/sync/v3.rs | 19 +++++++++++++- 
src/database/engine.rs | 7 ++++- src/database/map/get_batch.rs | 2 +- src/database/maps.rs | 2 +- src/database/pool.rs | 17 +++++++++--- src/main/main.rs | 5 ++++ src/router/layers.rs | 4 +-- src/router/request.rs | 8 ++++-- src/router/serve/unix.rs | 2 +- src/service/manager.rs | 6 +++++ src/service/resolver/actual.rs | 2 +- src/service/rooms/state_accessor/mod.rs | 6 ++--- src/service/sending/data.rs | 1 - src/service/sending/send.rs | 2 +- src/service/sending/sender.rs | 2 +- 16 files changed, 96 insertions(+), 24 deletions(-) diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 71693618..67a2bbdf 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -39,7 +39,12 @@ pub(crate) async fn get_media_config_route( /// /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory -#[tracing::instrument(skip_all, fields(%client), name = "media_upload")] +#[tracing::instrument( + name = "media_upload", + level = "debug", + skip_all, + fields(%client), +)] pub(crate) async fn create_content_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, @@ -67,7 +72,12 @@ pub(crate) async fn create_content_route( /// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` /// /// Load media thumbnail from our server or over federation. -#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")] +#[tracing::instrument( + name = "media_thumbnail_get", + level = "debug", + skip_all, + fields(%client), +)] pub(crate) async fn get_content_thumbnail_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, @@ -98,7 +108,12 @@ pub(crate) async fn get_content_thumbnail_route( /// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}` /// /// Load media from our server or over federation. 
-#[tracing::instrument(skip_all, fields(%client), name = "media_get")] +#[tracing::instrument( + name = "media_get", + level = "debug", + skip_all, + fields(%client), +)] pub(crate) async fn get_content_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, @@ -128,7 +143,12 @@ pub(crate) async fn get_content_route( /// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}` /// /// Load media from our server or over federation as fileName. -#[tracing::instrument(skip_all, fields(%client), name = "media_get_af")] +#[tracing::instrument( + name = "media_get_af", + level = "debug", + skip_all, + fields(%client), +)] pub(crate) async fn get_content_as_filename_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, @@ -158,7 +178,12 @@ pub(crate) async fn get_content_as_filename_route( /// # `GET /_matrix/client/v1/media/preview_url` /// /// Returns URL preview. -#[tracing::instrument(skip_all, fields(%client), name = "url_preview")] +#[tracing::instrument( + name = "url_preview", + level = "debug", + skip_all, + fields(%client), +)] pub(crate) async fn get_media_preview_route( State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 61a0ea5c..31179d3c 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -97,6 +97,14 @@ type PresenceUpdates = HashMap; /// For left rooms: /// - If the user left after `since`: `prev_batch` token, empty state (TODO: /// subset of the state at the point of the leave) +#[tracing::instrument( + name = "sync", + level = "debug", + skip_all, + fields( + since = %body.body.since.as_deref().unwrap_or_default(), + ) +)] pub(crate) async fn sync_events_route( State(services): State, body: Ruma, ) -> Result> { @@ -391,8 +399,17 @@ async fn process_presence_updates(services: &Services, since: u64, syncing_user: .await } +#[tracing::instrument( + name = 
"left", + level = "debug", + skip_all, + fields( + room_id = %room_id, + full = %full_state, + ll = %lazy_load_enabled, + ), +)] #[allow(clippy::too_many_arguments)] -#[tracing::instrument(skip_all, fields(user_id = %sender_user, room_id = %room_id), name = "left_room")] async fn handle_left_room( services: &Services, since: u64, room_id: OwnedRoomId, sender_user: &UserId, next_batch_string: &str, full_state: bool, lazy_load_enabled: bool, diff --git a/src/database/engine.rs b/src/database/engine.rs index fe6602ae..d3bb727e 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -307,7 +307,12 @@ pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result<()> { Ok(()) } -#[tracing::instrument(skip(msg), name = "rocksdb", level = "trace")] +#[tracing::instrument( + parent = None, + name = "rocksdb", + level = "trace" + skip(msg), +)] pub(crate) fn handle_log(level: LogLevel, msg: &str) { let msg = msg.trim(); if msg.starts_with("Options") { diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index 0f1fdea7..bb904943 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -33,7 +33,7 @@ where } #[implement(super::Map)] -#[tracing::instrument(skip(self, keys), name = "batch_blocking", level = "trace")] +#[tracing::instrument(name = "batch_blocking", level = "trace", skip_all)] pub(crate) fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator>> + Send where I: Iterator + ExactSizeIterator + Debug + Send, diff --git a/src/database/maps.rs b/src/database/maps.rs index 0e835abf..9b8d326a 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -10,7 +10,7 @@ pub(crate) type MapsKey = String; pub(crate) fn open(db: &Arc) -> Result { open_list(db, MAPS) } -#[tracing::instrument(skip_all, level = "debug")] +#[tracing::instrument(name = "maps", level = "debug", skip_all)] pub(crate) fn open_list(db: &Arc, maps: &[&str]) -> Result { Ok(maps .iter() diff --git a/src/database/pool.rs 
b/src/database/pool.rs index e7ffc807..79755ea6 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -113,10 +113,14 @@ async fn spawn_until(self: &Arc, recv: Receiver, max: usize) -> Resul } #[implement(Pool)] +#[tracing::instrument( + name = "spawn", + level = "trace", + skip_all, + fields(id = %workers.len()) +)] fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: Receiver) -> Result { let id = workers.len(); - - debug!(?id, "spawning"); let self_ = self.clone(); let _abort = workers.spawn_blocking_on(move || self_.worker(id, recv), self.server.runtime()); @@ -181,7 +185,14 @@ async fn execute(&self, cmd: Cmd) -> Result { } #[implement(Pool)] -#[tracing::instrument(skip(self, recv))] +#[tracing::instrument( + parent = None, + level = "debug", + skip(self, recv), + fields( + tid = ?std::thread::current().id(), + ), +)] fn worker(self: Arc, id: usize, recv: Receiver) { debug!("worker spawned"); defer! {{ debug!("worker finished"); }} diff --git a/src/main/main.rs b/src/main/main.rs index c653444c..41b21b58 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -59,6 +59,11 @@ fn main() -> Result<(), Error> { /// Operate the server normally in release-mode static builds. This will start, /// run and stop the server within the asynchronous runtime. 
#[cfg(not(conduit_mods))] +#[tracing::instrument( + name = "main", + parent = None, + skip_all +)] async fn async_main(server: &Arc) -> Result<(), Error> { extern crate conduit_router as router; diff --git a/src/router/layers.rs b/src/router/layers.rs index fd68cc36..bb3ffe87 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -150,8 +150,8 @@ fn cors_layer(_server: &Server) -> CorsLayer { fn body_limit_layer(server: &Server) -> DefaultBodyLimit { DefaultBodyLimit::max(server.config.max_request_size) } +#[tracing::instrument(name = "panic", level = "error", skip_all)] #[allow(clippy::needless_pass_by_value)] -#[tracing::instrument(skip_all, name = "panic")] fn catch_panic(err: Box) -> http::Response> { //TODO: XXX /* @@ -197,7 +197,7 @@ fn tracing_span(request: &http::Request) -> tracing::Span { let method = request.method(); - tracing::info_span!("router:", %method, %path) + tracing::debug_span!(parent: None, "router", %method, %path) } fn truncated_matched_path(path: &MatchedPath) -> &str { diff --git a/src/router/request.rs b/src/router/request.rs index 271d9573..13d36981 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -8,7 +8,11 @@ use conduit::{debug, debug_error, debug_warn, defer, err, error, trace, Result}; use conduit_service::Services; use http::{Method, StatusCode, Uri}; -#[tracing::instrument(skip_all, level = "debug")] +#[tracing::instrument( + parent = None, + level = "trace", + skip_all, +)] pub(crate) async fn spawn( State(services): State>, req: http::Request, next: axum::middleware::Next, ) -> Result { @@ -34,7 +38,7 @@ pub(crate) async fn spawn( task.await.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) } -#[tracing::instrument(skip_all, name = "handle")] +#[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn handle( State(services): State>, req: http::Request, next: axum::middleware::Next, ) -> Result { diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index 5df41b61..dd2ecf6a 100644 
--- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -30,7 +30,7 @@ type MakeService = IntoMakeServiceWithConnectInfo; const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0); const FINI_POLL_INTERVAL: Duration = Duration::from_millis(750); -#[tracing::instrument(skip_all)] +#[tracing::instrument(skip_all, level = "debug")] pub(super) async fn serve(server: &Arc, app: Router, mut shutdown: broadcast::Receiver<()>) -> Result<()> { let mut tasks = JoinSet::<()>::new(); let executor = TokioExecutor::new(); diff --git a/src/service/manager.rs b/src/service/manager.rs index 21e0ed7c..7384e512 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -155,6 +155,12 @@ impl Manager { /// should never error with a panic, and if so it should propagate, but it may /// error with an Abort which the manager should handle along with results to /// determine if the worker should be restarted. +#[tracing::instrument( + parent = None, + level = "trace", + skip_all, + fields(service = %service.name()), +)] async fn worker(service: Arc) -> WorkerResult { let service_ = Arc::clone(&service); let result = AssertUnwindSafe(service_.worker()) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 8553e8bb..8b720365 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -26,7 +26,7 @@ impl ActualDest { } impl super::Service { - #[tracing::instrument(skip_all, name = "resolve")] + #[tracing::instrument(skip_all, level = "debug", name = "resolve")] pub(crate) async fn get_actual_dest(&self, server_name: &ServerName) -> Result { let (result, cached) = if let Some(result) = self.get_cached_destination(server_name) { (result, true) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index ef1b63f5..bcc1263d 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -184,7 +184,7 @@ impl 
Service { /// Whether a server is allowed to see an event through federation, based on /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self, origin, room_id, event_id))] + #[tracing::instrument(skip_all, level = "trace")] pub async fn server_can_see_event(&self, origin: &ServerName, room_id: &RoomId, event_id: &EventId) -> bool { let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { return true; @@ -242,7 +242,7 @@ impl Service { /// Whether a user is allowed to see an event, based on /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self, user_id, room_id, event_id))] + #[tracing::instrument(skip_all, level = "trace")] pub async fn user_can_see_event(&self, user_id: &UserId, room_id: &RoomId, event_id: &EventId) -> bool { let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { return true; @@ -293,7 +293,7 @@ impl Service { /// Whether a user is allowed to see an event, based on /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip(self, user_id, room_id))] + #[tracing::instrument(skip_all, level = "trace")] pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { if self.services.state_cache.is_joined(user_id, room_id).await { return true; diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 118dab91..bc70e875 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -176,7 +176,6 @@ impl Data { } } -#[tracing::instrument(skip(key), level = "debug")] fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, SendingEvent)> { // Appservices start with a plus Ok::<_, Error>(if key.starts_with(b"+") { diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 6a8f1b1b..df39bcd1 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -24,7 +24,7 @@ use crate::{ }; impl super::Service { - #[tracing::instrument(skip(self, client, request), name = "send")] + #[tracing::instrument(skip_all, level = "debug")] pub async fn send(&self, client: &Client, dest: &ServerName, request: T) -> Result where T: OutgoingRequest + Send, diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 5c00915b..49ce1be4 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -62,7 +62,7 @@ pub const PDU_LIMIT: usize = 50; pub const EDU_LIMIT: usize = 100; impl Service { - #[tracing::instrument(skip_all, name = "sender")] + #[tracing::instrument(skip_all, level = "debug")] pub(super) async fn sender(&self) -> Result<()> { let mut statuses: CurTransactionStatus = CurTransactionStatus::new(); let mut futures: SendingFutures<'_> = FuturesUnordered::new(); From 34f9e3260f44c8a9f642aeb3c9b94266431d5e25 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 8 Dec 2024 03:00:09 +0000 Subject: [PATCH 0371/1248] additional sync v3 refactoring/optimizations and tracing instruments Signed-off-by: Jason Volk --- 
src/api/client/sync/v3.rs | 275 +++++++++++++++++++++----------------- 1 file changed, 154 insertions(+), 121 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 31179d3c..28ca1ea2 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -11,8 +11,9 @@ use conduit::{ result::{FlatOk, LogDebugErr}, utils, utils::{ + future::OptionExt, math::ruma_from_u64, - stream::{BroadbandExt, Tools}, + stream::{BroadbandExt, Tools, WidebandExt}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, Error, PduCount, PduEvent, Result, @@ -22,7 +23,7 @@ use conduit_service::{ Services, }; use futures::{ - future::{join, join3, join5, try_join, OptionFuture}, + future::{join, join3, join4, join5, try_join, try_join3, OptionFuture}, FutureExt, StreamExt, TryFutureExt, }; use ruma::{ @@ -122,7 +123,6 @@ pub(crate) async fn sync_events_route( let watcher = services.sync.watch(sender_user, sender_device); let next_batch = services.globals.current_count()?; - let next_batchcount = PduCount::Normal(next_batch); let next_batch_string = next_batch.to_string(); // Load filter @@ -153,9 +153,8 @@ pub(crate) async fn sync_events_route( .as_ref() .and_then(|string| string.parse().ok()) .unwrap_or(0); - let sincecount = PduCount::Normal(since); - let joined_related = services + let joined_rooms = services .rooms .state_cache .rooms_joined(sender_user) @@ -167,9 +166,7 @@ pub(crate) async fn sync_events_route( sender_device, room_id.clone(), since, - sincecount, next_batch, - next_batchcount, lazy_load_enabled, lazy_load_send_redundant, full_state, @@ -211,9 +208,11 @@ pub(crate) async fn sync_events_route( .ready_filter_map(|(room_id, left_room)| left_room.map(|left_room| (room_id, left_room))) .collect(); - let invited_rooms = services.rooms.state_cache.rooms_invited(sender_user).fold( - BTreeMap::new(), - |mut invited_rooms, (room_id, invite_state)| async move { + let invited_rooms = services + .rooms + .state_cache + .rooms_invited(sender_user) 
+ .fold_default(|mut invited_rooms: BTreeMap<_, _>, (room_id, invite_state)| async move { // Get and drop the lock to wait for remaining operations to finish let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; drop(insert_lock); @@ -238,8 +237,7 @@ pub(crate) async fn sync_events_route( invited_rooms.insert(room_id, invited_room); invited_rooms - }, - ); + }); let presence_updates: OptionFuture<_> = services .globals @@ -274,7 +272,7 @@ pub(crate) async fn sync_events_route( .users .remove_to_device_events(sender_user, sender_device, since); - let rooms = join3(joined_related, left_rooms, invited_rooms); + let rooms = join3(joined_rooms, left_rooms, invited_rooms); let ephemeral = join3(remove_to_device_events, to_device_events, presence_updates); let top = join5(account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) .boxed() @@ -282,8 +280,8 @@ pub(crate) async fn sync_events_route( let (account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) = top; let ((), to_device_events, presence_updates) = ephemeral; - let (joined_related, left_rooms, invited_rooms) = rooms; - let (joined_rooms, mut device_list_updates, left_encrypted_users) = joined_related; + let (joined_rooms, left_rooms, invited_rooms) = rooms; + let (joined_rooms, mut device_list_updates, left_encrypted_users) = joined_rooms; device_list_updates.extend(keys_changed); // If the user doesn't share an encrypted room with the target anymore, we need @@ -349,6 +347,7 @@ pub(crate) async fn sync_events_route( Ok(response) } +#[tracing::instrument(name = "presence", level = "debug", skip_all)] async fn process_presence_updates(services: &Services, since: u64, syncing_user: &UserId) -> PresenceUpdates { services .presence @@ -411,17 +410,17 @@ async fn process_presence_updates(services: &Services, since: u64, syncing_user: )] #[allow(clippy::too_many_arguments)] async fn handle_left_room( - services: &Services, since: u64, room_id: OwnedRoomId, 
sender_user: &UserId, next_batch_string: &str, + services: &Services, since: u64, ref room_id: OwnedRoomId, sender_user: &UserId, next_batch_string: &str, full_state: bool, lazy_load_enabled: bool, ) -> Result> { // Get and drop the lock to wait for remaining operations to finish - let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; + let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; drop(insert_lock); let left_count = services .rooms .state_cache - .get_left_count(&room_id, sender_user) + .get_left_count(room_id, sender_user) .await .ok(); @@ -430,7 +429,7 @@ async fn handle_left_room( return Ok(None); } - if !services.rooms.metadata.exists(&room_id).await { + if !services.rooms.metadata.exists(room_id).await { // This is just a rejected invite, not a room we know // Insert a leave event anyways let event = PduEvent { @@ -476,7 +475,7 @@ async fn handle_left_room( let since_shortstatehash = services .rooms .user - .get_token_shortstatehash(&room_id, since) + .get_token_shortstatehash(room_id, since) .await; let since_state_ids = match since_shortstatehash { @@ -487,7 +486,7 @@ async fn handle_left_room( let Ok(left_event_id): Result = services .rooms .state_accessor - .room_state_get_id(&room_id, &StateEventType::RoomMember, sender_user.as_str()) + .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) .await else { error!("Left room but no left state event"); @@ -557,30 +556,46 @@ async fn handle_left_room( })) } +#[tracing::instrument( + name = "joined", + level = "debug", + skip_all, + fields( + room_id = ?room_id, + ), +)] #[allow(clippy::too_many_arguments)] async fn load_joined_room( - services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: OwnedRoomId, since: u64, - sincecount: PduCount, next_batch: u64, next_batchcount: PduCount, lazy_load_enabled: bool, - lazy_load_send_redundant: bool, full_state: bool, + services: &Services, sender_user: &UserId, 
sender_device: &DeviceId, ref room_id: OwnedRoomId, since: u64, + next_batch: u64, lazy_load_enabled: bool, lazy_load_send_redundant: bool, full_state: bool, ) -> Result<(JoinedRoom, HashSet, HashSet)> { - let mut device_list_updates = HashSet::::new(); - let mut left_encrypted_users = HashSet::::new(); - // Get and drop the lock to wait for remaining operations to finish // This will make sure the we have all events until next_batch - let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; + let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; drop(insert_lock); - let (timeline_pdus, limited) = - load_timeline(services, sender_user, &room_id, sincecount, Some(next_batchcount), 10_usize).await?; + let sincecount = PduCount::Normal(since); + let next_batchcount = PduCount::Normal(next_batch); - let send_notification_counts = !timeline_pdus.is_empty() - || services - .rooms - .user - .last_notification_read(sender_user, &room_id) - .await > since; + let current_shortstatehash = services + .rooms + .state + .get_room_shortstatehash(room_id) + .map_err(|_| err!(Database(error!("Room {room_id} has no state")))); + let since_shortstatehash = services + .rooms + .user + .get_token_shortstatehash(room_id, since) + .ok() + .map(Ok); + + let timeline = load_timeline(services, sender_user, room_id, sincecount, Some(next_batchcount), 10_usize); + + let (current_shortstatehash, since_shortstatehash, timeline) = + try_join3(current_shortstatehash, since_shortstatehash, timeline).await?; + + let (timeline_pdus, limited) = timeline; let timeline_users = timeline_pdus .iter() .fold(HashSet::new(), |mut timeline_users, (_, event)| { @@ -588,43 +603,44 @@ async fn load_joined_room( timeline_users }); + let last_notification_read: OptionFuture<_> = timeline_pdus + .is_empty() + .then(|| { + services + .rooms + .user + .last_notification_read(sender_user, room_id) + }) + .into(); + + let send_notification_counts = last_notification_read + 
.is_none_or(|&count| count > since) + .await; + services .rooms .lazy_loading - .lazy_load_confirm_delivery(sender_user, sender_device, &room_id, sincecount); + .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount); - let current_shortstatehash = services - .rooms - .state - .get_room_shortstatehash(&room_id) - .map_err(|_| err!(Database(error!("Room {room_id} has no state")))); - - let since_shortstatehash = services - .rooms - .user - .get_token_shortstatehash(&room_id, since) - .ok() - .map(Ok); - - let (current_shortstatehash, since_shortstatehash) = try_join(current_shortstatehash, since_shortstatehash).await?; + let no_state_changes = timeline_pdus.is_empty() + && (since_shortstatehash.is_none() || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); + let mut device_list_updates = HashSet::::new(); + let mut left_encrypted_users = HashSet::::new(); let StateChanges { heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events, - } = if timeline_pdus.is_empty() - && (since_shortstatehash.is_none() || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))) - { - // No state changes + } = if no_state_changes { StateChanges::default() } else { calculate_state_changes( services, sender_user, sender_device, - &room_id, + room_id, next_batchcount, lazy_load_enabled, lazy_load_send_redundant, @@ -636,22 +652,68 @@ async fn load_joined_room( &timeline_pdus, &timeline_users, ) - .boxed() .await? 
}; - let prev_batch = timeline_pdus - .first() - .map(at!(0)) - .as_ref() - .map(ToString::to_string); + let account_data_events = services + .account_data + .changes_since(Some(room_id), sender_user, since) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect(); + + // Look for device list updates in this room + let device_updates = services + .users + .room_keys_changed(room_id, since, None) + .map(|(user_id, _)| user_id) + .map(ToOwned::to_owned) + .collect::>(); + + let room_events = timeline_pdus + .iter() + .stream() + .wide_filter_map(|item| ignored_filter(services, item.clone(), sender_user)) + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect(); + + let receipt_events = services + .rooms + .read_receipt + .readreceipts_since(room_id, since) + .filter_map(|(read_user, _, edu)| async move { + services + .users + .user_is_ignored(read_user, sender_user) + .await + .or_some((read_user.to_owned(), edu)) + }) + .collect::>>(); + + let typing_events = services + .rooms + .typing + .last_typing_update(room_id) + .and_then(|count| async move { + if count <= since { + return Ok(Vec::>::new()); + } + + let typings = services + .rooms + .typing + .typings_all(room_id, sender_user) + .await?; + + Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) + }) + .unwrap_or(Vec::new()); let notification_count: OptionFuture<_> = send_notification_counts .then(|| { services .rooms .user - .notification_count(sender_user, &room_id) + .notification_count(sender_user, room_id) .map(TryInto::try_into) .unwrap_or(uint!(0)) }) @@ -662,79 +724,33 @@ async fn load_joined_room( services .rooms .user - .highlight_count(sender_user, &room_id) + .highlight_count(sender_user, room_id) .map(TryInto::try_into) .unwrap_or(uint!(0)) }) .into(); - let room_events = timeline_pdus - .iter() - .stream() - .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let 
account_data_events = services - .account_data - .changes_since(Some(&room_id), sender_user, since) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect(); - - let receipt_events = services - .rooms - .read_receipt - .readreceipts_since(&room_id, since) - .filter_map(|(read_user, _, edu)| async move { - services - .users - .user_is_ignored(read_user, sender_user) - .await - .or_some((read_user.to_owned(), edu)) - }) - .collect::>>(); - - // Look for device list updates in this room - let device_updates = services - .users - .room_keys_changed(&room_id, since, None) - .map(|(user_id, _)| user_id) - .map(ToOwned::to_owned) - .collect::>(); - - let events = join3(room_events, account_data_events, receipt_events); + let events = join4(room_events, account_data_events, receipt_events, typing_events); let unread_notifications = join(notification_count, highlight_count); let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) .boxed() .await; - let (room_events, account_data_events, receipt_events) = events; + let (room_events, account_data_events, receipt_events, typing_events) = events; let (notification_count, highlight_count) = unread_notifications; - device_list_updates.extend(device_updates); - let mut edus: Vec> = receipt_events.into_values().collect(); - if services.rooms.typing.last_typing_update(&room_id).await? 
> since { - edus.push( - serde_json::from_str( - &serde_json::to_string( - &services - .rooms - .typing - .typings_all(&room_id, sender_user) - .await?, - ) - .expect("event is valid, we just created it"), - ) - .expect("event is valid, we just created it"), - ); - } + device_list_updates.extend(device_updates); + let edus: Vec> = receipt_events + .into_values() + .chain(typing_events.into_iter()) + .collect(); // Save the state after this sync so we can send the correct state diff next // sync services .rooms .user - .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash) + .associate_token_shortstatehash(room_id, next_batch, current_shortstatehash) .await; let joined_room = JoinedRoom { @@ -757,8 +773,12 @@ async fn load_joined_room( }, timeline: Timeline { limited: limited || joined_since_last_sync, - prev_batch, events: room_events, + prev_batch: timeline_pdus + .first() + .map(at!(0)) + .as_ref() + .map(ToString::to_string), }, state: RoomState { events: state_events @@ -775,6 +795,17 @@ async fn load_joined_room( Ok((joined_room, device_list_updates, left_encrypted_users)) } +#[tracing::instrument( + name = "state", + level = "trace", + skip_all, + fields( + full = %full_state, + ll = ?(lazy_load_enabled, lazy_load_send_redundant), + cs = %current_shortstatehash, + ss = ?since_shortstatehash, + ) +)] #[allow(clippy::too_many_arguments)] async fn calculate_state_changes( services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, @@ -833,6 +864,7 @@ async fn calculate_state_changes( } } +#[tracing::instrument(name = "initial", level = "trace", skip_all)] #[allow(clippy::too_many_arguments)] async fn calculate_state_initial( services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, @@ -847,7 +879,7 @@ async fn calculate_state_initial( .await? 
.into_iter() .stream() - .filter_map(|(shortstatekey, event_id): (ShortStateKey, OwnedEventId)| { + .broad_filter_map(|(shortstatekey, event_id): (ShortStateKey, OwnedEventId)| { services .rooms .short @@ -919,6 +951,7 @@ async fn calculate_state_initial( }) } +#[tracing::instrument(name = "incremental", level = "trace", skip_all)] #[allow(clippy::too_many_arguments)] async fn calculate_state_incremental( services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, @@ -949,7 +982,7 @@ async fn calculate_state_incremental( .iter() .stream() .ready_filter(|(key, id)| full_state || since_state_ids.get(key) != Some(id)) - .filter_map(|(_, id)| services.rooms.timeline.get_pdu(id).ok()) + .wide_filter_map(|(_, id)| services.rooms.timeline.get_pdu(id).ok()) .ready_for_each(|pdu| delta_state_events.push(pdu)) .await; } From badb83484fe913188f683b73e7d2afd86b0b29ba Mon Sep 17 00:00:00 2001 From: strawberry Date: Tue, 10 Dec 2024 22:54:19 -0500 Subject: [PATCH 0372/1248] fix private read receipt support Signed-off-by: strawberry --- src/api/client/read_marker.rs | 129 ++++++++++++------------- src/api/client/sync/v3.rs | 21 +++- src/service/rooms/read_receipt/data.rs | 6 +- src/service/rooms/read_receipt/mod.rs | 64 ++++++++++-- 4 files changed, 140 insertions(+), 80 deletions(-) diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index f28b2aec..f6123614 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,9 +1,9 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::PduCount; +use conduit::{err, Err, PduCount}; use ruma::{ - api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, + api::client::{read_marker::set_read_marker, receipt::create_receipt}, events::{ receipt::{ReceiptThread, ReceiptType}, RoomAccountDataEventType, @@ -11,7 +11,7 @@ use ruma::{ MilliSecondsSinceUnixEpoch, }; -use crate::{Error, Result, 
Ruma}; +use crate::{Result, Ruma}; /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` /// @@ -23,14 +23,15 @@ use crate::{Error, Result, Ruma}; pub(crate) async fn set_read_marker_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); - if let Some(fully_read) = &body.fully_read { + if let Some(event) = &body.fully_read { let fully_read_event = ruma::events::fully_read::FullyReadEvent { content: ruma::events::fully_read::FullyReadEventContent { - event_id: fully_read.clone(), + event_id: event.clone(), }, }; + services .account_data .update( @@ -49,44 +50,20 @@ pub(crate) async fn set_read_marker_route( .reset_notification_counts(sender_user, &body.room_id); } - if let Some(event) = &body.private_read_receipt { - let count = services - .rooms - .timeline - .get_pdu_count(event) - .await - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?; - - let count = match count { - PduCount::Backfilled(_) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Read receipt is in backfilled timeline", - )) - }, - PduCount::Normal(c) => c, - }; - services - .rooms - .read_receipt - .private_read_set(&body.room_id, sender_user, count); - } - if let Some(event) = &body.read_receipt { - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_user.clone(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - }, - ); - - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event.to_owned(), receipts); + let receipt_content = BTreeMap::from_iter([( + event.to_owned(), + BTreeMap::from_iter([( + ReceiptType::Read, + BTreeMap::from_iter([( + sender_user.to_owned(), + ruma::events::receipt::Receipt { + ts: 
Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + )]), + )]), + )]); services .rooms @@ -102,6 +79,24 @@ pub(crate) async fn set_read_marker_route( .await; } + if let Some(event) = &body.private_read_receipt { + let count = services + .rooms + .timeline + .get_pdu_count(event) + .await + .map_err(|_| err!(Request(NotFound("Event not found."))))?; + + let PduCount::Normal(count) = count else { + return Err!(Request(InvalidParam("Event is a backfilled PDU and cannot be marked as read."))); + }; + + services + .rooms + .read_receipt + .private_read_set(&body.room_id, sender_user, count); + } + Ok(set_read_marker::v3::Response {}) } @@ -111,7 +106,7 @@ pub(crate) async fn set_read_marker_route( pub(crate) async fn create_receipt_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if matches!( &body.receipt_type, @@ -141,19 +136,19 @@ pub(crate) async fn create_receipt_route( .await?; }, create_receipt::v3::ReceiptType::Read => { - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_user.clone(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - }, - ); - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(body.event_id.clone(), receipts); + let receipt_content = BTreeMap::from_iter([( + body.event_id.clone(), + BTreeMap::from_iter([( + ReceiptType::Read, + BTreeMap::from_iter([( + sender_user.to_owned(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + )]), + )]), + )]); services .rooms @@ -174,23 +169,23 @@ pub(crate) async fn create_receipt_route( .timeline .get_pdu_count(&body.event_id) .await - .map_err(|_| 
Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?; + .map_err(|_| err!(Request(NotFound("Event not found."))))?; - let count = match count { - PduCount::Backfilled(_) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Read receipt is in backfilled timeline", - )) - }, - PduCount::Normal(c) => c, + let PduCount::Normal(count) = count else { + return Err!(Request(InvalidParam("Event is a backfilled PDU and cannot be marked as read."))); }; + services .rooms .read_receipt .private_read_set(&body.room_id, sender_user, count); }, - _ => return Err(Error::bad_database("Unsupported receipt type")), + _ => { + return Err!(Request(InvalidParam(warn!( + "Received unknown read receipt type: {}", + &body.receipt_type + )))) + }, } Ok(create_receipt::v3::Response {}) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 28ca1ea2..44572970 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -9,8 +9,8 @@ use conduit::{ at, err, error, extract_variant, is_equal_to, is_false, pdu::EventHash, result::{FlatOk, LogDebugErr}, - utils, utils::{ + self, future::OptionExt, math::ruma_from_u64, stream::{BroadbandExt, Tools, WidebandExt}, @@ -740,9 +740,28 @@ async fn load_joined_room( let (notification_count, highlight_count) = unread_notifications; device_list_updates.extend(device_updates); + + let last_privateread_update = services + .rooms + .read_receipt + .last_privateread_update(sender_user, room_id) + .await > since; + + let private_read_event = if last_privateread_update { + services + .rooms + .read_receipt + .private_read_get(room_id, sender_user) + .await + .ok() + } else { + None + }; + let edus: Vec> = receipt_events .into_values() .chain(typing_events.into_iter()) + .chain(private_read_event.into_iter()) .collect(); // Save the state after this sync so we can send the correct state diff next diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index 
34639e27..9a1dba45 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -79,15 +79,15 @@ impl Data { .ignore_err() } - pub(super) fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) { + pub(super) fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, pdu_count: u64) { let key = (room_id, user_id); let next_count = self.services.globals.next_count().unwrap(); - self.roomuserid_privateread.put(key, count); + self.roomuserid_privateread.put(key, pdu_count); self.roomuserid_lastprivatereadupdate.put(key, next_count); } - pub(super) async fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result { + pub(super) async fn private_read_get_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { let key = (room_id, user_id); self.roomuserid_privateread.qry(&key).await.deserialized() } diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index e089d369..a3cd7098 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,19 +2,19 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduit::{debug, Result}; -use futures::Stream; +use conduit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; +use futures::{try_join, Stream, TryFutureExt}; use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, }, serde::Raw, - OwnedUserId, RoomId, UserId, + OwnedEventId, OwnedUserId, RoomId, UserId, }; use self::data::{Data, ReceiptItem}; -use crate::{sending, Dep}; +use crate::{rooms, sending, Dep}; pub struct Service { services: Services, @@ -23,6 +23,8 @@ pub struct Service { struct Services { sending: Dep, + short: Dep, + timeline: Dep, } impl crate::Service for Service { @@ -30,6 +32,8 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { sending: args.depend::("sending"), + short: args.depend::("rooms::short"), + 
timeline: args.depend::("rooms::timeline"), }, db: Data::new(&args), })) @@ -49,6 +53,48 @@ impl Service { .expect("room flush failed"); } + /// Gets the latest private read receipt from the user in the room + pub async fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let pdu_count = self + .private_read_get_count(room_id, user_id) + .map_err(|e| err!(Database(warn!("No private read receipt was set in {room_id}: {e}")))); + let shortroomid = self + .services + .short + .get_shortroomid(room_id) + .map_err(|e| err!(Database(warn!("Short room ID does not exist in database for {room_id}: {e}")))); + let (pdu_count, shortroomid) = try_join!(pdu_count, shortroomid)?; + + let shorteventid = PduCount::Normal(pdu_count); + let pdu_id: RawPduId = PduId { + shortroomid, + shorteventid, + } + .into(); + + let pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await?; + + let event_id: OwnedEventId = pdu.event_id.into(); + let receipt_content = BTreeMap::from_iter([( + event_id, + BTreeMap::from_iter([( + ruma::events::receipt::ReceiptType::ReadPrivate, + BTreeMap::from_iter([( + user_id, + ruma::events::receipt::Receipt { + ts: None, // TODO: start storing the timestamp so we can return one + thread: ruma::events::receipt::ReceiptThread::Unthreaded, + }, + )]), + )]), + )]); + //let receipt_json = Json + + let event = serde_json::value::to_raw_value(&receipt_content).expect("receipt_content created manually"); + + Ok(Raw::from_json(event)) + } + /// Returns an iterator over the most recent read_receipts in a room that /// happened after the event with id `since`. #[inline] @@ -59,21 +105,21 @@ impl Service { self.db.readreceipts_since(room_id, since) } - /// Sets a private read marker at `count`. + /// Sets a private read marker at PDU `count`. 
#[inline] #[tracing::instrument(skip(self), level = "debug")] pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) { self.db.private_read_set(room_id, user_id, count); } - /// Returns the private read marker. + /// Returns the private read marker PDU count. #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub async fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result { - self.db.private_read_get(room_id, user_id).await + pub async fn private_read_get_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + self.db.private_read_get_count(room_id, user_id).await } - /// Returns the count of the last typing update in this room. + /// Returns the PDU count of the last typing update in this room. #[inline] pub async fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> u64 { self.db.last_privateread_update(user_id, room_id).await From 1e3bb02ced7ba4994a37f4e917c866774100ee2f Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 14:26:15 -0500 Subject: [PATCH 0373/1248] bump cargo.lock, bump some deps Signed-off-by: strawberry --- Cargo.lock | 464 ++++++++++++++++++++++++----------------------------- Cargo.toml | 34 ++-- 2 files changed, 229 insertions(+), 269 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e766fe46..86b90c3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -49,9 +49,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "arc-swap" @@ -139,7 +139,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -150,7 
+150,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -303,7 +303,7 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "rustls 0.23.18", + "rustls", "rustls-pemfile", "rustls-pki-types", "tokio", @@ -323,7 +323,7 @@ dependencies = [ "http", "http-body-util", "pin-project", - "rustls 0.23.18", + "rustls", "tokio", "tokio-rustls", "tokio-util", @@ -383,7 +383,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.89", + "syn 2.0.90", "which", ] @@ -464,9 +464,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" [[package]] name = "bytesize" @@ -487,9 +487,9 @@ dependencies = [ [[package]] name = "cargo_toml" -version = "0.20.5" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88da5a13c620b4ca0078845707ea9c3faf11edbc3ffd8497d11d686211cd1ac0" +checksum = "5fbd1fe9db3ebf71b89060adaf7b0504c2d6a425cf061313099547e382c2e472" dependencies = [ "serde", "toml", @@ -497,9 +497,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.1" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" +checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" dependencies = [ "jobserver", "libc", @@ -538,9 +538,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" 
+checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "num-traits", ] @@ -558,9 +558,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -568,9 +568,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstyle", "clap_lex", @@ -585,20 +585,20 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "clap_lex" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" dependencies = [ "cc", ] @@ -739,7 +739,7 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - "thiserror 2.0.3", + "thiserror 2.0.6", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -777,7 +777,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -801,7 +801,7 @@ dependencies = [ "hyper-util", "log", "ruma", - "rustls 0.23.18", + "rustls", "sd-notify", 
"sentry", "sentry-tower", @@ -1076,7 +1076,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1103,7 +1103,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1172,7 +1172,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1224,7 +1224,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1235,12 +1235,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1256,9 +1256,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ "event-listener", "pin-project-lite", @@ -1266,9 +1266,9 @@ dependencies = [ [[package]] name = "fdeflate" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c6f4c64c1d33a3111c4466f7365ebdcc37c5bd1ea0d62aae2e3d722aacbedb" +checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c" dependencies = [ "simd-adler32", ] @@ 
-1411,7 +1411,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1501,7 +1501,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -1522,9 +1522,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" [[package]] name = "hdrhistogram" @@ -1583,9 +1583,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" dependencies = [ "async-trait", "cfg-if", @@ -1594,7 +1594,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", "rand", @@ -1607,9 +1607,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" dependencies = [ "cfg-if", "futures-util", @@ -1677,14 +1677,14 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -1772,7 +1772,7 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls 0.23.18", + "rustls", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -1929,17 +1929,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", -] - -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", + "syn 2.0.90", ] [[package]] @@ -2002,12 +1992,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.15.1", + "hashbrown 0.15.2", "serde", ] @@ -2075,9 +2065,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" @@ -2090,10 +2080,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -2130,9 +2121,9 @@ dependencies = [ [[package]] name = 
"konst" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65f00fb3910881e52bf0850ae2a82aea411488a557e1c02820ceaa60963dce3" +checksum = "298ddf99f06a97c1ecd0e910932662b7842855046234b0d0376d35d93add087f" dependencies = [ "const_panic", "konst_kernel", @@ -2141,9 +2132,9 @@ dependencies = [ [[package]] name = "konst_kernel" -version = "0.3.12" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "599c1232f55c72c7fc378335a3efe1c878c92720838c8e6a4fd87784ef7764de" +checksum = "e4b1eb7788f3824c629b1116a7a9060d6e898c358ebff59070093d51103dcc3c" dependencies = [ "typewit", ] @@ -2168,7 +2159,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2185,15 +2176,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.164" +version = "0.2.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -2371,11 +2362,10 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi", "libc", "log", "wasi", @@ -2544,7 +2534,7 @@ checksum = 
"1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.6.0", + "indexmap 2.7.0", "js-sys", "once_cell", "pin-project-lite", @@ -2619,9 +2609,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.8.2" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae99c7fa6dd38c7cafe1ec085e804f8f555a2f8659b0dbe03f1f9963a9b51092" +checksum = "e5ca711d8b83edbb00b44d504503cd247c9c0bd8b0fa2694f2a1a3d8165379ce" dependencies = [ "log", "serde", @@ -2700,7 +2690,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2783,7 +2773,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2816,9 +2806,9 @@ checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "png" -version = "0.17.14" +version = "0.17.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52f9d46a34a05a6a57566bc2bfae066ef07585a6e3fa30fbbdff5936380623f0" +checksum = "b67582bd5b65bdff614270e2ea89a1cf15bef71245cc1e5f7ea126977144211d" dependencies = [ "bitflags 1.3.2", "crc32fast", @@ -2855,7 +2845,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2884,16 +2874,16 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "version_check", "yansi", ] [[package]] name = "prost" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" +checksum = 
"2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" dependencies = [ "bytes", "prost-derive", @@ -2901,22 +2891,22 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" dependencies = [ "anyhow", "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "prost-types" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" +checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" dependencies = [ "prost", ] @@ -2961,10 +2951,10 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustc-hash 2.1.0", + "rustls", "socket2", - "thiserror 2.0.3", + "thiserror 2.0.6", "tokio", "tracing", ] @@ -2979,11 +2969,11 @@ dependencies = [ "getrandom", "rand", "ring", - "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustc-hash 2.1.0", + "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.6", "tinyvec", "tracing", "web-time 1.1.0", @@ -2991,9 +2981,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" +checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" dependencies = [ "cfg_aliases", "libc", @@ -3123,7 +3113,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.18", + "rustls", "rustls-native-certs", "rustls-pemfile", "rustls-pki-types", @@ -3172,7 +3162,7 @@ dependencies = [ [[package]] name = "ruma" version = 
"0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "assign", "js_int", @@ -3194,7 +3184,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "js_int", "ruma-common", @@ -3206,7 +3196,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "as_variant", "assign", @@ -3221,7 +3211,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.6", "url", "web-time 1.1.0", ] @@ -3229,14 +3219,14 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", "http", - "indexmap 2.6.0", + "indexmap 2.7.0", "js_int", "konst", "percent-encoding", @@ -3247,7 +3237,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.6", "time", "tracing", "url", 
@@ -3259,10 +3249,10 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "as_variant", - "indexmap 2.6.0", + "indexmap 2.7.0", "js_int", "js_option", "percent-encoding", @@ -3273,7 +3263,7 @@ dependencies = [ "ruma-macros", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.6", "tracing", "url", "web-time 1.1.0", @@ -3283,7 +3273,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "bytes", "http", @@ -3301,16 +3291,16 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "js_int", - "thiserror 2.0.3", + "thiserror 2.0.6", ] [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "js_int", "ruma-common", @@ -3320,7 +3310,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source 
= "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3328,14 +3318,14 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.89", + "syn 2.0.90", "toml", ] [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "js_int", "ruma-common", @@ -3347,20 +3337,20 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "headers", "http", "http-auth", "ruma-common", - "thiserror 2.0.3", + "thiserror 2.0.6", "tracing", ] [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3370,13 +3360,13 @@ dependencies = [ "serde_json", "sha2", "subslice", - "thiserror 2.0.3", + "thiserror 2.0.6", ] [[package]] name = "ruma-state-res" version = "0.11.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=1a550585bf025cce48ef8b734339245092bc986e#1a550585bf025cce48ef8b734339245092bc986e" +source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "futures-util", "js_int", @@ -3384,7 +3374,7 @@ dependencies = [ "ruma-events", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.6", "tracing", ] @@ -3436,9 +3426,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc_version" @@ -3451,36 +3441,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.22.4" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" -dependencies = [ - "log", - "ring", - "rustls-pki-types", - "rustls-webpki", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls" -version = "0.23.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "aws-lc-rs", "log", @@ -3550,7 +3526,7 @@ dependencies = [ 
"futures-util", "pin-project", "thingbuf", - "thiserror 2.0.3", + "thiserror 2.0.6", "unicode-segmentation", "unicode-width 0.2.0", ] @@ -3622,13 +3598,13 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "sentry" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5484316556650182f03b43d4c746ce0e3e48074a21e2f51244b648b6542e1066" +checksum = "016958f51b96861dead7c1e02290f138411d05e94fad175c8636a835dee6e51e" dependencies = [ "httpdate", "reqwest", - "rustls 0.22.4", + "rustls", "sentry-backtrace", "sentry-contexts", "sentry-core", @@ -3644,9 +3620,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40aa225bb41e2ec9d7c90886834367f560efc1af028f1c5478a6cce6a59c463a" +checksum = "e57712c24e99252ef175b4b06c485294f10ad6bc5b5e1567ff3803ee7a0b7d3f" dependencies = [ "backtrace", "once_cell", @@ -3656,9 +3632,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a8dd746da3d16cb8c39751619cefd4fcdbd6df9610f3310fd646b55f6e39910" +checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" dependencies = [ "hostname 0.4.0", "libc", @@ -3670,9 +3646,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161283cfe8e99c8f6f236a402b9ccf726b201f365988b5bb637ebca0abbd4a30" +checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" dependencies = [ "once_cell", "rand", @@ -3683,9 +3659,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8fc6b25e945fcaa5e97c43faee0267eebda9f18d4b09a251775d8fef1086238a" +checksum = "8982a69133d3f5e4efdbfa0776937fca43c3a2e275a8fe184f50b1b0aa92e07c" dependencies = [ "findshlibs", "once_cell", @@ -3694,9 +3670,9 @@ dependencies = [ [[package]] name = "sentry-log" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75bbcc61886955045a1dd4bdb173412a80bb2571be3c5bfcf7eb8f55a442bbf5" +checksum = "efcbfbb74628eaef033c1154d4bb082437c7592ce2282c7c5ccb455c4c97a06d" dependencies = [ "log", "sentry-core", @@ -3704,9 +3680,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc74f229c7186dd971a9491ffcbe7883544aa064d1589bd30b83fb856cd22d63" +checksum = "de296dae6f01e931b65071ee5fe28d66a27909857f744018f107ed15fd1f6b25" dependencies = [ "sentry-backtrace", "sentry-core", @@ -3714,9 +3690,9 @@ dependencies = [ [[package]] name = "sentry-tower" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c90802b38c899a2c9e557dff25ad186362eddf755d5f5244001b172dd03bead" +checksum = "fcdaf9b1939589476bd57751d12a9653bbfe356610fc476d03d7683189183ab7" dependencies = [ "http", "pin-project", @@ -3728,9 +3704,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3c5faf2103cd01eeda779ea439b68c4ee15adcdb16600836e97feafab362ec" +checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" dependencies = [ "sentry-backtrace", "sentry-core", @@ -3740,9 +3716,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d68cdf6bc41b8ff3ae2a9c4671e97426dcdd154cc1d4b6b72813f285d6b163f" +checksum = 
"a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" dependencies = [ "debugid", "hex", @@ -3757,22 +3733,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3782,7 +3758,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "ryu", "serde", @@ -3847,7 +3823,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "ryu", "serde", @@ -3962,9 +3938,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4052,9 +4028,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -4084,7 +4060,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4135,11 +4111,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.6", ] [[package]] @@ -4150,18 +4126,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4226,9 +4202,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -4247,9 +4223,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -4282,9 +4258,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -4306,14 +4282,14 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "tokio-metrics" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eace09241d62c98b7eeb1107d4c5c64ca3bd7da92e8c218c153ab3a78f9be112" +checksum = "cb2bb07a8451c4c6fa8b3497ad198510d8b8dffa5df5cfb97a64102a58b113c8" dependencies = [ "futures-util", "pin-project-lite", @@ -4323,12 +4299,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.23.18", - "rustls-pki-types", + "rustls", "tokio", ] @@ -4346,9 +4321,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -4357,9 +4332,9 @@ dependencies = [ 
[[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -4395,7 +4370,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -4520,7 +4495,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61 dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4602,9 +4577,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typewit" -version = "1.9.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fb9ae6a3cafaf0a5d14c2302ca525f9ae8e07a0f0e6949de88d882c37a6e24" +checksum = "cb77c29baba9e4d3a6182d51fa75e3215c7fd1dab8f4ea9d107c716878e55fc0" dependencies = [ "typewit_proc_macros", ] @@ -4639,27 +4614,12 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" - [[package]] name = "unicode-ident" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] - [[package]] name = "unicode-segmentation" version = "1.12.0" @@ -4692,14 +4652,14 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.10.1" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" +checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" dependencies = [ "base64 0.22.1", "log", "once_cell", - "rustls 0.23.18", + "rustls", "rustls-pki-types", "url", "webpki-roots", @@ -4712,7 +4672,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna", "percent-encoding", "serde", ] @@ -4786,9 +4746,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -4797,36 +4757,36 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.49" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4834,28 +4794,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", @@ -5219,7 +5179,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -5241,7 +5201,7 @@ checksum = 
"fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5261,7 +5221,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -5290,7 +5250,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 673cab09..8da6728c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,7 +45,7 @@ version = "0.5.7" version = "0.2.9" [workspace.dependencies.cargo_toml] -version = "0.20" +version = "0.21" default-features = false features = ["features"] @@ -79,13 +79,13 @@ version = "0.8.5" # Used for the http request / response body type for Ruma endpoints used with reqwest [workspace.dependencies.bytes] -version = "1.8.0" +version = "1.9.0" [workspace.dependencies.http-body-util] version = "0.1.2" [workspace.dependencies.http] -version = "1.1.0" +version = "1.2.0" [workspace.dependencies.regex] version = "1.11.1" @@ -138,7 +138,7 @@ features = [ ] [workspace.dependencies.rustls] -version = "0.23.16" +version = "0.23.19" default-features = false features = ["aws_lc_rs"] @@ -153,7 +153,7 @@ features = [ ] [workspace.dependencies.serde] -version = "1.0.215" +version = "1.0.216" default-features = false features = ["rc"] @@ -199,7 +199,7 @@ default-features = false version = "0.1.41" default-features = false [workspace.dependencies.tracing-subscriber] -version = "0.3.18" +version = "=0.3.18" default-features = false features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] [workspace.dependencies.tracing-core] @@ -213,7 +213,7 @@ default-features = false # used for conduit's CLI and admin room command parsing [workspace.dependencies.clap] -version = "4.5.21" +version = "4.5.23" default-features = false 
features = [ "derive", @@ -231,7 +231,7 @@ default-features = false features = ["std", "async-await"] [workspace.dependencies.tokio] -version = "1.41.1" +version = "1.42.0" default-features = false features = [ "fs", @@ -245,10 +245,10 @@ features = [ ] [workspace.dependencies.tokio-metrics] -version = "0.3.1" +version = "0.4.0" [workspace.dependencies.libloading] -version = "0.8.5" +version = "0.8.6" # Validating urls in config, was already a transitive dependency [workspace.dependencies.url] @@ -294,12 +294,12 @@ default-features = false features = ["env", "toml"] [workspace.dependencies.hickory-resolver] -version = "0.24.1" +version = "0.24.2" default-features = false # Used for conduit::Error type [workspace.dependencies.thiserror] -version = "2.0.3" +version = "2.0.6" default-features = false # Used when hashing the state @@ -334,7 +334,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "1a550585bf025cce48ef8b734339245092bc986e" +rev = "08f58cd3236fdf175913b2bcaf8865359696d94d" features = [ "compat", "rand", @@ -410,7 +410,7 @@ features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting [workspace.dependencies.sentry] -version = "0.34.0" +version = "0.35.0" default-features = false features = [ "backtrace", @@ -426,9 +426,9 @@ features = [ ] [workspace.dependencies.sentry-tracing] -version = "0.34.0" +version = "0.35.0" [workspace.dependencies.sentry-tower] -version = "0.34.0" +version = "0.35.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] @@ -480,7 +480,7 @@ default-features = false version = "0.1" [workspace.dependencies.syn] -version = "2.0.87" +version = "2.0.90" default-features = false features = ["full", "extra-traits"] From ef8392cbbeec94906a4f85278765579935225dd3 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 14:27:08 -0500 Subject: [PATCH 0374/1248] allow empty POST request bodies (synapse behaviour) to improve 
compatibility Signed-off-by: strawberry --- src/api/router/args.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 0b693956..65dd8e9e 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -2,9 +2,10 @@ use std::{mem, ops::Deref}; use axum::{async_trait, body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; -use conduit::{debug, err, utils::string::EMPTY, Error, Result}; +use conduit::{debug, debug_warn, err, trace, utils::string::EMPTY, Error, Result}; use ruma::{ - api::IncomingRequest, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, OwnedUserId, ServerName, UserId, + api::IncomingRequest, CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, + OwnedUserId, ServerName, UserId, }; use service::Services; @@ -85,6 +86,19 @@ where async fn from_request(request: hyper::Request, services: &State) -> Result { let mut request = request::from(services, request).await?; let mut json_body = serde_json::from_slice::(&request.body).ok(); + + // while very unusual and really shouldn't be recommended, Synapse accepts POST + // requests with a completely empty body. very old clients, libraries, and some + // appservices still call APIs like /join like this. 
so let's just default to + // empty object `{}` to copy synapse's behaviour + if json_body.is_none() + && request.parts.method == http::Method::POST + && !request.parts.uri.path().contains("/media/") + { + trace!("json_body from_request: {:?}", json_body.clone()); + debug_warn!("received a POST request with an empty body, defaulting/assuming to {{}} like Synapse does"); + json_body = Some(CanonicalJsonValue::Object(CanonicalJsonObject::new())); + } let auth = auth::auth(services, &mut request, json_body.as_ref(), &T::METADATA).await?; Ok(Self { body: make_body::(services, &mut request, json_body.as_mut(), &auth)?, From 80f9536d21fe98b18f429b6c26550a8115956972 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 14:28:54 -0500 Subject: [PATCH 0375/1248] rename config.rs to account_data.rs, refactor, add some missing checks Signed-off-by: strawberry --- src/api/client/{config.rs => account_data.rs} | 82 ++++++++++++------- src/api/client/mod.rs | 4 +- 2 files changed, 54 insertions(+), 32 deletions(-) rename src/api/client/{config.rs => account_data.rs} (54%) diff --git a/src/api/client/config.rs b/src/api/client/account_data.rs similarity index 54% rename from src/api/client/config.rs rename to src/api/client/account_data.rs index 3cf71135..86ef41ef 100644 --- a/src/api/client/config.rs +++ b/src/api/client/account_data.rs @@ -1,18 +1,20 @@ use axum::extract::State; -use conduit::err; +use conduit::{err, Err}; use ruma::{ - api::client::{ - config::{get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data}, - error::ErrorKind, + api::client::config::{ + get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, + }, + events::{ + AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent, GlobalAccountDataEventType, + RoomAccountDataEventType, }, - events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent}, serde::Raw, - OwnedUserId, RoomId, + RoomId, UserId, 
}; use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -use crate::{service::Services, Error, Result, Ruma}; +use crate::{service::Services, Result, Ruma}; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// @@ -20,14 +22,13 @@ use crate::{service::Services, Error, Result, Ruma}; pub(crate) async fn set_global_account_data_route( State(services): State, body: Ruma, ) -> Result { - set_account_data( - &services, - None, - body.sender_user.as_ref(), - &body.event_type.to_string(), - body.data.json(), - ) - .await?; + let sender_user = body.sender_user(); + + if sender_user != body.user_id && body.appservice_info.is_none() { + return Err!(Request(Forbidden("You cannot set account data for other users."))); + } + + set_account_data(&services, None, &body.user_id, &body.event_type.to_string(), body.data.json()).await?; Ok(set_global_account_data::v3::Response {}) } @@ -38,10 +39,16 @@ pub(crate) async fn set_global_account_data_route( pub(crate) async fn set_room_account_data_route( State(services): State, body: Ruma, ) -> Result { + let sender_user = body.sender_user(); + + if sender_user != body.user_id && body.appservice_info.is_none() { + return Err!(Request(Forbidden("You cannot set account data for other users."))); + } + set_account_data( &services, Some(&body.room_id), - body.sender_user.as_ref(), + &body.user_id, &body.event_type.to_string(), body.data.json(), ) @@ -56,11 +63,15 @@ pub(crate) async fn set_room_account_data_route( pub(crate) async fn get_global_account_data_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); + + if sender_user != body.user_id && body.appservice_info.is_none() { + return Err!(Request(Forbidden("You cannot get account data of other users."))); + } let account_data: ExtractGlobalEventContent = services .account_data - .get_global(sender_user, 
body.event_type.clone()) + .get_global(&body.user_id, body.event_type.clone()) .await .map_err(|_| err!(Request(NotFound("Data not found."))))?; @@ -75,11 +86,15 @@ pub(crate) async fn get_global_account_data_route( pub(crate) async fn get_room_account_data_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); + + if sender_user != body.user_id && body.appservice_info.is_none() { + return Err!(Request(Forbidden("You cannot get account data of other users."))); + } let account_data: ExtractRoomEventContent = services .account_data - .get_room(&body.room_id, sender_user, body.event_type.clone()) + .get_room(&body.room_id, &body.user_id, body.event_type.clone()) .await .map_err(|_| err!(Request(NotFound("Data not found."))))?; @@ -89,28 +104,35 @@ pub(crate) async fn get_room_account_data_route( } async fn set_account_data( - services: &Services, room_id: Option<&RoomId>, sender_user: Option<&OwnedUserId>, event_type: &str, - data: &RawJsonValue, -) -> Result<()> { - let sender_user = sender_user.as_ref().expect("user is authenticated"); + services: &Services, room_id: Option<&RoomId>, sender_user: &UserId, event_type_s: &str, data: &RawJsonValue, +) -> Result { + if event_type_s == RoomAccountDataEventType::FullyRead.to_cow_str() { + return Err!(Request(BadJson( + "This endpoint cannot be used for marking a room as fully read (setting m.fully_read)" + ))); + } + + if event_type_s == GlobalAccountDataEventType::PushRules.to_cow_str() { + return Err!(Request(BadJson( + "This endpoint cannot be used for setting/configuring push rules." 
+ ))); + } let data: serde_json::Value = - serde_json::from_str(data.get()).map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; + serde_json::from_str(data.get()).map_err(|e| err!(Request(BadJson(warn!("Invalid JSON provided: {e}")))))?; services .account_data .update( room_id, sender_user, - event_type.into(), + event_type_s.into(), &json!({ - "type": event_type, + "type": event_type_s, "content": data, }), ) - .await?; - - Ok(()) + .await } #[derive(Deserialize)] diff --git a/src/api/client/mod.rs b/src/api/client/mod.rs index 3c9736ea..be54e65f 100644 --- a/src/api/client/mod.rs +++ b/src/api/client/mod.rs @@ -1,9 +1,9 @@ pub(super) mod account; +pub(super) mod account_data; pub(super) mod alias; pub(super) mod appservice; pub(super) mod backup; pub(super) mod capabilities; -pub(super) mod config; pub(super) mod context; pub(super) mod device; pub(super) mod directory; @@ -41,11 +41,11 @@ pub(super) mod well_known; pub use account::full_user_deactivate; pub(super) use account::*; +pub(super) use account_data::*; pub(super) use alias::*; pub(super) use appservice::*; pub(super) use backup::*; pub(super) use capabilities::*; -pub(super) use config::*; pub(super) use context::*; pub(super) use device::*; pub(super) use directory::*; From ac0545f7733f2638703b5a4f4fc57ce392036b26 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 14:29:37 -0500 Subject: [PATCH 0376/1248] always set limited true on left timeline events in sync (for now) Signed-off-by: strawberry --- src/api/client/sync/v3.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 44572970..815ec87c 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -546,9 +546,9 @@ async fn handle_left_room( events: Vec::new(), }, timeline: Timeline { - limited: false, + limited: true, // TODO: support left timeline events so we dont need to set this to true prev_batch: 
Some(next_batch_string.to_owned()), - events: Vec::new(), + events: Vec::new(), // and so we dont need to set this to empty vec }, state: RoomState { events: left_state_events, From 39abd9a8278f2383b474211e7d4b737dd8f80927 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 16:05:06 -0500 Subject: [PATCH 0377/1248] (temp) drop sccache version down in CI to 0.8.2 Signed-off-by: strawberry --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1441dd44..5ab4581a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -155,6 +155,8 @@ jobs: - name: Run sccache-cache if: (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') uses: mozilla-actions/sccache-action@main + with: + version: "v0.8.2" # use rust-cache - uses: Swatinem/rust-cache@v2 @@ -292,6 +294,8 @@ jobs: - name: Run sccache-cache if: (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') uses: mozilla-actions/sccache-action@main + with: + version: "v0.8.2" # use rust-cache - uses: Swatinem/rust-cache@v2 From 9ca964a97e765af2060cbbc1fe9f244412e2d763 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 17:36:51 -0500 Subject: [PATCH 0378/1248] update stored default push rules when removing legacy mentions Signed-off-by: strawberry --- src/api/client/push.rs | 96 +++++++++++++++++++++++++++++++++--------- 1 file changed, 75 insertions(+), 21 deletions(-) diff --git a/src/api/client/push.rs b/src/api/client/push.rs index f27ead1f..7efa59e8 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -25,7 +25,7 @@ use crate::{Error, Result, Ruma}; pub(crate) async fn get_pushrules_all_route( 
State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let Some(content_value) = services .account_data @@ -46,20 +46,47 @@ pub(crate) async fn get_pushrules_all_route( let mut global_ruleset = account_data_content.global; // remove old deprecated mentions push rules as per MSC4210 + // and update the stored server default push rules #[allow(deprecated)] { use ruma::push::RuleKind::*; + if global_ruleset + .get(Override, PredefinedOverrideRuleId::ContainsDisplayName.as_str()) + .is_some() + || global_ruleset + .get(Override, PredefinedOverrideRuleId::RoomNotif.as_str()) + .is_some() + || global_ruleset + .get(Content, PredefinedContentRuleId::ContainsUserName.as_str()) + .is_some() + { + global_ruleset + .remove(Override, PredefinedOverrideRuleId::ContainsDisplayName) + .ok(); + global_ruleset + .remove(Override, PredefinedOverrideRuleId::RoomNotif) + .ok(); + global_ruleset + .remove(Content, PredefinedContentRuleId::ContainsUserName) + .ok(); - global_ruleset - .remove(Override, PredefinedOverrideRuleId::ContainsDisplayName) - .ok(); - global_ruleset - .remove(Override, PredefinedOverrideRuleId::RoomNotif) - .ok(); + global_ruleset.update_with_server_default(Ruleset::server_default(sender_user)); - global_ruleset - .remove(Content, PredefinedContentRuleId::ContainsUserName) - .ok(); + services + .account_data + .update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(PushRulesEvent { + content: PushRulesEventContent { + global: global_ruleset.clone(), + }, + }) + .expect("to json always works"), + ) + .await?; + } }; Ok(get_pushrules_all::v3::Response { @@ -113,20 +140,47 @@ pub(crate) async fn get_pushrules_global_route( let mut global_ruleset = account_data_content.global; // remove old deprecated mentions push rules as per MSC4210 + // and update the stored server default push rules 
#[allow(deprecated)] { use ruma::push::RuleKind::*; + if global_ruleset + .get(Override, PredefinedOverrideRuleId::ContainsDisplayName.as_str()) + .is_some() + || global_ruleset + .get(Override, PredefinedOverrideRuleId::RoomNotif.as_str()) + .is_some() + || global_ruleset + .get(Content, PredefinedContentRuleId::ContainsUserName.as_str()) + .is_some() + { + global_ruleset + .remove(Override, PredefinedOverrideRuleId::ContainsDisplayName) + .ok(); + global_ruleset + .remove(Override, PredefinedOverrideRuleId::RoomNotif) + .ok(); + global_ruleset + .remove(Content, PredefinedContentRuleId::ContainsUserName) + .ok(); - global_ruleset - .remove(Override, PredefinedOverrideRuleId::ContainsDisplayName) - .ok(); - global_ruleset - .remove(Override, PredefinedOverrideRuleId::RoomNotif) - .ok(); + global_ruleset.update_with_server_default(Ruleset::server_default(sender_user)); - global_ruleset - .remove(Content, PredefinedContentRuleId::ContainsUserName) - .ok(); + services + .account_data + .update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(PushRulesEvent { + content: PushRulesEventContent { + global: global_ruleset.clone(), + }, + }) + .expect("to json always works"), + ) + .await?; + } }; Ok(get_pushrules_global_scope::v3::Response { @@ -259,7 +313,7 @@ pub(crate) async fn get_pushrule_actions_route( .global .get(body.kind.clone(), &body.rule_id) .map(|rule| rule.actions().to_owned()) - .ok_or(err!(Request(NotFound("Push rule not found."))))?; + .ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?; Ok(get_pushrule_actions::v3::Response { actions, @@ -332,7 +386,7 @@ pub(crate) async fn get_pushrule_enabled_route( .global .get(body.kind.clone(), &body.rule_id) .map(ruma::push::AnyPushRuleRef::enabled) - .ok_or(err!(Request(NotFound("Push rule not found."))))?; + .ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?; Ok(get_pushrule_enabled::v3::Response { enabled, From 
af08d3f7bd7936f13e4e426cc004dc863edee8e0 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 18:12:27 -0500 Subject: [PATCH 0379/1248] document 'DNS No connections available' troubleshooting Signed-off-by: strawberry --- docs/troubleshooting.md | 51 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index c8655e06..283cdeee 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -41,6 +41,54 @@ workarounds for this are: - Don't use Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers (host's `/etc/resolv.conf`) +#### DNS No connections available error message + +If you receive spurious amounts of error logs saying "DNS No connections +available", this is due to your DNS server (servers from `/etc/resolv.conf`) +being overloaded and unable to handle typical Matrix federation volume. Some +users have reported that the upstream servers are rate-limiting them as well +when they get this error (e.g. popular upstreams like Google DNS). + +Matrix federation is extremely heavy and sends wild amounts of DNS requests. +Unfortunately this is by design and has only gotten worse with more +server/destination resolution steps. Synapse also expects a very perfect DNS +setup. + +There are some ways you can reduce the amount of DNS queries, but ultimately +the best solution/fix is selfhosting a high quality caching DNS server like +[Unbound][unbound-arch] without any upstream resolvers, and without DNSSEC +validation enabled. + +DNSSEC validation is highly recommended to be **disabled** due to DNSSEC being +very computationally expensive, and is extremely susceptible to denial of +service, especially on Matrix. Many servers also strangely have broken DNSSEC +setups and will result in non-functional federation. 
+
+conduwuit cannot provide a "works-for-everyone" Unbound DNS setup guide, but
+the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch]
+may be of interest. Disabling DNSSEC on Unbound is done by commenting out the
+trust-anchors config options and removing the `validator` module.
+
+**Avoid** using `systemd-resolved` as it does **not** perform very well under
+high load, and we have identified its DNS caching to not be very effective.
+
+dnsmasq can possibly work, but it does **not** support TCP fallback which can be
+problematic when receiving large DNS responses such as from large SRV records.
+If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback`
+in conduwuit config.
+
+Raising `dns_cache_entries` in conduwuit config from the default can also assist
+in DNS caching, but a full-fledged external caching resolver is better and more
+reliable.
+
+If you don't have IPv6 connectivity, changing `ip_lookup_strategy` to match
+your setup can help reduce unnecessary AAAA queries
+(`1 - Ipv4Only (Only query for A records, no AAAA/IPv6)`).
+
+If your DNS server supports it, some users have reported that enabling
+`query_over_tcp_only` to force only TCP querying by default has improved DNS
+reliability at a slight performance cost due to TCP overhead.
+
 ## RocksDB / database issues
 
 #### Direct IO
@@ -149,3 +197,6 @@ If you are a developer, you can also view the raw jemalloc statistics with
 `!admin debug memory-stats`. Please note that this output is extremely large
 which may only be visible in the conduwuit console CLI due to PDU size limits,
 and is not easy for non-developers to understand.
+ +[unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html +[unbound-arch]: https://wiki.archlinux.org/title/Unbound From 0740d7259ac7467610b1935d8d20eb2ed2f8cf4d Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 18:22:51 -0500 Subject: [PATCH 0380/1248] document modifying systemd unit if changing database path Signed-off-by: strawberry --- docs/deploying/generic.md | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 9eafbc46..70d4b561 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -89,11 +89,25 @@ on the network level, consider something like NextDNS or Pi-Hole. ## Setting up a systemd service -The systemd unit for conduwuit can be found -[here](../configuration/examples.md#example-systemd-unit-file). You may need to -change the `ExecStart=` path to where you placed the conduwuit binary. +Two example systemd units for conduwuit can be found +[on the configuration page](../configuration/examples.md#debian-systemd-unit-file). +You may need to change the `ExecStart=` path to where you placed the conduwuit +binary if it is not `/usr/bin/conduwuit`. -On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros and OpenSUSE), put `$EscapeControlCharactersOnReceive off` inside `/etc/rsyslog.conf` to allow color in logs. +On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros +and OpenSUSE), put `$EscapeControlCharactersOnReceive off` inside +`/etc/rsyslog.conf` to allow color in logs. + +If you are using a different `database_path` other than the systemd unit +configured default `/var/lib/conduwuit`, you need to add your path to the +systemd unit's `ReadWritePaths=`. 
This can be done by either directly editing +`conduwuit.service` and reloading systemd, or running `systemctl edit conduwuit.service` +and entering the following: + +``` +[Service] +ReadWritePaths=/path/to/custom/database/path +``` ## Creating the conduwuit configuration file @@ -101,7 +115,8 @@ Now we need to create the conduwuit's config file in `/etc/conduwuit/conduwuit.toml`. The example config can be found at [conduwuit-example.toml](../configuration/examples.md). -**Please take a moment to read the config. You need to change at least the server name.** +**Please take a moment to read the config. You need to change at least the +server name.** RocksDB is the only supported database backend. From 5d51adc29de4dea8e2995a2e88ec2923d6e4f650 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 19:17:23 -0500 Subject: [PATCH 0381/1248] update complement test results Signed-off-by: strawberry --- tests/test_results/complement/test_results.jsonl | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 575a22fe..0db37515 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -141,6 +141,7 @@ {"Action":"fail","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} +{"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} {"Action":"pass","Test":"TestMediaFilenames"} {"Action":"pass","Test":"TestMediaFilenames/Parallel"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII"} @@ -185,22 +186,23 @@ {"Action":"pass","Test":"TestRemoteAliasRequestsUnderstandUnicode"} {"Action":"pass","Test":"TestRemotePngThumbnail"} 
{"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/client/v1/media_endpoint"} +{"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/media/v3_endpoint"} {"Action":"fail","Test":"TestRemotePresence"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"} {"Action":"pass","Test":"TestRemoteTyping"} -{"Action":"fail","Test":"TestRestrictedRoomsLocalJoin"} +{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited"} -{"Action":"fail","Test":"TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room"} -{"Action":"fail","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room"} +{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room"} +{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited"} -{"Action":"fail","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} +{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} 
{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoin"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room"} From 3e64a5c907362774479c5e4d7a7bb8bcc3ed7f38 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 19:46:19 -0500 Subject: [PATCH 0382/1248] delete pointless bad_query_ratelimiter harming e2ee performance Signed-off-by: strawberry --- src/api/client/keys.rs | 45 +++++--------------------------------- src/service/globals/mod.rs | 14 ------------ 2 files changed, 5 insertions(+), 54 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 53ec12f9..18f7d21c 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,10 +1,7 @@ -use std::{ - collections::{hash_map, BTreeMap, HashMap, HashSet}, - time::Instant, -}; +use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduit::{err, utils, utils::math::continue_exponential_backoff_secs, Err, Error, Result}; +use conduit::{err, utils, Error, Result}; use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -345,41 +342,9 @@ where let mut failures = BTreeMap::new(); - let back_off = |id| async { - match services - .globals - .bad_query_ratelimiter - .write() - .expect("locked") - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - }, - hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)); - }, - } - }; - let mut futures: FuturesUnordered<_> = get_over_federation .into_iter() .map(|(server, vec)| async move { - if let Some((time, tries)) = services - .globals - .bad_query_ratelimiter - .read() - .expect("locked") - .get(server) - { - // Exponential backoff - const MIN: u64 = 5 * 60; - const MAX: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN, MAX, time.elapsed(), *tries) { - return (server, 
Err!(BadServerResponse("bad query from {server:?}, still backing off"))); - } - } - let mut device_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); @@ -388,17 +353,18 @@ where let request = federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed, }; + let response = services .sending .send_federation_request(server, request) .await; - (server, Ok(response)) + (server, response) }) .collect(); while let Some((server, response)) = futures.next().await { - if let Ok(Ok(response)) = response { + if let Ok(response) = response { for (user, master_key) in response.master_keys { let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; @@ -426,7 +392,6 @@ where self_signing_keys.extend(response.self_signing_keys); device_keys.extend(response.device_keys); } else { - back_off(server.to_owned()).await; failures.insert(server.to_string(), json!({})); } } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index fefff3b5..081794e2 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -21,7 +21,6 @@ pub struct Service { pub config: Config, jwt_decoding_key: Option, pub bad_event_ratelimiter: Arc>>, - pub bad_query_ratelimiter: Arc>>, pub stateres_mutex: Arc>, pub server_user: OwnedUserId, pub admin_alias: OwnedRoomAliasId, @@ -71,7 +70,6 @@ impl crate::Service for Service { config: config.clone(), jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())), stateres_mutex: Arc::new(Mutex::new(())), admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name)) .expect("#admins:server_name is valid alias name"), @@ -100,13 +98,6 @@ impl crate::Service for Service { .len(); writeln!(out, "bad_event_ratelimiter: {bad_event_ratelimiter}")?; - let bad_query_ratelimiter = self - .bad_query_ratelimiter - .read() - .expect("locked for 
reading") - .len(); - writeln!(out, "bad_query_ratelimiter: {bad_query_ratelimiter}")?; - Ok(()) } @@ -115,11 +106,6 @@ impl crate::Service for Service { .write() .expect("locked for writing") .clear(); - - self.bad_query_ratelimiter - .write() - .expect("locked for writing") - .clear(); } fn name(&self) -> &str { service::make_name(std::module_path!()) } From 66a82447da0c0f3de6abf6ebd3346ae1483615da Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 21:24:53 -0500 Subject: [PATCH 0383/1248] log hostname in DNS error tracing spans Signed-off-by: strawberry --- src/service/resolver/actual.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 8b720365..a4475216 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -258,7 +258,7 @@ impl super::Service { #[tracing::instrument(skip_all, name = "ip")] async fn query_and_cache_override(&self, overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> { match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { - Err(e) => Self::handle_resolve_error(&e), + Err(e) => Self::handle_resolve_error(&e, hostname), Ok(override_ip) => { if hostname != overname { debug_info!("{overname:?} overriden by {hostname:?}"); @@ -286,7 +286,7 @@ impl super::Service { debug!("querying SRV for {hostname:?}"); let hostname = hostname.trim_end_matches('.'); match self.resolver.resolver.srv_lookup(hostname).await { - Err(e) => Self::handle_resolve_error(&e)?, + Err(e) => Self::handle_resolve_error(&e, hostname)?, Ok(result) => { return Ok(result.iter().next().map(|result| { FedDest::Named( @@ -304,7 +304,7 @@ impl super::Service { Ok(None) } - fn handle_resolve_error(e: &ResolveError) -> Result<()> { + fn handle_resolve_error(e: &ResolveError, host: &'_ str) -> Result<()> { use hickory_resolver::error::ResolveErrorKind; match *e.kind() { @@ -312,11 +312,11 @@ impl super::Service { .. 
} => { // Raise to debug_warn if we can find out the result wasn't from cache - debug!("No DNS records found: {e}"); + debug!(%host, "No DNS records found: {e}"); Ok(()) }, ResolveErrorKind::Timeout => { - Err!(warn!("DNS {e}")) + Err!(warn!(%host, "DNS {e}")) }, ResolveErrorKind::NoConnections => { error!( @@ -324,9 +324,9 @@ impl super::Service { remediate this issue to ensure proper federation connectivity." ); - Err!(error!("DNS error: {e}")) + Err!(error!(%host, "DNS error: {e}")) }, - _ => Err!(error!("DNS error: {e}")), + _ => Err!(error!(%host, "DNS error: {e}")), } } From 353dc17af4ad4e7266d67046090ddb93dca4bb01 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 21:25:07 -0500 Subject: [PATCH 0384/1248] construct private read receipt properly Signed-off-by: strawberry --- src/service/rooms/read_receipt/mod.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index a3cd7098..72f2e1ad 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -6,7 +6,7 @@ use conduit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; use futures::{try_join, Stream, TryFutureExt}; use ruma::{ events::{ - receipt::{ReceiptEvent, ReceiptEventContent}, + receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, }, serde::Raw, @@ -75,7 +75,8 @@ impl Service { let pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await?; let event_id: OwnedEventId = pdu.event_id.into(); - let receipt_content = BTreeMap::from_iter([( + let user_id: OwnedUserId = user_id.to_owned(); + let content: BTreeMap = BTreeMap::from_iter([( event_id, BTreeMap::from_iter([( ruma::events::receipt::ReceiptType::ReadPrivate, @@ -88,9 +89,12 @@ impl Service { )]), )]), )]); - //let receipt_json = Json + let receipt_event_content = ReceiptEventContent(content); + let receipt_sync_event = 
SyncEphemeralRoomEvent { + content: receipt_event_content, + }; - let event = serde_json::value::to_raw_value(&receipt_content).expect("receipt_content created manually"); + let event = serde_json::value::to_raw_value(&receipt_sync_event).expect("receipt created manually"); Ok(Raw::from_json(event)) } From c9bb943a343596bf604e9a982d1e1d9262ec6d01 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 21:25:40 -0500 Subject: [PATCH 0385/1248] mark device list updates when a device is deleted/removed Signed-off-by: strawberry --- src/api/client/session.rs | 6 ------ src/service/users/mod.rs | 1 + 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 573f3d97..e889a867 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -243,9 +243,6 @@ pub(crate) async fn logout_route( .remove_device(sender_user, sender_device) .await; - // send device list update for user after logout - services.users.mark_device_key_update(sender_user).await; - Ok(logout::v3::Response::new()) } @@ -275,8 +272,5 @@ pub(crate) async fn logout_all_route( .for_each(|device_id| services.users.remove_device(sender_user, device_id)) .await; - // send device list update for user after logout - services.users.mark_device_key_update(sender_user).await; - Ok(logout_all::v3::Response::new()) } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index f17a6b9d..2aa80e32 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -300,6 +300,7 @@ impl Service { increment(&self.db.userid_devicelistversion, user_id.as_bytes()); self.db.userdeviceid_metadata.del(userdeviceid); + self.mark_device_key_update(user_id).await; } /// Returns an iterator over all device ids of this user. 
From a26b1c57220cfb1c0df3870a2f2b062f9e6dac19 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 22:02:48 -0500 Subject: [PATCH 0386/1248] send private read receipt on sliding stink (sync) Signed-off-by: strawberry --- src/api/client/sync/v4.rs | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 14d79c19..3480489d 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -490,7 +490,25 @@ pub(crate) async fn sync_events_v4_route( .await, ); - let vector: Vec<_> = services + let last_privateread_update = services + .rooms + .read_receipt + .last_privateread_update(sender_user, room_id) + .await > *roomsince; + + let private_read_event = if last_privateread_update { + services + .rooms + .read_receipt + .private_read_get(room_id, sender_user) + .await + .ok() + .map(|read_event| (sender_user.to_owned(), 0_u64, read_event)) + } else { + None + }; + + let mut vector: Vec<_> = services .rooms .read_receipt .readreceipts_since(room_id, *roomsince) @@ -504,6 +522,10 @@ pub(crate) async fn sync_events_v4_route( .collect() .await; + if let Some(private_read_event) = private_read_event { + vector.push(private_read_event); + } + let receipt_size = vector.len(); receipts .rooms From e0446181c5960a7760e94747a68e769a0556bfc8 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 11 Dec 2024 22:07:53 -0500 Subject: [PATCH 0387/1248] remove unnecessary args in pack_receipts Signed-off-by: strawberry --- src/api/client/sync/v4.rs | 10 +++++----- src/service/rooms/read_receipt/mod.rs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 3480489d..fddb81bf 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -26,9 +26,10 @@ use ruma::{ directory::RoomTypeFilter, events::{ room::member::{MembershipState, RoomMemberEventContent}, - AnyRawAccountDataEvent, 
StateEventType, + AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::{self, *}, }, + serde::Raw, state_res::Event, uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, UInt, UserId, }; @@ -503,21 +504,20 @@ pub(crate) async fn sync_events_v4_route( .private_read_get(room_id, sender_user) .await .ok() - .map(|read_event| (sender_user.to_owned(), 0_u64, read_event)) } else { None }; - let mut vector: Vec<_> = services + let mut vector: Vec> = services .rooms .read_receipt .readreceipts_since(room_id, *roomsince) - .filter_map(|(read_user, ts, v)| async move { + .filter_map(|(read_user, _ts, v)| async move { services .users .user_is_ignored(read_user, sender_user) .await - .or_some((read_user.to_owned(), ts, v)) + .or_some(v) }) .collect() .await; diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 72f2e1ad..5cac535d 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -133,10 +133,10 @@ impl Service { #[must_use] pub fn pack_receipts(receipts: I) -> Raw> where - I: Iterator)>, + I: Iterator>, { let mut json = BTreeMap::new(); - for (_, _, value) in receipts { + for value in receipts { let receipt = serde_json::from_str::>(value.json().get()); if let Ok(value) = receipt { for (event, receipt) in value.content { From 76a5a67b6f6cedd94496e3abb695b2e1c4f88f05 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 12 Dec 2024 10:22:30 -0500 Subject: [PATCH 0388/1248] fix a couple rust 2024 lints introduced in rust 1.82.0 Signed-off-by: strawberry --- deps/rust-rocksdb/lib.rs | 114 +++++++++++++++++++-------------------- src/core/alloc/je.rs | 2 +- src/core/mods/macros.rs | 16 +++--- src/main/mods.rs | 2 +- src/router/mod.rs | 6 +-- 5 files changed, 70 insertions(+), 70 deletions(-) diff --git a/deps/rust-rocksdb/lib.rs b/deps/rust-rocksdb/lib.rs index 2e699efb..0551991a 100644 --- a/deps/rust-rocksdb/lib.rs +++ b/deps/rust-rocksdb/lib.rs @@ 
-2,61 +2,61 @@ pub use rust_rocksdb::*; #[cfg_attr(not(conduit_mods), link(name = "rocksdb"))] #[cfg_attr(conduit_mods, link(name = "rocksdb", kind = "static"))] -extern "C" { - pub fn rocksdb_list_column_families(); - pub fn rocksdb_logger_create_stderr_logger(); - pub fn rocksdb_logger_create_callback_logger(); - pub fn rocksdb_options_set_info_log(); - pub fn rocksdb_get_options_from_string(); - pub fn rocksdb_writebatch_create(); - pub fn rocksdb_writebatch_destroy(); - pub fn rocksdb_writebatch_put_cf(); - pub fn rocksdb_writebatch_delete_cf(); - pub fn rocksdb_iter_value(); - pub fn rocksdb_iter_seek_to_last(); - pub fn rocksdb_iter_seek_for_prev(); - pub fn rocksdb_iter_seek_to_first(); - pub fn rocksdb_iter_next(); - pub fn rocksdb_iter_prev(); - pub fn rocksdb_iter_seek(); - pub fn rocksdb_iter_valid(); - pub fn rocksdb_iter_get_error(); - pub fn rocksdb_iter_key(); - pub fn rocksdb_iter_destroy(); - pub fn rocksdb_livefiles(); - pub fn rocksdb_livefiles_count(); - pub fn rocksdb_livefiles_destroy(); - pub fn rocksdb_livefiles_column_family_name(); - pub fn rocksdb_livefiles_name(); - pub fn rocksdb_livefiles_size(); - pub fn rocksdb_livefiles_level(); - pub fn rocksdb_livefiles_smallestkey(); - pub fn rocksdb_livefiles_largestkey(); - pub fn rocksdb_livefiles_entries(); - pub fn rocksdb_livefiles_deletions(); - pub fn rocksdb_put_cf(); - pub fn rocksdb_delete_cf(); - pub fn rocksdb_get_pinned_cf(); - pub fn rocksdb_create_column_family(); - pub fn rocksdb_get_latest_sequence_number(); - pub fn rocksdb_batched_multi_get_cf(); - pub fn rocksdb_cancel_all_background_work(); - pub fn rocksdb_repair_db(); - pub fn rocksdb_list_column_families_destroy(); - pub fn rocksdb_flush(); - pub fn rocksdb_flush_wal(); - pub fn rocksdb_open_column_families(); - pub fn rocksdb_open_for_read_only_column_families(); - pub fn rocksdb_open_as_secondary_column_families(); - pub fn rocksdb_open_column_families_with_ttl(); - pub fn rocksdb_open(); - pub fn 
rocksdb_open_for_read_only(); - pub fn rocksdb_open_with_ttl(); - pub fn rocksdb_open_as_secondary(); - pub fn rocksdb_write(); - pub fn rocksdb_create_iterator_cf(); - pub fn rocksdb_backup_engine_create_new_backup_flush(); - pub fn rocksdb_backup_engine_options_create(); - pub fn rocksdb_write_buffer_manager_destroy(); - pub fn rocksdb_options_set_ttl(); +unsafe extern "C" { + pub unsafe fn rocksdb_list_column_families(); + pub unsafe fn rocksdb_logger_create_stderr_logger(); + pub unsafe fn rocksdb_logger_create_callback_logger(); + pub unsafe fn rocksdb_options_set_info_log(); + pub unsafe fn rocksdb_get_options_from_string(); + pub unsafe fn rocksdb_writebatch_create(); + pub unsafe fn rocksdb_writebatch_destroy(); + pub unsafe fn rocksdb_writebatch_put_cf(); + pub unsafe fn rocksdb_writebatch_delete_cf(); + pub unsafe fn rocksdb_iter_value(); + pub unsafe fn rocksdb_iter_seek_to_last(); + pub unsafe fn rocksdb_iter_seek_for_prev(); + pub unsafe fn rocksdb_iter_seek_to_first(); + pub unsafe fn rocksdb_iter_next(); + pub unsafe fn rocksdb_iter_prev(); + pub unsafe fn rocksdb_iter_seek(); + pub unsafe fn rocksdb_iter_valid(); + pub unsafe fn rocksdb_iter_get_error(); + pub unsafe fn rocksdb_iter_key(); + pub unsafe fn rocksdb_iter_destroy(); + pub unsafe fn rocksdb_livefiles(); + pub unsafe fn rocksdb_livefiles_count(); + pub unsafe fn rocksdb_livefiles_destroy(); + pub unsafe fn rocksdb_livefiles_column_family_name(); + pub unsafe fn rocksdb_livefiles_name(); + pub unsafe fn rocksdb_livefiles_size(); + pub unsafe fn rocksdb_livefiles_level(); + pub unsafe fn rocksdb_livefiles_smallestkey(); + pub unsafe fn rocksdb_livefiles_largestkey(); + pub unsafe fn rocksdb_livefiles_entries(); + pub unsafe fn rocksdb_livefiles_deletions(); + pub unsafe fn rocksdb_put_cf(); + pub unsafe fn rocksdb_delete_cf(); + pub unsafe fn rocksdb_get_pinned_cf(); + pub unsafe fn rocksdb_create_column_family(); + pub unsafe fn rocksdb_get_latest_sequence_number(); + pub unsafe fn 
rocksdb_batched_multi_get_cf(); + pub unsafe fn rocksdb_cancel_all_background_work(); + pub unsafe fn rocksdb_repair_db(); + pub unsafe fn rocksdb_list_column_families_destroy(); + pub unsafe fn rocksdb_flush(); + pub unsafe fn rocksdb_flush_wal(); + pub unsafe fn rocksdb_open_column_families(); + pub unsafe fn rocksdb_open_for_read_only_column_families(); + pub unsafe fn rocksdb_open_as_secondary_column_families(); + pub unsafe fn rocksdb_open_column_families_with_ttl(); + pub unsafe fn rocksdb_open(); + pub unsafe fn rocksdb_open_for_read_only(); + pub unsafe fn rocksdb_open_with_ttl(); + pub unsafe fn rocksdb_open_as_secondary(); + pub unsafe fn rocksdb_write(); + pub unsafe fn rocksdb_create_iterator_cf(); + pub unsafe fn rocksdb_backup_engine_create_new_backup_flush(); + pub unsafe fn rocksdb_backup_engine_options_create(); + pub unsafe fn rocksdb_write_buffer_manager_destroy(); + pub unsafe fn rocksdb_options_set_ttl(); } diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 7561eb95..a3a7acc0 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -59,7 +59,7 @@ pub fn memory_stats() -> Option { Some(format!("

    {str}
    ")) } -extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { +unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { // SAFETY: we have to trust the opaque points to our String let res: &mut String = unsafe { opaque.cast::().as_mut().unwrap() }; diff --git a/src/core/mods/macros.rs b/src/core/mods/macros.rs index aa0999c9..1f3b7f5f 100644 --- a/src/core/mods/macros.rs +++ b/src/core/mods/macros.rs @@ -23,11 +23,11 @@ macro_rules! mod_dtor { macro_rules! mod_init { ($body:block) => { #[used] - #[cfg_attr(target_family = "unix", link_section = ".init_array")] - static MOD_INIT: extern "C" fn() = { _mod_init }; + #[cfg_attr(target_family = "unix", unsafe(link_section = ".init_array"))] + static MOD_INIT: unsafe extern "C" fn() = { _mod_init }; - #[cfg_attr(target_family = "unix", link_section = ".text.startup")] - extern "C" fn _mod_init() -> () $body + #[cfg_attr(target_family = "unix", unsafe(link_section = ".text.startup"))] + unsafe extern "C" fn _mod_init() -> () $body }; } @@ -35,10 +35,10 @@ macro_rules! mod_init { macro_rules! 
mod_fini { ($body:block) => { #[used] - #[cfg_attr(target_family = "unix", link_section = ".fini_array")] - static MOD_FINI: extern "C" fn() = { _mod_fini }; + #[cfg_attr(target_family = "unix", unsafe(link_section = ".fini_array"))] + static MOD_FINI: unsafe extern "C" fn() = { _mod_fini }; - #[cfg_attr(target_family = "unix", link_section = ".text.startup")] - extern "C" fn _mod_fini() -> () $body + #[cfg_attr(target_family = "unix", unsafe(link_section = ".text.startup"))] + unsafe extern "C" fn _mod_fini() -> () $body }; } diff --git a/src/main/mods.rs b/src/main/mods.rs index e377e71c..16d975cc 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -1,6 +1,6 @@ #![cfg(conduit_mods)] -#[no_link] +#[unsafe(no_link)] extern crate conduit_service; use std::{ diff --git a/src/router/mod.rs b/src/router/mod.rs index 1580f605..215000cb 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -16,7 +16,7 @@ conduit::mod_ctor! {} conduit::mod_dtor! {} conduit::rustc_flags_capture! {} -#[no_mangle] +#[unsafe(no_mangle)] pub extern "Rust" fn start(server: &Arc) -> Pin>> + Send>> { AssertUnwindSafe(run::start(server.clone())) .catch_unwind() @@ -25,7 +25,7 @@ pub extern "Rust" fn start(server: &Arc) -> Pin) -> Pin> + Send>> { AssertUnwindSafe(run::stop(services)) .catch_unwind() @@ -34,7 +34,7 @@ pub extern "Rust" fn stop(services: Arc) -> Pin) -> Pin> + Send>> { AssertUnwindSafe(run::run(services.clone())) .catch_unwind() From b73d558cbac4f1f251a7a7f22db5c768549d81fa Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 13 Dec 2024 00:21:36 -0500 Subject: [PATCH 0389/1248] start producing haswell target optimised x86 binaries for rocksdb fast CRC32 support Signed-off-by: strawberry --- .github/workflows/ci.yml | 49 +++++++++++---- flake.nix | 79 +++++++++++++++++++++---- nix/pkgs/main/cross-compilation-env.nix | 1 - nix/pkgs/main/default.nix | 22 ++++++- 4 files changed, 126 insertions(+), 25 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml 
index 5ab4581a..a1fa1895 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -302,7 +302,7 @@ jobs:
         with:
           cache-all-crates: "true"
 
-      - name: Build static ${{ matrix.target }}
+      - name: Build static ${{ matrix.target }}-all-features
        run: |
          if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]]
          then
@@ -325,15 +325,35 @@ jobs:
           mv -v target/release/conduwuit static-${{ matrix.target }}
           mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb
 
+      - name: Build static x86_64-linux-musl-all-features-x86_64-haswell-optimised
+        if: ${{ matrix.target == 'x86_64-linux-musl' }}
+        run: |
+          CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl"
+          SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
+
+          bin/nix-build-and-cache just .#static-x86_64-linux-musl-all-features-x86_64-haswell-optimised
+
+          mkdir -v -p target/release/
+          mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
+          cp -v -f result/bin/conduit target/release/conduwuit
+          cp -v -f result/bin/conduit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit
+          # -p conduit is the main crate name
+          direnv exec . 
cargo deb --verbose --no-build --no-strip -p conduit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb
+          mv -v target/release/conduwuit static-x86_64-linux-musl-x86_64-haswell-optimised
+          mv -v target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb x86_64-linux-musl-x86_64-haswell-optimised.deb
+
       # quick smoke test of the x86_64 static release binary
-      - name: Run x86_64 static release binary
+      - name: Quick smoke test the x86_64 static release binary
+        if: ${{ matrix.target == 'x86_64-linux-musl' }}
         run: |
           # GH actions default runners are x86_64 only
           if file result/bin/conduit | grep x86-64; then
             result/bin/conduit --version
+            result/bin/conduit --help
+            result/bin/conduit -Oserver_name="'$(date -u +%s).local'" -Odatabase_path="'/tmp/$(date -u +%s)'" --execute "server admin-notice awawawawawawawawawawa" --execute "server memory-usage" --execute "server shutdown"
           fi
 
-      - name: Build static debug ${{ matrix.target }}
+      - name: Build static debug ${{ matrix.target }}-all-features
         run: |
           if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]]
           then
@@ -376,14 +396,14 @@ jobs:
           dpkg-deb --info ${{ matrix.target }}.deb
           dpkg-deb --info ${{ matrix.target }}-debug.deb
 
-      - name: Upload static-${{ matrix.target }}
+      - name: Upload static-${{ matrix.target }}-all-features
         uses: actions/upload-artifact@v4
         with:
           name: static-${{ matrix.target }}
           path: static-${{ matrix.target }}
           if-no-files-found: error
 
-      - name: Upload deb ${{ matrix.target }}
+      - name: Upload deb ${{ matrix.target }}-all-features
         uses: actions/upload-artifact@v4
         with:
           name: deb-${{ matrix.target }}
@@ -391,14 +411,14 @@ jobs:
           if-no-files-found: error
           compression-level: 0
 
-      - name: Upload static-${{ matrix.target }}-debug
+      - name: Upload static-${{ matrix.target }}-debug-all-features
         uses: actions/upload-artifact@v4
         with:
           name: static-${{ matrix.target }}-debug
           path: static-${{ matrix.target }}-debug
           if-no-files-found: error
 
-      - name: Upload deb ${{ 
matrix.target }}-debug + - name: Upload deb ${{ matrix.target }}-debug-all-features uses: actions/upload-artifact@v4 with: name: deb-${{ matrix.target }}-debug @@ -406,19 +426,26 @@ if-no-files-found: error compression-level: 0 - - name: Build OCI image ${{ matrix.target }} + - name: Build OCI image ${{ matrix.target }}-all-features run: | bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features cp -v -f result oci-image-${{ matrix.target }}.tar.gz - - name: Build debug OCI image ${{ matrix.target }} + - name: Build OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised + if: ${{ matrix.target == 'x86_64-linux-musl'}} + run: | + bin/nix-build-and-cache just .#oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised + + cp -v -f result oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz + + - name: Build debug OCI image ${{ matrix.target }}-all-features run: | bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features-debug cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz - - name: Upload OCI image ${{ matrix.target }} + - name: Upload OCI image ${{ matrix.target }}-all-features uses: actions/upload-artifact@v4 with: name: oci-image-${{ matrix.target }} @@ -426,7 +453,7 @@ if-no-files-found: error compression-level: 0 - - name: Upload OCI image ${{ matrix.target }}-debug + - name: Upload OCI image ${{ matrix.target }}-debug-all-features uses: actions/upload-artifact@v4 with: name: oci-image-${{ matrix.target }}-debug diff --git a/flake.nix b/flake.nix index e4ba8e3d..5041e468 100644 --- a/flake.nix +++ b/flake.nix @@ -72,9 +72,14 @@ "-DWITH_TESTS=1" # we use rust-rocksdb via C interface and dont need C++ RTTI "-DUSE_RTTI=1" + # this doesn't exist in RocksDB, and USE_SSE is deprecated for + # PORTABLE=$(march) + "-DFORCE_SSE42=1" ] old.cmakeFlags ++ [ + # no real reason to have snappy, no one uses this + "-DWITH_SNAPPY=0" # we dont need to use ldb or sst_dump 
(core_tools) "-DWITH_CORE_TOOLS=0" # we dont need trace tools @@ -272,6 +277,15 @@ value = scopeCrossStatic.main; } + # An output for a statically-linked binary with x86_64 haswell + # target optimisations + { + name = "${binaryName}-x86_64-haswell-optimised"; + value = scopeCrossStatic.main.override { + x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); + }; + } + # An output for a statically-linked unstripped debug ("dev") binary { name = "${binaryName}-debug"; @@ -306,6 +320,22 @@ }; } + # An output for a statically-linked binary with `--all-features` and with x86_64 haswell + # target optimisations + { + name = "${binaryName}-all-features-x86_64-haswell-optimised"; + value = scopeCrossStatic.main.override { + all_features = true; + disable_features = [ + # this is non-functional on nix for some reason + "hardened_malloc" + # dont include experimental features + "experimental" + ]; + x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); + }; + } + # An output for a statically-linked unstripped debug ("dev") binary with `--all-features` { name = "${binaryName}-all-features-debug"; @@ -337,6 +367,17 @@ value = scopeCrossStatic.oci-image; } + # An output for an OCI image based on that binary with x86_64 haswell + # target optimisations + { + name = "oci-image-${crossSystem}-x86_64-haswell-optimised"; + value = scopeCrossStatic.oci-image.override { + main = scopeCrossStatic.main.override { + x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); + }; + }; + } + # An output for an OCI image based on that unstripped debug ("dev") binary { name = "oci-image-${crossSystem}-debug"; @@ -365,21 +406,39 @@ }; } + # An output for an OCI image based on that binary with `--all-features` and with x86_64 haswell + # target optimisations + 
{ + name = "oci-image-${crossSystem}-all-features-x86_64-haswell-optimised"; + value = scopeCrossStatic.oci-image.override { + main = scopeCrossStatic.main.override { + all_features = true; + disable_features = [ + # this is non-functional on nix for some reason + "hardened_malloc" + # dont include experimental features + "experimental" + ]; + x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); + }; + }; + } + # An output for an OCI image based on that unstripped debug ("dev") binary with `--all-features` { name = "oci-image-${crossSystem}-all-features-debug"; value = scopeCrossStatic.oci-image.override { main = scopeCrossStatic.main.override { - profile = "dev"; - all_features = true; - # debug build users expect full logs - disable_release_max_log_level = true; - disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" - # dont include experimental features - "experimental" - ]; + profile = "dev"; + all_features = true; + # debug build users expect full logs + disable_release_max_log_level = true; + disable_features = [ + # this is non-functional on nix for some reason + "hardened_malloc" + # dont include experimental features + "experimental" + ]; }; }; } diff --git a/nix/pkgs/main/cross-compilation-env.nix b/nix/pkgs/main/cross-compilation-env.nix index 3fb94d41..0f326c92 100644 --- a/nix/pkgs/main/cross-compilation-env.nix +++ b/nix/pkgs/main/cross-compilation-env.nix @@ -45,7 +45,6 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic { ( let inherit (rust.lib) envVars; - #shouldUseLLD = platform: platform.isAarch64 && platform.isStatic && !stdenv.hostPlatform.isDarwin; in lib.optionalAttrs (stdenv.targetPlatform.rust.rustcTarget diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index abc85d82..32e11c06 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -13,12 +13,17 @@ , stdenv # Options (keep sorted) -, 
default_features ? true -, disable_release_max_log_level ? false , all_features ? false +, default_features ? true , disable_features ? [] +, disable_release_max_log_level ? false , features ? [] , profile ? "release" +# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag +# haswell is pretty much any x86 cpu made in the last 12 years, and +# supports modern CPU extensions that rocksdb can make use of. +# disable if trying to make a portable x86_64 build for very old hardware +, x86_64_haswell_target_optimised ? false }: let @@ -79,6 +84,15 @@ buildDepsOnlyEnv = enableLiburing = enableLiburing; }).overrideAttrs (old: { enableLiburing = enableLiburing; + cmakeFlags = lib.optional x86_64_haswell_target_optimised (lib.subtractLists [ + # dont make a portable build if x86_64_haswell_target_optimised is enabled + "-DPORTABLE=1" + ] + old.cmakeFlags) + ++ lib.optionals x86_64_haswell_target_optimised [ + "-DPORTABLE=haswell" + ] + ++ old.cmakeFlags; }); in { @@ -105,7 +119,9 @@ buildPackageEnv = { # Only needed in static stdenv because these are transitive dependencies of rocksdb CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS + lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic) - " -L${lib.getLib liburing}/lib -luring"; + " -L${lib.getLib liburing}/lib -luring" + + lib.optionalString x86_64_haswell_target_optimised + " -Ctarget-cpu=haswell"; }; From c5aca8036d57e907d100f80554cfbecd9b90dbc7 Mon Sep 17 00:00:00 2001 From: Asen <78863300+AsenHu@users.noreply.github.com> Date: Fri, 13 Dec 2024 01:46:09 -0500 Subject: [PATCH 0390/1248] add auto upload assets, upload binary to webserver Co-authored-by: strawberry Signed-off-by: strawberry --- .github/workflows/release.yml | 106 ++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..902f0132 --- /dev/null 
+++ b/.github/workflows/release.yml @@ -0,0 +1,106 @@ +name: Upload Release Assets + +on: + release: + types: [published, prereleased] + workflow_dispatch: + inputs: + tag: + description: 'Tag to release' + required: true + type: string + action_id: + description: 'Action ID of the CI run' + required: true + type: string + +permissions: + contents: write + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: get latest ci id + id: get_ci_id + env: + GH_TOKEN: ${{ github.token }} + run: | + if [ "${{ github.event_name }}" == "workflow_dispatch" ] + then + id="${{ github.event.inputs.action_id }}" + tag="${{ github.event.inputs.tag }}" + else + # get all runs of the ci workflow + json=$(gh api "repos/${{ github.repository }}/actions/workflows/ci.yml/runs") + + # find first run that is github sha and status is completed + id=$(echo "$json" | jq ".workflow_runs[] | select(.head_sha == \"${{ github.sha }}\" and .status == \"completed\") | .id" | head -n 1) + if [ ! "$id" ]; then + echo "No completed runs found" + echo "ci_id=0" >> "$GITHUB_OUTPUT" + exit 0 + fi + + tag="${{ github.event.release.tag_name }}" + fi + + echo "ci_id=$id" >> "$GITHUB_OUTPUT" + echo "tag=$tag" >> "$GITHUB_OUTPUT" + + - name: get latest ci artifacts + if: steps.get_ci_id.outputs.ci_id != 0 + uses: actions/download-artifact@v4 + env: + GH_TOKEN: ${{ github.token }} + with: + merge-multiple: true + run-id: ${{ steps.get_ci_id.outputs.ci_id }} + github-token: ${{ github.token }} + + - run: | + ls + + - name: upload release assets + if: steps.get_ci_id.outputs.ci_id != 0 + env: + GH_TOKEN: ${{ github.token }} + TAG: ${{ steps.get_ci_id.outputs.tag }} + run: | + for file in $(find . -type f); do + echo "Uploading $file..." + gh release upload $TAG "$file" --clobber --repo="${{github.repository}}" || echo "Something went wrong, skipping." 
+ done + + - name: upload release assets to website + if: steps.get_ci_id.outputs.ci_id != 0 + env: + TAG: ${{ steps.get_ci_id.outputs.tag }} + run: | + mkdir -p -v ~/.ssh + + echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts + echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 + + chmod 600 ~/.ssh/id_ed25519 + + cat >>~/.ssh/config < Date: Fri, 13 Dec 2024 01:47:53 -0500 Subject: [PATCH 0391/1248] upload CI binaries to webserver for easy direct download Signed-off-by: strawberry --- .github/workflows/ci.yml | 186 +++++++++++++++++++++++++++++++++++--- flake.nix | 2 + nix/pkgs/main/default.nix | 10 +- 3 files changed, 180 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a1fa1895..5b44cbbb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,6 +52,7 @@ env: accept-flake-config = true # complement uses libolm NIXPKGS_ALLOW_INSECURE: 1 + WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} permissions: packages: write @@ -62,6 +63,34 @@ jobs: name: Test runs-on: ubuntu-24.04 steps: + - name: Setup SSH web publish + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + run: | + mkdir -p -v ~/.ssh + + echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts + echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 + + chmod 600 ~/.ssh/id_ed25519 + + cat >>~/.ssh/config <> ~/.ssh/known_hosts + echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 + + chmod 600 ~/.ssh/id_ed25519 + + cat >>~/.ssh/config <> ~/.ssh/known_hosts + echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 + + chmod 600 ~/.ssh/id_ed25519 + + cat >>~/.ssh/config < Date: Fri, 13 Dec 2024 11:10:23 -0500 Subject: [PATCH 
0392/1248] use database::Json() to serialise MSC4133 profile value before insert Signed-off-by: strawberry --- src/service/users/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 2aa80e32..70868ec8 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -866,7 +866,7 @@ impl Service { let key = (user_id, profile_key); if let Some(value) = profile_key_value { - self.db.useridprofilekey_value.put(key, value); + self.db.useridprofilekey_value.put(key, Json(value)); } else { self.db.useridprofilekey_value.del(key); } From a9f6bb6169d60b5c2e9cd2cdbf11bba4464658cb Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 13 Dec 2024 13:35:36 -0500 Subject: [PATCH 0393/1248] remove jemalloc_prof and jemalloc_stats from all-features builds (exc. debug) Signed-off-by: strawberry --- flake.nix | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/flake.nix b/flake.nix index ba93333f..08120ffd 100644 --- a/flake.nix +++ b/flake.nix @@ -208,6 +208,10 @@ "hardened_malloc" # dont include experimental features "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" ]; }; all-features-debug = scopeHost.main.override { @@ -233,6 +237,10 @@ "hardened_malloc" # dont include experimental features "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" ]; }; }; @@ -318,6 +326,10 @@ "hardened_malloc" # dont include experimental features "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. 
+ "jemalloc_prof" + "jemalloc_stats" ]; }; } @@ -333,6 +345,10 @@ "hardened_malloc" # dont include experimental features "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" ]; x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); }; @@ -403,6 +419,10 @@ "hardened_malloc" # dont include experimental features "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" ]; }; }; @@ -420,6 +440,10 @@ "hardened_malloc" # dont include experimental features "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" ]; x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); }; @@ -482,6 +506,10 @@ "hardened_malloc" # dont include experimental features "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. 
+ "jemalloc_prof" + "jemalloc_stats" ]; }; })); From e4489a5d20ef0b1a7f36474f57e4740da1052530 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 13 Dec 2024 21:59:37 -0500 Subject: [PATCH 0394/1248] bump rust-rocksdb fork Signed-off-by: strawberry --- Cargo.lock | 4 ++-- deps/rust-rocksdb/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86b90c3d..94533ae1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3381,7 +3381,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.30.0+9.8.4" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3a9640a6b9173d24819c22a49487e31d20a2e59e#3a9640a6b9173d24819c22a49487e31d20a2e59e" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=fcb772d84572c63b1defc57baf9cdbdf5577c6a4#fcb772d84572c63b1defc57baf9cdbdf5577c6a4" dependencies = [ "bindgen", "bzip2-sys", @@ -3398,7 +3398,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.34.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3a9640a6b9173d24819c22a49487e31d20a2e59e#3a9640a6b9173d24819c22a49487e31d20a2e59e" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=fcb772d84572c63b1defc57baf9cdbdf5577c6a4#fcb772d84572c63b1defc57baf9cdbdf5577c6a4" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index b6895ad3..e89c26d7 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "3a9640a6b9173d24819c22a49487e31d20a2e59e" +rev = "fcb772d84572c63b1defc57baf9cdbdf5577c6a4" #branch = "master" default-features = false From c6bf8f5ea15a6b963220a1de4bb50a639d0d0696 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 14 Dec 2024 21:00:33 -0500 Subject: [PATCH 
0395/1248] improve gh actions security using zizmor https://github.com/woodruffw/zizmor Signed-off-by: strawberry --- .github/workflows/ci.yml | 166 +++++++++++++++------------- .github/workflows/documentation.yml | 8 +- .github/workflows/release.yml | 154 ++++++++++++++------------ 3 files changed, 176 insertions(+), 152 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5b44cbbb..4dd47a30 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -53,10 +53,10 @@ env: # complement uses libolm NIXPKGS_ALLOW_INSECURE: 1 WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} + GH_SHA: ${{ github.sha }} + GH_REF_NAME: ${{ github.ref_name }} -permissions: - packages: write - contents: read +permissions: {} jobs: tests: @@ -88,8 +88,8 @@ jobs: ssh -q website "echo test" echo "Creating commit rev directory on web server" - ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${{ github.sha }}/" - ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${{ github.sha }}/" + ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/$GITHUB_SHA/" + ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/$GITHUB_SHA/" - name: Install liburing run: | @@ -106,6 +106,8 @@ jobs: - name: Sync repository uses: actions/checkout@v4 + with: + persist-credentials: false - name: Tag comparison check if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} @@ -113,7 +115,7 @@ jobs: # Tag mismatch with latest repo tag check to prevent potential downgrades LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - if [ $LATEST_TAG != ${{ github.ref_name }} ]; then + if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' 
>> $GITHUB_STEP_SUMMARY exit 1 @@ -162,8 +164,8 @@ jobs: if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }} run: | sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < >(tee -a complement_diff_output.log) - name: Update Job Summary + env: + GH_JOB_STATUS: ${{ job.status }} if: success() || failure() run: | - if [ ${{ job.status }} == 'success' ]; then + if [ ${GH_JOB_STATUS} == 'success' ]; then echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY else echo '# CI failure' >> $GITHUB_STEP_SUMMARY @@ -264,6 +268,8 @@ jobs: steps: - name: Sync repository uses: actions/checkout@v4 + with: + persist-credentials: false - name: Setup SSH web publish if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' @@ -332,8 +338,8 @@ jobs: if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }} run: | sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <> $GITHUB_STEP_SUMMARY exit 1 @@ -671,14 +680,14 @@ jobs: if: ${{ matrix.os == 'macos-13' }} run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then - scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${{ github.sha }}/conduwuit-macos-x86_64 + scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-x86_64 fi - name: Upload macOS arm64 binary to webserver if: ${{ matrix.os == 'macos-latest' }} run: | if [ ! 
-z $WEB_UPLOAD_SSH_USERNAME ]; then - scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${{ github.sha }}/conduwuit-macos-arm64 + scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-arm64 fi - name: Upload macOS x86_64 binary @@ -701,6 +710,9 @@ jobs: name: Docker publish runs-on: ubuntu-24.04 needs: build + permissions: + packages: write + contents: read if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' env: DOCKER_ARM64: docker.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 @@ -756,92 +768,92 @@ jobs: if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | docker load -i oci-image-amd64.tar.gz - docker tag $(docker images -q conduit:main) ${{ env.DOCKER_AMD64 }} - docker tag $(docker images -q conduit:main) ${{ env.GHCR_AMD64 }} - docker tag $(docker images -q conduit:main) ${{ env.GLCR_AMD64 }} - docker push ${{ env.DOCKER_AMD64 }} - docker push ${{ env.GHCR_AMD64 }} - docker push ${{ env.GLCR_AMD64 }} + docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64} + docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64} + docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64} + docker push ${DOCKER_AMD64} + docker push ${GHCR_AMD64} + docker push ${GLCR_AMD64} - name: Load and push arm64 image if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | docker load -i oci-image-arm64v8.tar.gz - docker tag $(docker images -q conduit:main) ${{ env.DOCKER_ARM64 }} - docker tag $(docker images -q conduit:main) ${{ env.GHCR_ARM64 }} - docker tag $(docker images -q conduit:main) ${{ 
env.GLCR_ARM64 }} - docker push ${{ env.DOCKER_ARM64 }} - docker push ${{ env.GHCR_ARM64 }} - docker push ${{ env.GLCR_ARM64 }} + docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64} + docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64} + docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64} + docker push ${DOCKER_ARM64} + docker push ${GHCR_ARM64} + docker push ${GLCR_ARM64} - name: Load and push amd64 debug image if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | docker load -i oci-image-amd64-debug.tar.gz - docker tag $(docker images -q conduit:main) ${{ env.DOCKER_AMD64 }}-debug - docker tag $(docker images -q conduit:main) ${{ env.GHCR_AMD64 }}-debug - docker tag $(docker images -q conduit:main) ${{ env.GLCR_AMD64 }}-debug - docker push ${{ env.DOCKER_AMD64 }}-debug - docker push ${{ env.GHCR_AMD64 }}-debug - docker push ${{ env.GLCR_AMD64 }}-debug + docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64}-debug + docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64}-debug + docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64}-debug + docker push ${DOCKER_AMD64}-debug + docker push ${GHCR_AMD64}-debug + docker push ${GLCR_AMD64}-debug - name: Load and push arm64 debug image if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | docker load -i oci-image-arm64v8-debug.tar.gz - docker tag $(docker images -q conduit:main) ${{ env.DOCKER_ARM64 }}-debug - docker tag $(docker images -q conduit:main) ${{ env.GHCR_ARM64 }}-debug - docker tag $(docker images -q conduit:main) ${{ env.GLCR_ARM64 }}-debug - docker push ${{ env.DOCKER_ARM64 }}-debug - docker push ${{ env.GHCR_ARM64 }}-debug - docker push ${{ env.GLCR_ARM64 }}-debug + docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64}-debug + docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64}-debug + docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64}-debug + docker push ${DOCKER_ARM64}-debug + docker push 
${GHCR_ARM64}-debug + docker push ${GLCR_ARM64}-debug - name: Create Docker combined manifests run: | # Dockerhub Container Registry - docker manifest create ${{ env.DOCKER_TAG }} --amend ${{ env.DOCKER_ARM64 }} --amend ${{ env.DOCKER_AMD64 }} - docker manifest create ${{ env.DOCKER_BRANCH }} --amend ${{ env.DOCKER_ARM64 }} --amend ${{ env.DOCKER_AMD64 }} + docker manifest create ${DOCKER_TAG} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} + docker manifest create ${DOCKER_BRANCH} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} # GitHub Container Registry - docker manifest create ${{ env.GHCR_TAG }} --amend ${{ env.GHCR_ARM64 }} --amend ${{ env.GHCR_AMD64 }} - docker manifest create ${{ env.GHCR_BRANCH }} --amend ${{ env.GHCR_ARM64 }} --amend ${{ env.GHCR_AMD64 }} + docker manifest create ${GHCR_TAG} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} + docker manifest create ${GHCR_BRANCH} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} # GitLab Container Registry - docker manifest create ${{ env.GLCR_TAG }} --amend ${{ env.GLCR_ARM64 }} --amend ${{ env.GLCR_AMD64 }} - docker manifest create ${{ env.GLCR_BRANCH }} --amend ${{ env.GLCR_ARM64 }} --amend ${{ env.GLCR_AMD64 }} + docker manifest create ${GLCR_TAG} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} + docker manifest create ${GLCR_BRANCH} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} - name: Create Docker combined debug manifests run: | # Dockerhub Container Registry - docker manifest create ${{ env.DOCKER_TAG }}-debug --amend ${{ env.DOCKER_ARM64 }}-debug --amend ${{ env.DOCKER_AMD64 }}-debug - docker manifest create ${{ env.DOCKER_BRANCH }}-debug --amend ${{ env.DOCKER_ARM64 }}-debug --amend ${{ env.DOCKER_AMD64 }}-debug + docker manifest create ${DOCKER_TAG}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug + docker manifest create ${DOCKER_BRANCH}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug # GitHub Container Registry - docker manifest create ${{ env.GHCR_TAG }}-debug --amend ${{ 
env.GHCR_ARM64 }}-debug --amend ${{ env.GHCR_AMD64 }}-debug - docker manifest create ${{ env.GHCR_BRANCH }}-debug --amend ${{ env.GHCR_ARM64 }}-debug --amend ${{ env.GHCR_AMD64 }}-debug + docker manifest create ${GHCR_TAG}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug + docker manifest create ${GHCR_BRANCH}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug # GitLab Container Registry - docker manifest create ${{ env.GLCR_TAG }}-debug --amend ${{ env.GLCR_ARM64 }}-debug --amend ${{ env.GLCR_AMD64 }}-debug - docker manifest create ${{ env.GLCR_BRANCH }}-debug --amend ${{ env.GLCR_ARM64 }}-debug --amend ${{ env.GLCR_AMD64 }}-debug + docker manifest create ${GLCR_TAG}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug + docker manifest create ${GLCR_BRANCH}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug - name: Push manifests to Docker registries if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | - docker manifest push ${{ env.DOCKER_TAG }} - docker manifest push ${{ env.DOCKER_BRANCH }} - docker manifest push ${{ env.GHCR_TAG }} - docker manifest push ${{ env.GHCR_BRANCH }} - docker manifest push ${{ env.GLCR_TAG }} - docker manifest push ${{ env.GLCR_BRANCH }} - docker manifest push ${{ env.DOCKER_TAG }}-debug - docker manifest push ${{ env.DOCKER_BRANCH }}-debug - docker manifest push ${{ env.GHCR_TAG }}-debug - docker manifest push ${{ env.GHCR_BRANCH }}-debug - docker manifest push ${{ env.GLCR_TAG }}-debug - docker manifest push ${{ env.GLCR_BRANCH }}-debug + docker manifest push ${DOCKER_TAG} + docker manifest push ${DOCKER_BRANCH} + docker manifest push ${GHCR_TAG} + docker manifest push ${GHCR_BRANCH} + docker manifest push ${GLCR_TAG} + docker manifest push ${GLCR_BRANCH} + docker manifest push ${DOCKER_TAG}-debug + docker manifest push ${DOCKER_BRANCH}-debug + docker manifest push ${GHCR_TAG}-debug + docker manifest push ${GHCR_BRANCH}-debug + docker manifest push 
${GLCR_TAG}-debug + docker manifest push ${GLCR_BRANCH}-debug - name: Add Image Links to Job Summary if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | - echo "- \`docker pull ${{ env.DOCKER_TAG }}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${{ env.GHCR_TAG }}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${{ env.GLCR_TAG }}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${{ env.DOCKER_TAG }}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${{ env.GHCR_TAG }}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${{ env.GLCR_TAG }}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 20b1e4c6..4aa93f58 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -36,6 +36,8 @@ concurrency: group: "pages" cancel-in-progress: false +permissions: {} + jobs: docs: name: Documentation and GitHub Pages @@ -61,6 +63,8 @@ jobs: - name: Sync repository uses: actions/checkout@v4 + with: + persist-credentials: false - name: Setup GitHub Pages if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') @@ -109,8 +113,8 @@ jobs: if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }} run: | sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null <> "$GITHUB_OUTPUT" - exit 0 + # find first run that is github sha and status is completed + id=$(echo "$json" | jq ".workflow_runs[] | select(.head_sha == \"${GH_SHA}\" and .status == 
\"completed\") | .id" | head -n 1) + + if [ ! "$id" ]; then + echo "No completed runs found" + echo "ci_id=0" >> "$GITHUB_OUTPUT" + exit 0 + fi + + tag="${GH_TAG}" fi - tag="${{ github.event.release.tag_name }}" - fi + echo "ci_id=$id" >> "$GITHUB_OUTPUT" + echo "tag=$tag" >> "$GITHUB_OUTPUT" - echo "ci_id=$id" >> "$GITHUB_OUTPUT" - echo "tag=$tag" >> "$GITHUB_OUTPUT" + - name: get latest ci artifacts + if: steps.get_ci_id.outputs.ci_id != 0 + uses: actions/download-artifact@v4 + env: + GH_TOKEN: ${{ github.token }} + with: + merge-multiple: true + run-id: ${{ steps.get_ci_id.outputs.ci_id }} + github-token: ${{ github.token }} - - name: get latest ci artifacts - if: steps.get_ci_id.outputs.ci_id != 0 - uses: actions/download-artifact@v4 - env: - GH_TOKEN: ${{ github.token }} - with: - merge-multiple: true - run-id: ${{ steps.get_ci_id.outputs.ci_id }} - github-token: ${{ github.token }} + - run: | + ls - - run: | - ls + - name: upload release assets + if: steps.get_ci_id.outputs.ci_id != 0 + env: + GH_TOKEN: ${{ github.token }} + TAG: ${{ steps.get_ci_id.outputs.tag }} + run: | + for file in $(find . -type f); do + echo "Uploading $file..." + gh release upload $TAG "$file" --clobber --repo="${GH_REPOSITORY}" || echo "Something went wrong, skipping." + done - - name: upload release assets - if: steps.get_ci_id.outputs.ci_id != 0 - env: - GH_TOKEN: ${{ github.token }} - TAG: ${{ steps.get_ci_id.outputs.tag }} - run: | - for file in $(find . -type f); do - echo "Uploading $file..." - gh release upload $TAG "$file" --clobber --repo="${{github.repository}}" || echo "Something went wrong, skipping." 
- done + - name: upload release assets to website + if: steps.get_ci_id.outputs.ci_id != 0 + env: + TAG: ${{ steps.get_ci_id.outputs.tag }} + run: | + mkdir -p -v ~/.ssh - - name: upload release assets to website - if: steps.get_ci_id.outputs.ci_id != 0 - env: - TAG: ${{ steps.get_ci_id.outputs.tag }} - run: | - mkdir -p -v ~/.ssh + echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts + echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 + chmod 600 ~/.ssh/id_ed25519 - chmod 600 ~/.ssh/id_ed25519 + cat >>~/.ssh/config <>~/.ssh/config < Date: Sat, 14 Dec 2024 21:58:01 -0500 Subject: [PATCH 0396/1248] rename conduit to conduwuit finally Signed-off-by: strawberry --- .github/workflows/ci.yml | 37 ++++---- Cargo.lock | 94 +++++++++--------- Cargo.toml | 52 +++++----- README.md | 6 ++ clippy.toml | 10 +- deps/rust-rocksdb/lib.rs | 4 +- docs/configuration.md | 2 +- docs/deploying/docker.md | 2 +- docs/deploying/generic.md | 8 +- docs/development.md | 2 +- src/admin/Cargo.toml | 12 +-- src/admin/admin.rs | 4 +- src/admin/appservice/mod.rs | 2 +- src/admin/check/commands.rs | 4 +- src/admin/check/mod.rs | 2 +- src/admin/command.rs | 2 +- src/admin/debug/commands.rs | 4 +- src/admin/debug/mod.rs | 2 +- src/admin/debug/tester.rs | 2 +- src/admin/federation/commands.rs | 2 +- src/admin/federation/mod.rs | 2 +- src/admin/media/commands.rs | 4 +- src/admin/media/mod.rs | 2 +- src/admin/mod.rs | 16 ++-- src/admin/processor.rs | 2 +- src/admin/query/account_data.rs | 2 +- src/admin/query/appservice.rs | 2 +- src/admin/query/globals.rs | 2 +- src/admin/query/mod.rs | 2 +- src/admin/query/presence.rs | 2 +- src/admin/query/pusher.rs | 2 +- src/admin/query/resolver.rs | 2 +- src/admin/query/room_alias.rs | 2 +- src/admin/query/room_state_cache.rs | 2 +- src/admin/query/sending.rs | 2 +- 
src/admin/query/users.rs | 2 +- src/admin/room/alias.rs | 2 +- src/admin/room/commands.rs | 2 +- src/admin/room/directory.rs | 2 +- src/admin/room/info.rs | 2 +- src/admin/room/mod.rs | 2 +- src/admin/room/moderation.rs | 2 +- src/admin/server/commands.rs | 6 +- src/admin/server/mod.rs | 2 +- src/admin/user/commands.rs | 4 +- src/admin/user/mod.rs | 2 +- src/admin/utils.rs | 2 +- src/api/Cargo.toml | 8 +- src/api/client/account.rs | 2 +- src/api/client/account_data.rs | 2 +- src/api/client/alias.rs | 2 +- src/api/client/appservice.rs | 2 +- src/api/client/backup.rs | 2 +- src/api/client/context.rs | 2 +- src/api/client/device.rs | 2 +- src/api/client/directory.rs | 2 +- src/api/client/filter.rs | 2 +- src/api/client/keys.rs | 2 +- src/api/client/media.rs | 4 +- src/api/client/media_legacy.rs | 4 +- src/api/client/membership.rs | 4 +- src/api/client/message.rs | 2 +- src/api/client/openid.rs | 2 +- src/api/client/profile.rs | 2 +- src/api/client/push.rs | 2 +- src/api/client/read_marker.rs | 2 +- src/api/client/relations.rs | 2 +- src/api/client/report.rs | 2 +- src/api/client/room/aliases.rs | 2 +- src/api/client/room/create.rs | 2 +- src/api/client/room/event.rs | 2 +- src/api/client/room/initial_sync.rs | 2 +- src/api/client/room/upgrade.rs | 2 +- src/api/client/search.rs | 2 +- src/api/client/send.rs | 2 +- src/api/client/session.rs | 2 +- src/api/client/state.rs | 2 +- src/api/client/sync/mod.rs | 2 +- src/api/client/sync/v3.rs | 4 +- src/api/client/sync/v4.rs | 2 +- src/api/client/threads.rs | 2 +- src/api/client/to_device.rs | 2 +- src/api/client/unstable.rs | 2 +- src/api/client/unversioned.rs | 4 +- src/api/client/user_directory.rs | 2 +- src/api/client/voip.rs | 2 +- src/api/client/well_known.rs | 2 +- src/api/mod.rs | 10 +- src/api/router.rs | 2 +- src/api/router/args.rs | 2 +- src/api/router/auth.rs | 2 +- src/api/router/handler.rs | 2 +- src/api/router/request.rs | 2 +- src/api/router/response.rs | 2 +- src/api/router/state.rs | 2 +- 
src/api/server/backfill.rs | 2 +- src/api/server/event.rs | 2 +- src/api/server/event_auth.rs | 2 +- src/api/server/get_missing_events.rs | 2 +- src/api/server/invite.rs | 2 +- src/api/server/key.rs | 2 +- src/api/server/make_join.rs | 2 +- src/api/server/make_knock.rs | 2 +- src/api/server/make_leave.rs | 2 +- src/api/server/media.rs | 4 +- src/api/server/query.rs | 2 +- src/api/server/send.rs | 2 +- src/api/server/send_join.rs | 2 +- src/api/server/send_knock.rs | 2 +- src/api/server/send_leave.rs | 2 +- src/api/server/state.rs | 2 +- src/api/server/state_ids.rs | 2 +- src/api/server/user.rs | 2 +- src/api/server/utils.rs | 4 +- src/api/server/version.rs | 4 +- src/core/Cargo.toml | 4 +- src/core/config/mod.rs | 2 +- src/core/debug.rs | 2 +- src/core/info/cargo.rs | 2 +- src/core/info/mod.rs | 2 +- src/core/mod.rs | 4 +- src/core/mods/mod.rs | 2 +- src/core/server.rs | 2 +- src/core/utils/debug.rs | 2 +- src/core/utils/mod.rs | 2 +- src/core/utils/string.rs | 2 +- src/database/Cargo.toml | 4 +- src/database/database.rs | 2 +- src/database/de.rs | 4 +- src/database/deserialized.rs | 2 +- src/database/engine.rs | 2 +- src/database/handle.rs | 2 +- src/database/keyval.rs | 2 +- src/database/map.rs | 2 +- src/database/map/contains.rs | 2 +- src/database/map/count.rs | 2 +- src/database/map/get.rs | 2 +- src/database/map/get_batch.rs | 2 +- src/database/map/insert.rs | 2 +- src/database/map/keys.rs | 2 +- src/database/map/keys_from.rs | 2 +- src/database/map/keys_prefix.rs | 2 +- src/database/map/remove.rs | 2 +- src/database/map/rev_keys.rs | 2 +- src/database/map/rev_keys_from.rs | 2 +- src/database/map/rev_keys_prefix.rs | 2 +- src/database/map/rev_stream.rs | 2 +- src/database/map/rev_stream_from.rs | 2 +- src/database/map/rev_stream_prefix.rs | 2 +- src/database/map/stream.rs | 2 +- src/database/map/stream_from.rs | 2 +- src/database/map/stream_prefix.rs | 2 +- src/database/maps.rs | 2 +- src/database/mod.rs | 8 +- src/database/opts.rs | 2 +- src/database/pool.rs 
| 2 +- src/database/ser.rs | 2 +- src/database/stream.rs | 2 +- src/database/stream/items.rs | 2 +- src/database/stream/items_rev.rs | 2 +- src/database/stream/keys.rs | 2 +- src/database/stream/keys_rev.rs | 2 +- src/database/tests.rs | 12 +-- src/database/util.rs | 10 +- src/macros/Cargo.toml | 4 +- src/macros/admin.rs | 2 +- src/macros/rustc.rs | 10 +- src/main/Cargo.toml | 95 +++++++++---------- src/main/clap.rs | 4 +- src/main/logging.rs | 2 +- src/main/main.rs | 10 +- src/main/mods.rs | 28 +++--- src/main/restart.rs | 2 +- src/main/sentry.rs | 4 +- src/main/server.rs | 16 ++-- src/main/signal.rs | 4 +- src/router/Cargo.toml | 10 +- src/router/layers.rs | 8 +- src/router/mod.rs | 12 +-- src/router/request.rs | 4 +- src/router/router.rs | 8 +- src/router/run.rs | 8 +- src/router/serve/mod.rs | 6 +- src/router/serve/plain.rs | 2 +- src/router/serve/tls.rs | 2 +- src/router/serve/unix.rs | 2 +- src/service/Cargo.toml | 6 +- src/service/account_data/mod.rs | 2 +- src/service/admin/console.rs | 2 +- src/service/admin/create.rs | 4 +- src/service/admin/grant.rs | 6 +- src/service/admin/mod.rs | 2 +- src/service/admin/startup.rs | 2 +- src/service/appservice/mod.rs | 2 +- src/service/appservice/namespace_regex.rs | 2 +- src/service/appservice/registration_info.rs | 2 +- src/service/client/mod.rs | 6 +- src/service/emergency/mod.rs | 18 ++-- src/service/globals/data.rs | 2 +- src/service/globals/mod.rs | 6 +- src/service/key_backups/mod.rs | 2 +- src/service/manager.rs | 2 +- src/service/media/data.rs | 2 +- src/service/media/migrations.rs | 2 +- src/service/media/mod.rs | 2 +- src/service/media/preview.rs | 4 +- src/service/media/remote.rs | 2 +- src/service/media/thumbnail.rs | 2 +- src/service/migrations.rs | 8 +- src/service/mod.rs | 12 +-- src/service/presence/data.rs | 2 +- src/service/presence/mod.rs | 2 +- src/service/presence/presence.rs | 2 +- src/service/pusher/mod.rs | 2 +- src/service/resolver/actual.rs | 2 +- src/service/resolver/cache.rs | 2 +- 
src/service/resolver/dns.rs | 2 +- src/service/resolver/mod.rs | 2 +- src/service/rooms/alias/mod.rs | 2 +- src/service/rooms/alias/remote.rs | 2 +- src/service/rooms/auth_chain/data.rs | 2 +- src/service/rooms/auth_chain/mod.rs | 2 +- src/service/rooms/directory/mod.rs | 2 +- src/service/rooms/event_handler/acl_check.rs | 2 +- .../fetch_and_handle_outliers.rs | 2 +- src/service/rooms/event_handler/fetch_prev.rs | 2 +- .../rooms/event_handler/fetch_state.rs | 2 +- .../event_handler/handle_incoming_pdu.rs | 2 +- .../rooms/event_handler/handle_outlier_pdu.rs | 2 +- .../rooms/event_handler/handle_prev_pdu.rs | 2 +- src/service/rooms/event_handler/mod.rs | 2 +- .../rooms/event_handler/parse_incoming_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 2 +- .../rooms/event_handler/state_at_incoming.rs | 2 +- .../event_handler/upgrade_outlier_pdu.rs | 2 +- src/service/rooms/lazy_loading/mod.rs | 2 +- src/service/rooms/metadata/mod.rs | 2 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/pdu_metadata/mod.rs | 2 +- src/service/rooms/read_receipt/data.rs | 2 +- src/service/rooms/read_receipt/mod.rs | 2 +- src/service/rooms/search/mod.rs | 2 +- src/service/rooms/short/mod.rs | 4 +- src/service/rooms/spaces/mod.rs | 2 +- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/state_accessor/data.rs | 2 +- src/service/rooms/state_accessor/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 4 +- src/service/rooms/state_compressor/mod.rs | 2 +- src/service/rooms/threads/mod.rs | 2 +- src/service/rooms/timeline/data.rs | 2 +- src/service/rooms/timeline/mod.rs | 4 +- src/service/rooms/typing/mod.rs | 2 +- src/service/rooms/user/mod.rs | 2 +- src/service/sending/appservice.rs | 2 +- src/service/sending/data.rs | 2 +- src/service/sending/dest.rs | 2 +- src/service/sending/mod.rs | 2 +- src/service/sending/send.rs | 2 +- src/service/sending/sender.rs | 2 +- src/service/server_keys/acquire.rs | 2 +- 
src/service/server_keys/get.rs | 2 +- src/service/server_keys/keypair.rs | 2 +- src/service/server_keys/mod.rs | 2 +- src/service/server_keys/request.rs | 2 +- src/service/server_keys/sign.rs | 2 +- src/service/server_keys/verify.rs | 2 +- src/service/service.rs | 2 +- src/service/services.rs | 2 +- src/service/sync/mod.rs | 2 +- src/service/sync/watch.rs | 2 +- src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/mod.rs | 2 +- src/service/updates/mod.rs | 2 +- src/service/users/mod.rs | 2 +- tests/cargo_smoke.sh | 4 +- 277 files changed, 559 insertions(+), 551 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4dd47a30..66c8f635 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -377,10 +377,9 @@ jobs: mkdir -v -p target/release/ mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduit target/release/conduwuit - cp -v -f result/bin/conduit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - # -p conduit is the main crate name - direnv exec . cargo deb --verbose --no-build --no-strip -p conduit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb + cp -v -f result/bin/conduwuit target/release/conduwuit + cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit + direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb mv -v target/release/conduwuit static-${{ matrix.target }} mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb @@ -394,10 +393,9 @@ jobs: mkdir -v -p target/release/ mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduit target/release/conduwuit - cp -v -f result/bin/conduit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - # -p conduit is the main crate name - direnv exec . 
cargo deb --verbose --no-build --no-strip -p conduit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb + cp -v -f result/bin/conduwuit target/release/conduwuit + cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit + direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb mv -v target/release/conduwuit static-x86_64-linux-musl-x86_64-haswell-optimised mv -v target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb x86_64-linux-musl-x86_64-haswell-optimised.deb @@ -406,10 +404,10 @@ jobs: if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | # GH actions default runners are x86_64 only - if file result/bin/conduit | grep x86-64; then - result/bin/conduit --version - result/bin/conduit --help - result/bin/conduit -Oserver_name="'$(date -u +%s).local'" -Odatabase_path="'/tmp/$(date -u +%s)'" --execute "server admin-notice awawawawawawawawawawa" --execute "server memory-usage" --execute "server shutdown" + if file result/bin/conduwuit | grep x86-64; then + result/bin/conduwuit --version + result/bin/conduwuit --help + result/bin/conduwuit -Oserver_name="'$(date -u +%s).local'" -Odatabase_path="'/tmp/$(date -u +%s)'" --execute "server admin-notice awawawawawawawawawawa" --execute "server memory-usage" --execute "server shutdown" fi - name: Build static debug ${{ matrix.target }}-all-features @@ -430,10 +428,9 @@ jobs: # so we need to coerce cargo-deb into thinking this is a release binary mkdir -v -p target/release/ mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduit target/release/conduwuit - cp -v -f result/bin/conduit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - # -p conduit is the main crate name - direnv exec . 
cargo deb --verbose --no-build --no-strip -p conduit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}-debug.deb + cp -v -f result/bin/conduwuit target/release/conduwuit + cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit + direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}-debug.deb mv -v target/release/conduwuit static-${{ matrix.target }}-debug mv -v target/release/${{ matrix.target }}-debug.deb ${{ matrix.target }}-debug.deb @@ -441,8 +438,8 @@ jobs: - name: Run x86_64 static debug binary run: | # GH actions default runners are x86_64 only - if file result/bin/conduit | grep x86-64; then - result/bin/conduit --version + if file result/bin/conduwuit | grep x86-64; then + result/bin/conduwuit --version fi # check validity of produced deb package, invalid debs will error on these commands @@ -654,7 +651,7 @@ jobs: if: ${{ matrix.os == 'macos-13' }} run: | CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release - cp -v -f target/release/conduit conduwuit-macos-x86_64 + cp -v -f target/release/conduwuit conduwuit-macos-x86_64 otool -L conduwuit-macos-x86_64 # quick smoke test of the x86_64 macOS binary @@ -667,7 +664,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} run: | CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release - cp -v -f target/release/conduit conduwuit-macos-arm64 + cp -v -f target/release/conduwuit conduwuit-macos-arm64 otool -L conduwuit-macos-arm64 # quick smoke test of the arm64 macOS binary diff --git a/Cargo.lock b/Cargo.lock index 94533ae1..95ab8d04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" @@ -619,16 +619,16 @@ dependencies = [ ] [[package]] -name = "conduit" +name = "conduwuit" version = "0.5.0" dependencies = [ "clap", - "conduit_admin", - "conduit_api", - "conduit_core", - "conduit_database", - "conduit_router", - "conduit_service", + "conduwuit_admin", + "conduwuit_api", + "conduwuit_core", + "conduwuit_database", + "conduwuit_router", + "conduwuit_service", "console-subscriber", "const-str", "hardened_malloc-rs", @@ -648,15 +648,15 @@ dependencies = [ ] [[package]] -name = "conduit_admin" +name = "conduwuit_admin" version = "0.5.0" dependencies = [ "clap", - "conduit_api", - "conduit_core", - "conduit_database", - "conduit_macros", - "conduit_service", + "conduwuit_api", + "conduwuit_core", + "conduwuit_database", + "conduwuit_macros", + "conduwuit_service", "const-str", "futures", "log", @@ -669,7 +669,7 @@ dependencies = [ ] [[package]] -name = "conduit_api" +name = "conduwuit_api" version = "0.5.0" dependencies = [ "axum", @@ -677,9 +677,9 @@ dependencies = [ "axum-extra", "base64 0.22.1", "bytes", - "conduit_core", - "conduit_database", - "conduit_service", + "conduwuit_core", + "conduwuit_database", + "conduwuit_service", "const-str", "futures", "hmac", @@ -701,7 +701,7 @@ dependencies = [ ] [[package]] -name = "conduit_core" +name = "conduwuit_core" version = "0.5.0" dependencies = [ "argon2", @@ -713,7 +713,7 @@ dependencies = [ "checked_ops", "chrono", "clap", - "conduit_macros", + "conduwuit_macros", "const-str", "ctor", "cyborgtime", @@ -739,7 +739,7 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - "thiserror 2.0.6", + "thiserror 2.0.7", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -753,12 +753,12 @@ dependencies = [ ] [[package]] -name = "conduit_database" +name = "conduwuit_database" version = "0.5.0" dependencies = [ "arrayvec", "async-channel", - "conduit_core", + "conduwuit_core", "const-str", "futures", "log", @@ -771,7 +771,7 @@ 
dependencies = [ ] [[package]] -name = "conduit_macros" +name = "conduwuit_macros" version = "0.5.0" dependencies = [ "itertools 0.13.0", @@ -781,7 +781,7 @@ dependencies = [ ] [[package]] -name = "conduit_router" +name = "conduwuit_router" version = "0.5.0" dependencies = [ "axum", @@ -789,10 +789,10 @@ dependencies = [ "axum-server", "axum-server-dual-protocol", "bytes", - "conduit_admin", - "conduit_api", - "conduit_core", - "conduit_service", + "conduwuit_admin", + "conduwuit_api", + "conduwuit_core", + "conduwuit_service", "const-str", "futures", "http", @@ -814,15 +814,15 @@ dependencies = [ ] [[package]] -name = "conduit_service" +name = "conduwuit_service" version = "0.5.0" dependencies = [ "arrayvec", "async-trait", "base64 0.22.1", "bytes", - "conduit_core", - "conduit_database", + "conduwuit_core", + "conduwuit_database", "const-str", "either", "futures", @@ -2954,7 +2954,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.6", + "thiserror 2.0.7", "tokio", "tracing", ] @@ -2973,7 +2973,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.6", + "thiserror 2.0.7", "tinyvec", "tracing", "web-time 1.1.0", @@ -3211,7 +3211,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.6", + "thiserror 2.0.7", "url", "web-time 1.1.0", ] @@ -3237,7 +3237,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.6", + "thiserror 2.0.7", "time", "tracing", "url", @@ -3263,7 +3263,7 @@ dependencies = [ "ruma-macros", "serde", "serde_json", - "thiserror 2.0.6", + "thiserror 2.0.7", "tracing", "url", "web-time 1.1.0", @@ -3294,7 +3294,7 @@ version = "0.9.5" source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" dependencies = [ "js_int", - "thiserror 2.0.6", + "thiserror 2.0.7", ] [[package]] @@ -3343,7 +3343,7 @@ dependencies = [ "http", "http-auth", "ruma-common", - "thiserror 2.0.6", + 
"thiserror 2.0.7", "tracing", ] @@ -3360,7 +3360,7 @@ dependencies = [ "serde_json", "sha2", "subslice", - "thiserror 2.0.6", + "thiserror 2.0.7", ] [[package]] @@ -3374,7 +3374,7 @@ dependencies = [ "ruma-events", "serde", "serde_json", - "thiserror 2.0.6", + "thiserror 2.0.7", "tracing", ] @@ -3526,7 +3526,7 @@ dependencies = [ "futures-util", "pin-project", "thingbuf", - "thiserror 2.0.6", + "thiserror 2.0.7", "unicode-segmentation", "unicode-width 0.2.0", ] @@ -4111,11 +4111,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "93605438cbd668185516ab499d589afb7ee1859ea3d5fc8f6b0755e1c7443767" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.7", ] [[package]] @@ -4131,9 +4131,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "e1d8749b4531af2117677a5fcd12b1348a3fe2b81e36e61ffeac5c4aa3273e36" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 8da6728c..2d99db02 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ rust-version = "1.83.0" version = "0.5.0" [workspace.metadata.crane] -name = "conduit" +name = "conduwuit" [workspace.dependencies.arrayvec] version = "0.7.4" @@ -211,7 +211,7 @@ default-features = false version = "2.0.1" default-features = false -# used for conduit's CLI and admin room command parsing +# used for conduwuit's CLI and admin room command parsing [workspace.dependencies.clap] version = "4.5.23" default-features = false @@ -297,9 +297,9 @@ features = ["env", "toml"] version = "0.24.2" default-features = false -# Used for conduit::Error type +# Used for conduwuit::Error type 
[workspace.dependencies.thiserror] -version = "2.0.6" +version = "2.0.7" default-features = false # Used when hashing the state @@ -523,38 +523,38 @@ rev = "deaeb0694e2083f53d363b648da06e10fc13900c" # Our crates # -[workspace.dependencies.conduit-router] -package = "conduit_router" +[workspace.dependencies.conduwuit-router] +package = "conduwuit_router" path = "src/router" default-features = false -[workspace.dependencies.conduit-admin] -package = "conduit_admin" +[workspace.dependencies.conduwuit-admin] +package = "conduwuit_admin" path = "src/admin" default-features = false -[workspace.dependencies.conduit-api] -package = "conduit_api" +[workspace.dependencies.conduwuit-api] +package = "conduwuit_api" path = "src/api" default-features = false -[workspace.dependencies.conduit-service] -package = "conduit_service" +[workspace.dependencies.conduwuit-service] +package = "conduwuit_service" path = "src/service" default-features = false -[workspace.dependencies.conduit-database] -package = "conduit_database" +[workspace.dependencies.conduwuit-database] +package = "conduwuit_database" path = "src/database" default-features = false -[workspace.dependencies.conduit-core] -package = "conduit_core" +[workspace.dependencies.conduwuit-core] +package = "conduwuit_core" path = "src/core" default-features = false -[workspace.dependencies.conduit-macros] -package = "conduit_macros" +[workspace.dependencies.conduwuit-macros] +package = "conduwuit_macros" path = "src/macros" default-features = false @@ -613,7 +613,7 @@ codegen-units = 32 # '-Clink-arg=-Wl,--no-gc-sections', #] -[profile.release-max-perf.package.conduit_macros] +[profile.release-max-perf.package.conduwuit_macros] inherits = "release-max-perf.build-override" #rustflags = [ # '-Crelocation-model=pic', @@ -647,7 +647,7 @@ panic = "unwind" debug-assertions = true incremental = true #rustflags = [ -# '--cfg', 'conduit_mods', +# '--cfg', 'conduwuit_mods', # '-Ztime-passes', # '-Zmir-opt-level=0', # '-Zvalidate-mir=false', 
@@ -664,11 +664,11 @@ incremental = true # '-Clink-arg=-Wl,-z,lazy', #] -[profile.dev.package.conduit_core] +[profile.dev.package.conduwuit_core] inherits = "dev" incremental = false #rustflags = [ -# '--cfg', 'conduit_mods', +# '--cfg', 'conduwuit_mods', # '-Ztime-passes', # '-Zmir-opt-level=0', # '-Ztls-model=initial-exec', @@ -685,10 +685,10 @@ incremental = false # '-Clink-arg=-Wl,-z,nodelete', #] -[profile.dev.package.conduit] +[profile.dev.package.conduwuit] inherits = "dev" #rustflags = [ -# '--cfg', 'conduit_mods', +# '--cfg', 'conduwuit_mods', # '-Ztime-passes', # '-Zmir-opt-level=0', # '-Zvalidate-mir=false', @@ -710,7 +710,7 @@ incremental = false codegen-units = 1 opt-level = 'z' #rustflags = [ -# '--cfg', 'conduit_mods', +# '--cfg', 'conduwuit_mods', # '-Ztls-model=initial-exec', # '-Cprefer-dynamic=true', # '-Zstaticlib-prefer-dynamic=true', @@ -731,7 +731,7 @@ incremental = false codegen-units = 1 opt-level = 'z' #rustflags = [ -# '--cfg', 'conduit_mods', +# '--cfg', 'conduwuit_mods', # '-Ztls-model=global-dynamic', # '-Cprefer-dynamic=true', # '-Zstaticlib-prefer-dynamic=true', diff --git a/README.md b/README.md index 4faf1ad7..e6ad1981 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,12 @@ is no harm or additional steps required for using conduwuit. See the [Migrating from Conduit](https://conduwuit.puppyirl.gay/deploying/generic.html#migrating-from-conduit) section on the generic deploying guide. +Note that as of conduwuit version 0.5.0, backwards compatibility with Conduit is +no longer supported. We only support migrating *from* Conduit, not back to +Conduit like before. If you are truly finding yourself wanting to migrate back +to Conduit, we would appreciate all your feedback and if we can assist with +any issues or concerns. 
+ diff --git a/clippy.toml b/clippy.toml index b93b2377..3a0aa695 100644 --- a/clippy.toml +++ b/clippy.toml @@ -7,9 +7,9 @@ too-many-lines-threshold = 700 # TODO reduce me to <= 100 type-complexity-threshold = 250 # reduce me to ~200 disallowed-macros = [ - { path = "log::error", reason = "use conduit_core::error" }, - { path = "log::warn", reason = "use conduit_core::warn" }, - { path = "log::info", reason = "use conduit_core::info" }, - { path = "log::debug", reason = "use conduit_core::debug" }, - { path = "log::trace", reason = "use conduit_core::trace" }, + { path = "log::error", reason = "use conduwuit_core::error" }, + { path = "log::warn", reason = "use conduwuit_core::warn" }, + { path = "log::info", reason = "use conduwuit_core::info" }, + { path = "log::debug", reason = "use conduwuit_core::debug" }, + { path = "log::trace", reason = "use conduwuit_core::trace" }, ] diff --git a/deps/rust-rocksdb/lib.rs b/deps/rust-rocksdb/lib.rs index 0551991a..8dbbda98 100644 --- a/deps/rust-rocksdb/lib.rs +++ b/deps/rust-rocksdb/lib.rs @@ -1,7 +1,7 @@ pub use rust_rocksdb::*; -#[cfg_attr(not(conduit_mods), link(name = "rocksdb"))] -#[cfg_attr(conduit_mods, link(name = "rocksdb", kind = "static"))] +#[cfg_attr(not(conduwuit_mods), link(name = "rocksdb"))] +#[cfg_attr(conduwuit_mods, link(name = "rocksdb", kind = "static"))] unsafe extern "C" { pub unsafe fn rocksdb_list_column_families(); pub unsafe fn rocksdb_logger_create_stderr_logger(); diff --git a/docs/configuration.md b/docs/configuration.md index f4f7f4c7..0c670210 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -42,7 +42,7 @@ The syntax of this is a standard admin command without the prefix such as An example output of a success is: ``` -INFO conduit_service::admin::startup: Startup command #0 completed: +INFO conduwuit_service::admin::startup: Startup command #0 completed: Created user with user_id: @june:girlboss.ceo and password: `` ``` diff --git a/docs/deploying/docker.md 
b/docs/deploying/docker.md index fffa7770..d0aa13b3 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -41,7 +41,7 @@ docker run -d -p 8448:6167 \ -v db:/var/lib/conduwuit/ \ -e CONDUWUIT_SERVER_NAME="your.server.name" \ -e CONDUWUIT_ALLOW_REGISTRATION=false \ - --name conduit $LINK + --name conduwuit $LINK ``` or you can use [docker compose](#docker-compose). diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 70d4b561..398ba67f 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -26,7 +26,7 @@ allows easy cross-compilation. You can run the `nix build -L .#static-x86_64-linux-musl-all-features` or `nix build -L .#static-aarch64-linux-musl-all-features` commands based on architecture to cross-compile the necessary static binary located at -`result/bin/conduit`. This is reproducible with the static binaries produced in our CI. +`result/bin/conduwuit`. This is reproducible with the static binaries produced in our CI. Otherwise, follow standard Rust project build guides (installing git and cloning the repo, getting the Rust toolchain via rustup, installing LLVM toolchain + @@ -38,6 +38,12 @@ As mentioned in the README, there is little to no steps needed to migrate from Conduit. As long as you are using the RocksDB database backend, just replace the binary / container image / etc. +**WARNING**: As of conduwuit 0.5.0, all database and backwards compatibility +with Conduit is no longer supported. We only support migrating *from* Conduit, +not back to Conduit like before. If you are truly finding yourself wanting to +migrate back to Conduit, we would appreciate all your feedback and if we can +assist with any issues or concerns. + **Note**: If you are relying on Conduit's "automatic delegation" feature, this will **NOT** work on conduwuit and you must configure delegation manually. This is not a mistake and no support for this feature will be added. 
diff --git a/docs/development.md b/docs/development.md index 28b07667..fa7519c0 100644 --- a/docs/development.md +++ b/docs/development.md @@ -52,7 +52,7 @@ the said workspace crate(s) must define the feature there in its `Cargo.toml`. So, if this is adding a feature to the API such as `woof`, you define the feature in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition in `main`'s -`Cargo.toml` will be `woof = ["conduit-api/woof"]`. +`Cargo.toml` will be `woof = ["conduwuit-api/woof"]`. The rationale for this is due to Rust / Cargo not supporting ["workspace level features"][9], we must make a choice of; either scattering diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml index f5cab449..3f8fbf79 100644 --- a/src/admin/Cargo.toml +++ b/src/admin/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "conduit_admin" +name = "conduwuit_admin" categories.workspace = true description.workspace = true edition.workspace = true @@ -27,11 +27,11 @@ release_max_log_level = [ [dependencies] clap.workspace = true -conduit-api.workspace = true -conduit-core.workspace = true -conduit-database.workspace = true -conduit-macros.workspace = true -conduit-service.workspace = true +conduwuit-api.workspace = true +conduwuit-core.workspace = true +conduwuit-database.workspace = true +conduwuit-macros.workspace = true +conduwuit-service.workspace = true const-str.workspace = true futures.workspace = true log.workspace = true diff --git a/src/admin/admin.rs b/src/admin/admin.rs index d1d8d394..7b707446 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -1,5 +1,5 @@ use clap::Parser; -use conduit::Result; +use conduwuit::Result; use ruma::events::room::message::RoomMessageEventContent; use crate::{ @@ -9,7 +9,7 @@ use crate::{ }; #[derive(Debug, Parser)] -#[command(name = "conduwuit", version = conduit::version())] +#[command(name = "conduwuit", version = conduwuit::version())] pub(super) enum AdminCommand { #[command(subcommand)] /// - Commands for managing 
appservices diff --git a/src/admin/appservice/mod.rs b/src/admin/appservice/mod.rs index ca5f46bb..2e0694aa 100644 --- a/src/admin/appservice/mod.rs +++ b/src/admin/appservice/mod.rs @@ -1,7 +1,7 @@ mod commands; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use crate::admin_command_dispatch; diff --git a/src/admin/check/commands.rs b/src/admin/check/commands.rs index 88fca462..ecb18c1a 100644 --- a/src/admin/check/commands.rs +++ b/src/admin/check/commands.rs @@ -1,5 +1,5 @@ -use conduit::Result; -use conduit_macros::implement; +use conduwuit::Result; +use conduwuit_macros::implement; use futures::StreamExt; use ruma::events::room::message::RoomMessageEventContent; diff --git a/src/admin/check/mod.rs b/src/admin/check/mod.rs index e543e5b5..44b1ddf2 100644 --- a/src/admin/check/mod.rs +++ b/src/admin/check/mod.rs @@ -1,7 +1,7 @@ mod commands; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::events::room::message::RoomMessageEventContent; use crate::Command; diff --git a/src/admin/command.rs b/src/admin/command.rs index c594736d..5277b976 100644 --- a/src/admin/command.rs +++ b/src/admin/command.rs @@ -1,6 +1,6 @@ use std::time::SystemTime; -use conduit_service::Services; +use conduwuit_service::Services; use ruma::EventId; pub(crate) struct Command<'a> { diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index a17d0800..9d77a1de 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,7 @@ use std::{ time::{Instant, SystemTime}, }; -use conduit::{debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result}; +use conduwuit::{debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result}; use futures::{FutureExt, StreamExt}; use ruma::{ api::{client::error::ErrorKind, federation::event::get_room_state}, @@ -754,7 +754,7 @@ pub(super) async fn resolve_true_destination( #[admin_command] pub(super) async fn 
memory_stats(&self) -> Result { - let html_body = conduit::alloc::memory_stats(); + let html_body = conduwuit::alloc::memory_stats(); if html_body.is_none() { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index b74e9c36..c87dbb0a 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -2,7 +2,7 @@ mod commands; pub(crate) mod tester; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName}; use self::tester::TesterCommand; diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index c11f893e..5f922ece 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,4 +1,4 @@ -use conduit::Err; +use conduwuit::Err; use ruma::events::room::message::RoomMessageEventContent; use crate::{admin_command, admin_command_dispatch, Result}; diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 0c9df433..e0c1598d 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId}; diff --git a/src/admin/federation/mod.rs b/src/admin/federation/mod.rs index 8f5d3fae..3adfd459 100644 --- a/src/admin/federation/mod.rs +++ b/src/admin/federation/mod.rs @@ -1,7 +1,7 @@ mod commands; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::{RoomId, ServerName, UserId}; use crate::admin_command_dispatch; diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index 82ac162e..977ecdae 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,7 +1,7 @@ use std::time::Duration; -use conduit::{debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result}; -use 
conduit_service::media::Dim; +use conduwuit::{debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result}; +use conduwuit_service::media::Dim; use ruma::{ events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, }; diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index fbf6532b..046be556 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -1,7 +1,7 @@ mod commands; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName}; use crate::admin_command_dispatch; diff --git a/src/admin/mod.rs b/src/admin/mod.rs index 83db18fa..ac51104a 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -18,12 +18,12 @@ pub(crate) mod room; pub(crate) mod server; pub(crate) mod user; -extern crate conduit_api as api; -extern crate conduit_core as conduit; -extern crate conduit_service as service; +extern crate conduwuit_api as api; +extern crate conduwuit_core as conduwuit; +extern crate conduwuit_service as service; -pub(crate) use conduit::Result; -pub(crate) use conduit_macros::{admin_command, admin_command_dispatch}; +pub(crate) use conduwuit::Result; +pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch}; pub(crate) use crate::{ command::Command, @@ -32,9 +32,9 @@ pub(crate) use crate::{ pub(crate) const PAGE_SIZE: usize = 100; -conduit::mod_ctor! {} -conduit::mod_dtor! {} -conduit::rustc_flags_capture! {} +conduwuit::mod_ctor! {} +conduwuit::mod_dtor! {} +conduwuit::rustc_flags_capture! 
{} /// Install the admin command processor pub async fn init(admin_service: &service::admin::Service) { diff --git a/src/admin/processor.rs b/src/admin/processor.rs index caaa7d2d..e041a889 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -6,7 +6,7 @@ use std::{ }; use clap::{CommandFactory, Parser}; -use conduit::{ +use conduwuit::{ debug, error, log::{ capture, diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index 91217334..53ed64dd 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 02e89e7a..6bf9b9ad 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::events::room::message::RoomMessageEventContent; use crate::Command; diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 837d34e6..68f68648 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::{events::room::message::RoomMessageEventContent, ServerName}; use crate::Command; diff --git a/src/admin/query/mod.rs b/src/admin/query/mod.rs index 1f0f5505..b1849091 100644 --- a/src/admin/query/mod.rs +++ b/src/admin/query/mod.rs @@ -10,7 +10,7 @@ mod sending; mod users; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use self::{ account_data::AccountDataCommand, appservice::AppserviceCommand, globals::GlobalsCommand, diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 0963429e..73e33bf6 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -1,5 +1,5 @@ use 
clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, UserId}; diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index a1bd32f9..3004af8b 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::{events::room::message::RoomMessageEventContent, UserId}; use crate::Command; diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index e8340dad..43c63770 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,7 @@ use std::fmt::Write; use clap::Subcommand; -use conduit::{utils::time, Result}; +use conduwuit::{utils::time, Result}; use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs index 382e4a78..19a727e9 100644 --- a/src/admin/query/room_alias.rs +++ b/src/admin/query/room_alias.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId}; diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index e32517fb..6e84507a 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId}; diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index eaab1f5e..efb4275f 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use 
ruma::{events::room::message::RoomMessageEventContent, ServerName, UserId}; use service::sending::Destination; diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 0792e484..d58e2d2a 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use futures::stream::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId}; diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 1ccde47d..841bc51a 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -1,7 +1,7 @@ use std::fmt::Write; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index 35e40c8b..d2fdbfca 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,4 +1,4 @@ -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 0bdaf56d..d3ed2575 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId}; diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 13a74a9d..6a6ec695 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduit::{utils::ReadyExt, Result}; +use conduwuit::{utils::ReadyExt, Result}; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId}; diff --git a/src/admin/room/mod.rs b/src/admin/room/mod.rs index 
8c6cbeaa..ad05d16d 100644 --- a/src/admin/room/mod.rs +++ b/src/admin/room/mod.rs @@ -5,7 +5,7 @@ mod info; mod moderation; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::OwnedRoomId; use self::{ diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index cfc048bd..427f3e42 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,6 +1,6 @@ use api::client::leave_room; use clap::Subcommand; -use conduit::{ +use conduwuit::{ debug, error, info, utils::{IterStream, ReadyExt}, warn, Result, diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 94f695ce..676f2d33 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,6 @@ use std::{fmt::Write, sync::Arc}; -use conduit::{info, utils::time, warn, Err, Result}; +use conduwuit::{info, utils::time, warn, Err, Result}; use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; @@ -73,7 +73,7 @@ pub(super) async fn list_features( pub(super) async fn memory_usage(&self) -> Result { let services_usage = self.services.memory_usage().await?; let database_usage = self.services.db.db.memory_usage()?; - let allocator_usage = conduit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}")); + let allocator_usage = conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}")); Ok(RoomMessageEventContent::text_plain(format!( "Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}", @@ -143,7 +143,7 @@ pub(super) async fn reload_mods(&self) -> Result { #[admin_command] #[cfg(unix)] pub(super) async fn restart(&self, force: bool) -> Result { - use conduit::utils::sys::current_exe_deleted; + use conduwuit::utils::sys::current_exe_deleted; if !force && current_exe_deleted() { return Err!( diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 222c537a..69ad7953 100644 --- a/src/admin/server/mod.rs +++ 
b/src/admin/server/mod.rs @@ -1,7 +1,7 @@ mod commands; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use crate::admin_command_dispatch; diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 61096625..017c6239 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -1,12 +1,12 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; -use conduit::{ +use conduwuit::{ debug_warn, error, info, is_equal_to, utils::{self, ReadyExt}, warn, PduBuilder, Result, }; -use conduit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; +use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ events::{ diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index 649cdfb8..60db6de8 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -1,7 +1,7 @@ mod commands; use clap::Subcommand; -use conduit::Result; +use conduwuit::Result; use ruma::{EventId, OwnedRoomOrAliasId, RoomId}; use crate::admin_command_dispatch; diff --git a/src/admin/utils.rs b/src/admin/utils.rs index ba98bbea..8c67a0ca 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -1,4 +1,4 @@ -use conduit_core::{err, Err, Result}; +use conduwuit_core::{err, Err, Result}; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use service::Services; diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index a0fc09de..f3a84664 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "conduit_api" +name = "conduwuit_api" categories.workspace = true description.workspace = true edition.workspace = true @@ -41,9 +41,9 @@ axum-extra.workspace = true axum.workspace = true base64.workspace = true bytes.workspace = true -conduit-core.workspace = true -conduit-database.workspace = true -conduit-service.workspace = true +conduwuit-core.workspace = true 
+conduwuit-database.workspace = true +conduwuit-service.workspace = true const-str.workspace = true futures.workspace = true hmac.workspace = true diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 3595f581..71e18fd3 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -2,7 +2,7 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result}; +use conduwuit::{debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result}; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 86ef41ef..2fc78808 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{err, Err}; +use conduwuit::{err, Err}; use ruma::{ api::client::config::{ get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 83f3291d..c60a2f4c 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{debug, Err, Result}; +use conduwuit::{debug, Err, Result}; use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ diff --git a/src/api/client/appservice.rs b/src/api/client/appservice.rs index d299185e..9dbd141e 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{err, Err, Result}; +use conduwuit::{err, Err, Result}; use ruma::api::{appservice::ping, client::appservice::request_ping}; use crate::Ruma; diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index f435e086..9e4746ca 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ 
-1,5 +1,5 @@ use axum::extract::State; -use conduit::{err, Err}; +use conduwuit::{err, Err}; use ruma::{ api::client::backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 45c04eb6..52f27692 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,7 +1,7 @@ use std::iter::once; use axum::extract::State; -use conduit::{ +use conduwuit::{ at, err, ref_at, utils::{ future::TryExtExt, diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 7e56f27e..63f0c210 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,6 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{err, Err}; +use conduwuit::{err, Err}; use futures::StreamExt; use ruma::{ api::client::{ diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 6120c7b3..9407a0bd 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,6 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{info, warn, Err, Error, Result}; +use conduwuit::{info, warn, Err, Error, Result}; use futures::{StreamExt, TryFutureExt}; use ruma::{ api::{ diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs index 2a8ebb9c..a1576e8e 100644 --- a/src/api/client/filter.rs +++ b/src/api/client/filter.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::err; +use conduwuit::err; use ruma::api::client::filter::{create_filter, get_filter}; use crate::{Result, Ruma}; diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 18f7d21c..13dc3467 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduit::{err, utils, Error, Result}; +use conduwuit::{err, utils, Error, Result}; use futures::{stream::FuturesUnordered, StreamExt}; use 
ruma::{ api::{ diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 67a2bbdf..a9792c3b 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -2,12 +2,12 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{ +use conduwuit::{ err, utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize}, Err, Result, }; -use conduit_service::{ +use conduwuit_service::{ media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, MXC_LENGTH}, Services, }; diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index f6837462..a8b366ed 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -2,12 +2,12 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{ +use conduwuit::{ err, utils::{content_disposition::make_content_disposition, math::ruma_from_usize}, Err, Result, }; -use conduit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN}; +use conduwuit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN}; use reqwest::Url; use ruma::{ api::client::media::{ diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index a61011b0..02166271 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{ +use conduwuit::{ debug, debug_info, debug_warn, err, error, info, pdu::{self, gen_event_id_canonical_json, PduBuilder}, result::FlatOk, @@ -1457,7 +1457,7 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { } pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { - //use conduit::utils::stream::OptionStream; + //use conduwuit::utils::stream::OptionStream; use futures::TryFutureExt; // Ask a remote server if we don't have this room diff 
--git a/src/api/client/message.rs b/src/api/client/message.rs index 242c1681..32e0b1e3 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,7 +1,7 @@ use std::collections::HashSet; use axum::extract::State; -use conduit::{ +use conduwuit::{ at, is_equal_to, utils::{ result::{FlatOk, LogErr}, diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 3e4c6ca8..b8bbfb91 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -1,7 +1,7 @@ use std::time::Duration; use axum::extract::State; -use conduit::utils; +use conduwuit::utils; use ruma::{ api::client::{account, error::ErrorKind}, authentication::TokenType, diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 97ea21ea..ea74cdaf 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::{ +use conduwuit::{ pdu::PduBuilder, utils::{stream::TryIgnore, IterStream}, warn, Err, Error, Result, diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 7efa59e8..e290c952 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{err, Err}; +use conduwuit::{err, Err}; use ruma::{ api::client::{ error::ErrorKind, diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index f6123614..d7f24101 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::{err, Err, PduCount}; +use conduwuit::{err, Err, PduCount}; use ruma::{ api::client::{read_marker::set_read_marker, receipt::create_receipt}, events::{ diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index de54c4e4..c47bc60f 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{ +use conduwuit::{ at, 
utils::{result::FlatOk, stream::WidebandExt, IterStream, ReadyExt}, PduCount, Result, diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 31667323..429cd033 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,7 +2,7 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{info, utils::ReadyExt, Err}; +use conduwuit::{info, utils::ReadyExt, Err}; use rand::Rng; use ruma::{ api::client::{ diff --git a/src/api/client/room/aliases.rs b/src/api/client/room/aliases.rs index e530b260..f500e9c7 100644 --- a/src/api/client/room/aliases.rs +++ b/src/api/client/room/aliases.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{Error, Result}; +use conduwuit::{Error, Result}; use futures::StreamExt; use ruma::api::client::{error::ErrorKind, room::aliases}; diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 2e9852f8..b98f1dab 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::{debug_info, debug_warn, error, info, pdu::PduBuilder, warn, Err, Error, Result}; +use conduwuit::{debug_info, debug_warn, error, info, pdu::PduBuilder, warn, Err, Error, Result}; use futures::FutureExt; use ruma::{ api::client::{ diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index 6deb567f..ff9524e6 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{err, Err, Event, Result}; +use conduwuit::{err, Err, Event, Result}; use futures::{try_join, FutureExt, TryFutureExt}; use ruma::api::client::room::get_room_event; diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 16b3a53b..8b2e45df 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use 
conduit::{at, utils::BoolExt, Err, Result}; +use conduwuit::{at, utils::BoolExt, Err, Result}; use futures::StreamExt; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index fafce2d1..401bf800 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,7 +1,7 @@ use std::cmp::max; use axum::extract::State; -use conduit::{err, info, pdu::PduBuilder, Error, Result}; +use conduwuit::{err, info, pdu::PduBuilder, Error, Result}; use futures::StreamExt; use ruma::{ api::client::{error::ErrorKind, room::upgrade_room}, diff --git a/src/api/client/search.rs b/src/api/client/search.rs index 38468abb..28a8891c 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::{ +use conduwuit::{ at, is_true, result::FlatOk, utils::{stream::ReadyExt, IterStream}, diff --git a/src/api/client/send.rs b/src/api/client/send.rs index ff011efa..e909ebbf 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduit::{err, Err}; +use conduwuit::{err, Err}; use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; use serde_json::from_str; diff --git a/src/api/client/session.rs b/src/api/client/session.rs index e889a867..e30c94d7 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -1,6 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::{debug, err, info, utils::ReadyExt, warn, Err}; +use conduwuit::{debug, err, info, utils::ReadyExt, warn, Err}; use futures::StreamExt; use ruma::{ api::client::{ diff --git a/src/api/client/state.rs b/src/api/client/state.rs index aeabd606..881eca98 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use 
axum::extract::State; -use conduit::{err, pdu::PduBuilder, utils::BoolExt, Err, Error, PduEvent, Result}; +use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, Error, PduEvent, Result}; use ruma::{ api::client::{ error::ErrorKind, diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 6f791860..2b8d478c 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -1,7 +1,7 @@ mod v3; mod v4; -use conduit::{ +use conduwuit::{ utils::stream::{BroadbandExt, ReadyExt}, PduCount, }; diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 815ec87c..c5ec3886 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -5,7 +5,7 @@ use std::{ }; use axum::extract::State; -use conduit::{ +use conduwuit::{ at, err, error, extract_variant, is_equal_to, is_false, pdu::EventHash, result::{FlatOk, LogDebugErr}, @@ -18,7 +18,7 @@ use conduit::{ }, Error, PduCount, PduEvent, Result, }; -use conduit_service::{ +use conduwuit_service::{ rooms::short::{ShortStateHash, ShortStateKey}, Services, }; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index fddb81bf..140b647d 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -5,7 +5,7 @@ use std::{ }; use axum::extract::State; -use conduit::{ +use conduwuit::{ debug, error, extract_variant, utils::{ math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 906f779d..07badaf7 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::{at, PduCount, PduEvent}; +use conduwuit::{at, PduCount, PduEvent}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 2b37a9ec..aab59394 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -1,7 +1,7 @@ use 
std::collections::BTreeMap; use axum::extract::State; -use conduit::{Error, Result}; +use conduwuit::{Error, Result}; use futures::StreamExt; use ruma::{ api::{ diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 3660d674..c546d6a7 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduit::Err; +use conduwuit::Err; use futures::StreamExt; use ruma::{ api::{ diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index ed3ce37a..91fe5a7c 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -64,8 +64,8 @@ pub(crate) async fn get_supported_versions_route( /// `/_matrix/federation/v1/version` pub(crate) async fn conduwuit_server_version() -> Result { Ok(Json(serde_json::json!({ - "name": conduit::version::name(), - "version": conduit::version::version(), + "name": conduwuit::version::name(), + "version": conduwuit::version::version(), }))) } diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index feb48346..bc7460f0 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduit::utils::TryFutureExtExt; +use conduwuit::utils::TryFutureExtExt; use futures::{pin_mut, StreamExt}; use ruma::{ api::client::user_directory::search_users, diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index 7ee4571f..b41cc8a1 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -2,7 +2,7 @@ use std::time::{Duration, SystemTime}; use axum::extract::State; use base64::{engine::general_purpose, Engine as _}; -use conduit::{utils, Err}; +use conduwuit::{utils, Err}; use hmac::{Hmac, Mac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch, UserId}; use sha1::Sha1; diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs 
index 674c9bb0..b66217e8 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -100,6 +100,6 @@ pub(crate) async fn syncv3_client_server_json(State(services): State Result { Ok(get_server_version::v1::Response { server: Some(get_server_version::v1::Server { - name: Some(conduit::version::name().into()), - version: Some(conduit::version::version().into()), + name: Some(conduwuit::version::name().into()), + version: Some(conduwuit::version::version().into()), }), }) } diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index b93f9a77..27c6da52 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "conduit_core" +name = "conduwuit_core" categories.workspace = true description.workspace = true edition.workspace = true @@ -62,7 +62,7 @@ cargo_toml.workspace = true checked_ops.workspace = true chrono.workspace = true clap.workspace = true -conduit-macros.workspace = true +conduwuit-macros.workspace = true const-str.workspace = true ctor.workspace = true cyborgtime.workspace = true diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 413aa7f4..a34514de 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -8,7 +8,7 @@ use std::{ path::PathBuf, }; -use conduit_macros::config_example_generator; +use conduwuit_macros::config_example_generator; use either::{ Either, Either::{Left, Right}, diff --git a/src/core/debug.rs b/src/core/debug.rs index f7420784..e1980234 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -3,7 +3,7 @@ use std::{any::Any, panic}; // Export debug proc_macros -pub use conduit_macros::recursion_depth; +pub use conduwuit_macros::recursion_depth; // Export all of the ancillary tools from here as well. 
pub use crate::{result::DebugInspect, utils::debug::*}; diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs index 012a08e0..735ccee6 100644 --- a/src/core/info/cargo.rs +++ b/src/core/info/cargo.rs @@ -6,7 +6,7 @@ use std::sync::OnceLock; use cargo_toml::{DepsSet, Manifest}; -use conduit_macros::cargo_manifest; +use conduwuit_macros::cargo_manifest; use crate::Result; diff --git a/src/core/info/mod.rs b/src/core/info/mod.rs index 22ea7d3d..ca39b348 100644 --- a/src/core/info/mod.rs +++ b/src/core/info/mod.rs @@ -6,7 +6,7 @@ pub mod room_version; pub mod rustc; pub mod version; -pub use conduit_macros::rustc_flags_capture; +pub use conduwuit_macros::rustc_flags_capture; pub const MODULE_ROOT: &str = const_str::split!(std::module_path!(), "::")[0]; pub const CRATE_PREFIX: &str = const_str::split!(MODULE_ROOT, '_')[0]; diff --git a/src/core/mod.rs b/src/core/mod.rs index 4ab84730..87cb58ae 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -21,11 +21,11 @@ pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; -pub use crate as conduit_core; +pub use crate as conduwuit_core; rustc_flags_capture! {} -#[cfg(not(conduit_mods))] +#[cfg(not(conduwuit_mods))] pub mod mods { #[macro_export] macro_rules! 
mod_ctor { diff --git a/src/core/mods/mod.rs b/src/core/mods/mod.rs index 118bfc29..ac0c333b 100644 --- a/src/core/mods/mod.rs +++ b/src/core/mods/mod.rs @@ -1,4 +1,4 @@ -#![cfg(conduit_mods)] +#![cfg(conduwuit_mods)] pub(crate) use libloading::os::unix::{Library, Symbol}; diff --git a/src/core/server.rs b/src/core/server.rs index 627e125d..1e721517 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -56,7 +56,7 @@ impl Server { } pub fn reload(&self) -> Result<()> { - if cfg!(not(conduit_mods)) { + if cfg!(not(conduwuit_mods)) { return Err!("Reloading not enabled"); } diff --git a/src/core/utils/debug.rs b/src/core/utils/debug.rs index e4151f39..cb9d7bab 100644 --- a/src/core/utils/debug.rs +++ b/src/core/utils/debug.rs @@ -25,7 +25,7 @@ impl fmt::Debug for TruncatedSlice<'_, T> { /// See [`TruncatedSlice`]. Useful for `#[instrument]`: /// /// ``` -/// use conduit_core::utils::debug::slice_truncated; +/// use conduwuit_core::utils::debug::slice_truncated; /// /// #[tracing::instrument(fields(foos = slice_truncated(foos, 42)))] /// fn bar(foos: &[&str]); diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index f9310243..ad53bc42 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -19,7 +19,7 @@ pub mod sys; mod tests; pub mod time; -pub use ::conduit_macros::implement; +pub use ::conduwuit_macros::implement; pub use ::ctor::{ctor, dtor}; pub use self::{ diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index e65a3369..6baa9c35 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -84,7 +84,7 @@ where /// Find the common prefix from a collection of strings and return a slice /// ``` -/// use conduit_core::utils::string::common_prefix; +/// use conduwuit_core::utils::string::common_prefix; /// let input = ["conduwuit", "conduit", "construct"]; /// common_prefix(&input) == "con"; /// ``` diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 1deaf980..efd18a1a 100644 ---
a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "conduit_database" +name = "conduwuit_database" categories.workspace = true description.workspace = true edition.workspace = true @@ -37,7 +37,7 @@ zstd_compression = [ [dependencies] arrayvec.workspace = true async-channel.workspace = true -conduit-core.workspace = true +conduwuit-core.workspace = true const-str.workspace = true futures.workspace = true log.workspace = true diff --git a/src/database/database.rs b/src/database/database.rs index 3df95dce..bd8dcb1a 100644 --- a/src/database/database.rs +++ b/src/database/database.rs @@ -1,6 +1,6 @@ use std::{ops::Index, sync::Arc}; -use conduit::{err, Result, Server}; +use conduwuit::{err, Result, Server}; use crate::{ maps, diff --git a/src/database/de.rs b/src/database/de.rs index d303eab2..ac8c021f 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,5 +1,5 @@ use arrayvec::ArrayVec; -use conduit::{checked, debug::DebugInspect, err, utils::string, Error, Result}; +use conduwuit::{checked, debug::DebugInspect, err, utils::string, Error, Result}; use serde::{ de, de::{DeserializeSeed, Visitor}, @@ -331,7 +331,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { fn deserialize_any>(self, visitor: V) -> Result { debug_assert_eq!( - conduit::debug::type_name::(), + conduwuit::debug::type_name::(), "serde_json::value::de::::deserialize::ValueVisitor", "deserialize_any: type not expected" diff --git a/src/database/deserialized.rs b/src/database/deserialized.rs index a59b2ce5..66541b2a 100644 --- a/src/database/deserialized.rs +++ b/src/database/deserialized.rs @@ -1,6 +1,6 @@ use std::convert::identity; -use conduit::Result; +use conduwuit::Result; use serde::Deserialize; pub trait Deserialized { diff --git a/src/database/engine.rs b/src/database/engine.rs index d3bb727e..72fe11e6 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -6,7 +6,7 @@ use std::{ sync::{atomic::AtomicU32, 
Arc, Mutex, RwLock}, }; -use conduit::{debug, error, info, utils::time::rfc2822_from_seconds, warn, Err, Result, Server}; +use conduwuit::{debug, error, info, utils::time::rfc2822_from_seconds, warn, Err, Result, Server}; use rocksdb::{ backup::{BackupEngine, BackupEngineOptions}, perf::get_memory_usage_stats, diff --git a/src/database/handle.rs b/src/database/handle.rs index 356bd859..b4d34b85 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -1,6 +1,6 @@ use std::{fmt, fmt::Debug, ops::Deref}; -use conduit::Result; +use conduwuit::Result; use rocksdb::DBPinnableSlice; use serde::{Deserialize, Serialize, Serializer}; diff --git a/src/database/keyval.rs b/src/database/keyval.rs index d4568600..056e53d1 100644 --- a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,4 +1,4 @@ -use conduit::Result; +use conduwuit::Result; use serde::{Deserialize, Serialize}; use smallvec::SmallVec; diff --git a/src/database/map.rs b/src/database/map.rs index 0f4d740a..09721b2a 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -27,7 +27,7 @@ use std::{ sync::Arc, }; -use conduit::Result; +use conduwuit::Result; use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, ReadTier, WriteOptions}; use crate::{watchers::Watchers, Engine}; diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index 7016b744..aa3ea676 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,7 +1,7 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; use arrayvec::ArrayVec; -use conduit::{ +use conduwuit::{ err, implement, utils::{future::TryExtExt, result::FlatOk}, Result, diff --git a/src/database/map/count.rs b/src/database/map/count.rs index 894fe12e..7b632bb3 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -1,6 +1,6 @@ use std::{fmt::Debug, future::Future, sync::Arc}; -use conduit::implement; +use conduwuit::implement; use futures::stream::StreamExt; use 
serde::Serialize; diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 79556656..88023b1c 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,7 +1,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; -use conduit::{err, implement, utils::result::MapExpect, Err, Result}; +use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; use futures::{Future, FutureExt}; use serde::Serialize; use tokio::task; diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index bb904943..5f444cce 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{err, implement, utils::IterStream, Result}; +use conduwuit::{err, implement, utils::IterStream, Result}; use futures::{Stream, StreamExt}; use serde::Serialize; diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 5b2e816c..68c305af 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -6,7 +6,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; use arrayvec::ArrayVec; -use conduit::implement; +use conduwuit::implement; use rocksdb::WriteBatchWithTransaction; use serde::Serialize; diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs index 3c4d491b..3ab5bacc 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -1,4 +1,4 @@ -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{Stream, StreamExt}; use serde::Deserialize; diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 40c608f2..2f6d2020 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use 
rocksdb::Direction; use serde::{Deserialize, Serialize}; diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 9122d78e..24112ac1 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{ future, stream::{Stream, StreamExt}, diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index 18080c64..ec37bbfe 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -1,7 +1,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; use arrayvec::ArrayVec; -use conduit::implement; +use conduwuit::implement; use serde::Serialize; use crate::{keyval::KeyBuf, ser, util::or_else}; diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 1e000a47..7eb4ce63 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -1,4 +1,4 @@ -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{Stream, StreamExt}; use serde::Deserialize; diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index b5565aa4..c895105c 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index 69dc54f2..a5ed35ec 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{ future, stream::{Stream, StreamExt}, diff 
--git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index 5f61cb08..81359800 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -1,4 +1,4 @@ -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::stream::{Stream, StreamExt}; use serde::Deserialize; diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 542a5ba8..2d6a76b7 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{ stream::{Stream, StreamExt}, FutureExt, TryFutureExt, TryStreamExt, diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index e5c2fbea..13aa40f2 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{ future, stream::{Stream, StreamExt}, diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index 67cfff1b..c2d9b6b8 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -1,4 +1,4 @@ -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::stream::{Stream, StreamExt}; use serde::Deserialize; diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 6468846f..c6fe439a 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{ stream::{Stream, StreamExt}, FutureExt, TryFutureExt, TryStreamExt, diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index 3c7bce2e..e7dad211 
100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -1,6 +1,6 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use futures::{ future, stream::{Stream, StreamExt}, diff --git a/src/database/maps.rs b/src/database/maps.rs index 9b8d326a..d69cc7fd 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use conduit::Result; +use conduwuit::Result; use crate::{Engine, Map}; diff --git a/src/database/mod.rs b/src/database/mod.rs index de060b3a..183cba8d 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -20,7 +20,7 @@ pub(crate) use self::{ util::{or_else, result}, }; -extern crate conduit_core as conduit; +extern crate conduwuit_core as conduwuit; extern crate rust_rocksdb as rocksdb; pub use self::{ @@ -33,6 +33,6 @@ pub use self::{ ser::{serialize, serialize_to, serialize_to_vec, Interfix, Json, Separator, SEP}, }; -conduit::mod_ctor! {} -conduit::mod_dtor! {} -conduit::rustc_flags_capture! {} +conduwuit::mod_ctor! {} +conduwuit::mod_dtor! {} +conduwuit::rustc_flags_capture! 
{} diff --git a/src/database/opts.rs b/src/database/opts.rs index b0b33927..52ed4818 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -1,6 +1,6 @@ use std::{cmp, collections::HashMap, convert::TryFrom}; -use conduit::{err, utils, Config, Result}; +use conduwuit::{err, utils, Config, Result}; use rocksdb::{ statistics::StatsLevel, BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, DBRecoveryMode, Env, LogLevel, LruCacheOptions, Options, UniversalCompactOptions, UniversalCompactionStopStyle, diff --git a/src/database/pool.rs b/src/database/pool.rs index 79755ea6..3301b821 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -7,7 +7,7 @@ use std::{ }; use async_channel::{bounded, Receiver, RecvError, Sender}; -use conduit::{debug, debug_warn, defer, err, implement, result::DebugInspect, Result, Server}; +use conduwuit::{debug, debug_warn, defer, err, implement, result::DebugInspect, Result, Server}; use futures::{channel::oneshot, TryFutureExt}; use oneshot::Sender as ResultSender; use rocksdb::Direction; diff --git a/src/database/ser.rs b/src/database/ser.rs index a60812aa..8abe5521 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,6 +1,6 @@ use std::io::Write; -use conduit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; +use conduwuit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; use serde::{ser, Serialize}; use crate::util::unhandled; diff --git a/src/database/stream.rs b/src/database/stream.rs index 38c46596..775fb930 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -5,7 +5,7 @@ mod keys_rev; use std::sync::Arc; -use conduit::{utils::exchange, Error, Result}; +use conduwuit::{utils::exchange, Error, Result}; use rocksdb::{ColumnFamily, DBRawIteratorWithThreadMode, ReadOptions}; pub(crate) use self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; diff --git a/src/database/stream/items.rs 
b/src/database/stream/items.rs index 77b08a0b..06cb6de9 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -1,6 +1,6 @@ use std::{convert, pin::Pin, sync::Arc}; -use conduit::Result; +use conduwuit::Result; use futures::{ stream::FusedStream, task::{Context, Poll}, diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index dfd3a107..2d0c4639 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -1,6 +1,6 @@ use std::{convert, pin::Pin, sync::Arc}; -use conduit::Result; +use conduwuit::Result; use futures::{ stream::FusedStream, task::{Context, Poll}, diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index 2ce88959..a901b342 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -1,6 +1,6 @@ use std::{convert, pin::Pin, sync::Arc}; -use conduit::Result; +use conduwuit::Result; use futures::{ stream::FusedStream, task::{Context, Poll}, diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index 12dae759..73758a85 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -1,6 +1,6 @@ use std::{convert, pin::Pin, sync::Arc}; -use conduit::Result; +use conduwuit::Result; use futures::{ stream::FusedStream, task::{Context, Poll}, diff --git a/src/database/tests.rs b/src/database/tests.rs index bfab99ef..7f486966 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -4,7 +4,7 @@ use std::fmt::Debug; use arrayvec::ArrayVec; -use conduit::ruma::{serde::Raw, RoomId, UserId}; +use conduwuit::ruma::{serde::Raw, RoomId, UserId}; use serde::Serialize; use crate::{ @@ -53,7 +53,7 @@ fn ser_overflow() { #[test] fn ser_complex() { - use conduit::ruma::Mxc; + use conduwuit::ruma::Mxc; #[derive(Debug, Serialize)] struct Dim { @@ -90,7 +90,7 @@ fn ser_complex() { #[test] fn ser_json() { - use conduit::ruma::api::client::filter::FilterDefinition; + use 
conduwuit::ruma::api::client::filter::FilterDefinition; let filter = FilterDefinition { event_fields: Some(vec!["content.body".to_owned()]), @@ -105,7 +105,7 @@ fn ser_json() { #[test] fn ser_json_value() { - use conduit::ruma::api::client::filter::FilterDefinition; + use conduwuit::ruma::api::client::filter::FilterDefinition; let filter = FilterDefinition { event_fields: Some(vec!["content.body".to_owned()]), @@ -146,7 +146,7 @@ fn ser_json_macro() { #[test] #[should_panic(expected = "serializing string at the top-level")] fn ser_json_raw() { - use conduit::ruma::api::client::filter::FilterDefinition; + use conduwuit::ruma::api::client::filter::FilterDefinition; let filter = FilterDefinition { event_fields: Some(vec!["content.body".to_owned()]), @@ -162,7 +162,7 @@ fn ser_json_raw() { #[test] #[should_panic(expected = "you can skip serialization instead")] fn ser_json_raw_json() { - use conduit::ruma::api::client::filter::FilterDefinition; + use conduwuit::ruma::api::client::filter::FilterDefinition; let filter = FilterDefinition { event_fields: Some(vec!["content.body".to_owned()]), diff --git a/src/database/util.rs b/src/database/util.rs index 21764361..0fca3b68 100644 --- a/src/database/util.rs +++ b/src/database/util.rs @@ -1,4 +1,4 @@ -use conduit::{err, Result}; +use conduwuit::{err, Result}; use rocksdb::{Direction, ErrorKind, IteratorMode}; //#[cfg(debug_assertions)] @@ -36,19 +36,19 @@ pub(crate) fn _into_direction(mode: &IteratorMode<'_>) -> Direction { } #[inline] -pub(crate) fn result(r: std::result::Result) -> Result { +pub(crate) fn result(r: std::result::Result) -> Result { r.map_or_else(or_else, and_then) } #[inline(always)] -pub(crate) fn and_then(t: T) -> Result { Ok(t) } +pub(crate) fn and_then(t: T) -> Result { Ok(t) } -pub(crate) fn or_else(e: rocksdb::Error) -> Result { Err(map_err(e)) } +pub(crate) fn or_else(e: rocksdb::Error) -> Result { Err(map_err(e)) } #[inline] pub(crate) fn is_incomplete(e: &rocksdb::Error) -> bool { e.kind() == 
ErrorKind::Incomplete } -pub(crate) fn map_err(e: rocksdb::Error) -> conduit::Error { +pub(crate) fn map_err(e: rocksdb::Error) -> conduwuit::Error { let string = e.into_string(); err!(Database(error!("{string}"))) } diff --git a/src/macros/Cargo.toml b/src/macros/Cargo.toml index 9e866578..167de8c0 100644 --- a/src/macros/Cargo.toml +++ b/src/macros/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "conduit_macros" +name = "conduwuit_macros" categories.workspace = true description.workspace = true edition.workspace = true @@ -10,7 +10,7 @@ repository.workspace = true version.workspace = true [lib] -name = "conduit_macros" +name = "conduwuit_macros" path = "mod.rs" proc-macro = true diff --git a/src/macros/admin.rs b/src/macros/admin.rs index d4ce7ad5..b0dc1956 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -8,7 +8,7 @@ use crate::{utils::camel_to_snake_string, Result}; pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { let attr: Attribute = parse_quote! { - #[conduit_macros::implement(crate::Command, params = "<'_>")] + #[conduwuit_macros::implement(crate::Command, params = "<'_>")] }; item.attrs.push(attr); diff --git a/src/macros/rustc.rs b/src/macros/rustc.rs index d286ed33..f484e5f5 100644 --- a/src/macros/rustc.rs +++ b/src/macros/rustc.rs @@ -5,7 +5,7 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream { let cargo_crate_name = std::env::var("CARGO_CRATE_NAME"); let crate_name = match cargo_crate_name.as_ref() { Err(_) => return args, - Ok(crate_name) => crate_name.trim_start_matches("conduit_"), + Ok(crate_name) => crate_name.trim_start_matches("conduwuit_"), }; let flag = std::env::args().collect::>(); @@ -13,15 +13,15 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream { let ret = quote! 
{ pub static RUSTC_FLAGS: [&str; #flag_len] = [#( #flag ),*]; - #[conduit_core::ctor] + #[conduwuit_core::ctor] fn _set_rustc_flags() { - conduit_core::info::rustc::FLAGS.lock().expect("locked").insert(#crate_name, &RUSTC_FLAGS); + conduwuit_core::info::rustc::FLAGS.lock().expect("locked").insert(#crate_name, &RUSTC_FLAGS); } // static strings have to be yanked on module unload - #[conduit_core::dtor] + #[conduwuit_core::dtor] fn _unset_rustc_flags() { - conduit_core::info::rustc::FLAGS.lock().expect("locked").remove(#crate_name); + conduwuit_core::info::rustc::FLAGS.lock().expect("locked").remove(#crate_name); } }; diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index ee434ceb..99d41614 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -1,7 +1,6 @@ [package] -# TODO: when can we rename to conduwuit? -name = "conduit" -default-run = "conduit" +name = "conduwuit" +default-run = "conduwuit" authors.workspace = true categories.workspace = true description.workspace = true @@ -48,49 +47,49 @@ default = [ ] brotli_compression = [ - "conduit-api/brotli_compression", - "conduit-core/brotli_compression", - "conduit-router/brotli_compression", - "conduit-service/brotli_compression", + "conduwuit-api/brotli_compression", + "conduwuit-core/brotli_compression", + "conduwuit-router/brotli_compression", + "conduwuit-service/brotli_compression", ] console = [ - "conduit-service/console", + "conduwuit-service/console", ] #dev_release_log_level = [ -# "conduit-admin/dev_release_log_level", -# "conduit-api/dev_release_log_level", -# "conduit-core/dev_release_log_level", -# "conduit-database/dev_release_log_level", -# "conduit-router/dev_release_log_level", -# "conduit-service/dev_release_log_level", +# "conduwuit-admin/dev_release_log_level", +# "conduwuit-api/dev_release_log_level", +# "conduwuit-core/dev_release_log_level", +# "conduwuit-database/dev_release_log_level", +# "conduwuit-router/dev_release_log_level", +# "conduwuit-service/dev_release_log_level", #] 
direct_tls = [ - "conduit-router/direct_tls" + "conduwuit-router/direct_tls" ] element_hacks = [ - "conduit-api/element_hacks", - "conduit-service/element_hacks", + "conduwuit-api/element_hacks", + "conduwuit-service/element_hacks", ] gzip_compression = [ - "conduit-api/gzip_compression", - "conduit-router/gzip_compression", - "conduit-service/gzip_compression", + "conduwuit-api/gzip_compression", + "conduwuit-router/gzip_compression", + "conduwuit-service/gzip_compression", ] hardened_malloc = [ - "conduit-core/hardened_malloc", + "conduwuit-core/hardened_malloc", ] io_uring = [ - "conduit-database/io_uring", + "conduwuit-database/io_uring", ] jemalloc = [ - "conduit-core/jemalloc", - "conduit-database/jemalloc", + "conduwuit-core/jemalloc", + "conduwuit-database/jemalloc", ] jemalloc_prof = [ - "conduit-core/jemalloc_prof", + "conduwuit-core/jemalloc_prof", ] jemalloc_stats = [ - "conduit-core/jemalloc_stats", + "conduwuit-core/jemalloc_stats", ] perf_measurements = [ "dep:opentelemetry", @@ -98,8 +97,8 @@ perf_measurements = [ "dep:tracing-opentelemetry", "dep:opentelemetry_sdk", "dep:opentelemetry-jaeger", - "conduit-core/perf_measurements", - "conduit-core/sentry_telemetry", + "conduwuit-core/perf_measurements", + "conduwuit-core/sentry_telemetry", ] # increases performance, reduces build times, and reduces binary size by not compiling or # genreating code for log level filters that users will generally not use (debug and trace) @@ -108,22 +107,22 @@ release_max_log_level = [ "tracing/release_max_level_info", "log/max_level_trace", "log/release_max_level_info", - "conduit-admin/release_max_log_level", - "conduit-api/release_max_log_level", - "conduit-core/release_max_log_level", - "conduit-database/release_max_log_level", - "conduit-router/release_max_log_level", - "conduit-service/release_max_log_level", + "conduwuit-admin/release_max_log_level", + "conduwuit-api/release_max_log_level", + "conduwuit-core/release_max_log_level", + 
"conduwuit-database/release_max_log_level", + "conduwuit-router/release_max_log_level", + "conduwuit-service/release_max_log_level", ] sentry_telemetry = [ "dep:sentry", "dep:sentry-tracing", "dep:sentry-tower", - "conduit-core/sentry_telemetry", - "conduit-router/sentry_telemetry", + "conduwuit-core/sentry_telemetry", + "conduwuit-router/sentry_telemetry", ] systemd = [ - "conduit-router/systemd", + "conduwuit-router/systemd", ] # enable the tokio_console server ncompatible with release_max_log_level tokio_console = [ @@ -131,19 +130,19 @@ tokio_console = [ "tokio/tracing", ] zstd_compression = [ - "conduit-api/zstd_compression", - "conduit-core/zstd_compression", - "conduit-database/zstd_compression", - "conduit-router/zstd_compression", + "conduwuit-api/zstd_compression", + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", + "conduwuit-router/zstd_compression", ] [dependencies] -conduit-admin.workspace = true -conduit-api.workspace = true -conduit-core.workspace = true -conduit-database.workspace = true -conduit-router.workspace = true -conduit-service.workspace = true +conduwuit-admin.workspace = true +conduwuit-api.workspace = true +conduwuit-core.workspace = true +conduwuit-database.workspace = true +conduwuit-router.workspace = true +conduwuit-service.workspace = true tokio.workspace = true log.workspace = true @@ -184,5 +183,5 @@ hardened_malloc-rs.optional = true workspace = true [[bin]] -name = "conduit" +name = "conduwuit" path = "main.rs" diff --git a/src/main/clap.rs b/src/main/clap.rs index b10242be..7e70bd80 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -3,7 +3,7 @@ use std::path::PathBuf; use clap::Parser; -use conduit::{ +use conduwuit::{ config::{Figment, FigmentValue}, err, toml, utils::available_parallelism, @@ -12,7 +12,7 @@ use conduit::{ /// Commandline arguments #[derive(Parser, Debug)] -#[clap(version = conduit::version(), about, long_about = None, name = "conduwuit")] +#[clap(version = 
conduwuit::version(), about, long_about = None, name = "conduwuit")] pub(crate) struct Args { #[arg(short, long)] /// Path to the config TOML file (optional) diff --git a/src/main/logging.rs b/src/main/logging.rs index efa47bab..999265e7 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{ +use conduwuit::{ config::Config, debug_warn, err, log::{capture, fmt_span, LogLevelReloadHandles}, diff --git a/src/main/main.rs b/src/main/main.rs index 41b21b58..5c066584 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -6,14 +6,14 @@ mod sentry; mod server; mod signal; -extern crate conduit_core as conduit; +extern crate conduwuit_core as conduwuit; use std::{ sync::{atomic::Ordering, Arc}, time::Duration, }; -use conduit::{debug_info, error, rustc_flags_capture, Error, Result}; +use conduwuit::{debug_info, error, rustc_flags_capture, Error, Result}; use server::Server; use tokio::runtime; @@ -58,14 +58,14 @@ fn main() -> Result<(), Error> { /// Operate the server normally in release-mode static builds. This will start, /// run and stop the server within the asynchronous runtime. -#[cfg(not(conduit_mods))] +#[cfg(not(conduwuit_mods))] #[tracing::instrument( name = "main", parent = None, skip_all )] async fn async_main(server: &Arc) -> Result<(), Error> { - extern crate conduit_router as router; + extern crate conduwuit_router as router; match router::start(&server.server).await { Ok(services) => server.services.lock().await.insert(services), @@ -110,7 +110,7 @@ async fn async_main(server: &Arc) -> Result<(), Error> { /// Operate the server in developer-mode dynamic builds. This will start, run, /// and hot-reload portions of the server as-needed before returning for an /// actual shutdown. This is not available in release-mode or static builds. 
-#[cfg(conduit_mods)] +#[cfg(conduwuit_mods)] async fn async_main(server: &Arc) -> Result<(), Error> { let mut starts = true; let mut reloads = true; diff --git a/src/main/mods.rs b/src/main/mods.rs index 16d975cc..ca984a64 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -1,7 +1,7 @@ -#![cfg(conduit_mods)] +#![cfg(conduwuit_mods)] #[unsafe(no_link)] -extern crate conduit_service; +extern crate conduwuit_service; use std::{ future::Future, @@ -9,13 +9,13 @@ use std::{ sync::{atomic::Ordering, Arc}, }; -use conduit::{debug, error, mods, Error, Result}; -use conduit_service::Services; +use conduwuit::{debug, error, mods, Error, Result}; +use conduwuit_service::Services; use crate::Server; type StartFuncResult = Pin>> + Send>>; -type StartFuncProto = fn(&Arc) -> StartFuncResult; +type StartFuncProto = fn(&Arc) -> StartFuncResult; type RunFuncResult = Pin> + Send>>; type RunFuncProto = fn(&Arc) -> RunFuncResult; @@ -23,19 +23,19 @@ type RunFuncProto = fn(&Arc) -> RunFuncResult; type StopFuncResult = Pin> + Send>>; type StopFuncProto = fn(Arc) -> StopFuncResult; -const RESTART_THRESH: &str = "conduit_service"; +const RESTART_THRESH: &str = "conduwuit_service"; const MODULE_NAMES: &[&str] = &[ - //"conduit_core", - "conduit_database", - "conduit_service", - "conduit_api", - "conduit_admin", - "conduit_router", + //"conduwuit_core", + "conduwuit_database", + "conduwuit_service", + "conduwuit_api", + "conduwuit_admin", + "conduwuit_router", ]; #[cfg(panic_trap)] -conduit::mod_init! {{ - conduit::debug::set_panic_trap(); +conduwuit::mod_init! 
{{ + conduwuit::debug::set_panic_trap(); }} pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, bool), Error> { diff --git a/src/main/restart.rs b/src/main/restart.rs index 009e5228..e6f45b82 100644 --- a/src/main/restart.rs +++ b/src/main/restart.rs @@ -2,7 +2,7 @@ use std::{env, os::unix::process::CommandExt, process::Command}; -use conduit::{debug, info, utils}; +use conduwuit::{debug, info, utils}; #[cold] pub(super) fn restart() -> ! { diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 04ad8654..02835ec8 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, OnceLock}, }; -use conduit::{config::Config, debug, trace}; +use conduwuit::{config::Config, debug, trace}; use sentry::{ types::{ protocol::v7::{Context, Event}, @@ -43,7 +43,7 @@ fn options(config: &Config) -> ClientOptions { traces_sample_rate: config.sentry_traces_sample_rate, debug: cfg!(debug_assertions), release: sentry::release_name!(), - user_agent: conduit::version::user_agent().into(), + user_agent: conduwuit::version::user_agent().into(), attach_stacktrace: config.sentry_attach_stacktrace, before_send: Some(Arc::new(before_send)), before_breadcrumb: Some(Arc::new(before_breadcrumb)), diff --git a/src/main/server.rs b/src/main/server.rs index 179749b5..7c3eec1f 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{config::Config, info, log::Log, utils::sys, Error, Result}; +use conduwuit::{config::Config, info, log::Log, utils::sys, Error, Result}; use tokio::{runtime, sync::Mutex}; use crate::{clap::Args, logging::TracingFlameGuard}; @@ -8,18 +8,18 @@ use crate::{clap::Args, logging::TracingFlameGuard}; /// Server runtime state; complete pub(crate) struct Server { /// Server runtime state; public portion - pub(crate) server: Arc, + pub(crate) server: Arc, - pub(crate) services: Mutex>>, + pub(crate) services: Mutex>>, _tracing_flame_guard: TracingFlameGuard, #[cfg(feature = 
"sentry_telemetry")] _sentry_guard: Option<::sentry::ClientInitGuard>, - #[cfg(conduit_mods)] + #[cfg(conduwuit_mods)] // Module instances; TODO: move to mods::loaded mgmt vector - pub(crate) mods: tokio::sync::RwLock>, + pub(crate) mods: tokio::sync::RwLock>, } impl Server { @@ -45,11 +45,11 @@ impl Server { database_path = ?config.database_path, log_levels = %config.log, "{}", - conduit::version(), + conduwuit::version(), ); Ok(Arc::new(Self { - server: Arc::new(conduit::Server::new( + server: Arc::new(conduwuit::Server::new( config, runtime.cloned(), Log { @@ -65,7 +65,7 @@ impl Server { #[cfg(feature = "sentry_telemetry")] _sentry_guard: sentry_guard, - #[cfg(conduit_mods)] + #[cfg(conduwuit_mods)] mods: tokio::sync::RwLock::new(Vec::new()), })) } diff --git a/src/main/signal.rs b/src/main/signal.rs index 139aab78..0f541099 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{debug_error, trace, warn}; +use conduwuit::{debug_error, trace, warn}; use tokio::signal; use super::server::Server; @@ -12,7 +12,7 @@ pub(super) async fn signal(server: Arc) { use unix::SignalKind; const CONSOLE: bool = cfg!(feature = "console"); - const RELOADING: bool = cfg!(all(conduit_mods, not(CONSOLE))); + const RELOADING: bool = cfg!(all(conduwuit_mods, not(CONSOLE))); let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler"); let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler"); diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index e1535868..1b2c248e 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "conduit_router" +name = "conduwuit_router" categories.workspace = true description.workspace = true edition.workspace = true @@ -55,10 +55,10 @@ axum-server-dual-protocol.optional = true axum-server.workspace = true axum.workspace = true bytes.workspace = true -conduit-admin.workspace = true -conduit-api.workspace = true 
-conduit-core.workspace = true -conduit-service.workspace = true +conduwuit-admin.workspace = true +conduwuit-api.workspace = true +conduwuit-core.workspace = true +conduwuit-service.workspace = true const-str.workspace = true futures.workspace = true http.workspace = true diff --git a/src/router/layers.rs b/src/router/layers.rs index bb3ffe87..1c5beace 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -5,9 +5,9 @@ use axum::{ Router, }; use axum_client_ip::SecureClientIpSource; -use conduit::{error, Result, Server}; -use conduit_api::router::state::Guard; -use conduit_service::Services; +use conduwuit::{error, Result, Server}; +use conduwuit_api::router::state::Guard; +use conduwuit_service::Services; use http::{ header::{self, HeaderName}, HeaderValue, Method, StatusCode, @@ -155,7 +155,7 @@ fn body_limit_layer(server: &Server) -> DefaultBodyLimit { DefaultBodyLimit::max fn catch_panic(err: Box) -> http::Response> { //TODO: XXX /* - conduit_service::services() + conduwuit_service::services() .server .metrics .requests_panic diff --git a/src/router/mod.rs b/src/router/mod.rs index 215000cb..aab0b185 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -4,17 +4,17 @@ mod router; mod run; mod serve; -extern crate conduit_core as conduit; +extern crate conduwuit_core as conduwuit; use std::{panic::AssertUnwindSafe, pin::Pin, sync::Arc}; -use conduit::{Error, Result, Server}; -use conduit_service::Services; +use conduwuit::{Error, Result, Server}; +use conduwuit_service::Services; use futures::{Future, FutureExt, TryFutureExt}; -conduit::mod_ctor! {} -conduit::mod_dtor! {} -conduit::rustc_flags_capture! {} +conduwuit::mod_ctor! {} +conduwuit::mod_dtor! {} +conduwuit::rustc_flags_capture! 
{} #[unsafe(no_mangle)] pub extern "Rust" fn start(server: &Arc) -> Pin>> + Send>> { diff --git a/src/router/request.rs b/src/router/request.rs index 13d36981..b17e9c4f 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -4,8 +4,8 @@ use axum::{ extract::State, response::{IntoResponse, Response}, }; -use conduit::{debug, debug_error, debug_warn, defer, err, error, trace, Result}; -use conduit_service::Services; +use conduwuit::{debug, debug_error, debug_warn, defer, err, error, trace, Result}; +use conduwuit_service::Services; use http::{Method, StatusCode, Uri}; #[tracing::instrument( diff --git a/src/router/router.rs b/src/router/router.rs index 31b3f3e4..b3531418 100644 --- a/src/router/router.rs +++ b/src/router/router.rs @@ -1,16 +1,16 @@ use std::sync::Arc; use axum::{response::IntoResponse, routing::get, Router}; -use conduit::Error; -use conduit_api::router::{state, state::Guard}; -use conduit_service::Services; +use conduwuit::Error; +use conduwuit_api::router::{state, state::Guard}; +use conduwuit_service::Services; use http::{StatusCode, Uri}; use ruma::api::client::error::ErrorKind; pub(crate) fn build(services: &Arc) -> (Router, Guard) { let router = Router::::new(); let (state, guard) = state::create(services.clone()); - let router = conduit_api::router::build(router, &services.server) + let router = conduwuit_api::router::build(router, &services.server) .route("/", get(it_works)) .fallback(not_found) .with_state(state); diff --git a/src/router/run.rs b/src/router/run.rs index 93b1339b..6d5c8923 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -1,6 +1,6 @@ -extern crate conduit_admin as admin; -extern crate conduit_core as conduit; -extern crate conduit_service as service; +extern crate conduwuit_admin as admin; +extern crate conduwuit_core as conduwuit; +extern crate conduwuit_service as service; use std::{ sync::{atomic::Ordering, Arc}, @@ -8,7 +8,7 @@ use std::{ }; use axum_server::Handle as ServerHandle; -use 
conduit::{debug, debug_error, debug_info, error, info, Error, Result, Server}; +use conduwuit::{debug, debug_error, debug_info, error, info, Error, Result, Server}; use service::Services; use tokio::{ sync::broadcast::{self, Sender}, diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs index b0254772..35792359 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -6,8 +6,8 @@ mod unix; use std::sync::Arc; use axum_server::Handle as ServerHandle; -use conduit::Result; -use conduit_service::Services; +use conduwuit::Result; +use conduwuit_service::Services; use tokio::sync::broadcast; use super::layers; @@ -28,7 +28,7 @@ pub(super) async fn serve( return tls::serve(server, app, handle, addrs).await; #[cfg(not(feature = "direct_tls"))] - return conduit::Err!(Config( + return conduwuit::Err!(Config( "tls", "conduwuit was not built with direct TLS support (\"direct_tls\")" )); diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 144bff85..f6b6fba4 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -5,7 +5,7 @@ use std::{ use axum::Router; use axum_server::{bind, Handle as ServerHandle}; -use conduit::{debug_info, info, Result, Server}; +use conduwuit::{debug_info, info, Result, Server}; use tokio::task::JoinSet; pub(super) async fn serve( diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index f8d69048..7f54cfcc 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -6,7 +6,7 @@ use axum_server_dual_protocol::{ axum_server::{bind_rustls, tls_rustls::RustlsConfig}, ServerExt, }; -use conduit::{err, Result, Server}; +use conduwuit::{err, Result, Server}; use tokio::task::JoinSet; use tracing::{debug, info, warn}; diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index dd2ecf6a..fb37e125 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -10,7 +10,7 @@ use axum::{ extract::{connect_info::IntoMakeServiceWithConnectInfo, Request}, Router, }; 
-use conduit::{debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server}; +use conduwuit::{debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server}; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ rt::{TokioExecutor, TokioIo}, diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 19747847..85c4ead9 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "conduit_service" +name = "conduwuit_service" categories.workspace = true description.workspace = true edition.workspace = true @@ -44,8 +44,8 @@ arrayvec.workspace = true async-trait.workspace = true base64.workspace = true bytes.workspace = true -conduit-core.workspace = true -conduit-database.workspace = true +conduwuit-core.workspace = true +conduwuit-database.workspace = true const-str.workspace = true either.workspace = true futures.workspace = true diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 5dc17640..536a24e8 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{ +use conduwuit::{ err, implement, utils::{result::LogErr, stream::TryIgnore, ReadyExt}, Err, Result, diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index 37af7452..c25eb6c6 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -4,7 +4,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduit::{debug, defer, error, log, Server}; +use conduwuit::{debug, defer, error, log, Server}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 897d412d..971fdf67 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,6 +1,6 @@ use 
std::collections::BTreeMap; -use conduit::{pdu::PduBuilder, Result}; +use conduwuit::{pdu::PduBuilder, Result}; use ruma::{ events::room::{ canonical_alias::RoomCanonicalAliasEventContent, @@ -67,7 +67,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { ) .await?; - // 2. Make conduit bot join + // 2. Make server user/bot join services .rooms .timeline diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 090c0294..2b05bfc7 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use conduit::{error, implement, Result}; +use conduwuit::{error, implement, Result}; use ruma::{ events::{ room::{ @@ -16,9 +16,9 @@ use ruma::{ use crate::pdu::PduBuilder; -/// Invite the user to the conduit admin room. +/// Invite the user to the conduwuit admin room. /// -/// In conduit, this is equivalent to granting admin privileges. +/// This is equivalent to granting server admin privileges. #[implement(super::Service)] pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { let Ok(room_id) = self.get_admin_room().await else { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 2860bd1b..c4783565 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -10,7 +10,7 @@ use std::{ }; use async_trait::async_trait; -use conduit::{debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server}; +use conduwuit::{debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server}; pub use create::create_admin_room; use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; diff --git a/src/service/admin/startup.rs b/src/service/admin/startup.rs index 9c02c902..68ad4be1 100644 --- a/src/service/admin/startup.rs +++ b/src/service/admin/startup.rs @@ -1,4 +1,4 @@ -use conduit::{debug, debug_info, error, implement, info, Err, Result}; +use conduwuit::{debug, debug_info, error, 
implement, info, Err, Result}; use ruma::events::room::message::RoomMessageEventContent; use tokio::time::{sleep, Duration}; diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index a55d6534..cf2921a7 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -4,7 +4,7 @@ mod registration_info; use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; -use conduit::{err, utils::stream::TryIgnore, Result}; +use conduwuit::{err, utils::stream::TryIgnore, Result}; use database::Map; use futures::{Future, StreamExt, TryStreamExt}; use ruma::{api::appservice::Registration, RoomAliasId, RoomId, UserId}; diff --git a/src/service/appservice/namespace_regex.rs b/src/service/appservice/namespace_regex.rs index 3529fc0e..fe0fd91f 100644 --- a/src/service/appservice/namespace_regex.rs +++ b/src/service/appservice/namespace_regex.rs @@ -1,4 +1,4 @@ -use conduit::Result; +use conduwuit::Result; use regex::RegexSet; use ruma::api::appservice::Namespace; diff --git a/src/service/appservice/registration_info.rs b/src/service/appservice/registration_info.rs index 2c8595b1..612933be 100644 --- a/src/service/appservice/registration_info.rs +++ b/src/service/appservice/registration_info.rs @@ -1,4 +1,4 @@ -use conduit::Result; +use conduwuit::Result; use ruma::{api::appservice::Registration, UserId}; use super::NamespaceRegex; diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index 71545541..f2bc8256 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, time::Duration}; -use conduit::{err, implement, trace, Config, Result}; +use conduwuit::{err, implement, trace, Config, Result}; use either::Either; use ipaddress::IPAddress; use reqwest::redirect; @@ -124,7 +124,7 @@ fn base(config: &Config) -> Result { .timeout(Duration::from_secs(config.request_total_timeout)) .pool_idle_timeout(Duration::from_secs(config.request_idle_timeout)) 
.pool_max_idle_per_host(config.request_idle_per_host.into()) - .user_agent(conduit::version::user_agent()) + .user_agent(conduwuit::version::user_agent()) .redirect(redirect::Policy::limited(6)) .connection_verbose(true); @@ -188,7 +188,7 @@ fn builder_interface(builder: reqwest::ClientBuilder, config: Option<&str>) -> R #[cfg(not(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))] fn builder_interface(builder: reqwest::ClientBuilder, config: Option<&str>) -> Result { - use conduit::Err; + use conduwuit::Err; if let Some(iface) = config { Err!("Binding to network-interface {iface:?} by name is not supported on this platform.") diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index c99a0891..5063fbd4 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use async_trait::async_trait; -use conduit::{error, warn, Result}; +use conduwuit::{error, warn, Result}; use ruma::{ events::{push_rules::PushRulesEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType}, push::Ruleset, @@ -38,7 +38,7 @@ impl crate::Service for Service { self.set_emergency_access() .await - .inspect_err(|e| error!("Could not set the configured emergency password for the conduit user: {e}"))?; + .inspect_err(|e| error!("Could not set the configured emergency password for the server user: {e}"))?; Ok(()) } @@ -47,17 +47,17 @@ impl crate::Service for Service { } impl Service { - /// Sets the emergency password and push rules for the @conduit account in - /// case emergency password is set + /// Sets the emergency password and push rules for the server user account + /// in case emergency password is set async fn set_emergency_access(&self) -> Result { - let conduit_user = &self.services.globals.server_user; + let server_user = &self.services.globals.server_user; self.services .users - .set_password(conduit_user, self.services.globals.emergency_password().as_deref())?; + 
.set_password(server_user, self.services.globals.emergency_password().as_deref())?; let (ruleset, pwd_set) = match self.services.globals.emergency_password() { - Some(_) => (Ruleset::server_default(conduit_user), true), + Some(_) => (Ruleset::server_default(server_user), true), None => (Ruleset::new(), false), }; @@ -65,7 +65,7 @@ impl Service { .account_data .update( None, - conduit_user, + server_user, GlobalAccountDataEventType::PushRules.to_string().into(), &serde_json::to_value(&GlobalAccountDataEvent { content: PushRulesEventContent { @@ -83,7 +83,7 @@ impl Service { ); } else { // logs out any users still in the server service account and removes sessions - self.services.users.deactivate_account(conduit_user).await?; + self.services.users.deactivate_account(server_user).await?; } Ok(pwd_set) diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index f715e944..5edd1f30 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,6 +1,6 @@ use std::sync::{Arc, RwLock}; -use conduit::{utils, Result}; +use conduwuit::{utils, Result}; use database::{Database, Deserialized, Map}; pub struct Data { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 081794e2..2403b703 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,7 +7,7 @@ use std::{ time::Instant, }; -use conduit::{error, Config, Result}; +use conduwuit::{error, Config, Result}; use data::Data; use regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, ServerName, UserId}; @@ -83,8 +83,8 @@ impl crate::Service for Service { .server .supported_room_version(&config.default_room_version) { - error!(config=?s.config.default_room_version, fallback=?conduit::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); - s.config.default_room_version = conduit::config::default_default_room_version(); + 
error!(config=?s.config.default_room_version, fallback=?conduwuit::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); + s.config.default_room_version = conduwuit::config::default_default_room_version(); }; Ok(Arc::new(s)) diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index bae6f214..140fc701 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use conduit::{ +use conduwuit::{ err, implement, utils::stream::{ReadyExt, TryIgnore}, Err, Result, diff --git a/src/service/manager.rs b/src/service/manager.rs index 7384e512..3ce2e0a5 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,6 +1,6 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; -use conduit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; +use conduwuit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; use futures::FutureExt; use tokio::{ sync::{Mutex, MutexGuard}, diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 3922dec9..71fb1cdb 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, time::Duration}; -use conduit::{ +use conduwuit::{ debug, debug_info, err, utils::{str_from_bytes, stream::TryIgnore, string_from_bytes, ReadyExt}, Err, Error, Result, diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 3d9c395e..5932643b 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -7,7 +7,7 @@ use std::{ time::Instant, }; -use conduit::{ +use conduwuit::{ debug, debug_info, debug_warn, error, info, utils::{stream::TryIgnore, ReadyExt}, warn, Config, Result, diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index c0b15726..989e1c27 100644 --- a/src/service/media/mod.rs +++ 
b/src/service/media/mod.rs @@ -9,7 +9,7 @@ use std::{path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; use base64::{engine::general_purpose, Engine as _}; -use conduit::{ +use conduwuit::{ debug, debug_error, debug_info, debug_warn, err, error, trace, utils::{self, MutexMap}, warn, Err, Result, Server, diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index eb9be560..d571ac56 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -1,7 +1,7 @@ use std::{io::Cursor, time::SystemTime}; -use conduit::{debug, utils, Err, Result}; -use conduit_core::implement; +use conduwuit::{debug, utils, Err, Result}; +use conduwuit_core::implement; use image::ImageReader as ImgReader; use ipaddress::IPAddress; use ruma::Mxc; diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 1c6c9ca0..8ec917b7 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -1,6 +1,6 @@ use std::{fmt::Debug, time::Duration}; -use conduit::{debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, Result}; +use conduwuit::{debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, Result}; use http::header::{HeaderValue, CONTENT_DISPOSITION, CONTENT_TYPE}; use ruma::{ api::{ diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 04ec0303..42fc40e7 100644 --- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -1,6 +1,6 @@ use std::{cmp, io::Cursor, num::Saturating as Sat}; -use conduit::{checked, err, Result}; +use conduwuit::{checked, err, Result}; use image::{imageops::FilterType, DynamicImage}; use ruma::{http_headers::ContentDisposition, media::Method, Mxc, UInt, UserId}; use tokio::{ diff --git a/src/service/migrations.rs b/src/service/migrations.rs index f9057036..102ac7d4 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -1,6 +1,6 @@ use 
std::cmp; -use conduit::{ +use conduwuit::{ debug, debug_info, debug_warn, error, info, result::NotFound, utils::{ @@ -41,9 +41,9 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. if users_count > 0 { - let conduit_user = &services.globals.server_user; - if !services.users.exists(conduit_user).await { - error!("The {conduit_user} server user does not exist, and the database is not new."); + let server_user = &services.globals.server_user; + if !services.users.exists(server_user).await { + error!("The {server_user} server user does not exist, and the database is not new."); return Err!(Database( "Cannot reuse an existing database after changing the server name, please delete the old one first.", )); diff --git a/src/service/mod.rs b/src/service/mod.rs index c7dcc0c6..789994d3 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -25,14 +25,14 @@ pub mod uiaa; pub mod updates; pub mod users; -extern crate conduit_core as conduit; -extern crate conduit_database as database; +extern crate conduwuit_core as conduwuit; +extern crate conduwuit_database as database; -pub use conduit::{pdu, PduBuilder, PduCount, PduEvent}; +pub use conduwuit::{pdu, PduBuilder, PduCount, PduEvent}; pub(crate) use service::{Args, Dep, Service}; pub use crate::services::Services; -conduit::mod_ctor! {} -conduit::mod_dtor! {} -conduit::rustc_flags_capture! {} +conduwuit::mod_ctor! {} +conduwuit::mod_dtor! {} +conduwuit::rustc_flags_capture! 
{} diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 68b2c3fe..dd0d8e72 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{ +use conduwuit::{ debug_warn, utils, utils::{stream::TryIgnore, ReadyExt}, Result, diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index 8e1521c5..7e80e05e 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -4,7 +4,7 @@ mod presence; use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduit::{checked, debug, error, result::LogErr, Error, Result, Server}; +use conduwuit::{checked, debug, error, result::LogErr, Error, Result, Server}; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; use tokio::{sync::Mutex, time::sleep}; diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index c4372003..aed4a3f8 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -1,4 +1,4 @@ -use conduit::{utils, Error, Result}; +use conduwuit::{utils, Error, Result}; use ruma::{ events::presence::{PresenceEvent, PresenceEventContent}, presence::PresenceState, diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index ff1837db..ffe822b7 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,7 +1,7 @@ use std::{fmt::Debug, mem, sync::Arc}; use bytes::BytesMut; -use conduit::{ +use conduwuit::{ debug_warn, err, trace, utils::{stream::TryIgnore, string_from_bytes}, warn, Err, PduEvent, Result, diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index a4475216..4d2da695 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -3,7 +3,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use conduit::{debug, debug_error, debug_info, 
debug_warn, err, error, trace, Err, Result}; +use conduwuit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index a13399dc..ca65db35 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -6,7 +6,7 @@ use std::{ }; use arrayvec::ArrayVec; -use conduit::{trace, utils::rand}; +use conduwuit::{trace, utils::rand}; use ruma::{OwnedServerName, ServerName}; use super::fed::FedDest; diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index d3e9f5c9..54905647 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -1,6 +1,6 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; -use conduit::{err, Result, Server}; +use conduwuit::{err, Result, Server}; use futures::FutureExt; use hickory_resolver::TokioAsyncResolver; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 111de292..e18cf0bd 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,7 +6,7 @@ mod tests; use std::{fmt::Write, sync::Arc}; -use conduit::{Result, Server}; +use conduwuit::{Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 0cdec8ee..57db0e15 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -2,7 +2,7 @@ mod remote; use std::sync::Arc; -use conduit::{ +use conduwuit::{ err, utils::{stream::TryIgnore, ReadyExt}, Err, Error, Result, diff --git a/src/service/rooms/alias/remote.rs b/src/service/rooms/alias/remote.rs index d9acccc9..00661da2 100644 --- a/src/service/rooms/alias/remote.rs +++ b/src/service/rooms/alias/remote.rs @@ -1,6 +1,6 @@ use std::iter::once; -use conduit::{debug, debug_error, 
err, implement, Result}; +use conduwuit::{debug, debug_error, err, implement, Result}; use federation::query::get_room_information::v1::Response; use ruma::{api::federation, OwnedRoomId, OwnedServerName, RoomAliasId, ServerName}; diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 3c36928a..1548cd55 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduit::{err, utils, utils::math::usize_from_f64, Err, Result}; +use conduwuit::{err, utils, utils::math::usize_from_f64, Err, Result}; use database::Map; use lru_cache::LruCache; diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index e7e5edf4..b875bf9c 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -6,7 +6,7 @@ use std::{ sync::Arc, }; -use conduit::{ +use conduwuit::{ debug, debug_error, trace, utils::{stream::ReadyExt, IterStream}, validated, warn, Err, Result, diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 63ed3519..3046a328 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{implement, utils::stream::TryIgnore, Result}; use database::Map; use futures::Stream; use ruma::{api::client::room::Visibility, RoomId}; diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index f2ff1b00..ef12a25c 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ b/src/service/rooms/event_handler/acl_check.rs @@ -1,4 +1,4 @@ -use conduit::{debug, implement, trace, warn, Err, Result}; +use conduwuit::{debug, implement, trace, warn, Err, Result}; use ruma::{ events::{room::server_acl::RoomServerAclEventContent, StateEventType}, RoomId, ServerName, diff --git 
a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 8c418acb..316a1722 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -4,7 +4,7 @@ use std::{ time::Instant, }; -use conduit::{ +use conduwuit::{ debug, debug_error, implement, info, pdu, trace, utils::math::continue_exponential_backoff_secs, warn, PduEvent, }; use futures::TryFutureExt; diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 4acdba1d..577b3ff2 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use conduit::{debug_warn, err, implement, PduEvent, Result}; +use conduwuit::{debug_warn, err, implement, PduEvent, Result}; use futures::{future, FutureExt}; use ruma::{ int, diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 228b7d0c..9c7bc65e 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use conduit::{debug, implement, warn, Err, Error, PduEvent, Result}; +use conduwuit::{debug, implement, warn, Err, Error, PduEvent, Result}; use futures::FutureExt; use ruma::{ api::federation::event::get_room_state_ids, events::StateEventType, EventId, RoomId, RoomVersionId, ServerName, diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 19367582..d63f96f9 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -4,7 +4,7 @@ use std::{ time::Instant, }; -use conduit::{debug, err, implement, warn, Error, Result}; +use conduwuit::{debug, err, implement, warn, Error, Result}; 
use futures::{FutureExt, TryFutureExt}; use ruma::{ api::client::error::ErrorKind, events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId, diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 21504b66..9391ebf3 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use conduit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; +use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; use futures::{future::ready, TryFutureExt}; use ruma::{ api::client::error::ErrorKind, diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 90ff7f06..1b4e9fe2 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -4,7 +4,7 @@ use std::{ time::Instant, }; -use conduit::{debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result}; +use conduwuit::{debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result}; use ruma::{api::client::error::ErrorKind, CanonicalJsonValue, EventId, RoomId, ServerName}; #[implement(super::Service)] diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 3fb7d5c4..de3d2f49 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -17,7 +17,7 @@ use std::{ time::Instant, }; -use conduit::{ +use conduwuit::{ utils::{MutexMap, TryFutureExtExt}, Err, PduEvent, Result, Server, }; diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 42f44dee..6c19f43f 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ 
b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,4 +1,4 @@ -use conduit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; +use conduwuit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, RoomId}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 3329a146..8ba4e4f4 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -4,7 +4,7 @@ use std::{ sync::Arc, }; -use conduit::{ +use conduwuit::{ debug, err, implement, utils::stream::{IterStream, WidebandExt}, Result, diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 9b30a830..6c76d9b5 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -4,7 +4,7 @@ use std::{ sync::Arc, }; -use conduit::{ +use conduwuit::{ debug, err, implement, result::LogErr, utils::stream::{BroadbandExt, IterStream}, diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 13e2b281..83267563 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -5,7 +5,7 @@ use std::{ time::Instant, }; -use conduit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; +use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; use futures::{future::ready, StreamExt}; use ruma::{ api::client::error::ErrorKind, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 7a4da2a6..a4bd4e8f 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ 
b/src/service/rooms/lazy_loading/mod.rs @@ -4,7 +4,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduit::{ +use conduwuit::{ implement, utils::{stream::TryIgnore, ReadyExt}, PduCount, Result, diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 4ee390a5..8f65eec3 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{implement, utils::stream::TryIgnore, Result}; use database::Map; use futures::{Stream, StreamExt}; use ruma::RoomId; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 03e77838..9cd3d805 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 3d05a1c8..be9e4ba9 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,7 +1,7 @@ use std::{mem::size_of, sync::Arc}; use arrayvec::ArrayVec; -use conduit::{ +use conduwuit::{ result::LogErr, utils::{ stream::{TryIgnore, WidebandExt}, diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 82d2ee35..23c6d90b 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,7 +1,7 @@ mod data; use std::sync::Arc; -use conduit::{PduCount, Result}; +use conduwuit::{PduCount, Result}; use futures::StreamExt; use ruma::{api::Direction, EventId, RoomId, UserId}; diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index 9a1dba45..03d04b65 100644 --- a/src/service/rooms/read_receipt/data.rs +++ 
b/src/service/rooms/read_receipt/data.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{ +use conduwuit::{ utils::{stream::TryIgnore, ReadyExt}, Result, }; diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 5cac535d..d92b9759 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,7 +2,7 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; +use conduwuit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; use futures::{try_join, Stream, TryFutureExt}; use ruma::{ events::{ diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index ae3567ce..025fc42a 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use arrayvec::ArrayVec; -use conduit::{ +use conduwuit::{ implement, utils::{ set, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index a7c32856..465ce1a9 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; -pub use conduit::pdu::{ShortEventId, ShortId, ShortRoomId}; -use conduit::{err, implement, utils, utils::stream::ReadyExt, Result}; +pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId}; +use conduwuit::{err, implement, utils, utils::stream::ReadyExt, Result}; use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 3e972ca6..aa523871 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -7,7 +7,7 @@ use std::{ sync::Arc, }; -use conduit::{ +use conduwuit::{ checked, debug_info, err, utils::{math::usize_from_f64, IterStream}, Error, Result, diff --git 
a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index d0d21fa8..e083ed34 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use conduit::{ +use conduwuit::{ err, result::FlatOk, utils::{ diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index bca54069..9947b036 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,6 +1,6 @@ use std::{borrow::Borrow, collections::HashMap, sync::Arc}; -use conduit::{ +use conduwuit::{ at, err, ref_at, utils::stream::{BroadbandExt, IterStream, ReadyExt}, PduEvent, Result, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index bcc1263d..798bcbed 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -7,7 +7,7 @@ use std::{ sync::{Arc, Mutex as StdMutex, Mutex}, }; -use conduit::{ +use conduwuit::{ err, error, pdu::PduBuilder, utils::{math::usize_from_f64, ReadyExt}, diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index cbd72cdb..6b7d35d2 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, RwLock}, }; -use conduit::{ +use conduwuit::{ is_not_empty, result::LogErr, utils::{stream::TryIgnore, ReadyExt, StreamTools}, @@ -395,7 +395,7 @@ impl Service { pub fn get_shared_rooms<'a>( &'a self, user_a: &'a UserId, user_b: &'a UserId, ) -> impl Stream + Send + 'a { - use conduit::utils::set; + use conduwuit::utils::set; let a = self.rooms_joined(user_a); let b = self.rooms_joined(user_b); diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 8c6eccbe..36bc92c0 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use 
arrayvec::ArrayVec; -use conduit::{ +use conduwuit::{ at, checked, debug, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, Result, diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index a304e482..f65be902 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use conduit::{ +use conduwuit::{ err, utils::{ stream::{TryIgnore, WidebandExt}, diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 94621385..a32ff54f 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -4,7 +4,7 @@ use std::{ sync::Arc, }; -use conduit::{ +use conduwuit::{ at, err, result::{LogErr, NotFound}, utils, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 8603b7e7..2bc5cf73 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -8,13 +8,13 @@ use std::{ sync::Arc, }; -use conduit::{ +use conduwuit::{ debug, debug_warn, err, error, implement, info, pdu::{EventHash, PduBuilder, PduCount, PduEvent}, utils::{self, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, validated, warn, Err, Error, Result, Server, }; -pub use conduit::{PduId, RawPduId}; +pub use conduwuit::{PduId, RawPduId}; use futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ api::federation, diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index 8ee34f44..59862a85 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use conduit::{ +use conduwuit::{ debug_info, trace, utils::{self, IterStream}, Result, Server, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 537fe69b..80d33de4 100644 --- 
a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use database::{Database, Deserialized, Map}; use ruma::{RoomId, UserId}; diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 55092523..9008a21f 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -1,7 +1,7 @@ use std::{fmt::Debug, mem}; use bytes::BytesMut; -use conduit::{debug_error, err, trace, utils, warn, Err, Result}; +use conduwuit::{debug_error, err, trace, utils, warn, Err, Result}; use reqwest::Client; use ruma::api::{appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index bc70e875..372d8e14 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,6 +1,6 @@ use std::{fmt::Debug, sync::Arc}; -use conduit::{ +use conduwuit::{ at, utils, utils::{stream::TryIgnore, ReadyExt}, Error, Result, diff --git a/src/service/sending/dest.rs b/src/service/sending/dest.rs index d35350c5..0a5893b7 100644 --- a/src/service/sending/dest.rs +++ b/src/service/sending/dest.rs @@ -1,6 +1,6 @@ use std::fmt::Debug; -use conduit::implement; +use conduwuit::implement; use ruma::{OwnedServerName, OwnedUserId}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 611940be..f9828178 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -7,7 +7,7 @@ mod sender; use std::{fmt::Debug, iter::once, sync::Arc}; use async_trait::async_trait; -use conduit::{ +use conduwuit::{ debug_warn, err, utils::{ReadyExt, TryReadyExt}, warn, Result, Server, diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index df39bcd1..862d2a42 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -1,7 
+1,7 @@ use std::mem; use bytes::Bytes; -use conduit::{ +use conduwuit::{ debug, debug_error, debug_warn, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, Err, Error, Result, }; diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 49ce1be4..77fd7d5c 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -6,7 +6,7 @@ use std::{ }; use base64::{engine::general_purpose, Engine as _}; -use conduit::{ +use conduwuit::{ debug, err, error, result::LogErr, trace, diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 1080d79e..886c4750 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -4,7 +4,7 @@ use std::{ time::Duration, }; -use conduit::{debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn}; +use conduwuit::{debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn}; use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, OwnedServerName, diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index dc4627f7..94d2575a 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -1,6 +1,6 @@ use std::borrow::Borrow; -use conduit::{implement, Err, Result}; +use conduwuit::{implement, Err, Result}; use ruma::{api::federation::discovery::VerifyKey, CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId}; use super::{extract_key, PubKeyMap, PubKeys}; diff --git a/src/service/server_keys/keypair.rs b/src/service/server_keys/keypair.rs index 31a24cdf..927171f3 100644 --- a/src/service/server_keys/keypair.rs +++ b/src/service/server_keys/keypair.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{debug, debug_info, err, error, utils, utils::string_from_bytes, Result}; +use conduwuit::{debug, debug_info, err, error, utils, 
utils::string_from_bytes, Result}; use database::Database; use ruma::{api::federation::discovery::VerifyKey, serde::Base64, signatures::Ed25519KeyPair}; diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index 08bcefb6..45c01c0b 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -7,7 +7,7 @@ mod verify; use std::{collections::BTreeMap, sync::Arc, time::Duration}; -use conduit::{ +use conduwuit::{ implement, utils::{timepoint_from_now, IterStream}, Result, Server, diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index 7078f7cd..da7fa08a 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, fmt::Debug}; -use conduit::{debug, implement, Err, Result}; +use conduwuit::{debug, implement, Err, Result}; use ruma::{ api::federation::discovery::{ get_remote_server_keys, diff --git a/src/service/server_keys/sign.rs b/src/service/server_keys/sign.rs index 28fd7e80..71565380 100644 --- a/src/service/server_keys/sign.rs +++ b/src/service/server_keys/sign.rs @@ -1,4 +1,4 @@ -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use ruma::{CanonicalJsonObject, RoomVersionId}; #[implement(super::Service)] diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs index c836e324..d06b55ab 100644 --- a/src/service/server_keys/verify.rs +++ b/src/service/server_keys/verify.rs @@ -1,4 +1,4 @@ -use conduit::{implement, pdu::gen_event_id_canonical_json, Err, Result}; +use conduwuit::{implement, pdu::gen_event_id_canonical_json, Err, Result}; use ruma::{signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/service.rs b/src/service/service.rs index 7ec2ea0f..4b1774cc 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -7,7 +7,7 @@ use std::{ }; 
use async_trait::async_trait; -use conduit::{err, error::inspect_log, utils::string::SplitInfallible, Err, Result, Server}; +use conduwuit::{err, error::inspect_log, utils::string::SplitInfallible, Err, Result, Server}; use database::Database; /// Abstract interface for a Service diff --git a/src/service/services.rs b/src/service/services.rs index b86e7a72..9f9d10f5 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, RwLock}, }; -use conduit::{debug, debug_info, info, trace, Result, Server}; +use conduwuit::{debug, debug_info, info, trace, Result, Server}; use database::Database; use tokio::sync::Mutex; diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index f1a6ae75..a4523246 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, Mutex, Mutex as StdMutex}, }; -use conduit::{Result, Server}; +use conduwuit::{Result, Server}; use database::Map; use ruma::{ api::client::sync::sync_events::{ diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs index 3eb663c1..50959ded 100644 --- a/src/service/sync/watch.rs +++ b/src/service/sync/watch.rs @@ -1,4 +1,4 @@ -use conduit::{implement, trace, Result}; +use conduwuit::{implement, trace, Result}; use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; use ruma::{DeviceId, UserId}; diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 72f60adb..2b979f99 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduit::{implement, Result}; +use conduwuit::{implement, Result}; use database::{Handle, Map}; use ruma::{DeviceId, TransactionId, UserId}; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index d2865d88..4d30a612 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, RwLock}, }; -use conduit::{ +use 
conduwuit::{ err, error, implement, utils, utils::{hash, string::EMPTY}, Error, Result, diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index 82decd26..1d13337d 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduit::{debug, info, warn, Result, Server}; +use conduwuit::{debug, info, warn, Result, Server}; use database::{Deserialized, Map}; use ruma::events::room::message::RoomMessageEventContent; use serde::Deserialize; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 70868ec8..8ceec2a0 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; -use conduit::{ +use conduwuit::{ debug_warn, err, utils, utils::{stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, diff --git a/tests/cargo_smoke.sh b/tests/cargo_smoke.sh index 6ee20ee2..946790c3 100755 --- a/tests/cargo_smoke.sh +++ b/tests/cargo_smoke.sh @@ -38,8 +38,8 @@ element () { run cargo "$TOOLCHAIN" build $ELEMENT_OPTS --all-targets run cargo "$TOOLCHAIN" test $ELEMENT_OPTS --all-targets run cargo "$TOOLCHAIN" bench $ELEMENT_OPTS --all-targets - run cargo "$TOOLCHAIN" run $ELEMENT_OPTS --bin conduit -- -V - run conduwuit "$TOOLCHAIN" run $ELEMENT_OPTS --bin conduit + run cargo "$TOOLCHAIN" run $ELEMENT_OPTS --bin conduwuit -- -V + run conduwuit "$TOOLCHAIN" run $ELEMENT_OPTS --bin conduwuit fi } From 77e0b764080feb567f1d124ce04b037d7770f12f Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 00:05:47 -0500 Subject: [PATCH 0397/1248] apply new rustfmt.toml changes, fix some clippy lints Signed-off-by: strawberry --- .editorconfig | 1 + clippy.toml | 6 +- rustfmt.toml | 12 +- src/admin/admin.rs | 30 +- src/admin/appservice/commands.rs | 36 +- src/admin/check/commands.rs | 5 +- src/admin/check/mod.rs | 7 +- src/admin/debug/commands.rs 
| 229 +++++--- src/admin/federation/commands.rs | 33 +- src/admin/media/commands.rs | 121 +++-- src/admin/processor.rs | 41 +- src/admin/query/account_data.rs | 17 +- src/admin/query/appservice.rs | 11 +- src/admin/query/globals.rs | 15 +- src/admin/query/mod.rs | 5 +- src/admin/query/presence.rs | 13 +- src/admin/query/pusher.rs | 9 +- src/admin/query/resolver.rs | 23 +- src/admin/query/room_alias.rs | 15 +- src/admin/query/room_state_cache.rs | 71 +-- src/admin/query/sending.rs | 87 +-- src/admin/query/users.rs | 54 +- src/admin/room/alias.rs | 147 +++--- src/admin/room/commands.rs | 12 +- src/admin/room/directory.rs | 21 +- src/admin/room/info.rs | 6 +- src/admin/room/mod.rs | 3 +- src/admin/room/moderation.rs | 215 +++++--- src/admin/server/commands.rs | 34 +- src/admin/user/commands.rs | 177 ++++--- src/admin/utils.rs | 10 +- src/api/client/account.rs | 201 ++++--- src/api/client/account_data.rs | 49 +- src/api/client/alias.rs | 17 +- src/api/client/appservice.rs | 14 +- src/api/client/backup.rs | 75 +-- src/api/client/capabilities.rs | 22 +- src/api/client/context.rs | 21 +- src/api/client/device.rs | 31 +- src/api/client/directory.rs | 80 ++- src/api/client/filter.rs | 6 +- src/api/client/keys.rs | 87 +-- src/api/client/media.rs | 46 +- src/api/client/media_legacy.rs | 83 ++- src/api/client/membership.rs | 496 +++++++++++------- src/api/client/message.rs | 54 +- src/api/client/openid.rs | 3 +- src/api/client/presence.rs | 16 +- src/api/client/profile.rs | 94 ++-- src/api/client/push.rs | 135 ++--- src/api/client/read_marker.rs | 40 +- src/api/client/redact.rs | 11 +- src/api/client/relations.rs | 46 +- src/api/client/report.rs | 32 +- src/api/client/room/aliases.rs | 3 +- src/api/client/room/create.rs | 198 ++++--- src/api/client/room/event.rs | 7 +- src/api/client/room/initial_sync.rs | 3 +- src/api/client/room/upgrade.rs | 46 +- src/api/client/search.rs | 20 +- src/api/client/send.rs | 24 +- src/api/client/session.rs | 74 ++- src/api/client/space.rs | 3 +- 
src/api/client/state.rs | 74 ++- src/api/client/sync/mod.rs | 13 +- src/api/client/sync/v3.rs | 287 ++++++---- src/api/client/sync/v4.rs | 391 +++++++------- src/api/client/tag.rs | 25 +- src/api/client/thirdparty.rs | 4 +- src/api/client/threads.rs | 3 +- src/api/client/to_device.rs | 29 +- src/api/client/typing.rs | 3 +- src/api/client/unstable.rs | 64 ++- src/api/client/unversioned.rs | 4 +- src/api/client/user_directory.rs | 14 +- src/api/client/voip.rs | 6 +- src/api/client/well_known.rs | 41 +- src/api/router.rs | 13 +- src/api/router/args.rs | 31 +- src/api/router/auth.rs | 137 +++-- src/api/router/handler.rs | 18 +- src/api/router/request.rs | 16 +- src/api/router/state.rs | 4 +- src/api/server/backfill.rs | 3 +- src/api/server/event.rs | 3 +- src/api/server/event_auth.rs | 11 +- src/api/server/get_missing_events.rs | 7 +- src/api/server/hierarchy.rs | 3 +- src/api/server/invite.rs | 14 +- src/api/server/key.rs | 8 +- src/api/server/make_join.rs | 65 ++- src/api/server/make_leave.rs | 8 +- src/api/server/media.rs | 12 +- src/api/server/openid.rs | 3 +- src/api/server/publicrooms.rs | 14 +- src/api/server/query.rs | 19 +- src/api/server/send.rs | 131 +++-- src/api/server/send_join.rs | 65 ++- src/api/server/send_leave.rs | 13 +- src/api/server/state.rs | 8 +- src/api/server/state_ids.rs | 8 +- src/api/server/user.rs | 19 +- src/api/server/utils.rs | 9 +- src/api/server/well_known.rs | 7 +- src/core/alloc/je.rs | 5 +- src/core/alloc/mod.rs | 16 +- src/core/config/check.rs | 97 ++-- src/core/config/mod.rs | 87 +-- src/core/config/proxy.rs | 40 +- src/core/debug.rs | 8 +- src/core/error/err.rs | 7 +- src/core/error/log.rs | 24 +- src/core/error/mod.rs | 35 +- src/core/error/panic.rs | 12 +- src/core/error/response.rs | 34 +- src/core/info/cargo.rs | 13 +- src/core/info/room_version.rs | 4 +- src/core/log/capture/layer.rs | 16 +- src/core/log/capture/mod.rs | 4 +- src/core/log/capture/state.rs | 6 +- src/core/log/color.rs | 20 +- src/core/log/fmt.rs | 3 +- 
src/core/log/fmt_span.rs | 16 +- src/core/log/suppress.rs | 5 +- src/core/metrics/mod.rs | 4 +- src/core/pdu/builder.rs | 6 +- src/core/pdu/count.rs | 44 +- src/core/pdu/event.rs | 12 +- src/core/pdu/event_id.rs | 8 +- src/core/pdu/filter.rs | 4 +- src/core/pdu/mod.rs | 3 +- src/core/pdu/raw_id.rs | 22 +- src/core/pdu/redact.rs | 8 +- src/core/pdu/strip.rs | 6 +- src/core/pdu/unsigned.rs | 8 +- src/core/server.rs | 10 +- src/core/utils/bool.rs | 12 +- src/core/utils/bytes.rs | 4 +- src/core/utils/content_disposition.rs | 32 +- src/core/utils/debug.rs | 8 +- src/core/utils/defer.rs | 4 +- src/core/utils/future/try_ext_ext.rs | 46 +- src/core/utils/hash/argon.rs | 4 +- src/core/utils/html.rs | 12 +- src/core/utils/json.rs | 16 +- src/core/utils/math.rs | 14 +- src/core/utils/mod.rs | 4 +- src/core/utils/mutex_map.rs | 7 +- src/core/utils/result.rs | 6 +- src/core/utils/result/flat_ok.rs | 8 +- src/core/utils/result/log_err.rs | 4 +- src/core/utils/set.rs | 4 +- src/core/utils/stream/expect.rs | 8 +- src/core/utils/stream/ignore.rs | 8 +- src/core/utils/stream/iter_stream.rs | 13 +- src/core/utils/stream/ready.rs | 87 ++- src/core/utils/stream/tools.rs | 18 +- src/core/utils/stream/try_broadband.rs | 17 +- src/core/utils/stream/try_ready.rs | 48 +- src/core/utils/string/between.rs | 4 +- src/core/utils/string/split.rs | 8 +- src/core/utils/string/unquote.rs | 4 +- src/core/utils/sys.rs | 7 +- src/core/utils/tests.rs | 1 + src/core/utils/time.rs | 57 +- src/database/cork.rs | 6 +- src/database/database.rs | 9 +- src/database/de.rs | 44 +- src/database/engine.rs | 55 +- src/database/handle.rs | 6 +- src/database/map.rs | 13 +- src/database/map/contains.rs | 11 +- src/database/map/count.rs | 15 +- src/database/map/get.rs | 19 +- src/database/map/get_batch.rs | 13 +- src/database/map/keys_from.rs | 10 +- src/database/map/keys_prefix.rs | 18 +- src/database/map/rev_keys_from.rs | 20 +- src/database/map/rev_keys_prefix.rs | 18 +- src/database/map/rev_stream_from.rs | 16 +- 
src/database/map/rev_stream_prefix.rs | 14 +- src/database/map/stream_from.rs | 18 +- src/database/map/stream_prefix.rs | 14 +- src/database/opts.rs | 123 +++-- src/database/pool.rs | 12 +- src/database/ser.rs | 113 ++-- src/database/stream/items.rs | 16 +- src/database/stream/items_rev.rs | 16 +- src/database/stream/keys.rs | 16 +- src/database/stream/keys_rev.rs | 16 +- src/database/tests.rs | 24 +- src/database/util.rs | 8 +- src/database/watchers.rs | 9 +- src/macros/admin.rs | 6 +- src/macros/config.rs | 87 +-- src/macros/implement.rs | 11 +- src/macros/refutable.rs | 12 +- src/macros/rustc.rs | 4 +- src/macros/utils.rs | 16 +- src/main/logging.rs | 30 +- src/main/main.rs | 4 +- src/main/mods.rs | 4 +- src/main/server.rs | 23 +- src/router/layers.rs | 20 +- src/router/mod.rs | 12 +- src/router/request.rs | 8 +- src/router/run.rs | 24 +- src/router/serve/mod.rs | 4 +- src/router/serve/plain.rs | 8 +- src/router/serve/tls.rs | 14 +- src/router/serve/unix.rs | 21 +- src/service/account_data/mod.rs | 35 +- src/service/admin/console.rs | 23 +- src/service/admin/create.rs | 68 +-- src/service/admin/grant.rs | 30 +- src/service/admin/mod.rs | 49 +- src/service/admin/startup.rs | 6 +- src/service/appservice/mod.rs | 11 +- src/service/appservice/registration_info.rs | 6 +- src/service/client/mod.rs | 14 +- src/service/emergency/mod.rs | 34 +- src/service/globals/data.rs | 4 +- src/service/globals/mod.rs | 91 ++-- src/service/key_backups/mod.rs | 68 ++- src/service/manager.rs | 32 +- src/service/media/data.rs | 69 ++- src/service/media/migrations.rs | 11 +- src/service/media/mod.rs | 91 ++-- src/service/media/preview.rs | 54 +- src/service/media/remote.rs | 189 ++++--- src/service/media/tests.rs | 32 +- src/service/media/thumbnail.rs | 40 +- src/service/migrations.rs | 35 +- src/service/presence/data.rs | 35 +- src/service/presence/mod.rs | 53 +- src/service/presence/presence.rs | 14 +- src/service/pusher/mod.rs | 109 ++-- src/service/resolver/actual.rs | 161 +++--- 
src/service/resolver/cache.rs | 20 +- src/service/resolver/dns.rs | 19 +- src/service/resolver/fed.rs | 28 +- src/service/rooms/alias/mod.rs | 74 ++- src/service/rooms/alias/remote.rs | 30 +- src/service/rooms/auth_chain/data.rs | 8 +- src/service/rooms/auth_chain/mod.rs | 37 +- src/service/rooms/directory/mod.rs | 8 +- .../fetch_and_handle_outliers.rs | 80 ++- src/service/rooms/event_handler/fetch_prev.rs | 20 +- .../rooms/event_handler/fetch_state.rs | 27 +- .../event_handler/handle_incoming_pdu.rs | 15 +- .../rooms/event_handler/handle_outlier_pdu.rs | 33 +- .../rooms/event_handler/handle_prev_pdu.rs | 22 +- src/service/rooms/event_handler/mod.rs | 14 +- .../rooms/event_handler/parse_incoming_pdu.rs | 15 +- .../rooms/event_handler/resolve_state.rs | 19 +- .../rooms/event_handler/state_at_incoming.rs | 22 +- .../event_handler/upgrade_outlier_pdu.rs | 35 +- src/service/rooms/lazy_loading/mod.rs | 21 +- src/service/rooms/metadata/mod.rs | 16 +- src/service/rooms/pdu_metadata/data.rs | 21 +- src/service/rooms/pdu_metadata/mod.rs | 24 +- src/service/rooms/read_receipt/data.rs | 23 +- src/service/rooms/read_receipt/mod.rs | 71 +-- src/service/rooms/search/mod.rs | 34 +- src/service/rooms/short/mod.rs | 31 +- src/service/rooms/spaces/mod.rs | 200 ++++--- src/service/rooms/spaces/tests.rs | 21 +- src/service/rooms/state/mod.rs | 112 ++-- src/service/rooms/state_accessor/data.rs | 52 +- src/service/rooms/state_accessor/mod.rs | 155 ++++-- src/service/rooms/state_cache/mod.rs | 156 ++++-- src/service/rooms/state_compressor/mod.rs | 134 +++-- src/service/rooms/threads/mod.rs | 25 +- src/service/rooms/timeline/data.rs | 68 ++- src/service/rooms/timeline/mod.rs | 235 ++++++--- src/service/rooms/typing/mod.rs | 26 +- src/service/rooms/user/mod.rs | 13 +- src/service/sending/appservice.rs | 20 +- src/service/sending/data.rs | 50 +- src/service/sending/dest.rs | 6 +- src/service/sending/mod.rs | 56 +- src/service/sending/send.rs | 79 ++- src/service/sending/sender.rs | 183 
++++--- src/service/server_keys/acquire.rs | 47 +- src/service/server_keys/get.rs | 34 +- src/service/server_keys/keypair.rs | 7 +- src/service/server_keys/mod.rs | 13 +- src/service/server_keys/request.rs | 8 +- src/service/server_keys/sign.rs | 6 +- src/service/server_keys/verify.rs | 30 +- src/service/service.rs | 4 +- src/service/sync/mod.rs | 116 ++-- src/service/transaction_ids/mod.rs | 13 +- src/service/uiaa/mod.rs | 67 ++- src/service/updates/mod.rs | 7 +- src/service/users/mod.rs | 231 ++++++-- 296 files changed, 7147 insertions(+), 4300 deletions(-) diff --git a/.editorconfig b/.editorconfig index b2455005..2d7438a4 100644 --- a/.editorconfig +++ b/.editorconfig @@ -21,3 +21,4 @@ indent_size = 2 [*.rs] indent_style = tab +max_line_length = 98 diff --git a/clippy.toml b/clippy.toml index 3a0aa695..d9dd99ca 100644 --- a/clippy.toml +++ b/clippy.toml @@ -3,7 +3,7 @@ cognitive-complexity-threshold = 94 # TODO reduce me ALARA excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5 future-size-threshold = 7745 # TODO reduce me ALARA stack-size-threshold = 196608 # reduce me ALARA -too-many-lines-threshold = 700 # TODO reduce me to <= 100 +too-many-lines-threshold = 780 # TODO reduce me to <= 100 type-complexity-threshold = 250 # reduce me to ~200 disallowed-macros = [ @@ -13,3 +13,7 @@ disallowed-macros = [ { path = "log::debug", reason = "use conduwuit_core::debug" }, { path = "log::trace", reason = "use conduwuit_core::trace" }, ] + +disallowed-methods = [ + { path = "tokio::spawn", reason = "use and pass conduuwit_core::server::Server::runtime() to spawn from"}, +] diff --git a/rustfmt.toml b/rustfmt.toml index fd912a19..635ec8f8 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -2,9 +2,8 @@ array_width = 80 chain_width = 60 comment_width = 80 condense_wildcard_suffixes = true -edition = "2021" +edition = "2024" fn_call_width = 80 -fn_params_layout = "Compressed" fn_single_line = true format_code_in_doc_comments = true format_macro_bodies = true @@ -14,13 
+13,20 @@ group_imports = "StdExternalCrate" hard_tabs = true hex_literal_case = "Upper" imports_granularity = "Crate" +match_arm_blocks = false +match_arm_leading_pipes = "Always" match_block_trailing_comma = true -max_width = 120 +max_width = 98 newline_style = "Unix" normalize_comments = false +overflow_delimited_expr = true reorder_impl_items = true reorder_imports = true +single_line_if_else_max_width = 60 +single_line_let_else_max_width = 80 +struct_lit_width = 40 tab_spaces = 4 +unstable_features = true use_field_init_shorthand = true use_small_heuristics = "Off" use_try_shorthand = true diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 7b707446..9097a613 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -3,9 +3,10 @@ use conduwuit::Result; use ruma::events::room::message::RoomMessageEventContent; use crate::{ - appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command, debug, - debug::DebugCommand, federation, federation::FederationCommand, media, media::MediaCommand, query, - query::QueryCommand, room, room::RoomCommand, server, server::ServerCommand, user, user::UserCommand, + appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command, + debug, debug::DebugCommand, federation, federation::FederationCommand, media, + media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server, + server::ServerCommand, user, user::UserCommand, }; #[derive(Debug, Parser)] @@ -49,18 +50,21 @@ pub(super) enum AdminCommand { } #[tracing::instrument(skip_all, name = "command")] -pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + command: AdminCommand, + context: &Command<'_>, +) -> Result { use AdminCommand::*; Ok(match command { - Appservices(command) => appservice::process(command, context).await?, - Media(command) => media::process(command, context).await?, - Users(command) => user::process(command, 
context).await?, - Rooms(command) => room::process(command, context).await?, - Federation(command) => federation::process(command, context).await?, - Server(command) => server::process(command, context).await?, - Debug(command) => debug::process(command, context).await?, - Query(command) => query::process(command, context).await?, - Check(command) => check::process(command, context).await?, + | Appservices(command) => appservice::process(command, context).await?, + | Media(command) => media::process(command, context).await?, + | Users(command) => user::process(command, context).await?, + | Rooms(command) => room::process(command, context).await?, + | Federation(command) => federation::process(command, context).await?, + | Server(command) => server::process(command, context).await?, + | Debug(command) => debug::process(command, context).await?, + | Query(command) => query::process(command, context).await?, + | Check(command) => check::process(command, context).await?, }) } diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 6d661308..4f02531a 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -4,7 +4,9 @@ use crate::{admin_command, Result}; #[admin_command] pub(super) async fn register(&self) -> Result { - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&"").trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. 
Add --help for details.", @@ -14,55 +16,63 @@ pub(super) async fn register(&self) -> Result { let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); let parsed_config = serde_yaml::from_str::(&appservice_config_body); match parsed_config { - Ok(registration) => match self + | Ok(registration) => match self .services .appservice .register_appservice(®istration, &appservice_config_body) .await { - Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( + | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( "Appservice registered with ID: {}", registration.id ))), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( "Failed to register appservice: {e}" ))), }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( "Could not parse appservice config as YAML: {e}" ))), } } #[admin_command] -pub(super) async fn unregister(&self, appservice_identifier: String) -> Result { +pub(super) async fn unregister( + &self, + appservice_identifier: String, +) -> Result { match self .services .appservice .unregister_appservice(&appservice_identifier) .await { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + | Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( "Failed to unregister appservice: {e}" ))), } } #[admin_command] -pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result { +pub(super) async fn show_appservice_config( + &self, + appservice_identifier: String, +) -> Result { match self .services .appservice .get_registration(&appservice_identifier) .await { - Some(config) => { - let config_str = serde_yaml::to_string(&config).expect("config should've been validated on register"); - 
let output = format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",); + | Some(config) => { + let config_str = serde_yaml::to_string(&config) + .expect("config should've been validated on register"); + let output = + format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",); Ok(RoomMessageEventContent::notice_markdown(output)) }, - None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), + | None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), } } diff --git a/src/admin/check/commands.rs b/src/admin/check/commands.rs index ecb18c1a..7e27362f 100644 --- a/src/admin/check/commands.rs +++ b/src/admin/check/commands.rs @@ -19,8 +19,9 @@ pub(super) async fn check_all_users(&self) -> Result { let ok_count = users.iter().filter(|_user| true).count(); let message = format!( - "Database query completed in {query_time:?}:\n\n```\nTotal entries: {total:?}\nFailure/Invalid user count: \ - {err_count:?}\nSuccess/Valid user count: {ok_count:?}\n```" + "Database query completed in {query_time:?}:\n\n```\nTotal entries: \ + {total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \ + {ok_count:?}\n```" ); Ok(RoomMessageEventContent::notice_markdown(message)) diff --git a/src/admin/check/mod.rs b/src/admin/check/mod.rs index 44b1ddf2..4790a6de 100644 --- a/src/admin/check/mod.rs +++ b/src/admin/check/mod.rs @@ -11,8 +11,11 @@ pub(super) enum CheckCommand { AllUsers, } -pub(super) async fn process(command: CheckCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + command: CheckCommand, + context: &Command<'_>, +) -> Result { Ok(match command { - CheckCommand::AllUsers => context.check_all_users().await?, + | CheckCommand::AllUsers => context.check_all_users().await?, }) } diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 9d77a1de..0c5a7cbd 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 
+6,9 @@ use std::{ time::{Instant, SystemTime}, }; -use conduwuit::{debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result}; +use conduwuit::{ + debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result, +}; use futures::{FutureExt, StreamExt}; use ruma::{ api::{client::error::ErrorKind, federation::event::get_room_state}, @@ -26,7 +28,10 @@ pub(super) async fn echo(&self, message: Vec) -> Result) -> Result { +pub(super) async fn get_auth_chain( + &self, + event_id: Box, +) -> Result { let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else { return Ok(RoomMessageEventContent::notice_plain("Event not found.")); }; @@ -68,20 +73,26 @@ pub(super) async fn parse_pdu(&self) -> Result { let string = self.body[1..self.body.len().saturating_sub(1)].join("\n"); match serde_json::from_str(&string) { - Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { - Ok(hash) => { + | Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + | Ok(hash) => { let event_id = EventId::parse(format!("${hash}")); - match serde_json::from_value::(serde_json::to_value(value).expect("value is json")) { - Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!("EventId: {event_id:?}\n{pdu:#?}"))), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + | Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!( + "EventId: {event_id:?}\n{pdu:#?}" + ))), + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( "EventId: {event_id:?}\nCould not parse event: {e}" ))), } }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Could not parse PDU JSON: {e:?}"))), + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Could not parse PDU JSON: {e:?}" + ))), }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => 
Ok(RoomMessageEventContent::text_plain(format!( "Invalid json in command body: {e}" ))), } @@ -103,8 +114,9 @@ pub(super) async fn get_pdu(&self, event_id: Box) -> Result { - let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + | Ok(json) => { + let json_text = + serde_json::to_string_pretty(&json).expect("canonical json is valid json"); Ok(RoomMessageEventContent::notice_markdown(format!( "{}\n```json\n{}\n```", if outlier { @@ -115,13 +127,15 @@ pub(super) async fn get_pdu(&self, event_id: Box) -> Result Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), + | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } } #[admin_command] pub(super) async fn get_remote_pdu_list( - &self, server: Box, force: bool, + &self, + server: Box, + force: bool, ) -> Result { if !self.services.globals.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( @@ -131,8 +145,8 @@ pub(super) async fn get_remote_pdu_list( if server == self.services.globals.server_name() { return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local PDUs from \ - the database.", + "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ + fetching local PDUs from the database.", )); } @@ -184,7 +198,9 @@ pub(super) async fn get_remote_pdu_list( #[admin_command] pub(super) async fn get_remote_pdu( - &self, event_id: Box, server: Box, + &self, + event_id: Box, + server: Box, ) -> Result { if !self.services.globals.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( @@ -194,30 +210,32 @@ pub(super) async fn get_remote_pdu( if server == self.services.globals.server_name() { return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local PDUs.", + "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ + fetching local PDUs.", )); } match self .services .sending - .send_federation_request( - &server, - ruma::api::federation::event::get_event::v1::Request { - event_id: event_id.clone().into(), - include_unredacted_content: None, - }, - ) + .send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request { + event_id: event_id.clone().into(), + include_unredacted_content: None, + }) .await { - Ok(response) => { - let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { - warn!( - "Requested event ID {event_id} from server but failed to convert from RawValue to \ - CanonicalJsonObject (malformed event/response?): {e}" - ); - Error::BadRequest(ErrorKind::Unknown, "Received response from server but failed to parse PDU") - })?; + | Ok(response) => { + let json: CanonicalJsonObject = + serde_json::from_str(response.pdu.get()).map_err(|e| { + warn!( + "Requested event ID {event_id} from server but failed to convert from \ + RawValue to CanonicalJsonObject (malformed event/response?): {e}" + ); + Error::BadRequest( + ErrorKind::Unknown, + "Received response from server but failed to parse PDU", + ) + })?; trace!("Attempting to parse PDU: {:?}", &response.pdu); let _parsed_pdu = { @@ -229,8 +247,8 @@ pub(super) async fn get_remote_pdu( .await; let (event_id, value, room_id) = match parsed_result { - Ok(t) => t, - Err(e) => { + | Ok(t) => t, + | Err(e) => { warn!("Failed to parse PDU: {e}"); info!("Full PDU: {:?}", &response.pdu); return Ok(RoomMessageEventContent::text_plain(format!( @@ -250,21 +268,27 @@ pub(super) async fn get_remote_pdu( .boxed() .await?; - let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + let json_text = + serde_json::to_string_pretty(&json).expect("canonical json is valid json"); Ok(RoomMessageEventContent::notice_markdown(format!( "{}\n```json\n{}\n```", - "Got PDU from specified server and handled as backfilled PDU successfully. 
Event body:", json_text + "Got PDU from specified server and handled as backfilled PDU successfully. \ + Event body:", + json_text ))) }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( "Remote server did not have PDU or failed sending request to remote server: {e}" ))), } } #[admin_command] -pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result { +pub(super) async fn get_room_state( + &self, + room: OwnedRoomOrAliasId, +) -> Result { let room_id = self.services.rooms.alias.resolve(&room).await?; let room_state: Vec<_> = self .services @@ -285,7 +309,8 @@ pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result) -> Result { + | Ok(response) => { let ping_time = timer.elapsed(); let json_text_res = serde_json::to_string_pretty(&response.server); @@ -323,8 +351,11 @@ pub(super) async fn ping(&self, server: Box) -> Result { - warn!("Failed sending federation request to specified server from ping debug command: {e}"); + | Err(e) => { + warn!( + "Failed sending federation request to specified server from ping debug command: \ + {e}" + ); Ok(RoomMessageEventContent::text_plain(format!( "Failed sending federation request to specified server:\n\n{e}", ))) @@ -347,13 +378,17 @@ pub(super) async fn force_device_list_updates(&self) -> Result, reset: bool) -> Result { +pub(super) async fn change_log_level( + &self, + filter: Option, + reset: bool, +) -> Result { let handles = &["console"]; if reset { let old_filter_layer = match EnvFilter::try_new(&self.services.globals.config.log) { - Ok(s) => s, - Err(e) => { + | Ok(s) => s, + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Log level from config appears to be invalid now: {e}" ))); @@ -367,13 +402,13 @@ pub(super) async fn change_log_level(&self, filter: Option, reset: bool) .reload .reload(&old_filter_layer, Some(handles)) { - Ok(()) => { + | Ok(()) => { return 
Ok(RoomMessageEventContent::text_plain(format!( "Successfully changed log level back to config value {}", self.services.globals.config.log ))); }, - Err(e) => { + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to modify and reload the global tracing log level: {e}" ))); @@ -383,8 +418,8 @@ pub(super) async fn change_log_level(&self, filter: Option, reset: bool) if let Some(filter) = filter { let new_filter_layer = match EnvFilter::try_new(filter) { - Ok(s) => s, - Err(e) => { + | Ok(s) => s, + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Invalid log level filter specified: {e}" ))); @@ -398,10 +433,10 @@ pub(super) async fn change_log_level(&self, filter: Option, reset: bool) .reload .reload(&new_filter_layer, Some(handles)) { - Ok(()) => { + | Ok(()) => { return Ok(RoomMessageEventContent::text_plain("Successfully changed log level")); }, - Err(e) => { + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to modify and reload the global tracing log level: {e}" ))); @@ -414,7 +449,9 @@ pub(super) async fn change_log_level(&self, filter: Option, reset: bool) #[admin_command] pub(super) async fn sign_json(&self) -> Result { - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&"").trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. 
Add --help for details.", @@ -423,21 +460,24 @@ pub(super) async fn sign_json(&self) -> Result { let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str(&string) { - Ok(mut value) => { + | Ok(mut value) => { self.services .server_keys .sign_json(&mut value) .expect("our request json is what ruma expects"); - let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json"); + let json_text = + serde_json::to_string_pretty(&value).expect("canonical json is valid json"); Ok(RoomMessageEventContent::text_plain(json_text)) }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } } #[admin_command] pub(super) async fn verify_json(&self) -> Result { - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&"").trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. 
Add --help for details.", @@ -446,13 +486,13 @@ pub(super) async fn verify_json(&self) -> Result { let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str::(&string) { - Ok(value) => match self.services.server_keys.verify_json(&value, None).await { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + | Ok(value) => match self.services.server_keys.verify_json(&value, None).await { + | Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( "Signature verification failed: {e}" ))), }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } } @@ -462,9 +502,10 @@ pub(super) async fn verify_pdu(&self, event_id: Box) -> Result "signatures OK, but content hash failed (redaction).", - Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.", - Err(e) => return Err(e), + | Ok(ruma::signatures::Verified::Signatures) => + "signatures OK, but content hash failed (redaction).", + | Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.", + | Err(e) => return Err(e), }; Ok(RoomMessageEventContent::notice_plain(msg)) @@ -472,7 +513,10 @@ pub(super) async fn verify_pdu(&self, event_id: Box) -> Result) -> Result { +pub(super) async fn first_pdu_in_room( + &self, + room_id: Box, +) -> Result { if !self .services .rooms @@ -498,7 +542,10 @@ pub(super) async fn first_pdu_in_room(&self, room_id: Box) -> Result) -> Result { +pub(super) async fn latest_pdu_in_room( + &self, + room_id: Box, +) -> Result { if !self .services .rooms @@ -525,7 +572,9 @@ pub(super) async fn latest_pdu_in_room(&self, room_id: Box) -> Result, server_name: Box, + &self, + room_id: Box, + server_name: Box, ) -> Result { if !self .services @@ -554,13 +603,10 @@ pub(super) 
async fn force_set_room_state_from_server( let remote_state_response = self .services .sending - .send_federation_request( - &server_name, - get_room_state::v1::Request { - room_id: room_id.clone().into(), - event_id: first_pdu.event_id.clone().into(), - }, - ) + .send_federation_request(&server_name, get_room_state::v1::Request { + room_id: room_id.clone().into(), + event_id: first_pdu.event_id.clone().into(), + }) .await?; for pdu in remote_state_response.pdus.clone() { @@ -571,8 +617,8 @@ pub(super) async fn force_set_room_state_from_server( .parse_incoming_pdu(&pdu) .await { - Ok(t) => t, - Err(e) => { + | Ok(t) => t, + | Err(e) => { warn!("Could not parse PDU, ignoring: {e}"); continue; }, @@ -654,8 +700,8 @@ pub(super) async fn force_set_room_state_from_server( .await?; info!( - "Updating joined counts for room just in case (e.g. we may have found a difference in the room's \ - m.room.member state" + "Updating joined counts for room just in case (e.g. we may have found a difference in \ + the room's m.room.member state" ); self.services .rooms @@ -672,9 +718,13 @@ pub(super) async fn force_set_room_state_from_server( #[admin_command] pub(super) async fn get_signing_keys( - &self, server_name: Option>, notary: Option>, query: bool, + &self, + server_name: Option>, + notary: Option>, + query: bool, ) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); + let server_name = + server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); if let Some(notary) = notary { let signing_keys = self @@ -706,8 +756,12 @@ pub(super) async fn get_signing_keys( } #[admin_command] -pub(super) async fn get_verify_keys(&self, server_name: Option>) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); +pub(super) async fn get_verify_keys( + &self, + server_name: Option>, +) -> Result { + let server_name = + 
server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); let keys = self .services @@ -727,7 +781,9 @@ pub(super) async fn get_verify_keys(&self, server_name: Option>) #[admin_command] pub(super) async fn resolve_true_destination( - &self, server_name: Box, no_cache: bool, + &self, + server_name: Box, + no_cache: bool, ) -> Result { if !self.services.globals.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( @@ -737,7 +793,8 @@ pub(super) async fn resolve_true_destination( if server_name == self.services.globals.config.server_name { return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local PDUs.", + "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ + fetching local PDUs.", )); } @@ -846,7 +903,9 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result, map: Option, + &self, + property: Option, + map: Option, ) -> Result { let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); let map_name = map.as_ref().map_or(EMPTY, String::as_str); diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index e0c1598d..75635b1b 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -2,7 +2,9 @@ use std::fmt::Write; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId}; +use ruma::{ + events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId, +}; use crate::{admin_command, get_room_info}; @@ -38,7 +40,10 @@ pub(super) async fn incoming_federation(&self) -> Result) -> Result { +pub(super) async fn fetch_support_well_known( + &self, + server_name: Box, +) -> Result { let response = self .services .client @@ -60,16 +65,20 @@ pub(super) async fn fetch_support_well_known(&self, server_name: Box } let json: 
serde_json::Value = match serde_json::from_str(&text) { - Ok(json) => json, - Err(_) => { - return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON.")); + | Ok(json) => json, + | Err(_) => { + return Ok(RoomMessageEventContent::text_plain( + "Response text/body is not valid JSON.", + )); }, }; let pretty_json: String = match serde_json::to_string_pretty(&json) { - Ok(json) => json, - Err(_) => { - return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON.")); + | Ok(json) => json, + | Err(_) => { + return Ok(RoomMessageEventContent::text_plain( + "Response text/body is not valid JSON.", + )); }, }; @@ -79,10 +88,14 @@ pub(super) async fn fetch_support_well_known(&self, server_name: Box } #[admin_command] -pub(super) async fn remote_user_in_rooms(&self, user_id: Box) -> Result { +pub(super) async fn remote_user_in_rooms( + &self, + user_id: Box, +) -> Result { if user_id.server_name() == self.services.globals.config.server_name { return Ok(RoomMessageEventContent::text_plain( - "User belongs to our server, please use `list-joined-rooms` user admin command instead.", + "User belongs to our server, please use `list-joined-rooms` user admin command \ + instead.", )); } diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index 977ecdae..3d0a9473 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,16 +1,21 @@ use std::time::Duration; -use conduwuit::{debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result}; +use conduwuit::{ + debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result, +}; use conduwuit_service::media::Dim; use ruma::{ - events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, + events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, + OwnedServerName, ServerName, }; use crate::{admin_command, 
utils::parse_local_user_id}; #[admin_command] pub(super) async fn delete( - &self, mxc: Option>, event_id: Option>, + &self, + mxc: Option>, + event_id: Option>, ) -> Result { if event_id.is_some() && mxc.is_some() { return Ok(RoomMessageEventContent::text_plain( @@ -52,7 +57,10 @@ pub(super) async fn delete( let final_url = url.to_string().replace('"', ""); mxc_urls.push(final_url); } else { - info!("Found a URL in the event ID {event_id} but did not start with mxc://, ignoring"); + info!( + "Found a URL in the event ID {event_id} but did not start with \ + mxc://, ignoring" + ); } } @@ -67,17 +75,24 @@ pub(super) async fn delete( debug!("Found a thumbnail_url in info key: {thumbnail_url}"); if thumbnail_url.to_string().starts_with("\"mxc://") { - debug!("Pushing thumbnail URL {thumbnail_url} to list of MXCs to delete"); - let final_thumbnail_url = thumbnail_url.to_string().replace('"', ""); + debug!( + "Pushing thumbnail URL {thumbnail_url} to list of MXCs \ + to delete" + ); + let final_thumbnail_url = + thumbnail_url.to_string().replace('"', ""); mxc_urls.push(final_thumbnail_url); } else { info!( - "Found a thumbnail URL in the event ID {event_id} but did not start with \ - mxc://, ignoring" + "Found a thumbnail URL in the event ID {event_id} but \ + did not start with mxc://, ignoring" ); } } else { - info!("No \"thumbnail_url\" key in \"info\" key, assuming no thumbnails."); + info!( + "No \"thumbnail_url\" key in \"info\" key, assuming no \ + thumbnails." 
+ ); } } } @@ -98,8 +113,8 @@ pub(super) async fn delete( mxc_urls.push(final_url); } else { info!( - "Found a URL in the event ID {event_id} but did not start with mxc://, \ - ignoring" + "Found a URL in the event ID {event_id} but did not \ + start with mxc://, ignoring" ); } } else { @@ -109,13 +124,14 @@ pub(super) async fn delete( } } else { return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key or failed parsing the event ID JSON.", + "Event ID does not have a \"content\" key or failed parsing the event \ + ID JSON.", )); } } else { return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key, this is not a message or an event type that contains \ - media.", + "Event ID does not have a \"content\" key, this is not a message or an \ + event type that contains media.", )); } } else { @@ -126,7 +142,9 @@ pub(super) async fn delete( if mxc_urls.is_empty() { info!("Parsed event ID {event_id} but did not contain any MXC URLs."); - return Ok(RoomMessageEventContent::text_plain("Parsed event ID but found no MXC URLs.")); + return Ok(RoomMessageEventContent::text_plain( + "Parsed event ID but found no MXC URLs.", + )); } let mut mxc_deletion_count: usize = 0; @@ -138,11 +156,11 @@ pub(super) async fn delete( .delete(&mxc_url.as_str().try_into()?) .await { - Ok(()) => { + | Ok(()) => { debug_info!("Successfully deleted {mxc_url} from filesystem and database"); mxc_deletion_count = mxc_deletion_count.saturating_add(1); }, - Err(e) => { + | Err(e) => { debug_warn!("Failed to delete {mxc_url}, ignoring error and skipping: {e}"); continue; }, @@ -150,19 +168,22 @@ pub(super) async fn delete( } return Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from event ID {event_id}." + "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from \ + event ID {event_id}." 
))); } Ok(RoomMessageEventContent::text_plain( - "Please specify either an MXC using --mxc or an event ID using --event-id of the message containing an image. \ - See --help for details.", + "Please specify either an MXC using --mxc or an event ID using --event-id of the \ + message containing an image. See --help for details.", )) } #[admin_command] pub(super) async fn delete_list(&self) -> Result { - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&"").trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. Add --help for details.", @@ -192,11 +213,11 @@ pub(super) async fn delete_list(&self) -> Result { for mxc in &mxc_list { trace!(%failed_parsed_mxcs, %mxc_deletion_count, "Deleting MXC {mxc} in bulk"); match self.services.media.delete(mxc).await { - Ok(()) => { + | Ok(()) => { debug_info!("Successfully deleted {mxc} from filesystem and database"); mxc_deletion_count = mxc_deletion_count.saturating_add(1); }, - Err(e) => { + | Err(e) => { debug_warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); continue; }, @@ -204,14 +225,18 @@ pub(super) async fn delete_list(&self) -> Result { } Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database and the filesystem. \ - {failed_parsed_mxcs} MXCs failed to be parsed from the database.", + "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \ + and the filesystem. 
{failed_parsed_mxcs} MXCs failed to be parsed from the database.", ))) } #[admin_command] pub(super) async fn delete_past_remote_media( - &self, duration: String, before: bool, after: bool, yes_i_want_to_delete_local_media: bool, + &self, + duration: String, + before: bool, + after: bool, + yes_i_want_to_delete_local_media: bool, ) -> Result { if before && after { return Ok(RoomMessageEventContent::text_plain( @@ -224,7 +249,12 @@ pub(super) async fn delete_past_remote_media( let deleted_count = self .services .media - .delete_all_remote_media_at_after_time(duration, before, after, yes_i_want_to_delete_local_media) + .delete_all_remote_media_at_after_time( + duration, + before, + after, + yes_i_want_to_delete_local_media, + ) .await?; Ok(RoomMessageEventContent::text_plain(format!( @@ -233,7 +263,10 @@ pub(super) async fn delete_past_remote_media( } #[admin_command] -pub(super) async fn delete_all_from_user(&self, username: String) -> Result { +pub(super) async fn delete_all_from_user( + &self, + username: String, +) -> Result { let user_id = parse_local_user_id(self.services, &username)?; let deleted_count = self.services.media.delete_from_user(&user_id).await?; @@ -245,7 +278,9 @@ pub(super) async fn delete_all_from_user(&self, username: String) -> Result, yes_i_want_to_delete_local_media: bool, + &self, + server_name: Box, + yes_i_want_to_delete_local_media: bool, ) -> Result { if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media { return Ok(RoomMessageEventContent::text_plain( @@ -260,20 +295,26 @@ pub(super) async fn delete_all_from_server( .await .inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}")) else { - return Ok(RoomMessageEventContent::text_plain("Failed to get MXC URIs from our database")); + return Ok(RoomMessageEventContent::text_plain( + "Failed to get MXC URIs from our database", + )); }; let mut deleted_count: usize = 0; for mxc in all_mxcs { let Ok(mxc_server_name) = 
mxc.server_name().inspect_err(|e| { - debug_warn!("Failed to parse MXC {mxc} server name from database, ignoring error and skipping: {e}"); + debug_warn!( + "Failed to parse MXC {mxc} server name from database, ignoring error and \ + skipping: {e}" + ); }) else { continue; }; if mxc_server_name != server_name - || (self.services.globals.server_is_ours(mxc_server_name) && !yes_i_want_to_delete_local_media) + || (self.services.globals.server_is_ours(mxc_server_name) + && !yes_i_want_to_delete_local_media) { trace!("skipping MXC URI {mxc}"); continue; @@ -282,10 +323,10 @@ pub(super) async fn delete_all_from_server( let mxc: Mxc<'_> = mxc.as_str().try_into()?; match self.services.media.delete(&mxc).await { - Ok(()) => { + | Ok(()) => { deleted_count = deleted_count.saturating_add(1); }, - Err(e) => { + | Err(e) => { debug_warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); continue; }, @@ -307,7 +348,10 @@ pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result, timeout: u32, + &self, + mxc: OwnedMxcUri, + server: Option, + timeout: u32, ) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); @@ -327,7 +371,12 @@ pub(super) async fn get_remote_file( #[admin_command] pub(super) async fn get_remote_thumbnail( - &self, mxc: OwnedMxcUri, server: Option, timeout: u32, width: u32, height: u32, + &self, + mxc: OwnedMxcUri, + server: Option, + timeout: u32, + width: u32, + height: u32, ) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); diff --git a/src/admin/processor.rs b/src/admin/processor.rs index e041a889..ed7d5ed1 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -53,8 +53,8 @@ async fn handle_command(services: Arc, command: CommandInput) -> Proce async fn process_command(services: Arc, input: &CommandInput) -> ProcessorResult { let (command, args, body) = match parse(&services, input) { - Err(error) => return 
Err(error), - Ok(parsed) => parsed, + | Err(error) => return Err(error), + | Ok(parsed) => parsed, }; let context = Command { @@ -68,7 +68,8 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce } fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { - let link = "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺"; + let link = + "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺"; let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}"); let content = RoomMessageEventContent::notice_markdown(msg); error!("Panic while processing command: {error:?}"); @@ -76,7 +77,11 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { } // Parse and process a message from the admin room -async fn process(context: &Command<'_>, command: AdminCommand, args: &[String]) -> ProcessorResult { +async fn process( + context: &Command<'_>, + command: AdminCommand, + args: &[String], +) -> ProcessorResult { let (capture, logs) = capture_create(context); let capture_scope = capture.start(); @@ -100,11 +105,12 @@ async fn process(context: &Command<'_>, command: AdminCommand, args: &[String]) drop(logs); match result { - Ok(content) => { - write!(&mut output, "{0}", content.body()).expect("failed to format command result to output buffer"); + | Ok(content) => { + write!(&mut output, "{0}", content.body()) + .expect("failed to format command result to output buffer"); Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))) }, - Err(error) => { + | Err(error) => { write!(&mut output, "Command failed with error:\n```\n{error:#?}\n```") .expect("failed to format command result to output"); Err(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id)) @@ -128,8 +134,9 @@ fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { .and_then(LevelFilter::into_level) 
.unwrap_or(Level::DEBUG); - let filter = - move |data: capture::Data<'_>| data.level() <= log_level && data.our_modules() && data.scope.contains(&"admin"); + let filter = move |data: capture::Data<'_>| { + data.level() <= log_level && data.our_modules() && data.scope.contains(&"admin") + }; let logs = Arc::new(Mutex::new( collect_stream(|s| markdown_table_head(s)).expect("markdown table header"), @@ -146,14 +153,15 @@ fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { // Parse chat messages from the admin room into an AdminCommand object fn parse<'a>( - services: &Arc, input: &'a CommandInput, + services: &Arc, + input: &'a CommandInput, ) -> Result<(AdminCommand, Vec, Vec<&'a str>), CommandOutput> { let lines = input.command.lines().filter(|line| !line.trim().is_empty()); let command_line = lines.clone().next().expect("command missing first line"); let body = lines.skip(1).collect(); match parse_command(command_line) { - Ok((command, args)) => Ok((command, args, body)), - Err(error) => { + | Ok((command, args)) => Ok((command, args, body)), + | Err(error) => { let message = error .to_string() .replace("server.name", services.globals.server_name().as_str()); @@ -255,11 +263,12 @@ fn parse_line(command_line: &str) -> Vec { argv } -fn reply(mut content: RoomMessageEventContent, reply_id: Option<&EventId>) -> RoomMessageEventContent { +fn reply( + mut content: RoomMessageEventContent, + reply_id: Option<&EventId>, +) -> RoomMessageEventContent { content.relates_to = reply_id.map(|event_id| Reply { - in_reply_to: InReplyTo { - event_id: event_id.to_owned(), - }, + in_reply_to: InReplyTo { event_id: event_id.to_owned() }, }); content diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index 53ed64dd..43762789 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -30,15 +30,14 @@ pub(crate) enum AccountDataCommand { } /// All the getters and iterators from src/database/key_value/account_data.rs -pub(super) 
async fn process(subcommand: AccountDataCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + subcommand: AccountDataCommand, + context: &Command<'_>, +) -> Result { let services = context.services; match subcommand { - AccountDataCommand::ChangesSince { - user_id, - since, - room_id, - } => { + | AccountDataCommand::ChangesSince { user_id, since, room_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .account_data @@ -51,11 +50,7 @@ pub(super) async fn process(subcommand: AccountDataCommand, context: &Command<'_ "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - AccountDataCommand::Get { - user_id, - kind, - room_id, - } => { + | AccountDataCommand::Get { user_id, kind, room_id } => { let timer = tokio::time::Instant::now(); let results = services .account_data diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 6bf9b9ad..fe4861bc 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -18,13 +18,14 @@ pub(crate) enum AppserviceCommand { } /// All the getters and iterators from src/database/key_value/appservice.rs -pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + subcommand: AppserviceCommand, + context: &Command<'_>, +) -> Result { let services = context.services; match subcommand { - AppserviceCommand::GetRegistration { - appservice_id, - } => { + | AppserviceCommand::GetRegistration { appservice_id } => { let timer = tokio::time::Instant::now(); let results = services.appservice.get_registration(&appservice_id).await; @@ -34,7 +35,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_> "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - AppserviceCommand::All => { + | AppserviceCommand::All => { let timer = tokio::time::Instant::now(); let results = services.appservice.all().await; let query_time = 
timer.elapsed(); diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 68f68648..e356453f 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -21,11 +21,14 @@ pub(crate) enum GlobalsCommand { } /// All the getters and iterators from src/database/key_value/globals.rs -pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + subcommand: GlobalsCommand, + context: &Command<'_>, +) -> Result { let services = context.services; match subcommand { - GlobalsCommand::DatabaseVersion => { + | GlobalsCommand::DatabaseVersion => { let timer = tokio::time::Instant::now(); let results = services.globals.db.database_version().await; let query_time = timer.elapsed(); @@ -34,7 +37,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - GlobalsCommand::CurrentCount => { + | GlobalsCommand::CurrentCount => { let timer = tokio::time::Instant::now(); let results = services.globals.db.current_count(); let query_time = timer.elapsed(); @@ -43,7 +46,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - GlobalsCommand::LastCheckForUpdatesId => { + | GlobalsCommand::LastCheckForUpdatesId => { let timer = tokio::time::Instant::now(); let results = services.updates.last_check_for_updates_id().await; let query_time = timer.elapsed(); @@ -52,9 +55,7 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - GlobalsCommand::SigningKeysFor { - origin, - } => { + | GlobalsCommand::SigningKeysFor { origin } => { let timer = tokio::time::Instant::now(); let results = services.server_keys.verify_keys_for(&origin).await; let query_time = timer.elapsed(); diff --git a/src/admin/query/mod.rs 
b/src/admin/query/mod.rs index b1849091..ab269a40 100644 --- a/src/admin/query/mod.rs +++ b/src/admin/query/mod.rs @@ -14,8 +14,9 @@ use conduwuit::Result; use self::{ account_data::AccountDataCommand, appservice::AppserviceCommand, globals::GlobalsCommand, - presence::PresenceCommand, pusher::PusherCommand, resolver::ResolverCommand, room_alias::RoomAliasCommand, - room_state_cache::RoomStateCacheCommand, sending::SendingCommand, users::UsersCommand, + presence::PresenceCommand, pusher::PusherCommand, resolver::ResolverCommand, + room_alias::RoomAliasCommand, room_state_cache::RoomStateCacheCommand, + sending::SendingCommand, users::UsersCommand, }; use crate::admin_command_dispatch; diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 73e33bf6..45bb6bd9 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -23,13 +23,14 @@ pub(crate) enum PresenceCommand { } /// All the getters and iterators in key_value/presence.rs -pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + subcommand: PresenceCommand, + context: &Command<'_>, +) -> Result { let services = context.services; match subcommand { - PresenceCommand::GetPresence { - user_id, - } => { + | PresenceCommand::GetPresence { user_id } => { let timer = tokio::time::Instant::now(); let results = services.presence.db.get_presence(&user_id).await; let query_time = timer.elapsed(); @@ -38,9 +39,7 @@ pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - PresenceCommand::PresenceSince { - since, - } => { + | PresenceCommand::PresenceSince { since } => { let timer = tokio::time::Instant::now(); let results: Vec<(_, _, _)> = services .presence diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 3004af8b..55532e54 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ 
-13,13 +13,14 @@ pub(crate) enum PusherCommand { }, } -pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + subcommand: PusherCommand, + context: &Command<'_>, +) -> Result { let services = context.services; match subcommand { - PusherCommand::GetPushers { - user_id, - } => { + | PusherCommand::GetPushers { user_id } => { let timer = tokio::time::Instant::now(); let results = services.pusher.get_pushers(&user_id).await; let query_time = timer.elapsed(); diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 43c63770..3b950d13 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -22,20 +22,16 @@ pub(crate) enum ResolverCommand { } #[admin_command] -async fn destinations_cache(&self, server_name: Option) -> Result { +async fn destinations_cache( + &self, + server_name: Option, +) -> Result { use service::resolver::cache::CachedDest; let mut out = String::new(); writeln!(out, "| Server Name | Destination | Hostname | Expires |")?; writeln!(out, "| ----------- | ----------- | -------- | ------- |")?; - let row = |( - name, - &CachedDest { - ref dest, - ref host, - expire, - }, - )| { + let row = |(name, &CachedDest { ref dest, ref host, expire })| { let expire = time::format(expire, "%+"); writeln!(out, "| {name} | {dest} | {host} | {expire} |").expect("wrote line"); }; @@ -64,14 +60,7 @@ async fn overrides_cache(&self, server_name: Option) -> Result) -> Result { +pub(super) async fn process( + subcommand: RoomAliasCommand, + context: &Command<'_>, +) -> Result { let services = context.services; match subcommand { - RoomAliasCommand::ResolveLocalAlias { - alias, - } => { + | RoomAliasCommand::ResolveLocalAlias { alias } => { let timer = tokio::time::Instant::now(); let results = services.rooms.alias.resolve_local_alias(&alias).await; let query_time = timer.elapsed(); @@ -39,9 +40,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: 
&Command<'_>) "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomAliasCommand::LocalAliasesForRoom { - room_id, - } => { + | RoomAliasCommand::LocalAliasesForRoom { room_id } => { let timer = tokio::time::Instant::now(); let aliases: Vec<_> = services .rooms @@ -56,7 +55,7 @@ pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```" ))) }, - RoomAliasCommand::AllLocalAliases => { + | RoomAliasCommand::AllLocalAliases => { let timer = tokio::time::Instant::now(); let aliases = services .rooms diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 6e84507a..cd7f5af7 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -77,15 +77,13 @@ pub(crate) enum RoomStateCacheCommand { } pub(super) async fn process( - subcommand: RoomStateCacheCommand, context: &Command<'_>, + subcommand: RoomStateCacheCommand, + context: &Command<'_>, ) -> Result { let services = context.services; match subcommand { - RoomStateCacheCommand::ServerInRoom { - server, - room_id, - } => { + | RoomStateCacheCommand::ServerInRoom { server, room_id } => { let timer = tokio::time::Instant::now(); let result = services .rooms @@ -98,9 +96,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" ))) }, - RoomStateCacheCommand::RoomServers { - room_id, - } => { + | RoomStateCacheCommand::RoomServers { room_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -115,9 +111,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::ServerRooms { - server, - } => { + | RoomStateCacheCommand::ServerRooms { server } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -132,9 +126,7 @@ pub(super) async fn process( "Query completed in 
{query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::RoomMembers { - room_id, - } => { + | RoomStateCacheCommand::RoomMembers { room_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -149,9 +141,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::LocalUsersInRoom { - room_id, - } => { + | RoomStateCacheCommand::LocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -166,9 +156,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::ActiveLocalUsersInRoom { - room_id, - } => { + | RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -183,9 +171,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::RoomJoinedCount { - room_id, - } => { + | RoomStateCacheCommand::RoomJoinedCount { room_id } => { let timer = tokio::time::Instant::now(); let results = services.rooms.state_cache.room_joined_count(&room_id).await; let query_time = timer.elapsed(); @@ -194,9 +180,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::RoomInvitedCount { - room_id, - } => { + | RoomStateCacheCommand::RoomInvitedCount { room_id } => { let timer = tokio::time::Instant::now(); let results = services .rooms @@ -209,9 +193,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::RoomUserOnceJoined { - room_id, - } => { + | RoomStateCacheCommand::RoomUserOnceJoined { room_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -226,9 +208,7 @@ pub(super) async fn process( 
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::RoomMembersInvited { - room_id, - } => { + | RoomStateCacheCommand::RoomMembersInvited { room_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -243,10 +223,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::GetInviteCount { - room_id, - user_id, - } => { + | RoomStateCacheCommand::GetInviteCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); let results = services .rooms @@ -259,10 +236,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::GetLeftCount { - room_id, - user_id, - } => { + | RoomStateCacheCommand::GetLeftCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); let results = services .rooms @@ -275,9 +249,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::RoomsJoined { - user_id, - } => { + | RoomStateCacheCommand::RoomsJoined { user_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -292,9 +264,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::RoomsInvited { - user_id, - } => { + | RoomStateCacheCommand::RoomsInvited { user_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -308,9 +278,7 @@ pub(super) async fn process( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::RoomsLeft { - user_id, - } => { + | RoomStateCacheCommand::RoomsLeft { user_id } => { let timer = tokio::time::Instant::now(); let results: Vec<_> = services .rooms @@ -324,10 +292,7 @@ pub(super) async fn process( "Query completed in 
{query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - RoomStateCacheCommand::InviteState { - user_id, - room_id, - } => { + | RoomStateCacheCommand::InviteState { user_id, room_id } => { let timer = tokio::time::Instant::now(); let results = services .rooms diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index efb4275f..696067b7 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -62,11 +62,14 @@ pub(crate) enum SendingCommand { } /// All the getters and iterators in key_value/sending.rs -pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + subcommand: SendingCommand, + context: &Command<'_>, +) -> Result { let services = context.services; match subcommand { - SendingCommand::ActiveRequests => { + | SendingCommand::ActiveRequests => { let timer = tokio::time::Instant::now(); let results = services.sending.db.active_requests(); let active_requests = results.collect::>().await; @@ -76,25 +79,29 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" ))) }, - SendingCommand::QueuedRequests { + | SendingCommand::QueuedRequests { appservice_id, server_name, user_id, push_key, } => { - if appservice_id.is_none() && server_name.is_none() && user_id.is_none() && push_key.is_none() { + if appservice_id.is_none() + && server_name.is_none() + && user_id.is_none() + && push_key.is_none() + { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via arguments. See \ - --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. 
See --help for more details.", )); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { - (Some(appservice_id), None, None, None) => { + | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via \ - arguments. See --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. See --help for more details.", )); } @@ -103,15 +110,15 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - .db .queued_requests(&Destination::Appservice(appservice_id)) }, - (None, Some(server_name), None, None) => services + | (None, Some(server_name), None, None) => services .sending .db .queued_requests(&Destination::Normal(server_name.into())), - (None, None, Some(user_id), Some(push_key)) => { + | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via \ - arguments. See --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. See --help for more details.", )); } @@ -120,16 +127,16 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - .db .queued_requests(&Destination::Push(user_id.into(), push_key)) }, - (Some(_), Some(_), Some(_), Some(_)) => { + | (Some(_), Some(_), Some(_), Some(_)) => { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via arguments. \ - Not all of them See --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. 
Not all of them See --help for more details.", )); }, - _ => { + | _ => { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via arguments. \ - See --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. See --help for more details.", )); }, }; @@ -141,26 +148,30 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" ))) }, - SendingCommand::ActiveRequestsFor { + | SendingCommand::ActiveRequestsFor { appservice_id, server_name, user_id, push_key, } => { - if appservice_id.is_none() && server_name.is_none() && user_id.is_none() && push_key.is_none() { + if appservice_id.is_none() + && server_name.is_none() + && user_id.is_none() + && push_key.is_none() + { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via arguments. See \ - --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. See --help for more details.", )); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { - (Some(appservice_id), None, None, None) => { + | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via \ - arguments. See --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. 
See --help for more details.", )); } @@ -169,15 +180,15 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - .db .active_requests_for(&Destination::Appservice(appservice_id)) }, - (None, Some(server_name), None, None) => services + | (None, Some(server_name), None, None) => services .sending .db .active_requests_for(&Destination::Normal(server_name.into())), - (None, None, Some(user_id), Some(push_key)) => { + | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via \ - arguments. See --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. See --help for more details.", )); } @@ -186,16 +197,16 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - .db .active_requests_for(&Destination::Push(user_id.into(), push_key)) }, - (Some(_), Some(_), Some(_), Some(_)) => { + | (Some(_), Some(_), Some(_), Some(_)) => { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via arguments. \ - Not all of them See --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. Not all of them See --help for more details.", )); }, - _ => { + | _ => { return Ok(RoomMessageEventContent::text_plain( - "An appservice ID, server name, or a user ID with push key must be specified via arguments. \ - See --help for more details.", + "An appservice ID, server name, or a user ID with push key must be \ + specified via arguments. 
See --help for more details.", )); }, }; @@ -207,9 +218,7 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" ))) }, - SendingCommand::GetLatestEduCount { - server_name, - } => { + | SendingCommand::GetLatestEduCount { server_name } => { let timer = tokio::time::Instant::now(); let results = services.sending.db.get_latest_educount(&server_name).await; let query_time = timer.elapsed(); diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index d58e2d2a..2149a103 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -1,7 +1,9 @@ use clap::Subcommand; use conduwuit::Result; use futures::stream::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId}; +use ruma::{ + events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId, +}; use crate::{admin_command, admin_command_dispatch}; @@ -91,7 +93,11 @@ pub(crate) enum UsersCommand { #[admin_command] async fn get_backup_session( - &self, user_id: OwnedUserId, version: String, room_id: OwnedRoomId, session_id: String, + &self, + user_id: OwnedUserId, + version: String, + room_id: OwnedRoomId, + session_id: String, ) -> Result { let timer = tokio::time::Instant::now(); let result = self @@ -108,7 +114,10 @@ async fn get_backup_session( #[admin_command] async fn get_room_backups( - &self, user_id: OwnedUserId, version: String, room_id: OwnedRoomId, + &self, + user_id: OwnedUserId, + version: String, + room_id: OwnedRoomId, ) -> Result { let timer = tokio::time::Instant::now(); let result = self @@ -124,7 +133,11 @@ async fn get_room_backups( } #[admin_command] -async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result { +async fn get_all_backups( + &self, + user_id: OwnedUserId, + version: String, +) -> Result { let timer = tokio::time::Instant::now(); let result = 
self.services.key_backups.get_all(&user_id, &version).await; let query_time = timer.elapsed(); @@ -135,7 +148,11 @@ async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result } #[admin_command] -async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result { +async fn get_backup_algorithm( + &self, + user_id: OwnedUserId, + version: String, +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -150,7 +167,10 @@ async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> R } #[admin_command] -async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result { +async fn get_latest_backup_version( + &self, + user_id: OwnedUserId, +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -244,7 +264,11 @@ async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result Result { +async fn get_device_metadata( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, +) -> Result { let timer = tokio::time::Instant::now(); let device = self .services @@ -270,7 +294,11 @@ async fn get_devices_version(&self, user_id: OwnedUserId) -> Result Result { +async fn count_one_time_keys( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -285,7 +313,11 @@ async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDevice } #[admin_command] -async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { +async fn get_device_keys( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -327,7 +359,9 @@ async fn get_master_key(&self, user_id: OwnedUserId) -> Result Result { let timer = tokio::time::Instant::now(); let result = self diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 841bc51a..835138b2 
100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -3,7 +3,10 @@ use std::fmt::Write; use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; +use ruma::{ + events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, + RoomId, +}; use crate::{escape_html, Command}; @@ -42,82 +45,92 @@ pub(crate) enum RoomAliasCommand { }, } -pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + command: RoomAliasCommand, + context: &Command<'_>, +) -> Result { let services = context.services; let server_user = &services.globals.server_user; match command { - RoomAliasCommand::Set { - ref room_alias_localpart, - .. - } - | RoomAliasCommand::Remove { - ref room_alias_localpart, - } - | RoomAliasCommand::Which { - ref room_alias_localpart, - } => { - let room_alias_str = format!("#{}:{}", room_alias_localpart, services.globals.server_name()); + | RoomAliasCommand::Set { ref room_alias_localpart, .. } + | RoomAliasCommand::Remove { ref room_alias_localpart } + | RoomAliasCommand::Which { ref room_alias_localpart } => { + let room_alias_str = + format!("#{}:{}", room_alias_localpart, services.globals.server_name()); let room_alias = match RoomAliasId::parse_box(room_alias_str) { - Ok(alias) => alias, - Err(err) => return Ok(RoomMessageEventContent::text_plain(format!("Failed to parse alias: {err}"))), + | Ok(alias) => alias, + | Err(err) => + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to parse alias: {err}" + ))), }; match command { - RoomAliasCommand::Set { - force, - room_id, - .. 
- } => match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) { - (true, Ok(id)) => match services - .rooms - .alias - .set_alias(&room_alias, &room_id, server_user) - { - Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully overwrote alias (formerly {id})" + | RoomAliasCommand::Set { force, room_id, .. } => + match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) { + | (true, Ok(id)) => { + match services.rooms.alias.set_alias( + &room_alias, + &room_id, + server_user, + ) { + | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( + "Successfully overwrote alias (formerly {id})" + ))), + | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( + "Failed to remove alias: {err}" + ))), + } + }, + | (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!( + "Refusing to overwrite in use alias for {id}, use -f or --force to \ + overwrite" ))), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))), + | (_, Err(_)) => { + match services.rooms.alias.set_alias( + &room_alias, + &room_id, + server_user, + ) { + | Ok(()) => Ok(RoomMessageEventContent::text_plain( + "Successfully set alias", + )), + | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( + "Failed to remove alias: {err}" + ))), + } + }, }, - (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!( - "Refusing to overwrite in use alias for {id}, use -f or --force to overwrite" - ))), - (_, Err(_)) => match services - .rooms - .alias - .set_alias(&room_alias, &room_id, server_user) - { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Successfully set alias")), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))), + | RoomAliasCommand::Remove { .. 
} => + match services.rooms.alias.resolve_local_alias(&room_alias).await { + | Ok(id) => match services + .rooms + .alias + .remove_alias(&room_alias, server_user) + .await + { + | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( + "Removed alias from {id}" + ))), + | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( + "Failed to remove alias: {err}" + ))), + }, + | Err(_) => + Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), }, - }, - RoomAliasCommand::Remove { - .. - } => match services.rooms.alias.resolve_local_alias(&room_alias).await { - Ok(id) => match services - .rooms - .alias - .remove_alias(&room_alias, server_user) - .await - { - Ok(()) => Ok(RoomMessageEventContent::text_plain(format!("Removed alias from {id}"))), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))), + | RoomAliasCommand::Which { .. } => + match services.rooms.alias.resolve_local_alias(&room_alias).await { + | Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( + "Alias resolves to {id}" + ))), + | Err(_) => + Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), }, - Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, - RoomAliasCommand::Which { - .. - } => match services.rooms.alias.resolve_local_alias(&room_alias).await { - Ok(id) => Ok(RoomMessageEventContent::text_plain(format!("Alias resolves to {id}"))), - Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, - RoomAliasCommand::List { - .. - } => unreachable!(), + | RoomAliasCommand::List { .. 
} => unreachable!(), } }, - RoomAliasCommand::List { - room_id, - } => { + | RoomAliasCommand::List { room_id } => if let Some(room_id) = room_id { let aliases: Vec = services .rooms @@ -128,7 +141,8 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> .await; let plain_list = aliases.iter().fold(String::new(), |mut output, alias| { - writeln!(output, "- {alias}").expect("should be able to write to string buffer"); + writeln!(output, "- {alias}") + .expect("should be able to write to string buffer"); output }); @@ -176,7 +190,6 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> let plain = format!("Aliases:\n{plain_list}"); let html = format!("Aliases:\n
      {html_list}
    "); Ok(RoomMessageEventContent::text_html(plain, html)) - } - }, + }, } } diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index d2fdbfca..b58d04c5 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -6,7 +6,11 @@ use crate::{admin_command, get_room_info, PAGE_SIZE}; #[admin_command] pub(super) async fn list_rooms( - &self, page: Option, exclude_disabled: bool, exclude_banned: bool, no_details: bool, + &self, + page: Option, + exclude_disabled: bool, + exclude_banned: bool, + no_details: bool, ) -> Result { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); @@ -16,10 +20,12 @@ pub(super) async fn list_rooms( .metadata .iter_ids() .filter_map(|room_id| async move { - (!exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await).then_some(room_id) + (!exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await) + .then_some(room_id) }) .filter_map(|room_id| async move { - (!exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await).then_some(room_id) + (!exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await) + .then_some(room_id) }) .then(|room_id| get_room_info(self.services, room_id)) .collect::>() diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index d3ed2575..81f25478 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -25,24 +25,21 @@ pub(crate) enum RoomDirectoryCommand { }, } -pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result { +pub(super) async fn process( + command: RoomDirectoryCommand, + context: &Command<'_>, +) -> Result { let services = context.services; match command { - RoomDirectoryCommand::Publish { - room_id, - } => { + | RoomDirectoryCommand::Publish { room_id } => { services.rooms.directory.set_public(&room_id); Ok(RoomMessageEventContent::notice_plain("Room published")) }, - 
RoomDirectoryCommand::Unpublish { - room_id, - } => { + | RoomDirectoryCommand::Unpublish { room_id } => { services.rooms.directory.set_not_public(&room_id); Ok(RoomMessageEventContent::notice_plain("Room unpublished")) }, - RoomDirectoryCommand::List { - page, - } => { + | RoomDirectoryCommand::List { page } => { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); let mut rooms: Vec<_> = services @@ -70,7 +67,9 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_> "Rooms (page {page}):\n```\n{}\n```", rooms .iter() - .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) + .map(|(id, members, name)| format!( + "{id} | Members: {members} | Name: {name}" + )) .collect::>() .join("\n") ); diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 6a6ec695..34abf8a9 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -27,7 +27,11 @@ pub(crate) enum RoomInfoCommand { } #[admin_command] -async fn list_joined_members(&self, room_id: Box, local_only: bool) -> Result { +async fn list_joined_members( + &self, + room_id: Box, + local_only: bool, +) -> Result { let room_name = self .services .rooms diff --git a/src/admin/room/mod.rs b/src/admin/room/mod.rs index ad05d16d..26d2c2d8 100644 --- a/src/admin/room/mod.rs +++ b/src/admin/room/mod.rs @@ -9,7 +9,8 @@ use conduwuit::Result; use ruma::OwnedRoomId; use self::{ - alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand, moderation::RoomModerationCommand, + alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand, + moderation::RoomModerationCommand, }; use crate::admin_command_dispatch; diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index 427f3e42..b16aff1f 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -6,7 +6,10 @@ use conduwuit::{ warn, Result, }; use futures::StreamExt; -use 
ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId}; +use ruma::{ + events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId, + RoomOrAliasId, +}; use crate::{admin_command, admin_command_dispatch, get_room_info}; @@ -75,7 +78,10 @@ pub(crate) enum RoomModerationCommand { #[admin_command] async fn ban_room( - &self, force: bool, disable_federation: bool, room: Box, + &self, + force: bool, + disable_federation: bool, + room: Box, ) -> Result { debug!("Got room alias or ID: {}", room); @@ -89,13 +95,13 @@ async fn ban_room( let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { - Ok(room_id) => room_id, - Err(e) => { + | Ok(room_id) => room_id, + | Err(e) => return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse room ID {room}. Please note that this requires a full room ID \ - (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`): {e}" - ))) - }, + "Failed to parse room ID {room}. Please note that this requires a full room \ + ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ + (`#roomalias:example.com`): {e}" + ))), }; debug!("Room specified is a room ID, banning room ID"); @@ -105,18 +111,18 @@ async fn ban_room( room_id } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { - Ok(room_alias) => room_alias, - Err(e) => { + | Ok(room_alias) => room_alias, + | Err(e) => return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse room ID {room}. Please note that this requires a full room ID \ - (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`): {e}" - ))) - }, + "Failed to parse room ID {room}. 
Please note that this requires a full room \ + ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ + (`#roomalias:example.com`): {e}" + ))), }; debug!( - "Room specified is not a room ID, attempting to resolve room alias to a room ID locally, if not using \ - get_alias_helper to fetch room ID remotely" + "Room specified is not a room ID, attempting to resolve room alias to a room ID \ + locally, if not using get_alias_helper to fetch room ID remotely" ); let room_id = if let Ok(room_id) = self @@ -128,7 +134,10 @@ async fn ban_room( { room_id } else { - debug!("We don't have this room alias to a room ID locally, attempting to fetch room ID over federation"); + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch room \ + ID over federation" + ); match self .services @@ -137,11 +146,15 @@ async fn ban_room( .resolve_alias(&room_alias, None) .await { - Ok((room_id, servers)) => { - debug!(?room_id, ?servers, "Got federation response fetching room ID for {room}"); + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for {room}" + ); room_id }, - Err(e) => { + | Err(e) => { return Ok(RoomMessageEventContent::notice_plain(format!( "Failed to resolve room alias {room} to a room ID: {e}" ))); @@ -154,8 +167,9 @@ async fn ban_room( room_id } else { return Ok(RoomMessageEventContent::text_plain( - "Room specified is not a room ID or room alias. Please note that this requires a full room ID \ - (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`)", + "Room specified is not a room ID or room alias. 
Please note that this requires a \ + full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ + (`#roomalias:example.com`)", )); }; @@ -171,8 +185,8 @@ async fn ban_room( while let Some(local_user) = users.next().await { debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all errors, evicting \ - admins too)", + "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all \ + errors, evicting admins too)", ); if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { @@ -196,12 +210,14 @@ async fn ban_room( debug!("Attempting leave for user {} in room {}", &local_user, &room_id); if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { error!( - "Error attempting to make local user {} leave room {} during room banning: {}", + "Error attempting to make local user {} leave room {} during room banning: \ + {}", &local_user, &room_id, e ); return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room banning (room is still banned \ - but not removing any more users): {}\nIf you would like to ignore errors, use --force", + "Error attempting to make local user {} leave room {} during room banning \ + (room is still banned but not removing any more users): {}\nIf you would \ + like to ignore errors, use --force", &local_user, &room_id, e ))); } @@ -232,19 +248,26 @@ async fn ban_room( if disable_federation { self.services.rooms.metadata.disable_room(&room_id, true); return Ok(RoomMessageEventContent::text_plain( - "Room banned, removed all our local users, and disabled incoming federation with room.", + "Room banned, removed all our local users, and disabled incoming federation with \ + room.", )); } Ok(RoomMessageEventContent::text_plain( - "Room banned and removed all our local users, use `!admin federation disable-room` to stop receiving new \ - inbound federation events as well if needed.", + "Room banned 
and removed all our local users, use `!admin federation disable-room` to \ + stop receiving new inbound federation events as well if needed.", )) } #[admin_command] -async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Result { - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" +async fn ban_list_of_rooms( + &self, + force: bool, + disable_federation: bool, +) -> Result { + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&"").trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. Add --help for details.", @@ -264,9 +287,10 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu for &room in &rooms_s { match <&RoomOrAliasId>::try_from(room) { - Ok(room_alias_or_id) => { + | Ok(room_alias_or_id) => { if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { - if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) { + if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) + { info!("User specified admin room in bulk ban list, ignoring"); continue; } @@ -274,19 +298,20 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu if room_alias_or_id.is_room_id() { let room_id = match RoomId::parse(room_alias_or_id) { - Ok(room_id) => room_id, - Err(e) => { + | Ok(room_id) => room_id, + | Err(e) => { if force { // ignore rooms we failed to parse if we're force banning warn!( - "Error parsing room \"{room}\" during bulk room banning, ignoring error and \ - logging here: {e}" + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" ); continue; } return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the list and try again: {e}" + "{room} is not a valid room ID or room alias, 
please fix the \ + list and try again: {e}" ))); }, }; @@ -296,7 +321,7 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu if room_alias_or_id.is_room_alias_id() { match RoomAliasId::parse(room_alias_or_id) { - Ok(room_alias) => { + | Ok(room_alias) => { let room_id = if let Ok(room_id) = self .services .rooms @@ -307,8 +332,8 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu room_id } else { debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room ID \ - over federation" + "We don't have this room alias to a room ID locally, \ + attempting to fetch room ID over federation" ); match self @@ -318,7 +343,7 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu .resolve_alias(&room_alias, None) .await { - Ok((room_id, servers)) => { + | Ok((room_id, servers)) => { debug!( ?room_id, ?servers, @@ -326,15 +351,19 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu ); room_id }, - Err(e) => { + | Err(e) => { // don't fail if force blocking if force { - warn!("Failed to resolve room alias {room} to a room ID: {e}"); + warn!( + "Failed to resolve room alias {room} to a room \ + ID: {e}" + ); continue; } return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" + "Failed to resolve room alias {room} to a room ID: \ + {e}" ))); }, } @@ -342,34 +371,37 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu room_ids.push(room_id); }, - Err(e) => { + | Err(e) => { if force { // ignore rooms we failed to parse if we're force deleting error!( - "Error parsing room \"{room}\" during bulk room banning, ignoring error and \ - logging here: {e}" + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" ); continue; } return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID 
or room alias, please fix the list and try again: {e}" + "{room} is not a valid room ID or room alias, please fix the \ + list and try again: {e}" ))); }, } } }, - Err(e) => { + | Err(e) => { if force { // ignore rooms we failed to parse if we're force deleting error!( - "Error parsing room \"{room}\" during bulk room banning, ignoring error and logging here: {e}" + "Error parsing room \"{room}\" during bulk room banning, ignoring error \ + and logging here: {e}" ); continue; } return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the list and try again: {e}" + "{room} is not a valid room ID or room alias, please fix the list and try \ + again: {e}" ))); }, } @@ -393,8 +425,8 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu while let Some(local_user) = users.next().await { debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all errors, evicting \ - admins too)", + "Attempting leave for user {local_user} in room {room_id} (forced, ignoring \ + all errors, evicting admins too)", ); if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { @@ -418,14 +450,15 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu debug!("Attempting leave for user {local_user} in room {room_id}"); if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { error!( - "Error attempting to make local user {local_user} leave room {room_id} during bulk room \ - banning: {e}", + "Error attempting to make local user {local_user} leave room {room_id} \ + during bulk room banning: {e}", ); return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room banning (room is still \ - banned but not removing any more users and not banning any more rooms): {}\nIf you would \ - like to ignore errors, use --force", + "Error attempting to make local user 
{} leave room {} during room \ + banning (room is still banned but not removing any more users and not \ + banning any more rooms): {}\nIf you would like to ignore errors, use \ + --force", &local_user, &room_id, e ))); } @@ -458,8 +491,8 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu if disable_federation { Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and disabled incoming \ - federation with the room." + "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, \ + and disabled incoming federation with the room." ))) } else { Ok(RoomMessageEventContent::text_plain(format!( @@ -469,16 +502,20 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu } #[admin_command] -async fn unban_room(&self, enable_federation: bool, room: Box) -> Result { +async fn unban_room( + &self, + enable_federation: bool, + room: Box, +) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { - Ok(room_id) => room_id, - Err(e) => { + | Ok(room_id) => room_id, + | Err(e) => return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse room ID {room}. Please note that this requires a full room ID \ - (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`): {e}" - ))) - }, + "Failed to parse room ID {room}. 
Please note that this requires a full room \ + ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ + (`#roomalias:example.com`): {e}" + ))), }; debug!("Room specified is a room ID, unbanning room ID"); @@ -488,18 +525,18 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> room_id } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { - Ok(room_alias) => room_alias, - Err(e) => { + | Ok(room_alias) => room_alias, + | Err(e) => return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse room ID {room}. Please note that this requires a full room ID \ - (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`): {e}" - ))) - }, + "Failed to parse room ID {room}. Please note that this requires a full room \ + ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ + (`#roomalias:example.com`): {e}" + ))), }; debug!( - "Room specified is not a room ID, attempting to resolve room alias to a room ID locally, if not using \ - get_alias_helper to fetch room ID remotely" + "Room specified is not a room ID, attempting to resolve room alias to a room ID \ + locally, if not using get_alias_helper to fetch room ID remotely" ); let room_id = if let Ok(room_id) = self @@ -511,7 +548,10 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> { room_id } else { - debug!("We don't have this room alias to a room ID locally, attempting to fetch room ID over federation"); + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch room \ + ID over federation" + ); match self .services @@ -520,11 +560,15 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> .resolve_alias(&room_alias, None) .await { - Ok((room_id, servers)) => { - debug!(?room_id, ?servers, "Got federation response fetching room ID for room {room}"); + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for room {room}" + ); 
room_id }, - Err(e) => { + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to resolve room alias {room} to a room ID: {e}" ))); @@ -537,8 +581,9 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> room_id } else { return Ok(RoomMessageEventContent::text_plain( - "Room specified is not a room ID or room alias. Please note that this requires a full room ID \ - (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`)", + "Room specified is not a room ID or room alias. Please note that this requires a \ + full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ + (`#roomalias:example.com`)", )); }; @@ -548,8 +593,8 @@ async fn unban_room(&self, enable_federation: bool, room: Box) -> } Ok(RoomMessageEventContent::text_plain( - "Room unbanned, you may need to re-enable federation with the room using enable-room if this is a remote room \ - to make it fully functional.", + "Room unbanned, you may need to re-enable federation with the room using enable-room if \ + this is a remote room to make it fully functional.", )) } diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 676f2d33..8d3358a8 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -29,13 +29,12 @@ pub(super) async fn show_config(&self) -> Result { #[admin_command] pub(super) async fn list_features( - &self, available: bool, enabled: bool, comma: bool, + &self, + available: bool, + enabled: bool, + comma: bool, ) -> Result { - let delim = if comma { - "," - } else { - " " - }; + let delim = if comma { "," } else { " " }; if enabled && !available { let features = info::rustc::features().join(delim); let out = format!("`\n{features}\n`"); @@ -53,16 +52,8 @@ pub(super) async fn list_features( let available = info::cargo::features(); for feature in available { let active = enabled.contains(&feature.as_str()); - let emoji = if active { - "✅" - } else { - "❌" - }; - let remark = if active 
{ - "[enabled]" - } else { - "" - }; + let emoji = if active { "✅" } else { "❌" }; + let remark = if active { "[enabled]" } else { "" }; writeln!(features, "{emoji} {feature} {remark}")?; } @@ -73,7 +64,8 @@ pub(super) async fn list_features( pub(super) async fn memory_usage(&self) -> Result { let services_usage = self.services.memory_usage().await?; let database_usage = self.services.db.db.memory_usage()?; - let allocator_usage = conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}")); + let allocator_usage = + conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}")); Ok(RoomMessageEventContent::text_plain(format!( "Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}", @@ -106,8 +98,8 @@ pub(super) async fn backup_database(&self) -> Result { .server .runtime() .spawn_blocking(move || match globals.db.backup() { - Ok(()) => String::new(), - Err(e) => e.to_string(), + | Ok(()) => String::new(), + | Err(e) => e.to_string(), }) .await?; @@ -147,8 +139,8 @@ pub(super) async fn restart(&self, force: bool) -> Result Result { } #[admin_command] -pub(super) async fn create_user(&self, username: String, password: Option) -> Result { +pub(super) async fn create_user( + &self, + username: String, + password: Option, +) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &username)?; if self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain(format!("Userid {user_id} already exists"))); + return Ok(RoomMessageEventContent::text_plain(format!( + "Userid {user_id} already exists" + ))); } if user_id.is_historical() { @@ -120,7 +126,9 @@ pub(super) async fn create_user(&self, username: String, password: Option { + | Ok(_response) => { info!("Automatically joined room {room} for user {user_id}"); }, - Err(e) => { + | Err(e) => { self.services .admin .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to 
automatically join room {room} for user {user_id}: {e}" + "Failed to automatically join room {room} for user {user_id}: \ + {e}" ))) .await .ok(); // don't return this error so we don't fail registrations - error!("Failed to automatically join room {room} for user {user_id}: {e}"); + error!( + "Failed to automatically join room {room} for user {user_id}: {e}" + ); }, }; } @@ -181,7 +195,11 @@ pub(super) async fn create_user(&self, username: String, password: Option Result { +pub(super) async fn deactivate( + &self, + no_leave_rooms: bool, + user_id: String, +) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; @@ -229,7 +247,8 @@ pub(super) async fn reset_password(&self, username: String) -> Result Result Ok(RoomMessageEventContent::text_plain(format!( + | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( "Successfully reset the password for user {user_id}: `{new_password}`" ))), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( "Couldn't reset the password for user {user_id}: {e}" ))), } } #[admin_command] -pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result { - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" +pub(super) async fn deactivate_all( + &self, + no_leave_rooms: bool, + force: bool, +) -> Result { + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&"").trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. 
Add --help for details.", @@ -269,7 +294,7 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> for username in usernames { match parse_active_local_user_id(self.services, username).await { - Ok(user_id) => { + | Ok(user_id) => { if self.services.users.is_admin(&user_id).await && !force { self.services .admin @@ -296,7 +321,7 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> user_ids.push(user_id); }, - Err(e) => { + | Err(e) => { self.services .admin .send_message(RoomMessageEventContent::text_plain(format!( @@ -313,7 +338,7 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> for user_id in user_ids { match self.services.users.deactivate_account(&user_id).await { - Ok(()) => { + | Ok(()) => { deactivation_count = deactivation_count.saturating_add(1); if !no_leave_rooms { info!("Forcing user {user_id} to leave all rooms apart of deactivate-all"); @@ -328,14 +353,17 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?; update_displayname(self.services, &user_id, None, &all_joined_rooms).await; - update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await; + update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms) + .await; leave_all_rooms(self.services, &user_id).await; } }, - Err(e) => { + | Err(e) => { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!("Failed deactivating user: {e}"))) + .send_message(RoomMessageEventContent::text_plain(format!( + "Failed deactivating user: {e}" + ))) .await .ok(); }, @@ -348,8 +376,8 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> ))) } else { Ok(RoomMessageEventContent::text_plain(format!( - "Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. 
Use --force to deactivate admin \ - accounts", + "Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \ + --force to deactivate admin accounts", admins.join(", ") ))) } @@ -391,9 +419,13 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result Result { - if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" + if self.body.len() < 2 + || !self.body[0].trim().starts_with("```") + || self.body.last().unwrap_or(&"").trim() != "```" { return Ok(RoomMessageEventContent::text_plain( "Expected code block in command body. Add --help for details.", @@ -402,8 +434,8 @@ pub(super) async fn force_join_list_of_local_users( if !yes_i_want_to_do_this { return Ok(RoomMessageEventContent::notice_markdown( - "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force bulk join all \ - specified local users.", + "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ + bulk join all specified local users.", )); } @@ -462,7 +494,7 @@ pub(super) async fn force_join_list_of_local_users( for username in usernames { match parse_active_local_user_id(self.services, username).await { - Ok(user_id) => { + | Ok(user_id) => { // don't make the server service account join if user_id == self.services.globals.server_user { self.services @@ -477,7 +509,7 @@ pub(super) async fn force_join_list_of_local_users( user_ids.push(user_id); }, - Err(e) => { + | Err(e) => { self.services .admin .send_message(RoomMessageEventContent::text_plain(format!( @@ -505,10 +537,10 @@ pub(super) async fn force_join_list_of_local_users( ) .await { - Ok(_res) => { + | Ok(_res) => { successful_joins = successful_joins.saturating_add(1); }, - Err(e) => { + | Err(e) => { debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, @@ -516,18 +548,21 @@ pub(super) async fn 
force_join_list_of_local_users( } Ok(RoomMessageEventContent::notice_markdown(format!( - "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins failed.", + "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \ + failed.", ))) } #[admin_command] pub(super) async fn force_join_all_local_users( - &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, + &self, + room_id: OwnedRoomOrAliasId, + yes_i_want_to_do_this: bool, ) -> Result { if !yes_i_want_to_do_this { return Ok(RoomMessageEventContent::notice_markdown( - "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force bulk join all local \ - users.", + "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ + bulk join all local users.", )); } @@ -598,10 +633,10 @@ pub(super) async fn force_join_all_local_users( ) .await { - Ok(_res) => { + | Ok(_res) => { successful_joins = successful_joins.saturating_add(1); }, - Err(e) => { + | Err(e) => { debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, @@ -609,13 +644,16 @@ pub(super) async fn force_join_all_local_users( } Ok(RoomMessageEventContent::notice_markdown(format!( - "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins failed.", + "{successful_joins} local users have been joined to {room_id}. 
{failed_joins} joins \ + failed.", ))) } #[admin_command] pub(super) async fn force_join_room( - &self, user_id: String, room_id: OwnedRoomOrAliasId, + &self, + user_id: String, + room_id: OwnedRoomOrAliasId, ) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let (room_id, servers) = self @@ -629,7 +667,8 @@ pub(super) async fn force_join_room( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); - join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None).await?; + join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None) + .await?; Ok(RoomMessageEventContent::notice_markdown(format!( "{user_id} has been joined to {room_id}.", @@ -638,7 +677,9 @@ pub(super) async fn force_join_room( #[admin_command] pub(super) async fn force_leave_room( - &self, user_id: String, room_id: OwnedRoomOrAliasId, + &self, + user_id: String, + room_id: OwnedRoomOrAliasId, ) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -656,7 +697,9 @@ pub(super) async fn force_leave_room( #[admin_command] pub(super) async fn force_demote( - &self, user_id: String, room_id: OwnedRoomOrAliasId, + &self, + user_id: String, + room_id: OwnedRoomOrAliasId, ) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -672,14 +715,19 @@ pub(super) async fn force_demote( .services .rooms .state_accessor - .room_state_get_content::(&room_id, &StateEventType::RoomPowerLevels, "") + .room_state_get_content::( + &room_id, + &StateEventType::RoomPowerLevels, + "", + ) .await .ok(); let user_can_demote_self = room_power_levels .as_ref() .is_some_and(|power_levels_content| { - RoomPowerLevels::from(power_levels_content.clone()).user_can_change_user_power_level(&user_id, &user_id) + 
RoomPowerLevels::from(power_levels_content.clone()) + .user_can_change_user_power_level(&user_id, &user_id) }) || self .services .rooms @@ -710,7 +758,8 @@ pub(super) async fn force_demote( .await?; Ok(RoomMessageEventContent::notice_markdown(format!( - "User {user_id} demoted themselves to the room default power level in {room_id} - {event_id}" + "User {user_id} demoted themselves to the room default power level in {room_id} - \ + {event_id}" ))) } @@ -731,7 +780,10 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result, tag: String, + &self, + user_id: String, + room_id: Box, + tag: String, ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; @@ -741,9 +793,7 @@ pub(super) async fn put_room_tag( .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) .await .unwrap_or(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + content: TagEventContent { tags: BTreeMap::new() }, }); tags_event @@ -768,7 +818,10 @@ pub(super) async fn put_room_tag( #[admin_command] pub(super) async fn delete_room_tag( - &self, user_id: String, room_id: Box, tag: String, + &self, + user_id: String, + room_id: Box, + tag: String, ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; @@ -778,9 +831,7 @@ pub(super) async fn delete_room_tag( .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) .await .unwrap_or(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + content: TagEventContent { tags: BTreeMap::new() }, }); tags_event.content.tags.remove(&tag.clone().into()); @@ -796,12 +847,17 @@ pub(super) async fn delete_room_tag( .await?; Ok(RoomMessageEventContent::text_plain(format!( - "Successfully updated room account data for {user_id} and room {room_id}, deleting room tag {tag}" + "Successfully updated room account data for {user_id} and room {room_id}, deleting room \ + tag {tag}" ))) } #[admin_command] -pub(super) async fn get_room_tags(&self, user_id: 
String, room_id: Box) -> Result { +pub(super) async fn get_room_tags( + &self, + user_id: String, + room_id: Box, +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let tags_event = self @@ -810,9 +866,7 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: Box) .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) .await .unwrap_or(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + content: TagEventContent { tags: BTreeMap::new() }, }); Ok(RoomMessageEventContent::notice_markdown(format!( @@ -822,7 +876,10 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: Box) } #[admin_command] -pub(super) async fn redact_event(&self, event_id: Box) -> Result { +pub(super) async fn redact_event( + &self, + event_id: Box, +) -> Result { let Ok(event) = self .services .rooms @@ -841,7 +898,9 @@ pub(super) async fn redact_event(&self, event_id: Box) -> Result String { .replace('>', ">") } -pub(crate) async fn get_room_info(services: &Services, room_id: &RoomId) -> (OwnedRoomId, u64, String) { +pub(crate) async fn get_room_info( + services: &Services, + room_id: &RoomId, +) -> (OwnedRoomId, u64, String) { ( room_id.into(), services @@ -44,7 +47,10 @@ pub(crate) fn parse_local_user_id(services: &Services, user_id: &str) -> Result< } /// Parses user ID that is an active (not guest or deactivated) local user -pub(crate) async fn parse_active_local_user_id(services: &Services, user_id: &str) -> Result { +pub(crate) async fn parse_active_local_user_id( + services: &Services, + user_id: &str, +) -> Result { let user_id = parse_local_user_id(services, user_id)?; if !services.users.exists(&user_id).await { diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 71e18fd3..e6748124 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -2,16 +2,19 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use 
conduwuit::{debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result}; +use conduwuit::{ + debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result, +}; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ api::client::{ account::{ - change_password, check_registration_token_validity, deactivate, get_3pids, get_username_availability, + change_password, check_registration_token_validity, deactivate, get_3pids, + get_username_availability, register::{self, LoginType}, - request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, - ThirdPartyIdRemovalStatus, + request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, + whoami, ThirdPartyIdRemovalStatus, }, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, @@ -45,7 +48,8 @@ const RANDOM_USER_ID_LENGTH: usize = 10; /// invalid when trying to register #[tracing::instrument(skip_all, fields(%client), name = "register_available")] pub(crate) async fn get_register_available_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue @@ -66,7 +70,8 @@ pub(crate) async fn get_register_available_route( let user_id = UserId::parse_with_server_name(body_username, services.globals.server_name()) .ok() .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) && services.globals.user_is_local(user_id) + (!user_id.is_historical() || is_matrix_appservice_irc) + && services.globals.user_is_local(user_id) }) .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; @@ -86,9 +91,7 @@ pub(crate) async fn get_register_available_route( // TODO add check for appservice namespaces // If no if check is true we 
have an username that's available to be used. - Ok(get_username_availability::v3::Response { - available: true, - }) + Ok(get_username_availability::v3::Response { available: true }) } /// # `POST /_matrix/client/v3/register` @@ -111,12 +114,14 @@ pub(crate) async fn get_register_available_route( #[allow(clippy::doc_markdown)] #[tracing::instrument(skip_all, fields(%client), name = "register")] pub(crate) async fn register_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, ) -> Result { if !services.globals.allow_registration() && body.appservice_info.is_none() { info!( - "Registration disabled and request not from known appservice, rejecting registration attempt for username \ - \"{}\"", + "Registration disabled and request not from known appservice, rejecting \ + registration attempt for username \"{}\"", body.username.as_deref().unwrap_or("") ); return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration has been disabled.")); @@ -126,11 +131,12 @@ pub(crate) async fn register_route( if is_guest && (!services.globals.allow_guest_registration() - || (services.globals.allow_registration() && services.globals.registration_token.is_some())) + || (services.globals.allow_registration() + && services.globals.registration_token.is_some())) { info!( - "Guest registration disabled / registration enabled with token configured, rejecting guest registration \ - attempt, initial device name: \"{}\"", + "Guest registration disabled / registration enabled with token configured, \ + rejecting guest registration attempt, initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); return Err(Error::BadRequest( @@ -143,21 +149,25 @@ pub(crate) async fn register_route( // generic user error. 
if is_guest && services.users.count().await < 2 { warn!( - "Guest account attempted to register before a real admin user has been registered, rejecting \ - registration. Guest's initial device name: \"{}\"", + "Guest account attempted to register before a real admin user has been registered, \ + rejecting registration. Guest's initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration temporarily disabled.")); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Registration temporarily disabled.", + )); } let user_id = match (&body.username, is_guest) { - (Some(username), false) => { + | (Some(username), false) => { // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue - let is_matrix_appservice_irc = body.appservice_info.as_ref().is_some_and(|appservice| { - appservice.registration.id == "irc" - || appservice.registration.id.contains("matrix-appservice-irc") - || appservice.registration.id.contains("matrix_appservice_irc") - }); + let is_matrix_appservice_irc = + body.appservice_info.as_ref().is_some_and(|appservice| { + appservice.registration.id == "irc" + || appservice.registration.id.contains("matrix-appservice-irc") + || appservice.registration.id.contains("matrix_appservice_irc") + }); // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { @@ -166,15 +176,23 @@ pub(crate) async fn register_route( username.to_lowercase() }; - let proposed_user_id = UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let proposed_user_id = + UserId::parse_with_server_name(body_username, 
services.globals.server_name()) + .ok() + .filter(|user_id| { + (!user_id.is_historical() || is_matrix_appservice_irc) + && services.globals.user_is_local(user_id) + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; if services.users.exists(&proposed_user_id).await { - return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); } if services @@ -187,7 +205,7 @@ pub(crate) async fn register_route( proposed_user_id }, - _ => loop { + | _ => loop { let proposed_user_id = UserId::parse_with_server_name( utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), services.globals.server_name(), @@ -228,9 +246,7 @@ pub(crate) async fn register_route( } else { // No registration token necessary, but clients must still go through the flow uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Dummy], - }], + flows: vec![AuthFlow { stages: vec![AuthType::Dummy] }], completed: Vec::new(), params: Box::default(), session: None, @@ -244,7 +260,8 @@ pub(crate) async fn register_route( let (worked, uiaainfo) = services .uiaa .try_auth( - &UserId::parse_with_server_name("", services.globals.server_name()).expect("we know this is valid"), + &UserId::parse_with_server_name("", services.globals.server_name()) + .expect("we know this is valid"), "".into(), auth, &uiaainfo, @@ -257,7 +274,8 @@ pub(crate) async fn register_route( } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services.uiaa.create( - &UserId::parse_with_server_name("", services.globals.server_name()).expect("we know this is valid"), + &UserId::parse_with_server_name("", services.globals.server_name()) + .expect("we know this is valid"), "".into(), &uiaainfo, &json, @@ -268,11 +286,7 @@ pub(crate) async fn register_route( } } - let password = if is_guest { - None - } 
else { - body.password.as_deref() - }; + let password = if is_guest { None } else { body.password.as_deref() }; // Create user services.users.create(&user_id, password)?; @@ -282,7 +296,9 @@ pub(crate) async fn register_route( // If `new_user_displayname_suffix` is set, registration will push whatever // content is set to the user's display name with a space before it - if !services.globals.new_user_displayname_suffix().is_empty() && body.appservice_info.is_none() { + if !services.globals.new_user_displayname_suffix().is_empty() + && body.appservice_info.is_none() + { write!(displayname, " {}", services.globals.config.new_user_displayname_suffix) .expect("should be able to write to string buffer"); } @@ -319,12 +335,8 @@ pub(crate) async fn register_route( } // Generate new device id if the user didn't specify one - let device_id = if is_guest { - None - } else { - body.device_id.clone() - } - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); + let device_id = if is_guest { None } else { body.device_id.clone() } + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); // Generate new token for the device let token = utils::random_string(TOKEN_LENGTH); @@ -349,15 +361,16 @@ pub(crate) async fn register_route( if body.appservice_info.is_none() && !is_guest { if !device_display_name.is_empty() { info!( - "New user \"{user_id}\" registered on this server with device display name: \"{device_display_name}\"" + "New user \"{user_id}\" registered on this server with device display name: \ + \"{device_display_name}\"" ); if services.globals.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( - "New user \"{user_id}\" registered on this server from IP {client} and device display name \ - \"{device_display_name}\"" + "New user \"{user_id}\" registered on this server from IP {client} and \ + device display name \"{device_display_name}\"" ))) .await .ok(); @@ -386,8 +399,8 @@ pub(crate) async fn 
register_route( services .admin .send_message(RoomMessageEventContent::notice_plain(format!( - "Guest user \"{user_id}\" with device display name \"{device_display_name}\" registered on \ - this server from IP {client}" + "Guest user \"{user_id}\" with device display name \ + \"{device_display_name}\" registered on this server from IP {client}" ))) .await .ok(); @@ -398,8 +411,8 @@ pub(crate) async fn register_route( services .admin .send_message(RoomMessageEventContent::notice_plain(format!( - "Guest user \"{user_id}\" with no device display name registered on this server from IP \ - {client}", + "Guest user \"{user_id}\" with no device display name registered on \ + this server from IP {client}", ))) .await .ok(); @@ -430,7 +443,10 @@ pub(crate) async fn register_route( { for room in &services.globals.config.auto_join_rooms { let Ok(room_id) = services.rooms.alias.resolve(room).await else { - error!("Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"); + error!( + "Failed to resolve room alias to room ID when attempting to auto join \ + {room}, skipping" + ); continue; }; @@ -440,7 +456,9 @@ pub(crate) async fn register_route( .server_in_room(services.globals.server_name(), &room_id) .await { - warn!("Skipping room {room} to automatically join as we have never joined before."); + warn!( + "Skipping room {room} to automatically join as we have never joined before." 
+ ); continue; } @@ -494,7 +512,8 @@ pub(crate) async fn register_route( /// - Triggers device list updates #[tracing::instrument(skip_all, fields(%client), name = "change_password")] pub(crate) async fn change_password_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { // Authentication for this endpoint was made optional, but we need @@ -506,9 +525,7 @@ pub(crate) async fn change_password_route( let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Password], - }], + flows: vec![AuthFlow { stages: vec![AuthType::Password] }], completed: Vec::new(), params: Box::default(), session: None, @@ -572,7 +589,8 @@ pub(crate) async fn change_password_route( /// /// Note: Also works for Application Services pub(crate) async fn whoami_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device_id = body.sender_device.clone(); @@ -580,7 +598,8 @@ pub(crate) async fn whoami_route( Ok(whoami::v3::Response { user_id: sender_user.clone(), device_id, - is_guest: services.users.is_deactivated(sender_user).await? && body.appservice_info.is_none(), + is_guest: services.users.is_deactivated(sender_user).await? 
+ && body.appservice_info.is_none(), }) } @@ -597,7 +616,8 @@ pub(crate) async fn whoami_route( /// - Removes ability to log in again #[tracing::instrument(skip_all, fields(%client), name = "deactivate")] pub(crate) async fn deactivate_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { // Authentication for this endpoint was made optional, but we need @@ -609,9 +629,7 @@ pub(crate) async fn deactivate_route( let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Password], - }], + flows: vec![AuthFlow { stages: vec![AuthType::Password] }], completed: Vec::new(), params: Box::default(), session: None, @@ -675,7 +693,9 @@ pub(crate) async fn deactivate_route( /// Get a list of third party identifiers associated with this account. /// /// - Currently always returns empty list -pub(crate) async fn third_party_route(body: Ruma) -> Result { +pub(crate) async fn third_party_route( + body: Ruma, +) -> Result { let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_3pids::v3::Response::new(Vec::new())) @@ -720,7 +740,8 @@ pub(crate) async fn request_3pid_management_token_via_msisdn_route( /// Currently does not have any ratelimiting, and this isn't very practical as /// there is only one registration token allowed. 
pub(crate) async fn check_registration_token_validity( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let Some(reg_token) = services.globals.registration_token.clone() else { return Err(Error::BadRequest( @@ -729,9 +750,7 @@ pub(crate) async fn check_registration_token_validity( )); }; - Ok(check_registration_token_validity::v1::Response { - valid: reg_token == body.token, - }) + Ok(check_registration_token_validity::v1::Response { valid: reg_token == body.token }) } /// Runs through all the deactivation steps: @@ -742,7 +761,9 @@ pub(crate) async fn check_registration_token_validity( /// - Removing all profile data /// - Leaving all rooms (and forgets all of them) pub async fn full_user_deactivate( - services: &Services, user_id: &UserId, all_joined_rooms: &[OwnedRoomId], + services: &Services, + user_id: &UserId, + all_joined_rooms: &[OwnedRoomId], ) -> Result<()> { services.users.deactivate_account(user_id).await.ok(); super::update_displayname(services, user_id, None, all_joined_rooms).await; @@ -751,7 +772,9 @@ pub async fn full_user_deactivate( services .users .all_profile_keys(user_id) - .ready_for_each(|(profile_key, _)| services.users.set_profile_key(user_id, &profile_key, None)) + .ready_for_each(|(profile_key, _)| { + services.users.set_profile_key(user_id, &profile_key, None); + }) .await; for room_id in all_joined_rooms { @@ -760,20 +783,26 @@ pub async fn full_user_deactivate( let room_power_levels = services .rooms .state_accessor - .room_state_get_content::(room_id, &StateEventType::RoomPowerLevels, "") + .room_state_get_content::( + room_id, + &StateEventType::RoomPowerLevels, + "", + ) .await .ok(); - let user_can_demote_self = room_power_levels - .as_ref() - .is_some_and(|power_levels_content| { - RoomPowerLevels::from(power_levels_content.clone()).user_can_change_user_power_level(user_id, user_id) - }) || services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "") - 
.await - .is_ok_and(|event| event.sender == user_id); + let user_can_demote_self = + room_power_levels + .as_ref() + .is_some_and(|power_levels_content| { + RoomPowerLevels::from(power_levels_content.clone()) + .user_can_change_user_power_level(user_id, user_id) + }) || services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + .is_ok_and(|event| event.sender == user_id); if user_can_demote_self { let mut power_levels_content = room_power_levels.unwrap_or_default(); diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 2fc78808..9f84f227 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -2,11 +2,12 @@ use axum::extract::State; use conduwuit::{err, Err}; use ruma::{ api::client::config::{ - get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, + get_global_account_data, get_room_account_data, set_global_account_data, + set_room_account_data, }, events::{ - AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent, GlobalAccountDataEventType, - RoomAccountDataEventType, + AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent, + GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, RoomId, UserId, @@ -20,7 +21,8 @@ use crate::{service::Services, Result, Ruma}; /// /// Sets some account data for the sender user. 
pub(crate) async fn set_global_account_data_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -28,7 +30,14 @@ pub(crate) async fn set_global_account_data_route( return Err!(Request(Forbidden("You cannot set account data for other users."))); } - set_account_data(&services, None, &body.user_id, &body.event_type.to_string(), body.data.json()).await?; + set_account_data( + &services, + None, + &body.user_id, + &body.event_type.to_string(), + body.data.json(), + ) + .await?; Ok(set_global_account_data::v3::Response {}) } @@ -37,7 +46,8 @@ pub(crate) async fn set_global_account_data_route( /// /// Sets some room account data for the sender user. pub(crate) async fn set_room_account_data_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -61,7 +71,8 @@ pub(crate) async fn set_room_account_data_route( /// /// Gets some account data for the sender user. pub(crate) async fn get_global_account_data_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -75,16 +86,15 @@ pub(crate) async fn get_global_account_data_route( .await .map_err(|_| err!(Request(NotFound("Data not found."))))?; - Ok(get_global_account_data::v3::Response { - account_data: account_data.content, - }) + Ok(get_global_account_data::v3::Response { account_data: account_data.content }) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Gets some room account data for the sender user. 
pub(crate) async fn get_room_account_data_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -98,17 +108,20 @@ pub(crate) async fn get_room_account_data_route( .await .map_err(|_| err!(Request(NotFound("Data not found."))))?; - Ok(get_room_account_data::v3::Response { - account_data: account_data.content, - }) + Ok(get_room_account_data::v3::Response { account_data: account_data.content }) } async fn set_account_data( - services: &Services, room_id: Option<&RoomId>, sender_user: &UserId, event_type_s: &str, data: &RawJsonValue, + services: &Services, + room_id: Option<&RoomId>, + sender_user: &UserId, + event_type_s: &str, + data: &RawJsonValue, ) -> Result { if event_type_s == RoomAccountDataEventType::FullyRead.to_cow_str() { return Err!(Request(BadJson( - "This endpoint cannot be used for marking a room as fully read (setting m.fully_read)" + "This endpoint cannot be used for marking a room as fully read (setting \ + m.fully_read)" ))); } @@ -118,8 +131,8 @@ async fn set_account_data( ))); } - let data: serde_json::Value = - serde_json::from_str(data.get()).map_err(|e| err!(Request(BadJson(warn!("Invalid JSON provided: {e}")))))?; + let data: serde_json::Value = serde_json::from_str(data.get()) + .map_err(|e| err!(Request(BadJson(warn!("Invalid JSON provided: {e}")))))?; services .account_data diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index c60a2f4c..e1af416e 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -14,7 +14,8 @@ use crate::Ruma; /// /// Creates a new room alias on this server. 
pub(crate) async fn create_alias_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -58,7 +59,8 @@ pub(crate) async fn create_alias_route( /// /// - TODO: Update canonical alias event pub(crate) async fn delete_alias_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -83,11 +85,13 @@ pub(crate) async fn delete_alias_route( /// /// Resolve an alias locally or over federation. pub(crate) async fn get_alias_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let room_alias = body.body.room_alias; - let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await else { + let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await + else { return Err!(Request(NotFound("Room with alias not found."))); }; @@ -98,7 +102,10 @@ pub(crate) async fn get_alias_route( } async fn room_available_servers( - services: &Services, room_id: &RoomId, room_alias: &RoomAliasId, pre_servers: Vec, + services: &Services, + room_id: &RoomId, + room_alias: &RoomAliasId, + pre_servers: Vec, ) -> Vec { // find active servers in room state cache to suggest let mut servers: Vec = services diff --git a/src/api/client/appservice.rs b/src/api/client/appservice.rs index 9dbd141e..e4071ab0 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -9,12 +9,12 @@ use crate::Ruma; /// Ask the homeserver to ping the application service to ensure the connection /// works. 
pub(crate) async fn appservice_ping( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { - let appservice_info = body - .appservice_info - .as_ref() - .ok_or_else(|| err!(Request(Forbidden("This endpoint can only be called by appservices."))))?; + let appservice_info = body.appservice_info.as_ref().ok_or_else(|| { + err!(Request(Forbidden("This endpoint can only be called by appservices."))) + })?; if body.appservice_id != appservice_info.registration.id { return Err!(Request(Forbidden( @@ -41,7 +41,5 @@ pub(crate) async fn appservice_ping( .await? .expect("We already validated if an appservice URL exists above"); - Ok(request_ping::v1::Response { - duration: timer.elapsed(), - }) + Ok(request_ping::v1::Response { duration: timer.elapsed() }) } diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 9e4746ca..d330952d 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -2,10 +2,11 @@ use axum::extract::State; use conduwuit::{err, Err}; use ruma::{ api::client::backup::{ - add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, - delete_backup_keys, delete_backup_keys_for_room, delete_backup_keys_for_session, delete_backup_version, - get_backup_info, get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session, - get_latest_backup_info, update_backup_version, + add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, + create_backup_version, delete_backup_keys, delete_backup_keys_for_room, + delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys, + get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, + update_backup_version, }, UInt, }; @@ -16,15 +17,14 @@ use crate::{Result, Ruma}; /// /// Creates a new backup. 
pub(crate) async fn create_backup_version_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let version = services .key_backups .create_backup(body.sender_user(), &body.algorithm)?; - Ok(create_backup_version::v3::Response { - version, - }) + Ok(create_backup_version::v3::Response { version }) } /// # `PUT /_matrix/client/r0/room_keys/version/{version}` @@ -32,7 +32,8 @@ pub(crate) async fn create_backup_version_route( /// Update information about an existing backup. Only `auth_data` can be /// modified. pub(crate) async fn update_backup_version_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { services .key_backups @@ -46,7 +47,8 @@ pub(crate) async fn update_backup_version_route( /// /// Get information about the latest backup version. pub(crate) async fn get_latest_backup_info_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let (version, algorithm) = services .key_backups @@ -75,13 +77,16 @@ pub(crate) async fn get_latest_backup_info_route( /// /// Get information about an existing backup. 
pub(crate) async fn get_backup_info_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let algorithm = services .key_backups .get_backup(body.sender_user(), &body.version) .await - .map_err(|_| err!(Request(NotFound("Key backup does not exist at version {:?}", body.version))))?; + .map_err(|_| { + err!(Request(NotFound("Key backup does not exist at version {:?}", body.version))) + })?; Ok(get_backup_info::v3::Response { algorithm, @@ -105,7 +110,8 @@ pub(crate) async fn get_backup_info_route( /// - Deletes both information about the backup, as well as all key data related /// to the backup pub(crate) async fn delete_backup_version_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { services .key_backups @@ -124,7 +130,8 @@ pub(crate) async fn delete_backup_version_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub(crate) async fn add_backup_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if services .key_backups @@ -168,7 +175,8 @@ pub(crate) async fn add_backup_keys_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub(crate) async fn add_backup_keys_for_room_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if services .key_backups @@ -210,7 +218,8 @@ pub(crate) async fn add_backup_keys_for_room_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub(crate) async fn add_backup_keys_for_session_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if services .key_backups @@ -251,56 +260,56 @@ pub(crate) async fn add_backup_keys_for_session_route( /// /// Retrieves all keys from the backup. 
pub(crate) async fn get_backup_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let rooms = services .key_backups .get_all(body.sender_user(), &body.version) .await; - Ok(get_backup_keys::v3::Response { - rooms, - }) + Ok(get_backup_keys::v3::Response { rooms }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Retrieves all keys from the backup for a given room. pub(crate) async fn get_backup_keys_for_room_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sessions = services .key_backups .get_room(body.sender_user(), &body.version, &body.room_id) .await; - Ok(get_backup_keys_for_room::v3::Response { - sessions, - }) + Ok(get_backup_keys_for_room::v3::Response { sessions }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Retrieves a key from the backup. pub(crate) async fn get_backup_keys_for_session_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let key_data = services .key_backups .get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id) .await - .map_err(|_| err!(Request(NotFound(debug_error!("Backup key not found for this user's session.")))))?; + .map_err(|_| { + err!(Request(NotFound(debug_error!("Backup key not found for this user's session.")))) + })?; - Ok(get_backup_keys_for_session::v3::Response { - key_data, - }) + Ok(get_backup_keys_for_session::v3::Response { key_data }) } /// # `DELETE /_matrix/client/r0/room_keys/keys` /// /// Delete the keys from the backup. pub(crate) async fn delete_backup_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { services .key_backups @@ -324,7 +333,8 @@ pub(crate) async fn delete_backup_keys_route( /// /// Delete the keys from the backup for a given room. 
pub(crate) async fn delete_backup_keys_for_room_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { services .key_backups @@ -348,7 +358,8 @@ pub(crate) async fn delete_backup_keys_for_room_route( /// /// Delete a key from the backup. pub(crate) async fn delete_backup_keys_for_session_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { services .key_backups diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 20f9cb58..e122611f 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -3,8 +3,8 @@ use std::collections::BTreeMap; use axum::extract::State; use ruma::{ api::client::discovery::get_capabilities::{ - self, Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability, - ThirdPartyIdChangesCapability, + self, Capabilities, GetLoginTokenCapability, RoomVersionStability, + RoomVersionsCapability, ThirdPartyIdChangesCapability, }, RoomVersionId, }; @@ -17,9 +17,11 @@ use crate::{Result, Ruma}; /// Get information on the supported feature set and other relevent capabilities /// of this server. 
pub(crate) async fn get_capabilities_route( - State(services): State, _body: Ruma, + State(services): State, + _body: Ruma, ) -> Result { - let available: BTreeMap = services.server.available_room_versions().collect(); + let available: BTreeMap = + services.server.available_room_versions().collect(); let mut capabilities = Capabilities::default(); capabilities.room_versions = RoomVersionsCapability { @@ -28,21 +30,15 @@ pub(crate) async fn get_capabilities_route( }; // we do not implement 3PID stuff - capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability { - enabled: false, - }; + capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability { enabled: false }; // we dont support generating tokens yet - capabilities.get_login_token = GetLoginTokenCapability { - enabled: false, - }; + capabilities.get_login_token = GetLoginTokenCapability { enabled: false }; // MSC4133 capability capabilities .set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true})) .expect("this is valid JSON we created"); - Ok(get_capabilities::v3::Response { - capabilities, - }) + Ok(get_capabilities::v3::Response { capabilities }) } diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 52f27692..30ba170d 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -32,7 +32,8 @@ const LIMIT_DEFAULT: usize = 10; /// - Only works if the user is joined (TODO: always allow, but only show events /// if the user was joined, depending on history_visibility) pub(crate) async fn get_context_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let filter = &body.filter; let sender = body.sender(); @@ -50,9 +51,8 @@ pub(crate) async fn get_context_route( // members for "inline" profiles on the timeline to work properly let lazy_load_enabled = matches!(filter.lazy_load_options, LazyLoadOptions::Enabled { .. 
}); - let lazy_load_redundant = if let LazyLoadOptions::Enabled { - include_redundant_members, - } = filter.lazy_load_options + let lazy_load_redundant = if let LazyLoadOptions::Enabled { include_redundant_members } = + filter.lazy_load_options { include_redundant_members } else { @@ -91,10 +91,11 @@ pub(crate) async fn get_context_route( return Err!(Request(Forbidden("You don't have permission to view this event."))); } - let events_before = services - .rooms - .timeline - .pdus_rev(Some(sender_user), room_id, Some(base_token)); + let events_before = + services + .rooms + .timeline + .pdus_rev(Some(sender_user), room_id, Some(base_token)); let events_after = services .rooms @@ -166,7 +167,9 @@ pub(crate) async fn get_context_route( .filter(|&user_id: &&UserId| lazy.contains(user_id)) .map(|_| event_id) }) - .broad_filter_map(|event_id: &OwnedEventId| services.rooms.timeline.get_pdu(event_id).ok()) + .broad_filter_map(|event_id: &OwnedEventId| { + services.rooms.timeline.get_pdu(event_id).ok() + }) .map(|pdu| pdu.to_state_event()) .collect() .await; diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 63f0c210..bb0773dd 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -18,7 +18,8 @@ use crate::{utils, Error, Result, Ruma}; /// /// Get metadata on all devices of the sender user. pub(crate) async fn get_devices_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -28,16 +29,15 @@ pub(crate) async fn get_devices_route( .collect() .await; - Ok(get_devices::v3::Response { - devices, - }) + Ok(get_devices::v3::Response { devices }) } /// # `GET /_matrix/client/r0/devices/{deviceId}` /// /// Get metadata on a single device of the sender user. 
pub(crate) async fn get_device_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -47,9 +47,7 @@ pub(crate) async fn get_device_route( .await .map_err(|_| err!(Request(NotFound("Device not found."))))?; - Ok(get_device::v3::Response { - device, - }) + Ok(get_device::v3::Response { device }) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -57,7 +55,8 @@ pub(crate) async fn get_device_route( /// Updates the metadata on a given device of the sender user. #[tracing::instrument(skip_all, fields(%client), name = "update_device")] pub(crate) async fn update_device_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -93,16 +92,15 @@ pub(crate) async fn update_device_route( /// - Forgets to-device events /// - Triggers device list updates pub(crate) async fn delete_device_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Password], - }], + flows: vec![AuthFlow { stages: vec![AuthType::Password] }], completed: Vec::new(), params: Box::default(), session: None, @@ -151,16 +149,15 @@ pub(crate) async fn delete_device_route( /// - Forgets to-device events /// - Triggers device list updates pub(crate) async fn delete_devices_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user 
is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Password], - }], + flows: vec![AuthFlow { stages: vec![AuthType::Password] }], completed: Vec::new(), params: Box::default(), session: None, diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 9407a0bd..c8faaa46 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -5,7 +5,10 @@ use futures::{StreamExt, TryFutureExt}; use ruma::{ api::{ client::{ - directory::{get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility}, + directory::{ + get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, + }, error::ErrorKind, room, }, @@ -32,7 +35,8 @@ use crate::Ruma; /// - Rooms are ordered by the number of joined members #[tracing::instrument(skip_all, fields(%client), name = "publicrooms")] pub(crate) async fn get_public_rooms_filtered_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { if let Some(server) = &body.server { @@ -57,7 +61,10 @@ pub(crate) async fn get_public_rooms_filtered_route( .await .map_err(|e| { warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest(ErrorKind::Unknown, "Failed to return the requested server's public room list.") + Error::BadRequest( + ErrorKind::Unknown, + "Failed to return the requested server's public room list.", + ) })?; Ok(response) @@ -70,7 +77,8 @@ pub(crate) async fn get_public_rooms_filtered_route( /// - Rooms are ordered by the number of joined members #[tracing::instrument(skip_all, fields(%client), name = "publicrooms")] pub(crate) async fn get_public_rooms_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { if let Some(server) = &body.server { @@ 
-95,7 +103,10 @@ pub(crate) async fn get_public_rooms_route( .await .map_err(|e| { warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest(ErrorKind::Unknown, "Failed to return the requested server's public room list.") + Error::BadRequest( + ErrorKind::Unknown, + "Failed to return the requested server's public room list.", + ) })?; Ok(get_public_rooms::v3::Response { @@ -111,7 +122,8 @@ pub(crate) async fn get_public_rooms_route( /// Sets the visibility of a given room in the room directory. #[tracing::instrument(skip_all, fields(%client), name = "room_directory")] pub(crate) async fn set_room_visibility_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -139,14 +151,14 @@ pub(crate) async fn set_room_visibility_route( } match &body.visibility { - room::Visibility::Public => { + | room::Visibility::Public => { if services.globals.config.lockdown_public_room_directory && !services.users.is_admin(sender_user).await && body.appservice_info.is_none() { info!( - "Non-admin user {sender_user} tried to publish {0} to the room directory while \ - \"lockdown_public_room_directory\" is enabled", + "Non-admin user {sender_user} tried to publish {0} to the room directory \ + while \"lockdown_public_room_directory\" is enabled", body.room_id ); @@ -154,8 +166,8 @@ pub(crate) async fn set_room_visibility_route( services .admin .send_text(&format!( - "Non-admin user {sender_user} tried to publish {0} to the room directory while \ - \"lockdown_public_room_directory\" is enabled", + "Non-admin user {sender_user} tried to publish {0} to the room \ + directory while \"lockdown_public_room_directory\" is enabled", body.room_id )) .await; @@ -172,13 +184,16 @@ pub(crate) async fn set_room_visibility_route( if services.globals.config.admin_room_notices { services 
.admin - .send_text(&format!("{sender_user} made {} public to the room directory", body.room_id)) + .send_text(&format!( + "{sender_user} made {} public to the room directory", + body.room_id + )) .await; } info!("{sender_user} made {0} public to the room directory", body.room_id); }, - room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id), - _ => { + | room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id), + | _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Room visibility type is not supported.", @@ -193,7 +208,8 @@ pub(crate) async fn set_room_visibility_route( /// /// Gets the visibility of a given room in the room directory. pub(crate) async fn get_room_visibility_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist @@ -210,10 +226,16 @@ pub(crate) async fn get_room_visibility_route( } pub(crate) async fn get_public_rooms_filtered_helper( - services: &Services, server: Option<&ServerName>, limit: Option, since: Option<&str>, filter: &Filter, + services: &Services, + server: Option<&ServerName>, + limit: Option, + since: Option<&str>, + filter: &Filter, _network: &RoomNetwork, ) -> Result { - if let Some(other_server) = server.filter(|server_name| !services.globals.server_is_ours(server_name)) { + if let Some(other_server) = + server.filter(|server_name| !services.globals.server_is_ours(server_name)) + { let response = services .sending .send_federation_request( @@ -245,9 +267,10 @@ pub(crate) async fn get_public_rooms_filtered_helper( if let Some(s) = &since { let mut characters = s.chars(); let backwards = match characters.next() { - Some('n') => false, - Some('p') => true, - _ => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")), + | Some('n') => false, + | Some('p') => true, + | _ => + return 
Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")), }; num_since = characters @@ -337,7 +360,11 @@ pub(crate) async fn get_public_rooms_filtered_helper( /// Check whether the user can publish to the room directory via power levels of /// room history visibility event or room creator -async fn user_can_publish_room(services: &Services, user_id: &UserId, room_id: &RoomId) -> Result { +async fn user_can_publish_room( + services: &Services, + user_id: &UserId, + room_id: &RoomId, +) -> Result { if let Ok(event) = services .rooms .state_accessor @@ -347,7 +374,8 @@ async fn user_can_publish_room(services: &Services, user_id: &UserId, room_id: & serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) .map(|content: RoomPowerLevelsEventContent| { - RoomPowerLevels::from(content).user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) + RoomPowerLevels::from(content) + .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) }) } else if let Ok(event) = services .rooms @@ -406,10 +434,10 @@ async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> Public .state_accessor .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { - JoinRule::Public => PublicRoomJoinRule::Public, - JoinRule::Knock => "knock".into(), - JoinRule::KnockRestricted(_) => "knock_restricted".into(), - _ => "invite".into(), + | JoinRule::Public => PublicRoomJoinRule::Public, + | JoinRule::Knock => "knock".into(), + | JoinRule::KnockRestricted(_) => "knock_restricted".into(), + | _ => "invite".into(), }) .await .unwrap_or_default(), diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs index a1576e8e..84086452 100644 --- a/src/api/client/filter.rs +++ b/src/api/client/filter.rs @@ -10,7 +10,8 @@ use crate::{Result, Ruma}; /// /// - A user can only access their own filters pub(crate) async fn 
get_filter_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -26,7 +27,8 @@ pub(crate) async fn get_filter_route( /// /// Creates a new filter to be used by other endpoints. pub(crate) async fn create_filter_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 13dc3467..7bf0a5da 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -7,7 +7,10 @@ use ruma::{ api::{ client::{ error::ErrorKind, - keys::{claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, upload_signing_keys}, + keys::{ + claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + upload_signing_keys, + }, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, federation, @@ -31,7 +34,8 @@ use crate::{ /// - If there are no device keys yet: Adds device keys (TODO: merge with /// existing keys?) 
pub(crate) async fn upload_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let (sender_user, sender_device) = body.sender(); @@ -75,7 +79,8 @@ pub(crate) async fn upload_keys_route( /// - The master and self-signing keys contain signatures that the user is /// allowed to see pub(crate) async fn get_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -93,7 +98,8 @@ pub(crate) async fn get_keys_route( /// /// Claims one-time keys pub(crate) async fn claim_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { claim_keys_helper(&services, &body.one_time_keys).await } @@ -104,16 +110,15 @@ pub(crate) async fn claim_keys_route( /// /// - Requires UIAA to verify password pub(crate) async fn upload_signing_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Password], - }], + flows: vec![AuthFlow { stages: vec![AuthType::Password] }], completed: Vec::new(), params: Box::default(), session: None, @@ -161,7 +166,8 @@ pub(crate) async fn upload_signing_keys_route( /// /// Uploads end-to-end key signatures from the sender user. pub(crate) async fn upload_signatures_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -174,7 +180,10 @@ pub(crate) async fn upload_signatures_route( .get("signatures") .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Missing signatures field."))? 
.get(sender_user.to_string()) - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid user in signatures field."))? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid user in signatures field.", + ))? .as_object() .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature."))? .clone() @@ -185,7 +194,10 @@ pub(crate) async fn upload_signatures_route( signature .1 .as_str() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature value."))? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid signature value.", + ))? .to_owned(), ); @@ -209,7 +221,8 @@ pub(crate) async fn upload_signatures_route( /// /// - TODO: left users pub(crate) async fn get_key_changes_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -255,8 +268,11 @@ pub(crate) async fn get_key_changes_route( } pub(crate) async fn get_keys_helper( - services: &Services, sender_user: Option<&UserId>, device_keys_input: &BTreeMap>, - allowed_signatures: F, include_display_names: bool, + services: &Services, + sender_user: Option<&UserId>, + device_keys_input: &BTreeMap>, + allowed_signatures: F, + include_display_names: bool, ) -> Result where F: Fn(&UserId) -> bool + Send + Sync, @@ -289,7 +305,9 @@ where .users .get_device_metadata(user_id, device_id) .await - .map_err(|_| err!(Database("all_device_keys contained nonexistent device.")))?; + .map_err(|_| { + err!(Database("all_device_keys contained nonexistent device.")) + })?; add_unsigned_device_display_name(&mut keys, metadata, include_display_names) .map_err(|_| err!(Database("invalid device keys in database")))?; @@ -307,7 +325,11 @@ where .users .get_device_metadata(user_id, device_id) .await - .map_err(|_| err!(Request(InvalidParam("Tried to get keys for nonexistent device."))))?; + .map_err(|_| { + err!(Request(InvalidParam( + "Tried to get keys for nonexistent 
device." + ))) + })?; add_unsigned_device_display_name(&mut keys, metadata, include_display_names) .map_err(|_| err!(Database("invalid device keys in database")))?; @@ -350,9 +372,8 @@ where device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); } - let request = federation::keys::get_keys::v1::Request { - device_keys: device_keys_input_fed, - }; + let request = + federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed }; let response = services .sending @@ -382,8 +403,8 @@ where .users .add_cross_signing_keys( &user, &raw, &None, &None, - false, /* Dont notify. A notification would trigger another key request resulting in an - * endless loop */ + false, /* Dont notify. A notification would trigger another key request + * resulting in an endless loop */ ) .await?; master_keys.insert(user.clone(), raw); @@ -406,7 +427,8 @@ where } fn add_unsigned_device_display_name( - keys: &mut Raw, metadata: ruma::api::client::device::Device, + keys: &mut Raw, + metadata: ruma::api::client::device::Device, include_display_names: bool, ) -> serde_json::Result<()> { if let Some(display_name) = metadata.display_name { @@ -431,7 +453,8 @@ fn add_unsigned_device_display_name( } pub(crate) async fn claim_keys_helper( - services: &Services, one_time_keys_input: &BTreeMap>, + services: &Services, + one_time_keys_input: &BTreeMap>, ) -> Result { let mut one_time_keys = BTreeMap::new(); @@ -473,12 +496,9 @@ pub(crate) async fn claim_keys_helper( server, services .sending - .send_federation_request( - server, - federation::keys::claim_keys::v1::Request { - one_time_keys: one_time_keys_input_fed, - }, - ) + .send_federation_request(server, federation::keys::claim_keys::v1::Request { + one_time_keys: one_time_keys_input_fed, + }) .await, ) }) @@ -486,17 +506,14 @@ pub(crate) async fn claim_keys_helper( while let Some((server, response)) = futures.next().await { match response { - Ok(keys) => { + | Ok(keys) => { one_time_keys.extend(keys.one_time_keys); }, - 
Err(_e) => { + | Err(_e) => { failures.insert(server.to_string(), json!({})); }, } } - Ok(claim_keys::v3::Response { - failures, - one_time_keys, - }) + Ok(claim_keys::v3::Response { failures, one_time_keys }) } diff --git a/src/api/client/media.rs b/src/api/client/media.rs index a9792c3b..e58ba626 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -15,7 +15,8 @@ use reqwest::Url; use ruma::{ api::client::{ authenticated_media::{ - get_content, get_content_as_filename, get_content_thumbnail, get_media_config, get_media_preview, + get_content, get_content_as_filename, get_content_thumbnail, get_media_config, + get_media_preview, }, media::create_content, }, @@ -26,7 +27,8 @@ use crate::Ruma; /// # `GET /_matrix/client/v1/media/config` pub(crate) async fn get_media_config_route( - State(services): State, _body: Ruma, + State(services): State, + _body: Ruma, ) -> Result { Ok(get_media_config::v1::Response { upload_size: ruma_from_usize(services.globals.config.max_request_size), @@ -46,7 +48,8 @@ pub(crate) async fn get_media_config_route( fields(%client), )] pub(crate) async fn create_content_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let user = body.sender_user.as_ref().expect("user is authenticated"); @@ -79,7 +82,8 @@ pub(crate) async fn create_content_route( fields(%client), )] pub(crate) async fn get_content_thumbnail_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let user = body.sender_user.as_ref().expect("user is authenticated"); @@ -115,7 +119,8 @@ pub(crate) async fn get_content_thumbnail_route( fields(%client), )] pub(crate) async fn get_content_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): 
InsecureClientIp, body: Ruma, ) -> Result { let user = body.sender_user.as_ref().expect("user is authenticated"); @@ -150,7 +155,8 @@ pub(crate) async fn get_content_route( fields(%client), )] pub(crate) async fn get_content_as_filename_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let user = body.sender_user.as_ref().expect("user is authenticated"); @@ -185,7 +191,8 @@ pub(crate) async fn get_content_as_filename_route( fields(%client), )] pub(crate) async fn get_media_preview_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -223,7 +230,11 @@ pub(crate) async fn get_media_preview_route( } async fn fetch_thumbnail( - services: &Services, mxc: &Mxc<'_>, user: &UserId, timeout_ms: Duration, dim: &Dim, + services: &Services, + mxc: &Mxc<'_>, + user: &UserId, + timeout_ms: Duration, + dim: &Dim, ) -> Result { let FileMeta { content, @@ -245,7 +256,11 @@ async fn fetch_thumbnail( } async fn fetch_file( - services: &Services, mxc: &Mxc<'_>, user: &UserId, timeout_ms: Duration, filename: Option<&str>, + services: &Services, + mxc: &Mxc<'_>, + user: &UserId, + timeout_ms: Duration, + filename: Option<&str>, ) -> Result { let FileMeta { content, @@ -267,7 +282,11 @@ async fn fetch_file( } async fn fetch_thumbnail_meta( - services: &Services, mxc: &Mxc<'_>, user: &UserId, timeout_ms: Duration, dim: &Dim, + services: &Services, + mxc: &Mxc<'_>, + user: &UserId, + timeout_ms: Duration, + dim: &Dim, ) -> Result { if let Some(filemeta) = services.media.get_thumbnail(mxc, dim).await? 
{ return Ok(filemeta); @@ -283,7 +302,12 @@ async fn fetch_thumbnail_meta( .await } -async fn fetch_file_meta(services: &Services, mxc: &Mxc<'_>, user: &UserId, timeout_ms: Duration) -> Result { +async fn fetch_file_meta( + services: &Services, + mxc: &Mxc<'_>, + user: &UserId, + timeout_ms: Duration, +) -> Result { if let Some(filemeta) = services.media.get(mxc).await? { return Ok(filemeta); } diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index a8b366ed..6f54a683 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -11,8 +11,8 @@ use conduwuit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROS use reqwest::Url; use ruma::{ api::client::media::{ - create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, - get_media_preview, + create_content, get_content, get_content_as_filename, get_content_thumbnail, + get_media_config, get_media_preview, }, Mxc, }; @@ -23,7 +23,8 @@ use crate::{client::create_content_route, Ruma, RumaResponse}; /// /// Returns max upload size. pub(crate) async fn get_media_config_legacy_route( - State(services): State, _body: Ruma, + State(services): State, + _body: Ruma, ) -> Result { Ok(get_media_config::v3::Response { upload_size: ruma_from_usize(services.globals.config.max_request_size), @@ -38,7 +39,8 @@ pub(crate) async fn get_media_config_legacy_route( /// /// Returns max upload size. pub(crate) async fn get_media_config_legacy_legacy_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result> { get_media_config_legacy_route(State(services), body) .await @@ -50,7 +52,8 @@ pub(crate) async fn get_media_config_legacy_legacy_route( /// Returns URL preview. 
#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy")] pub(crate) async fn get_media_preview_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -91,7 +94,8 @@ pub(crate) async fn get_media_preview_legacy_route( /// /// Returns URL preview. pub(crate) async fn get_media_preview_legacy_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result> { get_media_preview_legacy_route(State(services), InsecureClientIp(client), body) @@ -110,7 +114,8 @@ pub(crate) async fn get_media_preview_legacy_legacy_route( /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory pub(crate) async fn create_content_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result> { create_content_route(State(services), InsecureClientIp(client), body) @@ -128,7 +133,8 @@ pub(crate) async fn create_content_legacy_route( /// seconds #[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] pub(crate) async fn get_content_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let mxc = Mxc { @@ -142,7 +148,8 @@ pub(crate) async fn get_content_legacy_route( content_disposition, }) = services.media.get(&mxc).await? 
{ - let content_disposition = make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + let content_disposition = + make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); Ok(get_content::v3::Response { file: content.expect("entire file contents"), @@ -156,10 +163,15 @@ pub(crate) async fn get_content_legacy_route( .media .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) .await - .map_err(|e| err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))))?; + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = - make_content_disposition(response.content_disposition.as_ref(), response.content_type.as_deref(), None); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); Ok(get_content::v3::Response { file: response.file, @@ -187,7 +199,8 @@ pub(crate) async fn get_content_legacy_route( /// seconds #[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] pub(crate) async fn get_content_legacy_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result> { get_content_legacy_route(State(services), InsecureClientIp(client), body) @@ -205,7 +218,8 @@ pub(crate) async fn get_content_legacy_legacy_route( /// seconds #[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] pub(crate) async fn get_content_as_filename_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let mxc = Mxc { @@ -219,8 +233,11 @@ pub(crate) async fn get_content_as_filename_legacy_route( content_disposition, }) = services.media.get(&mxc).await? 
{ - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), Some(&body.filename)); + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + Some(&body.filename), + ); Ok(get_content_as_filename::v3::Response { file: content.expect("entire file contents"), @@ -234,10 +251,15 @@ pub(crate) async fn get_content_as_filename_legacy_route( .media .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) .await - .map_err(|e| err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))))?; + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = - make_content_disposition(response.content_disposition.as_ref(), response.content_type.as_deref(), None); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); Ok(get_content_as_filename::v3::Response { content_disposition: Some(content_disposition), @@ -264,7 +286,8 @@ pub(crate) async fn get_content_as_filename_legacy_route( /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds pub(crate) async fn get_content_as_filename_legacy_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result> { get_content_as_filename_legacy_route(State(services), InsecureClientIp(client), body) @@ -282,7 +305,8 @@ pub(crate) async fn get_content_as_filename_legacy_legacy_route( /// seconds #[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy")] pub(crate) async fn get_content_thumbnail_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let mxc = 
Mxc { @@ -297,7 +321,8 @@ pub(crate) async fn get_content_thumbnail_legacy_route( content_disposition, }) = services.media.get_thumbnail(&mxc, &dim).await? { - let content_disposition = make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + let content_disposition = + make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); Ok(get_content_thumbnail::v3::Response { file: content.expect("entire file contents"), @@ -311,10 +336,15 @@ pub(crate) async fn get_content_thumbnail_legacy_route( .media .fetch_remote_thumbnail_legacy(&body) .await - .map_err(|e| err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))))?; + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = - make_content_disposition(response.content_disposition.as_ref(), response.content_type.as_deref(), None); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); Ok(get_content_thumbnail::v3::Response { file: response.file, @@ -341,7 +371,8 @@ pub(crate) async fn get_content_thumbnail_legacy_route( /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds pub(crate) async fn get_content_thumbnail_legacy_legacy_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result> { get_content_thumbnail_legacy_route(State(services), InsecureClientIp(client), body) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 02166271..0ddcab32 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -20,7 +20,8 @@ use ruma::{ client::{ error::ErrorKind, membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, + ban_user, forget_room, 
get_member_events, invite_user, join_room_by_id, + join_room_by_id_or_alias, joined_members::{self, v3::RoomMember}, joined_rooms, kick_user, leave_room, unban_user, ThirdPartySigned, }, @@ -36,8 +37,8 @@ use ruma::{ }, StateEventType, }, - state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, - RoomVersionId, ServerName, UserId, + state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ appservice::RegistrationInfo, @@ -54,7 +55,10 @@ use crate::{client::full_user_deactivate, Ruma}; /// enabled #[tracing::instrument(skip(services))] async fn banned_room_check( - services: &Services, user_id: &UserId, room_id: Option<&RoomId>, server_name: Option<&ServerName>, + services: &Services, + user_id: &UserId, + room_id: Option<&RoomId>, + server_name: Option<&ServerName>, client_ip: IpAddr, ) -> Result { if services.users.is_admin(user_id).await { @@ -70,19 +74,21 @@ async fn banned_room_check( .contains(&room_id.server_name().unwrap().to_owned()) { warn!( - "User {user_id} who is not an admin attempted to send an invite for or attempted to join a banned \ - room or banned room server name: {room_id}" + "User {user_id} who is not an admin attempted to send an invite for or \ + attempted to join a banned room or banned room server name: {room_id}" ); if services.globals.config.auto_deactivate_banned_room_attempts { - warn!("Automatically deactivating user {user_id} due to attempted banned room join"); + warn!( + "Automatically deactivating user {user_id} due to attempted banned room join" + ); if services.globals.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::text_plain(format!( - "Automatically deactivating user {user_id} due to attempted banned room join from IP \ - {client_ip}" + "Automatically deactivating user {user_id} due to attempted banned \ + room join from IP {client_ip}" ))) 
.await .ok(); @@ -109,19 +115,21 @@ async fn banned_room_check( .contains(&server_name.to_owned()) { warn!( - "User {user_id} who is not an admin tried joining a room which has the server name {server_name} that \ - is globally forbidden. Rejecting.", + "User {user_id} who is not an admin tried joining a room which has the server \ + name {server_name} that is globally forbidden. Rejecting.", ); if services.globals.config.auto_deactivate_banned_room_attempts { - warn!("Automatically deactivating user {user_id} due to attempted banned room join"); + warn!( + "Automatically deactivating user {user_id} due to attempted banned room join" + ); if services.globals.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::text_plain(format!( - "Automatically deactivating user {user_id} due to attempted banned room join from IP \ - {client_ip}" + "Automatically deactivating user {user_id} due to attempted banned \ + room join from IP {client_ip}" ))) .await .ok(); @@ -155,12 +163,20 @@ async fn banned_room_check( /// federation #[tracing::instrument(skip_all, fields(%client), name = "join")] pub(crate) async fn join_room_by_id_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user(); - banned_room_check(&services, sender_user, Some(&body.room_id), body.room_id.server_name(), client).await?; + banned_room_check( + &services, + sender_user, + Some(&body.room_id), + body.room_id.server_name(), + client, + ) + .await?; // There is no body.server_name for /roomId/join let mut servers: Vec<_> = services @@ -216,7 +232,8 @@ pub(crate) async fn join_room_by_id_route( /// via room alias server name and room ID server name #[tracing::instrument(skip_all, fields(%client), name = "join")] pub(crate) async fn join_room_by_id_or_alias_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, 
+ State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); @@ -224,8 +241,15 @@ pub(crate) async fn join_room_by_id_or_alias_route( let body = body.body; let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { - Ok(room_id) => { - banned_room_check(&services, sender_user, Some(&room_id), room_id.server_name(), client).await?; + | Ok(room_id) => { + banned_room_check( + &services, + sender_user, + Some(&room_id), + room_id.server_name(), + client, + ) + .await?; let mut servers = body.via.clone(); servers.extend( @@ -261,14 +285,21 @@ pub(crate) async fn join_room_by_id_or_alias_route( (servers, room_id) }, - Err(room_alias) => { + | Err(room_alias) => { let (room_id, mut servers) = services .rooms .alias .resolve_alias(&room_alias, Some(body.via.clone())) .await?; - banned_room_check(&services, sender_user, Some(&room_id), Some(room_alias.server_name()), client).await?; + banned_room_check( + &services, + sender_user, + Some(&room_id), + Some(room_alias.server_name()), + client, + ) + .await?; let addl_via_servers = services .rooms @@ -314,9 +345,7 @@ pub(crate) async fn join_room_by_id_or_alias_route( .boxed() .await?; - Ok(join_room_by_id_or_alias::v3::Response { - room_id: join_room_response.room_id, - }) + Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id }) } /// # `POST /_matrix/client/v3/rooms/{roomId}/leave` @@ -325,7 +354,8 @@ pub(crate) async fn join_room_by_id_or_alias_route( /// /// - This should always work if the user is currently joined. pub(crate) async fn leave_room_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()).await?; @@ -337,7 +367,8 @@ pub(crate) async fn leave_room_route( /// Tries to send an invite event into the room. 
#[tracing::instrument(skip_all, fields(%client), name = "invite")] pub(crate) async fn invite_user_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -350,12 +381,16 @@ pub(crate) async fn invite_user_route( return Err!(Request(Forbidden("Invites are not allowed on this server."))); } - banned_room_check(&services, sender_user, Some(&body.room_id), body.room_id.server_name(), client).await?; + banned_room_check( + &services, + sender_user, + Some(&body.room_id), + body.room_id.server_name(), + client, + ) + .await?; - if let invite_user::v3::InvitationRecipient::UserId { - user_id, - } = &body.recipient - { + if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); let recipient_ignored_by_sender = services.users.user_is_ignored(user_id, sender_user); @@ -363,7 +398,9 @@ pub(crate) async fn invite_user_route( join!(sender_ignored_recipient, recipient_ignored_by_sender); if sender_ignored_recipient { - return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms."))); + return Err!(Request(Forbidden( + "You cannot invite users you have ignored to rooms." + ))); } if recipient_ignored_by_sender { @@ -386,7 +423,8 @@ pub(crate) async fn invite_user_route( /// /// Tries to send a kick event into the room. 
pub(crate) async fn kick_user_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; @@ -405,17 +443,14 @@ pub(crate) async fn kick_user_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - body.user_id.to_string(), - &RoomMemberEventContent { - membership: MembershipState::Leave, - reason: body.reason.clone(), - is_direct: None, - join_authorized_via_users_server: None, - third_party_invite: None, - ..event - }, - ), + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason: body.reason.clone(), + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + ..event + }), body.sender_user(), &body.room_id, &state_lock, @@ -431,7 +466,8 @@ pub(crate) async fn kick_user_route( /// /// Tries to send a ban event into the room. pub(crate) async fn ban_user_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -452,19 +488,16 @@ pub(crate) async fn ban_user_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - body.user_id.to_string(), - &RoomMemberEventContent { - membership: MembershipState::Ban, - reason: body.reason.clone(), - displayname: None, // display name may be offensive - avatar_url: None, // avatar may be offensive - is_direct: None, - join_authorized_via_users_server: None, - third_party_invite: None, - ..current_member_content - }, - ), + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Ban, + reason: body.reason.clone(), + displayname: None, // display name may be offensive + avatar_url: None, // avatar may be offensive + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + ..current_member_content + }), sender_user, &body.room_id, &state_lock, @@ 
-480,7 +513,8 @@ pub(crate) async fn ban_user_route( /// /// Tries to send an unban event into the room. pub(crate) async fn unban_user_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; @@ -502,17 +536,14 @@ pub(crate) async fn unban_user_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - body.user_id.to_string(), - &RoomMemberEventContent { - membership: MembershipState::Leave, - reason: body.reason.clone(), - join_authorized_via_users_server: None, - third_party_invite: None, - is_direct: None, - ..current_member_content - }, - ), + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason: body.reason.clone(), + join_authorized_via_users_server: None, + third_party_invite: None, + is_direct: None, + ..current_member_content + }), body.sender_user(), &body.room_id, &state_lock, @@ -534,7 +565,8 @@ pub(crate) async fn unban_user_route( /// Note: Other devices of the user have no way of knowing the room was /// forgotten, so this has to be called from every device pub(crate) async fn forget_room_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -559,7 +591,8 @@ pub(crate) async fn forget_room_route( /// /// Lists all rooms the user has joined. 
pub(crate) async fn joined_rooms_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { Ok(joined_rooms::v3::Response { joined_rooms: services @@ -579,7 +612,8 @@ pub(crate) async fn joined_rooms_route( /// /// - Only works if the user is currently joined pub(crate) async fn get_member_events_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -612,7 +646,8 @@ pub(crate) async fn get_member_events_route( /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined pub(crate) async fn joined_members_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -631,25 +666,25 @@ pub(crate) async fn joined_members_route( .room_members(&body.room_id) .map(ToOwned::to_owned) .then(|user| async move { - ( - user.clone(), - RoomMember { - display_name: services.users.displayname(&user).await.ok(), - avatar_url: services.users.avatar_url(&user).await.ok(), - }, - ) + (user.clone(), RoomMember { + display_name: services.users.displayname(&user).await.ok(), + avatar_url: services.users.avatar_url(&user).await.ok(), + }) }) .collect() .await; - Ok(joined_members::v3::Response { - joined, - }) + Ok(joined_members::v3::Response { joined }) } pub async fn join_room_by_id_helper( - services: &Services, sender_user: &UserId, room_id: &RoomId, reason: Option, servers: &[OwnedServerName], - third_party_signed: Option<&ThirdPartySigned>, appservice_info: &Option, + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + third_party_signed: Option<&ThirdPartySigned>, + appservice_info: &Option, ) -> Result { let state_lock = services.rooms.state.mutex.lock(room_id).await; @@ -671,9 +706,7 @@ pub async fn join_room_by_id_helper( .await { debug_warn!("{sender_user} is already joined in 
{room_id}"); - return Ok(join_room_by_id::v3::Response { - room_id: room_id.into(), - }); + return Ok(join_room_by_id::v3::Response { room_id: room_id.into() }); } if let Ok(membership) = services @@ -694,18 +727,35 @@ pub async fn join_room_by_id_helper( .server_in_room(services.globals.server_name(), room_id) .await; - let local_join = - server_in_room || servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + let local_join = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); if local_join { - join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, third_party_signed, state_lock) - .boxed() - .await?; + join_room_by_id_helper_local( + services, + sender_user, + room_id, + reason, + servers, + third_party_signed, + state_lock, + ) + .boxed() + .await?; } else { // Ask a remote server if we are not participating in this room - join_room_by_id_helper_remote(services, sender_user, room_id, reason, servers, third_party_signed, state_lock) - .boxed() - .await?; + join_room_by_id_helper_remote( + services, + sender_user, + room_id, + reason, + servers, + third_party_signed, + state_lock, + ) + .boxed() + .await?; } Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) @@ -713,12 +763,18 @@ pub async fn join_room_by_id_helper( #[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")] async fn join_room_by_id_helper_remote( - services: &Services, sender_user: &UserId, room_id: &RoomId, reason: Option, servers: &[OwnedServerName], - _third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard, + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + _third_party_signed: Option<&ThirdPartySigned>, + state_lock: RoomMutexGuard, ) -> Result { info!("Joining {room_id} over federation."); - let (make_join_response, remote_server) = 
make_join_request(services, sender_user, room_id, servers).await?; + let (make_join_response, remote_server) = + make_join_request(services, sender_user, room_id, servers).await?; info!("make_join finished"); @@ -783,8 +839,8 @@ async fn join_room_by_id_helper_remote( // We keep the "event_id" in the pdu only in v1 or // v2 rooms match room_version_id { - RoomVersionId::V1 | RoomVersionId::V2 => {}, - _ => { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { join_event_stub.remove("event_id"); }, }; @@ -799,7 +855,8 @@ async fn join_room_by_id_helper_remote( let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; // Add event_id back - join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + join_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); // It has enough fields to be called a proper event now let mut join_event = join_event_stub; @@ -825,12 +882,16 @@ async fn join_room_by_id_helper_remote( if join_authorized_via_users_server.is_some() { if let Some(signed_raw) = &send_join_response.room_state.event { debug_info!( - "There is a signed event with join_authorized_via_users_server. This room is probably using \ - restricted joins. Adding signature to our event" + "There is a signed event with join_authorized_via_users_server. This room is \ + probably using restricted joins. 
Adding signature to our event" ); - let (signed_event_id, signed_value) = gen_event_id_canonical_json(signed_raw, &room_version_id) - .map_err(|e| err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))))?; + let (signed_event_id, signed_value) = + gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| { + err!(Request(BadJson(warn!( + "Could not convert event to canonical JSON: {e}" + )))) + })?; if signed_event_id != event_id { return Err!(Request(BadJson( @@ -840,15 +901,20 @@ async fn join_room_by_id_helper_remote( match signed_value["signatures"] .as_object() - .ok_or_else(|| err!(BadServerResponse(warn!("Server {remote_server} sent invalid signatures type")))) + .ok_or_else(|| { + err!(BadServerResponse(warn!( + "Server {remote_server} sent invalid signatures type" + ))) + }) .and_then(|e| { e.get(remote_server.as_str()).ok_or_else(|| { err!(BadServerResponse(warn!( - "Server {remote_server} did not send its signature for a restricted room" + "Server {remote_server} did not send its signature for a restricted \ + room" ))) }) }) { - Ok(signature) => { + | Ok(signature) => { join_event .get_mut("signatures") .expect("we created a valid pdu") @@ -856,10 +922,10 @@ async fn join_room_by_id_helper_remote( .expect("we created a valid pdu") .insert(remote_server.to_string(), signature.clone()); }, - Err(e) => { + | Err(e) => { warn!( - "Server {remote_server} sent invalid signature in send_join signatures for event \ - {signed_value:?}: {e:?}", + "Server {remote_server} sent invalid signature in send_join signatures \ + for event {signed_value:?}: {e:?}", ); }, } @@ -900,8 +966,8 @@ async fn join_room_by_id_helper_remote( .ready_filter_map(Result::ok) .fold(HashMap::new(), |mut state, (event_id, value)| async move { let pdu = match PduEvent::from_id_val(&event_id, value.clone()) { - Ok(pdu) => pdu, - Err(e) => { + | Ok(pdu) => pdu, + | Err(e) => { debug_warn!("Invalid PDU in send_join response: {e:?}: {value:#?}"); return 
state; }, @@ -937,7 +1003,9 @@ async fn join_room_by_id_helper_remote( .validate_and_add_event_id_no_fetch(pdu, &room_version_id) }) .ready_filter_map(Result::ok) - .ready_for_each(|(event_id, value)| services.rooms.outlier.add_pdu_outlier(&event_id, &value)) + .ready_for_each(|(event_id, value)| { + services.rooms.outlier.add_pdu_outlier(&event_id, &value); + }) .await; drop(cork); @@ -1031,29 +1099,38 @@ async fn join_room_by_id_helper_remote( #[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")] async fn join_room_by_id_helper_local( - services: &Services, sender_user: &UserId, room_id: &RoomId, reason: Option, servers: &[OwnedServerName], - _third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard, + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + _third_party_signed: Option<&ThirdPartySigned>, + state_lock: RoomMutexGuard, ) -> Result { debug_info!("We can join locally"); let join_rules_event_content = services .rooms .state_accessor - .room_state_get_content::(room_id, &StateEventType::RoomJoinRules, "") + .room_state_get_content::( + room_id, + &StateEventType::RoomJoinRules, + "", + ) .await; let restriction_rooms = match join_rules_event_content { - Ok(RoomJoinRulesEventContent { + | Ok(RoomJoinRulesEventContent { join_rule: JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted), }) => restricted .allow .into_iter() .filter_map(|a| match a { - AllowRule::RoomMembership(r) => Some(r.room_id), - _ => None, + | AllowRule::RoomMembership(r) => Some(r.room_id), + | _ => None, }) .collect(), - _ => Vec::new(), + | _ => Vec::new(), }; let join_authorized_via_users_server: Option = { @@ -1073,10 +1150,12 @@ async fn join_room_by_id_helper_local( .state_cache .local_users_in_room(room_id) .filter(|user| { - services - .rooms - .state_accessor - .user_can_invite(room_id, user, sender_user, &state_lock) + 
services.rooms.state_accessor.user_can_invite( + room_id, + user, + sender_user, + &state_lock, + ) }) .boxed() .next() @@ -1112,13 +1191,18 @@ async fn join_room_by_id_helper_local( }; if restriction_rooms.is_empty() - && (servers.is_empty() || servers.len() == 1 && services.globals.server_is_ours(&servers[0])) + && (servers.is_empty() + || servers.len() == 1 && services.globals.server_is_ours(&servers[0])) { return Err(error); } - warn!("We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements"); - let Ok((make_join_response, remote_server)) = make_join_request(services, sender_user, room_id, servers).await + warn!( + "We couldn't do the join locally, maybe federation can help to satisfy the restricted \ + join requirements" + ); + let Ok((make_join_response, remote_server)) = + make_join_request(services, sender_user, room_id, servers).await else { return Err(error); }; @@ -1133,8 +1217,10 @@ async fn join_room_by_id_helper_local( )); } - let mut join_event_stub: CanonicalJsonObject = serde_json::from_str(make_join_response.event.get()) - .map_err(|e| err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")))?; + let mut join_event_stub: CanonicalJsonObject = + serde_json::from_str(make_join_response.event.get()).map_err(|e| { + err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")) + })?; let join_authorized_via_users_server = join_event_stub .get("content") @@ -1173,8 +1259,8 @@ async fn join_room_by_id_helper_local( // We keep the "event_id" in the pdu only in v1 or // v2 rooms match room_version_id { - RoomVersionId::V1 | RoomVersionId::V2 => {}, - _ => { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { join_event_stub.remove("event_id"); }, }; @@ -1189,7 +1275,8 @@ async fn join_room_by_id_helper_local( let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; // Add event_id back - join_event_stub.insert("event_id".to_owned(), 
CanonicalJsonValue::String(event_id.clone().into())); + join_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); // It has enough fields to be called a proper event now let join_event = join_event_stub; @@ -1211,8 +1298,10 @@ async fn join_room_by_id_helper_local( .await?; if let Some(signed_raw) = send_join_response.room_state.event { - let (signed_event_id, signed_value) = gen_event_id_canonical_json(&signed_raw, &room_version_id) - .map_err(|e| err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))))?; + let (signed_event_id, signed_value) = + gen_event_id_canonical_json(&signed_raw, &room_version_id).map_err(|e| { + err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) + })?; if signed_event_id != event_id { return Err!(Request(BadJson( @@ -1234,9 +1323,13 @@ async fn join_room_by_id_helper_local( } async fn make_join_request( - services: &Services, sender_user: &UserId, room_id: &RoomId, servers: &[OwnedServerName], + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + servers: &[OwnedServerName], ) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> { - let mut make_join_response_and_server = Err!(BadServerResponse("No server available to assist in joining.")); + let mut make_join_response_and_server = + Err!(BadServerResponse("No server available to assist in joining.")); let mut make_join_counter: usize = 0; let mut incompatible_room_version_count: usize = 0; @@ -1266,23 +1359,28 @@ async fn make_join_request( e.kind(), ErrorKind::IncompatibleRoomVersion { .. 
} | ErrorKind::UnsupportedRoomVersion ) { - incompatible_room_version_count = incompatible_room_version_count.saturating_add(1); + incompatible_room_version_count = + incompatible_room_version_count.saturating_add(1); } if incompatible_room_version_count > 15 { info!( - "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or M_UNSUPPORTED_ROOM_VERSION, \ - assuming that conduwuit does not support the room version {room_id}: {e}" + "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or \ + M_UNSUPPORTED_ROOM_VERSION, assuming that conduwuit does not support the \ + room version {room_id}: {e}" ); - make_join_response_and_server = Err!(BadServerResponse("Room version is not supported by Conduwuit")); + make_join_response_and_server = + Err!(BadServerResponse("Room version is not supported by Conduwuit")); return make_join_response_and_server; } if make_join_counter > 40 { warn!( - "40 servers failed to provide valid make_join response, assuming no server can assist in joining." + "40 servers failed to provide valid make_join response, assuming no server \ + can assist in joining." 
); - make_join_response_and_server = Err!(BadServerResponse("No server available to assist in joining.")); + make_join_response_and_server = + Err!(BadServerResponse("No server available to assist in joining.")); return make_join_response_and_server; } } @@ -1298,11 +1396,18 @@ async fn make_join_request( } pub(crate) async fn invite_helper( - services: &Services, sender_user: &UserId, user_id: &UserId, room_id: &RoomId, reason: Option, + services: &Services, + sender_user: &UserId, + user_id: &UserId, + room_id: &RoomId, + reason: Option, is_direct: bool, ) -> Result { if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { - info!("User {sender_user} is not an admin and attempted to send an invite to room {room_id}"); + info!( + "User {sender_user} is not an admin and attempted to send an invite to room \ + {room_id}" + ); return Err!(Request(Forbidden("Invites are not allowed on this server."))); } @@ -1339,31 +1444,30 @@ pub(crate) async fn invite_helper( let response = services .sending - .send_federation_request( - user_id.server_name(), - create_invite::v2::Request { - room_id: room_id.to_owned(), - event_id: (*pdu.event_id).to_owned(), - room_version: room_version_id.clone(), - event: services - .sending - .convert_to_outgoing_federation_event(pdu_json.clone()) - .await, - invite_room_state, - via: services - .rooms - .state_cache - .servers_route_via(room_id) - .await - .ok(), - }, - ) + .send_federation_request(user_id.server_name(), create_invite::v2::Request { + room_id: room_id.to_owned(), + event_id: (*pdu.event_id).to_owned(), + room_version: room_version_id.clone(), + event: services + .sending + .convert_to_outgoing_federation_event(pdu_json.clone()) + .await, + invite_room_state, + via: services + .rooms + .state_cache + .servers_route_via(room_id) + .await + .ok(), + }) .await?; // We do not add the event_id field to the pdu here because of signature and // hashes checks let (event_id, value) = 
gen_event_id_canonical_json(&response.event, &room_version_id) - .map_err(|e| err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))))?; + .map_err(|e| { + err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) + })?; if pdu.event_id != event_id { return Err!(Request(BadJson( @@ -1379,14 +1483,18 @@ pub(crate) async fn invite_helper( ) .expect("CanonicalJson is valid json value"), ) - .map_err(|e| err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))))?; + .map_err(|e| { + err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) + })?; let pdu_id = services .rooms .event_handler .handle_incoming_pdu(&origin, room_id, &event_id, value, true) .await? - .ok_or_else(|| err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))))?; + .ok_or_else(|| { + err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))) + })?; return services.sending.send_pdu_room(room_id, &pdu_id).await; } @@ -1456,7 +1564,12 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { } } -pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { +pub async fn leave_room( + services: &Services, + user_id: &UserId, + room_id: &RoomId, + reason: Option, +) -> Result<()> { //use conduwuit::utils::stream::OptionStream; use futures::TryFutureExt; @@ -1500,7 +1613,11 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, let Ok(event) = services .rooms .state_accessor - .room_state_get_content::(room_id, &StateEventType::RoomMember, user_id.as_str()) + .room_state_get_content::( + room_id, + &StateEventType::RoomMember, + user_id.as_str(), + ) .await else { // Fix for broken rooms @@ -1527,14 +1644,11 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - 
user_id.to_string(), - &RoomMemberEventContent { - membership: MembershipState::Leave, - reason, - ..event - }, - ), + PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason, + ..event + }), user_id, room_id, &state_lock, @@ -1545,8 +1659,13 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, Ok(()) } -async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut make_leave_response_and_server = Err!(BadServerResponse("No server available to assist in leaving.")); +async fn remote_leave_room( + services: &Services, + user_id: &UserId, + room_id: &RoomId, +) -> Result<()> { + let mut make_leave_response_and_server = + Err!(BadServerResponse("No server available to assist in leaving.")); let invite_state = services .rooms @@ -1608,8 +1727,12 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room )); } - let mut leave_event_stub = serde_json::from_str::(make_leave_response.event.get()) - .map_err(|e| err!(BadServerResponse("Invalid make_leave event json received from server: {e:?}")))?; + let mut leave_event_stub = serde_json::from_str::( + make_leave_response.event.get(), + ) + .map_err(|e| { + err!(BadServerResponse("Invalid make_leave event json received from server: {e:?}")) + })?; // TODO: Is origin needed? 
leave_event_stub.insert( @@ -1627,8 +1750,8 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room // room v3 and above removed the "event_id" field from remote PDU format match room_version_id { - RoomVersionId::V1 | RoomVersionId::V2 => {}, - _ => { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { leave_event_stub.remove("event_id"); }, }; @@ -1643,7 +1766,8 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room let event_id = pdu::gen_event_id(&leave_event_stub, &room_version_id)?; // Add event_id back - leave_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + leave_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); // It has enough fields to be called a proper event now let leave_event = leave_event_stub; diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 32e0b1e3..93582de0 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -56,7 +56,8 @@ const LIMIT_DEFAULT: usize = 10; /// - Only works if the user is joined (TODO: always allow, but only show events /// where the user was joined, depending on `history_visibility`) pub(crate) async fn get_message_events_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender = body.sender(); let (sender_user, sender_device) = sender; @@ -69,8 +70,8 @@ pub(crate) async fn get_message_events_route( .map(str::parse) .transpose()? 
.unwrap_or_else(|| match body.dir { - Direction::Forward => PduCount::min(), - Direction::Backward => PduCount::max(), + | Direction::Forward => PduCount::min(), + | Direction::Backward => PduCount::max(), }); let to: Option = body.to.as_deref().map(str::parse).flat_ok(); @@ -81,10 +82,12 @@ pub(crate) async fn get_message_events_route( .unwrap_or(LIMIT_DEFAULT) .min(LIMIT_MAX); - services - .rooms - .lazy_loading - .lazy_load_confirm_delivery(sender_user, sender_device, room_id, from); + services.rooms.lazy_loading.lazy_load_confirm_delivery( + sender_user, + sender_device, + room_id, + from, + ); if matches!(body.dir, Direction::Backward) { services @@ -98,14 +101,14 @@ pub(crate) async fn get_message_events_route( } let it = match body.dir { - Direction::Forward => services + | Direction::Forward => services .rooms .timeline .pdus(Some(sender_user), room_id, Some(from)) .await? .boxed(), - Direction::Backward => services + | Direction::Backward => services .rooms .timeline .pdus_rev(Some(sender_user), room_id, Some(from)) @@ -141,10 +144,13 @@ pub(crate) async fn get_message_events_route( if !cfg!(feature = "element_hacks") { if let Some(next_token) = next_token { - services - .rooms - .lazy_loading - .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy, next_token); + services.rooms.lazy_loading.lazy_load_mark_sent( + sender_user, + sender_device, + room_id, + lazy, + next_token, + ); } } @@ -162,7 +168,11 @@ pub(crate) async fn get_message_events_route( }) } -async fn get_member_event(services: &Services, room_id: &RoomId, user_id: &UserId) -> Option> { +async fn get_member_event( + services: &Services, + room_id: &RoomId, + user_id: &UserId, +) -> Option> { services .rooms .state_accessor @@ -173,7 +183,11 @@ async fn get_member_event(services: &Services, room_id: &RoomId, user_id: &UserI } pub(crate) async fn update_lazy( - services: &Services, room_id: &RoomId, sender: (&UserId, &DeviceId), mut lazy: LazySet, item: &PdusIterItem, + services: 
&Services, + room_id: &RoomId, + sender: (&UserId, &DeviceId), + mut lazy: LazySet, + item: &PdusIterItem, force: bool, ) -> LazySet { let (_, event) = &item; @@ -204,7 +218,11 @@ pub(crate) async fn update_lazy( lazy } -pub(crate) async fn ignored_filter(services: &Services, item: PdusIterItem, user_id: &UserId) -> Option { +pub(crate) async fn ignored_filter( + services: &Services, + item: PdusIterItem, + user_id: &UserId, +) -> Option { let (_, pdu) = &item; // exclude Synapse's dummy events from bloating up response bodies. clients @@ -223,7 +241,9 @@ pub(crate) async fn ignored_filter(services: &Services, item: PdusIterItem, user } pub(crate) async fn visibility_filter( - services: &Services, item: PdusIterItem, user_id: &UserId, + services: &Services, + item: PdusIterItem, + user_id: &UserId, ) -> Option { let (_, pdu) = &item; diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index b8bbfb91..4b2ff727 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -16,7 +16,8 @@ use crate::{Error, Result, Ruma}; /// /// - The token generated is only valid for the OpenID API pub(crate) async fn create_openid_token_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index 948d6caa..1a3ad26e 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -12,10 +12,14 @@ use crate::{Error, Result, Ruma}; /// /// Sets the presence state of the sender user. 
pub(crate) async fn set_presence_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.globals.allow_local_presence() { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Presence is disabled on this server")); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Presence is disabled on this server", + )); } let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -40,10 +44,14 @@ pub(crate) async fn set_presence_route( /// /// - Only works if you share a room with the user pub(crate) async fn get_presence_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.globals.allow_local_presence() { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Presence is disabled on this server")); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Presence is disabled on this server", + )); } let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index ea74cdaf..584adfc1 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -11,7 +11,9 @@ use ruma::{ api::{ client::{ error::ErrorKind, - profile::{get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name}, + profile::{ + get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, + }, }, federation, }, @@ -29,7 +31,8 @@ use crate::Ruma; /// /// - Also makes sure other users receive the update using presence EDUs pub(crate) async fn set_displayname_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -45,7 +48,8 @@ pub(crate) async fn set_displayname_route( .collect() .await; - update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms).await; + 
update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms) + .await; if services.globals.allow_local_presence() { // Presence update @@ -65,7 +69,8 @@ pub(crate) async fn set_displayname_route( /// - If user is on another server and we do not have a local copy already fetch /// displayname over federation pub(crate) async fn get_displayname_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.globals.user_is_local(&body.user_id) { // Create and update our local copy of the user @@ -94,9 +99,7 @@ pub(crate) async fn get_displayname_route( .users .set_blurhash(&body.user_id, response.blurhash.clone()); - return Ok(get_display_name::v3::Response { - displayname: response.displayname, - }); + return Ok(get_display_name::v3::Response { displayname: response.displayname }); } } @@ -117,7 +120,8 @@ pub(crate) async fn get_displayname_route( /// /// - Also makes sure other users receive the update using presence EDUs pub(crate) async fn set_avatar_url_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -161,7 +165,8 @@ pub(crate) async fn set_avatar_url_route( /// - If user is on another server and we do not have a local copy already fetch /// `avatar_url` and blurhash over federation pub(crate) async fn get_avatar_url_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.globals.user_is_local(&body.user_id) { // Create and update our local copy of the user @@ -218,7 +223,8 @@ pub(crate) async fn get_avatar_url_route( /// - If user is on another server and we do not have a local copy already, /// fetch profile over federation. 
pub(crate) async fn get_profile_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.globals.user_is_local(&body.user_id) { // Create and update our local copy of the user @@ -254,9 +260,11 @@ pub(crate) async fn get_profile_route( .set_timezone(&body.user_id, response.tz.clone()); for (profile_key, profile_key_value) in &response.custom_profile_fields { - services - .users - .set_profile_key(&body.user_id, profile_key, Some(profile_key_value.clone())); + services.users.set_profile_key( + &body.user_id, + profile_key, + Some(profile_key_value.clone()), + ); } return Ok(get_profile::v3::Response { @@ -295,7 +303,10 @@ pub(crate) async fn get_profile_route( } pub async fn update_displayname( - services: &Services, user_id: &UserId, displayname: Option, all_joined_rooms: &[OwnedRoomId], + services: &Services, + user_id: &UserId, + displayname: Option, + all_joined_rooms: &[OwnedRoomId], ) { let (current_avatar_url, current_blurhash, current_displayname) = join3( services.users.avatar_url(user_id), @@ -322,19 +333,16 @@ pub async fn update_displayname( .iter() .try_stream() .and_then(|room_id: &OwnedRoomId| async move { - let pdu = PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent { - displayname: displayname.clone(), - membership: MembershipState::Join, - avatar_url: avatar_url.clone(), - blurhash: blurhash.clone(), - join_authorized_via_users_server: None, - reason: None, - is_direct: None, - third_party_invite: None, - }, - ); + let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { + displayname: displayname.clone(), + membership: MembershipState::Join, + avatar_url: avatar_url.clone(), + blurhash: blurhash.clone(), + join_authorized_via_users_server: None, + reason: None, + is_direct: None, + third_party_invite: None, + }); Ok((pdu, room_id)) }) @@ -346,7 +354,10 @@ pub async fn update_displayname( } pub async fn update_avatar_url( - services: &Services, user_id: 
&UserId, avatar_url: Option, blurhash: Option, + services: &Services, + user_id: &UserId, + avatar_url: Option, + blurhash: Option, all_joined_rooms: &[OwnedRoomId], ) { let (current_avatar_url, current_blurhash, current_displayname) = join3( @@ -375,19 +386,16 @@ pub async fn update_avatar_url( .iter() .try_stream() .and_then(|room_id: &OwnedRoomId| async move { - let pdu = PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent { - avatar_url: avatar_url.clone(), - blurhash: blurhash.clone(), - membership: MembershipState::Join, - displayname: displayname.clone(), - join_authorized_via_users_server: None, - reason: None, - is_direct: None, - third_party_invite: None, - }, - ); + let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { + avatar_url: avatar_url.clone(), + blurhash: blurhash.clone(), + membership: MembershipState::Join, + displayname: displayname.clone(), + join_authorized_via_users_server: None, + reason: None, + is_direct: None, + third_party_invite: None, + }); Ok((pdu, room_id)) }) @@ -399,7 +407,9 @@ pub async fn update_avatar_url( } pub async fn update_all_rooms( - services: &Services, all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>, user_id: &UserId, + services: &Services, + all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>, + user_id: &UserId, ) { for (pdu_builder, room_id) in all_joined_rooms { let state_lock = services.rooms.state.mutex.lock(room_id).await; diff --git a/src/api/client/push.rs b/src/api/client/push.rs index e290c952..ed7371e4 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -4,15 +4,19 @@ use ruma::{ api::client::{ error::ErrorKind, push::{ - delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, - get_pushrules_global_scope, set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, + delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, + get_pushrule_enabled, get_pushrules_all, 
get_pushrules_global_scope, set_pusher, + set_pushrule, set_pushrule_actions, set_pushrule_enabled, }, }, events::{ push_rules::{PushRulesEvent, PushRulesEventContent}, GlobalAccountDataEventType, }, - push::{InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId, RemovePushRuleError, Ruleset}, + push::{ + InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId, + RemovePushRuleError, Ruleset, + }, CanonicalJsonObject, CanonicalJsonValue, }; use service::Services; @@ -23,7 +27,8 @@ use crate::{Error, Result, Ruma}; /// /// Retrieves the push rules event for this user. pub(crate) async fn get_pushrules_all_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -40,8 +45,10 @@ pub(crate) async fn get_pushrules_all_route( return recreate_push_rules_and_return(&services, sender_user).await; }; - let account_data_content = serde_json::from_value::(content_value.into()) - .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; + let account_data_content = + serde_json::from_value::(content_value.into()).map_err(|e| { + err!(Database(warn!("Invalid push rules account data event in database: {e}"))) + })?; let mut global_ruleset = account_data_content.global; @@ -79,9 +86,7 @@ pub(crate) async fn get_pushrules_all_route( sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { - global: global_ruleset.clone(), - }, + content: PushRulesEventContent { global: global_ruleset.clone() }, }) .expect("to json always works"), ) @@ -89,9 +94,7 @@ pub(crate) async fn get_pushrules_all_route( } }; - Ok(get_pushrules_all::v3::Response { - global: global_ruleset, - }) + Ok(get_pushrules_all::v3::Response { global: global_ruleset }) } /// # `GET /_matrix/client/r0/pushrules/global/` @@ -100,7 +103,8 @@ pub(crate) async fn get_pushrules_all_route( 
/// /// This appears to be the exact same as `GET /_matrix/client/r0/pushrules/`. pub(crate) async fn get_pushrules_global_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -134,8 +138,10 @@ pub(crate) async fn get_pushrules_global_route( }); }; - let account_data_content = serde_json::from_value::(content_value.into()) - .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; + let account_data_content = + serde_json::from_value::(content_value.into()).map_err(|e| { + err!(Database(warn!("Invalid push rules account data event in database: {e}"))) + })?; let mut global_ruleset = account_data_content.global; @@ -173,9 +179,7 @@ pub(crate) async fn get_pushrules_global_route( sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { - global: global_ruleset.clone(), - }, + content: PushRulesEventContent { global: global_ruleset.clone() }, }) .expect("to json always works"), ) @@ -183,16 +187,15 @@ pub(crate) async fn get_pushrules_global_route( } }; - Ok(get_pushrules_global_scope::v3::Response { - global: global_ruleset, - }) + Ok(get_pushrules_global_scope::v3::Response { global: global_ruleset }) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Retrieves a single specified push rule for this user. 
pub(crate) async fn get_pushrule_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -218,9 +221,7 @@ pub(crate) async fn get_pushrule_route( .map(Into::into); if let Some(rule) = rule { - Ok(get_pushrule::v3::Response { - rule, - }) + Ok(get_pushrule::v3::Response { rule }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")) } @@ -230,7 +231,8 @@ pub(crate) async fn get_pushrule_route( /// /// Creates a single specified push rule for this user. pub(crate) async fn set_pushrule_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -241,32 +243,33 @@ pub(crate) async fn set_pushrule_route( .await .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - if let Err(error) = - account_data - .content - .global - .insert(body.rule.clone(), body.after.as_deref(), body.before.as_deref()) - { + if let Err(error) = account_data.content.global.insert( + body.rule.clone(), + body.after.as_deref(), + body.before.as_deref(), + ) { let err = match error { - InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest( + | InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest( ErrorKind::InvalidParam, "Rule IDs starting with a dot are reserved for server-default rules.", ), - InsertPushRuleError::InvalidRuleId => { - Error::BadRequest(ErrorKind::InvalidParam, "Rule ID containing invalid characters.") - }, - InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest( + | InsertPushRuleError::InvalidRuleId => Error::BadRequest( + ErrorKind::InvalidParam, + "Rule ID containing invalid characters.", + ), + | InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest( ErrorKind::InvalidParam, "Can't place a push rule relatively to a 
server-default rule.", ), - InsertPushRuleError::UnknownRuleId => { - Error::BadRequest(ErrorKind::NotFound, "The before or after rule could not be found.") - }, - InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest( + | InsertPushRuleError::UnknownRuleId => Error::BadRequest( + ErrorKind::NotFound, + "The before or after rule could not be found.", + ), + | InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest( ErrorKind::InvalidParam, "The before rule has a higher priority than the after rule.", ), - _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), + | _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), }; return Err(err); @@ -289,7 +292,8 @@ pub(crate) async fn set_pushrule_route( /// /// Gets the actions of a single specified push rule for this user. pub(crate) async fn get_pushrule_actions_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -315,16 +319,15 @@ pub(crate) async fn get_pushrule_actions_route( .map(|rule| rule.actions().to_owned()) .ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?; - Ok(get_pushrule_actions::v3::Response { - actions, - }) + Ok(get_pushrule_actions::v3::Response { actions }) } /// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. pub(crate) async fn set_pushrule_actions_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -360,7 +363,8 @@ pub(crate) async fn set_pushrule_actions_route( /// /// Gets the enabled status of a single specified push rule for this user. 
pub(crate) async fn get_pushrule_enabled_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -370,9 +374,7 @@ pub(crate) async fn get_pushrule_enabled_route( || body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str() || body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str() { - return Ok(get_pushrule_enabled::v3::Response { - enabled: false, - }); + return Ok(get_pushrule_enabled::v3::Response { enabled: false }); } let event: PushRulesEvent = services @@ -388,16 +390,15 @@ pub(crate) async fn get_pushrule_enabled_route( .map(ruma::push::AnyPushRuleRef::enabled) .ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?; - Ok(get_pushrule_enabled::v3::Response { - enabled, - }) + Ok(get_pushrule_enabled::v3::Response { enabled }) } /// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled` /// /// Sets the enabled status of a single specified push rule for this user. pub(crate) async fn set_pushrule_enabled_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -433,7 +434,8 @@ pub(crate) async fn set_pushrule_enabled_route( /// /// Deletes a single specified push rule for this user. 
pub(crate) async fn delete_pushrule_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -449,11 +451,13 @@ pub(crate) async fn delete_pushrule_route( .remove(body.kind.clone(), &body.rule_id) { let err = match error { - RemovePushRuleError::ServerDefault => { - Error::BadRequest(ErrorKind::InvalidParam, "Cannot delete a server-default pushrule.") - }, - RemovePushRuleError::NotFound => Error::BadRequest(ErrorKind::NotFound, "Push rule not found."), - _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), + | RemovePushRuleError::ServerDefault => Error::BadRequest( + ErrorKind::InvalidParam, + "Cannot delete a server-default pushrule.", + ), + | RemovePushRuleError::NotFound => + Error::BadRequest(ErrorKind::NotFound, "Push rule not found."), + | _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), }; return Err(err); @@ -476,7 +480,8 @@ pub(crate) async fn delete_pushrule_route( /// /// Gets all currently active pushers for the sender user. pub(crate) async fn get_pushers_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -491,7 +496,8 @@ pub(crate) async fn get_pushers_route( /// /// - TODO: Handle `append` pub(crate) async fn set_pushers_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -506,7 +512,8 @@ pub(crate) async fn set_pushers_route( /// user somehow has bad push rules, these must always exist per spec. 
/// so recreate it and return server default silently async fn recreate_push_rules_and_return( - services: &Services, sender_user: &ruma::UserId, + services: &Services, + sender_user: &ruma::UserId, ) -> Result { services .account_data diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index d7f24101..2e98afbc 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -21,15 +21,14 @@ use crate::{Result, Ruma}; /// - If `read_receipt` is set: Update private marker and public read receipt /// EDU pub(crate) async fn set_read_marker_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); if let Some(event) = &body.fully_read { let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { - event_id: event.clone(), - }, + content: ruma::events::fully_read::FullyReadEventContent { event_id: event.clone() }, }; services @@ -55,13 +54,10 @@ pub(crate) async fn set_read_marker_route( event.to_owned(), BTreeMap::from_iter([( ReceiptType::Read, - BTreeMap::from_iter([( - sender_user.to_owned(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - }, - )]), + BTreeMap::from_iter([(sender_user.to_owned(), ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + })]), )]), )]); @@ -88,7 +84,9 @@ pub(crate) async fn set_read_marker_route( .map_err(|_| err!(Request(NotFound("Event not found."))))?; let PduCount::Normal(count) = count else { - return Err!(Request(InvalidParam("Event is a backfilled PDU and cannot be marked as read."))); + return Err!(Request(InvalidParam( + "Event is a backfilled PDU and cannot be marked as read." 
+ ))); }; services @@ -104,7 +102,8 @@ pub(crate) async fn set_read_marker_route( /// /// Sets private read marker and public read receipt EDU. pub(crate) async fn create_receipt_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); @@ -119,7 +118,7 @@ pub(crate) async fn create_receipt_route( } match body.receipt_type { - create_receipt::v3::ReceiptType::FullyRead => { + | create_receipt::v3::ReceiptType::FullyRead => { let fully_read_event = ruma::events::fully_read::FullyReadEvent { content: ruma::events::fully_read::FullyReadEventContent { event_id: body.event_id.clone(), @@ -135,7 +134,7 @@ pub(crate) async fn create_receipt_route( ) .await?; }, - create_receipt::v3::ReceiptType::Read => { + | create_receipt::v3::ReceiptType::Read => { let receipt_content = BTreeMap::from_iter([( body.event_id.clone(), BTreeMap::from_iter([( @@ -163,7 +162,7 @@ pub(crate) async fn create_receipt_route( ) .await; }, - create_receipt::v3::ReceiptType::ReadPrivate => { + | create_receipt::v3::ReceiptType::ReadPrivate => { let count = services .rooms .timeline @@ -172,7 +171,9 @@ pub(crate) async fn create_receipt_route( .map_err(|_| err!(Request(NotFound("Event not found."))))?; let PduCount::Normal(count) = count else { - return Err!(Request(InvalidParam("Event is a backfilled PDU and cannot be marked as read."))); + return Err!(Request(InvalidParam( + "Event is a backfilled PDU and cannot be marked as read." 
+ ))); }; services @@ -180,12 +181,11 @@ pub(crate) async fn create_receipt_route( .read_receipt .private_read_set(&body.room_id, sender_user, count); }, - _ => { + | _ => return Err!(Request(InvalidParam(warn!( "Received unknown read receipt type: {}", &body.receipt_type - )))) - }, + )))), } Ok(create_receipt::v3::Response {}) diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index a986dc18..0b01238f 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -1,5 +1,7 @@ use axum::extract::State; -use ruma::{api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent}; +use ruma::{ + api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, +}; use crate::{service::pdu::PduBuilder, Result, Ruma}; @@ -9,7 +11,8 @@ use crate::{service::pdu::PduBuilder, Result, Ruma}; /// /// - TODO: Handle txn id pub(crate) async fn redact_event_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -35,7 +38,5 @@ pub(crate) async fn redact_event_route( drop(state_lock); - Ok(redact_event::v3::Response { - event_id: event_id.into(), - }) + Ok(redact_event::v3::Response { event_id: event_id.into() }) } diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index c47bc60f..87fb1eac 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -8,7 +8,8 @@ use futures::StreamExt; use ruma::{ api::{ client::relations::{ - get_relating_events, get_relating_events_with_rel_type, get_relating_events_with_rel_type_and_event_type, + get_relating_events, get_relating_events_with_rel_type, + get_relating_events_with_rel_type_and_event_type, }, Direction, }, @@ -21,7 +22,8 @@ use crate::Ruma; /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` pub(crate) async fn 
get_relating_events_with_rel_type_and_event_type_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { paginate_relations_with_filter( &services, @@ -47,7 +49,8 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route( /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}` pub(crate) async fn get_relating_events_with_rel_type_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { paginate_relations_with_filter( &services, @@ -73,7 +76,8 @@ pub(crate) async fn get_relating_events_with_rel_type_route( /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}` pub(crate) async fn get_relating_events_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { paginate_relations_with_filter( &services, @@ -93,16 +97,24 @@ pub(crate) async fn get_relating_events_route( #[allow(clippy::too_many_arguments)] async fn paginate_relations_with_filter( - services: &Services, sender_user: &UserId, room_id: &RoomId, target: &EventId, - filter_event_type: Option, filter_rel_type: Option, from: Option<&str>, - to: Option<&str>, limit: Option, recurse: bool, dir: Direction, + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + target: &EventId, + filter_event_type: Option, + filter_rel_type: Option, + from: Option<&str>, + to: Option<&str>, + limit: Option, + recurse: bool, + dir: Direction, ) -> Result { let start: PduCount = from .map(str::parse) .transpose()? 
.unwrap_or_else(|| match dir { - Direction::Forward => PduCount::min(), - Direction::Backward => PduCount::max(), + | Direction::Forward => PduCount::min(), + | Direction::Backward => PduCount::max(), }); let to: Option = to.map(str::parse).flat_ok(); @@ -115,11 +127,7 @@ async fn paginate_relations_with_filter( .min(100); // Spec (v1.10) recommends depth of at least 3 - let depth: u8 = if recurse { - 3 - } else { - 1 - }; + let depth: u8 = if recurse { 3 } else { 1 }; let events: Vec = services .rooms @@ -145,8 +153,8 @@ async fn paginate_relations_with_filter( .await; let next_batch = match dir { - Direction::Forward => events.last(), - Direction::Backward => events.first(), + | Direction::Forward => events.last(), + | Direction::Backward => events.first(), } .map(at!(0)) .as_ref() @@ -164,7 +172,11 @@ async fn paginate_relations_with_filter( }) } -async fn visibility_filter(services: &Services, sender_user: &UserId, item: PdusIterItem) -> Option { +async fn visibility_filter( + services: &Services, + sender_user: &UserId, + item: PdusIterItem, +) -> Option { let (_, pdu) = &item; services diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 429cd033..fe23b7bd 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -25,7 +25,8 @@ use crate::{ /// Reports an abusive room to homeserver admins #[tracing::instrument(skip_all, fields(%client), name = "report_room")] pub(crate) async fn report_room_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { // user authentication @@ -78,14 +79,16 @@ pub(crate) async fn report_room_route( /// Reports an inappropriate event to homeserver admins #[tracing::instrument(skip_all, fields(%client), name = "report_event")] pub(crate) async fn report_event_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + 
InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { // user authentication let sender_user = body.sender_user.as_ref().expect("user is authenticated"); info!( - "Received event report by user {sender_user} for room {} and event ID {}, with reason: \"{}\"", + "Received event report by user {sender_user} for room {} and event ID {}, with reason: \ + \"{}\"", body.room_id, body.event_id, body.reason.as_deref().unwrap_or("") @@ -114,8 +117,8 @@ pub(crate) async fn report_event_route( services .admin .send_message(message::RoomMessageEventContent::text_markdown(format!( - "@room Event report received from {} -\n\nEvent ID: {}\nRoom ID: {}\nSent By: {}\n\nReport Score: \ - {}\nReport Reason: {}", + "@room Event report received from {} -\n\nEvent ID: {}\nRoom ID: {}\nSent By: \ + {}\n\nReport Score: {}\nReport Reason: {}", sender_user.to_owned(), pdu.event_id, pdu.room_id, @@ -136,10 +139,18 @@ pub(crate) async fn report_event_route( /// check if report reasoning is less than or equal to 750 characters /// check if reporting user is in the reporting room async fn is_event_report_valid( - services: &Services, event_id: &EventId, room_id: &RoomId, sender_user: &UserId, reason: Option<&String>, - score: Option, pdu: &PduEvent, + services: &Services, + event_id: &EventId, + room_id: &RoomId, + sender_user: &UserId, + reason: Option<&String>, + score: Option, + pdu: &PduEvent, ) -> Result<()> { - debug_info!("Checking if report from user {sender_user} for event {event_id} in room {room_id} is valid"); + debug_info!( + "Checking if report from user {sender_user} for event {event_id} in room {room_id} is \ + valid" + ); if room_id != pdu.room_id { return Err(Error::BadRequest( @@ -183,6 +194,9 @@ async fn is_event_report_valid( /// enumerating for potential events existing in our server. 
async fn delay_response() { let time_to_wait = rand::thread_rng().gen_range(2..5); - debug_info!("Got successful /report request, waiting {time_to_wait} seconds before sending successful response."); + debug_info!( + "Got successful /report request, waiting {time_to_wait} seconds before sending \ + successful response." + ); sleep(Duration::from_secs(time_to_wait)).await; } diff --git a/src/api/client/room/aliases.rs b/src/api/client/room/aliases.rs index f500e9c7..3f0016af 100644 --- a/src/api/client/room/aliases.rs +++ b/src/api/client/room/aliases.rs @@ -12,7 +12,8 @@ use crate::Ruma; /// - Only users joined to the room are allowed to call this, or if /// `history_visibility` is world readable in the room pub(crate) async fn get_room_aliases_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index b98f1dab..89cddc0f 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -24,7 +24,8 @@ use ruma::{ }, int, serde::{JsonObject, Raw}, - CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, RoomVersionId, + CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, + RoomVersionId, }; use serde_json::{json, value::to_raw_value}; use service::{appservice::RegistrationInfo, Services}; @@ -49,7 +50,8 @@ use crate::{client::invite_helper, Ruma}; /// - Send invite events #[allow(clippy::large_stack_frames)] pub(crate) async fn create_room_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { use create_room::v3::RoomPreset; @@ -59,7 +61,10 @@ pub(crate) async fn create_room_route( && body.appservice_info.is_none() && !services.users.is_admin(sender_user).await { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Room creation has 
been disabled.")); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Room creation has been disabled.", + )); } let room_id: OwnedRoomId = if let Some(custom_room_id) = &body.room_id { @@ -91,8 +96,8 @@ pub(crate) async fn create_room_route( services .admin .send_text(&format!( - "Non-admin user {sender_user} tried to publish {0} to the room directory while \ - \"lockdown_public_room_directory\" is enabled", + "Non-admin user {sender_user} tried to publish {0} to the room directory \ + while \"lockdown_public_room_directory\" is enabled", &room_id )) .await; @@ -115,7 +120,7 @@ pub(crate) async fn create_room_route( }; let room_version = match body.room_version.clone() { - Some(room_version) => { + | Some(room_version) => if services.server.supported_room_version(&room_version) { room_version } else { @@ -123,13 +128,12 @@ pub(crate) async fn create_room_route( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", )); - } - }, - None => services.server.config.default_room_version.clone(), + }, + | None => services.server.config.default_room_version.clone(), }; let create_content = match &body.creation_content { - Some(content) => { + | Some(content) => { use RoomVersionId::*; let mut content = content @@ -139,7 +143,7 @@ pub(crate) async fn create_room_route( Error::bad_database("Failed to deserialise content as canonical JSON.") })?; match room_version { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { content.insert( "creator".into(), json!(&sender_user).try_into().map_err(|e| { @@ -148,24 +152,25 @@ pub(crate) async fn create_room_route( })?, ); }, - _ => { + | _ => { // V11+ removed the "creator" key }, } content.insert( "room_version".into(), - json!(room_version.as_str()) - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?, + json!(room_version.as_str()).try_into().map_err(|_| { + 
Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, ); content }, - None => { + | None => { use RoomVersionId::*; let content = match room_version { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => RoomCreateEventContent::new_v1(sender_user.clone()), - _ => RoomCreateEventContent::new_v11(), + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => + RoomCreateEventContent::new_v1(sender_user.clone()), + | _ => RoomCreateEventContent::new_v11(), }; let mut content = serde_json::from_str::( to_raw_value(&content) @@ -190,7 +195,8 @@ pub(crate) async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&create_content).expect("create event content serialization"), + content: to_raw_value(&create_content) + .expect("create event content serialization"), state_key: Some(String::new()), ..Default::default() }, @@ -206,16 +212,13 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - sender_user.to_string(), - &RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - is_direct: Some(body.is_direct), - ..RoomMemberEventContent::new(MembershipState::Join) - }, - ), + PduBuilder::state(sender_user.to_string(), &RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + is_direct: Some(body.is_direct), + ..RoomMemberEventContent::new(MembershipState::Join) + }), sender_user, &room_id, &state_lock, @@ -227,8 +230,8 @@ pub(crate) async fn create_room_route( // Figure out preset. 
We need it for preset specific events let preset = body.preset.clone().unwrap_or(match &body.visibility { - room::Visibility::Public => RoomPreset::PublicChat, - _ => RoomPreset::PrivateChat, // Room visibility should not be custom + | room::Visibility::Public => RoomPreset::PublicChat, + | _ => RoomPreset::PrivateChat, // Room visibility should not be custom }); let mut users = BTreeMap::from_iter([(sender_user.clone(), int!(100))]); @@ -236,7 +239,9 @@ pub(crate) async fn create_room_route( if preset == RoomPreset::TrustedPrivateChat { for invite in &body.invite { if services.users.user_is_ignored(sender_user, invite).await { - return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms."))); + return Err!(Request(Forbidden( + "You cannot invite users you have ignored to rooms." + ))); } else if services.users.user_is_ignored(invite, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked @@ -247,8 +252,11 @@ pub(crate) async fn create_room_route( } } - let power_levels_content = - default_power_levels_content(body.power_level_content_override.as_ref(), &body.visibility, users)?; + let power_levels_content = default_power_levels_content( + body.power_level_content_override.as_ref(), + &body.visibility, + users, + )?; services .rooms @@ -256,7 +264,8 @@ pub(crate) async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_content).expect("serialized power_levels event content"), + content: to_raw_value(&power_levels_content) + .expect("serialized power_levels event content"), state_key: Some(String::new()), ..Default::default() }, @@ -273,13 +282,10 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomCanonicalAliasEventContent { - alias: Some(room_alias_id.to_owned()), - alt_aliases: vec![], - }, - ), + 
PduBuilder::state(String::new(), &RoomCanonicalAliasEventContent { + alias: Some(room_alias_id.to_owned()), + alt_aliases: vec![], + }), sender_user, &room_id, &state_lock, @@ -298,9 +304,9 @@ pub(crate) async fn create_room_route( PduBuilder::state( String::new(), &RoomJoinRulesEventContent::new(match preset { - RoomPreset::PublicChat => JoinRule::Public, + | RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default - _ => JoinRule::Invite, + | _ => JoinRule::Invite, }), ), sender_user, @@ -334,8 +340,8 @@ pub(crate) async fn create_room_route( PduBuilder::state( String::new(), &RoomGuestAccessEventContent::new(match preset { - RoomPreset::PublicChat => GuestAccess::Forbidden, - _ => GuestAccess::CanJoin, + | RoomPreset::PublicChat => GuestAccess::Forbidden, + | _ => GuestAccess::CanJoin, }), ), sender_user, @@ -367,7 +373,9 @@ pub(crate) async fn create_room_route( pdu_builder.state_key.get_or_insert_with(String::new); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == TimelineEventType::RoomEncryption && !services.globals.allow_encryption() { + if pdu_builder.event_type == TimelineEventType::RoomEncryption + && !services.globals.allow_encryption() + { continue; } @@ -399,12 +407,7 @@ pub(crate) async fn create_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomTopicEventContent { - topic: topic.clone(), - }, - ), + PduBuilder::state(String::new(), &RoomTopicEventContent { topic: topic.clone() }), sender_user, &room_id, &state_lock, @@ -417,16 +420,19 @@ pub(crate) async fn create_room_route( drop(state_lock); for user_id in &body.invite { if services.users.user_is_ignored(sender_user, user_id).await { - return Err!(Request(Forbidden("You cannot invite users you have ignored to rooms."))); + return Err!(Request(Forbidden( + "You cannot invite users you have ignored to rooms." 
+ ))); } else if services.users.user_is_ignored(user_id, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked continue; } - if let Err(e) = invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct) - .boxed() - .await + if let Err(e) = + invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct) + .boxed() + .await { warn!(%e, "Failed to send invite"); } @@ -446,7 +452,10 @@ pub(crate) async fn create_room_route( if services.globals.config.admin_room_notices { services .admin - .send_text(&format!("{sender_user} made {} public to the room directory", &room_id)) + .send_text(&format!( + "{sender_user} made {} public to the room directory", + &room_id + )) .await; } info!("{sender_user} made {0} public to the room directory", &room_id); @@ -459,21 +468,24 @@ pub(crate) async fn create_room_route( /// creates the power_levels_content for the PDU builder fn default_power_levels_content( - power_level_content_override: Option<&Raw>, visibility: &room::Visibility, + power_level_content_override: Option<&Raw>, + visibility: &room::Visibility, users: BTreeMap, ) -> Result { - let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"); + let mut power_levels_content = + serde_json::to_value(RoomPowerLevelsEventContent { users, ..Default::default() }) + .expect("event is valid, we just created it"); // secure proper defaults of sensitive/dangerous permissions that moderators // (power level 50) should not have easy access to - power_levels_content["events"]["m.room.power_levels"] = serde_json::to_value(100).expect("100 is valid Value"); - power_levels_content["events"]["m.room.server_acl"] = serde_json::to_value(100).expect("100 is valid Value"); - power_levels_content["events"]["m.room.tombstone"] = serde_json::to_value(100).expect("100 is valid Value"); - 
power_levels_content["events"]["m.room.encryption"] = serde_json::to_value(100).expect("100 is valid Value"); + power_levels_content["events"]["m.room.power_levels"] = + serde_json::to_value(100).expect("100 is valid Value"); + power_levels_content["events"]["m.room.server_acl"] = + serde_json::to_value(100).expect("100 is valid Value"); + power_levels_content["events"]["m.room.tombstone"] = + serde_json::to_value(100).expect("100 is valid Value"); + power_levels_content["events"]["m.room.encryption"] = + serde_json::to_value(100).expect("100 is valid Value"); power_levels_content["events"]["m.room.history_visibility"] = serde_json::to_value(100).expect("100 is valid Value"); @@ -481,14 +493,18 @@ fn default_power_levels_content( // useful in read-only announcement rooms that post a public poll. power_levels_content["events"]["org.matrix.msc3381.poll.response"] = serde_json::to_value(0).expect("0 is valid Value"); - power_levels_content["events"]["m.poll.response"] = serde_json::to_value(0).expect("0 is valid Value"); + power_levels_content["events"]["m.poll.response"] = + serde_json::to_value(0).expect("0 is valid Value"); // synapse does this too. clients do not expose these permissions. it prevents // default users from calling public rooms, for obvious reasons. 
if *visibility == room::Visibility::Public { - power_levels_content["events"]["m.call.invite"] = serde_json::to_value(50).expect("50 is valid Value"); - power_levels_content["events"]["m.call"] = serde_json::to_value(50).expect("50 is valid Value"); - power_levels_content["events"]["m.call.member"] = serde_json::to_value(50).expect("50 is valid Value"); + power_levels_content["events"]["m.call.invite"] = + serde_json::to_value(50).expect("50 is valid Value"); + power_levels_content["events"]["m.call"] = + serde_json::to_value(50).expect("50 is valid Value"); + power_levels_content["events"]["m.call.member"] = + serde_json::to_value(50).expect("50 is valid Value"); power_levels_content["events"]["org.matrix.msc3401.call"] = serde_json::to_value(50).expect("50 is valid Value"); power_levels_content["events"]["org.matrix.msc3401.call.member"] = @@ -497,7 +513,9 @@ fn default_power_levels_content( if let Some(power_level_content_override) = power_level_content_override { let json: JsonObject = serde_json::from_str(power_level_content_override.json().get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override."))?; + .map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") + })?; for (key, value) in json { power_levels_content[key] = value; @@ -509,14 +527,16 @@ fn default_power_levels_content( /// if a room is being created with a room alias, run our checks async fn room_alias_check( - services: &Services, room_alias_name: &str, appservice_info: Option<&RegistrationInfo>, + services: &Services, + room_alias_name: &str, + appservice_info: Option<&RegistrationInfo>, ) -> Result { // Basic checks on the room alias validity if room_alias_name.contains(':') { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Room alias contained `:` which is not allowed. Please note that this expects a localpart, not the full \ - room alias.", + "Room alias contained `:` which is not allowed. 
Please note that this expects a \ + localpart, not the full room alias.", )); } else if room_alias_name.contains(char::is_whitespace) { return Err(Error::BadRequest( @@ -534,8 +554,11 @@ async fn room_alias_check( return Err(Error::BadRequest(ErrorKind::Unknown, "Room alias name is forbidden.")); } - let full_room_alias = RoomAliasId::parse(format!("#{}:{}", room_alias_name, services.globals.config.server_name)) - .map_err(|e| { + let full_room_alias = RoomAliasId::parse(format!( + "#{}:{}", + room_alias_name, services.globals.config.server_name + )) + .map_err(|e| { info!("Failed to parse room alias {room_alias_name}: {e}"); Error::BadRequest(ErrorKind::InvalidParam, "Invalid room alias specified.") })?; @@ -552,14 +575,20 @@ async fn room_alias_check( if let Some(info) = appservice_info { if !info.aliases.is_match(full_room_alias.as_str()) { - return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias is not in namespace.")); + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "Room alias is not in namespace.", + )); } } else if services .appservice .is_exclusive_alias(&full_room_alias) .await { - return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias reserved by appservice.")); + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "Room alias reserved by appservice.", + )); } debug_info!("Full room alias: {full_room_alias}"); @@ -581,8 +610,8 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Result, ref body: Ruma, + State(services): State, + ref body: Ruma, ) -> Result { let event = services .rooms @@ -47,7 +48,5 @@ pub(crate) async fn get_room_event_route( let event = event.to_room_event(); - Ok(get_room_event::v3::Response { - event, - }) + Ok(get_room_event::v3::Response { event }) } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 8b2e45df..cc3c9420 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -8,7 +8,8 @@ use 
crate::Ruma; const LIMIT_MAX: usize = 100; pub(crate) async fn room_initial_sync_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let room_id = &body.room_id; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 401bf800..cc6cca5e 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -43,7 +43,8 @@ const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 9] = &[ /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking pub(crate) async fn upgrade_room_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -72,13 +73,10 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomTombstoneEventContent { - body: "This room has been replaced".to_owned(), - replacement_room: replacement_room.clone(), - }, - ), + PduBuilder::state(String::new(), &RoomTombstoneEventContent { + body: "This room has been replaced".to_owned(), + replacement_room: replacement_room.clone(), + }), sender_user, &body.room_id, &state_lock, @@ -108,7 +106,7 @@ pub(crate) async fn upgrade_room_route( { use RoomVersionId::*; match body.new_version { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { create_event_content.insert( "creator".into(), json!(&sender_user).try_into().map_err(|e| { @@ -117,7 +115,7 @@ pub(crate) async fn upgrade_room_route( })?, ); }, - _ => { + | _ => { // "creator" key no longer exists in V11+ rooms create_event_content.remove("creator"); }, @@ -154,7 +152,8 @@ pub(crate) async fn upgrade_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&create_event_content).expect("event is valid, we just created it"), + content: 
to_raw_value(&create_event_content) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some(String::new()), redacts: None, @@ -203,8 +202,8 @@ pub(crate) async fn upgrade_room_route( .room_state_get(&body.room_id, event_type, "") .await { - Ok(v) => v.content.clone(), - Err(_) => continue, // Skipping missing events. + | Ok(v) => v.content.clone(), + | Err(_) => continue, // Skipping missing events. }; services @@ -258,7 +257,9 @@ pub(crate) async fn upgrade_room_route( power_levels_event_content .users_default .checked_add(int!(1)) - .ok_or_else(|| err!(Request(BadJson("users_default power levels event content is not valid"))))?, + .ok_or_else(|| { + err!(Request(BadJson("users_default power levels event content is not valid"))) + })?, ); // Modify the power levels in the old room to prevent sending of events and @@ -267,14 +268,11 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomPowerLevelsEventContent { - events_default: new_level, - invite: new_level, - ..power_levels_event_content - }, - ), + PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { + events_default: new_level, + invite: new_level, + ..power_levels_event_content + }), sender_user, &body.room_id, &state_lock, @@ -284,7 +282,5 @@ pub(crate) async fn upgrade_room_route( drop(state_lock); // Return the replacement room id - Ok(upgrade_room::v3::Response { - replacement_room, - }) + Ok(upgrade_room::v3::Response { replacement_room }) } diff --git a/src/api/client/search.rs b/src/api/client/search.rs index 28a8891c..e60bd26d 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -35,7 +35,10 @@ const BATCH_MAX: usize = 20; /// /// - Only works if the user is currently joined to the room (TODO: Respect /// history visibility) -pub(crate) async fn search_events_route(State(services): State, body: Ruma) -> Result { +pub(crate) async fn search_events_route( + State(services): 
State, + body: Ruma, +) -> Result { let sender_user = body.sender_user(); let next_batch = body.next_batch.as_deref(); let room_events_result: OptionFuture<_> = body @@ -56,7 +59,10 @@ pub(crate) async fn search_events_route(State(services): State, bo #[allow(clippy::map_unwrap_or)] async fn category_room_events( - services: &Services, sender_user: &UserId, next_batch: Option<&str>, criteria: &Criteria, + services: &Services, + sender_user: &UserId, + next_batch: Option<&str>, + criteria: &Criteria, ) -> Result { let filter = &criteria.filter; @@ -186,11 +192,17 @@ async fn procure_room_state(services: &Services, room_id: &RoomId) -> Result Result { +async fn check_room_visible( + services: &Services, + user_id: &UserId, + room_id: &RoomId, + search: &Criteria, +) -> Result { let check_visible = search.filter.rooms.is_some(); let check_state = check_visible && search.include_state.is_some_and(is_true!()); - let is_joined = !check_visible || services.rooms.state_cache.is_joined(user_id, room_id).await; + let is_joined = + !check_visible || services.rooms.state_cache.is_joined(user_id, room_id).await; let state_visible = !check_state || services diff --git a/src/api/client/send.rs b/src/api/client/send.rs index e909ebbf..9afa7e8c 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -17,14 +17,17 @@ use crate::{service::pdu::PduBuilder, utils, Result, Ruma}; /// - Tries to send the event into the room, auth rules will determine if it is /// allowed pub(crate) async fn send_message_event_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user(); let sender_device = body.sender_device.as_deref(); let appservice_info = body.appservice_info.as_ref(); // Forbid m.room.encrypted if encryption is disabled - if MessageLikeEventType::RoomEncrypted == body.event_type && !services.globals.allow_encryption() { + if MessageLikeEventType::RoomEncrypted == body.event_type + && 
!services.globals.allow_encryption() + { return Err!(Request(Forbidden("Encryption has been disabled"))); } @@ -60,8 +63,8 @@ pub(crate) async fn send_message_event_route( let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - let content = - from_str(body.body.body.json().get()).map_err(|e| err!(Request(BadJson("Invalid JSON body: {e}"))))?; + let content = from_str(body.body.body.json().get()) + .map_err(|e| err!(Request(BadJson("Invalid JSON body: {e}"))))?; let event_id = services .rooms @@ -80,13 +83,14 @@ pub(crate) async fn send_message_event_route( ) .await?; - services - .transaction_ids - .add_txnid(sender_user, sender_device, &body.txn_id, event_id.as_bytes()); + services.transaction_ids.add_txnid( + sender_user, + sender_device, + &body.txn_id, + event_id.as_bytes(), + ); drop(state_lock); - Ok(send_message_event::v3::Response { - event_id: event_id.into(), - }) + Ok(send_message_event::v3::Response { event_id: event_id.into() }) } diff --git a/src/api/client/session.rs b/src/api/client/session.rs index e30c94d7..90cd4cb4 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -37,7 +37,8 @@ struct Claims { /// the `type` field when logging in. #[tracing::instrument(skip_all, fields(%client), name = "login")] pub(crate) async fn get_login_types_route( - InsecureClientIp(client): InsecureClientIp, _body: Ruma, + InsecureClientIp(client): InsecureClientIp, + _body: Ruma, ) -> Result { Ok(get_login_types::v3::Response::new(vec![ get_login_types::v3::LoginType::Password(PasswordLoginType::default()), @@ -61,13 +62,15 @@ pub(crate) async fn get_login_types_route( /// supported login types. 
#[tracing::instrument(skip_all, fields(%client), name = "login")] pub(crate) async fn login_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, ) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { #[allow(deprecated)] - login::v3::LoginInfo::Password(login::v3::Password { + | login::v3::LoginInfo::Password(login::v3::Password { identifier, password, user, @@ -75,7 +78,10 @@ pub(crate) async fn login_route( }) => { debug!("Got password login type"); let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name(user_id.to_lowercase(), services.globals.server_name()) + UserId::parse_with_server_name( + user_id.to_lowercase(), + services.globals.server_name(), + ) } else if let Some(user) = user { UserId::parse(user) } else { @@ -100,22 +106,29 @@ pub(crate) async fn login_route( user_id }, - login::v3::LoginInfo::Token(login::v3::Token { - token, - }) => { + | login::v3::LoginInfo::Token(login::v3::Token { token }) => { debug!("Got token login type"); if let Some(jwt_decoding_key) = services.globals.jwt_decoding_key() { - let token = - jsonwebtoken::decode::(token, jwt_decoding_key, &jsonwebtoken::Validation::default()) - .map_err(|e| { - warn!("Failed to parse JWT token from user logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid.") - })?; + let token = jsonwebtoken::decode::( + token, + jwt_decoding_key, + &jsonwebtoken::Validation::default(), + ) + .map_err(|e| { + warn!("Failed to parse JWT token from user logging in: {e}"); + Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid.") + })?; let username = token.claims.sub.to_lowercase(); - UserId::parse_with_server_name(username, services.globals.server_name()) - .map_err(|e| err!(Request(InvalidUsername(debug_error!(?e, "Failed to parse login 
username")))))? + UserId::parse_with_server_name(username, services.globals.server_name()).map_err( + |e| { + err!(Request(InvalidUsername(debug_error!( + ?e, + "Failed to parse login username" + )))) + }, + )? } else { return Err!(Request(Unknown( "Token login is not supported (server has no jwt decoding key)." @@ -123,13 +136,16 @@ pub(crate) async fn login_route( } }, #[allow(deprecated)] - login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { + | login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { identifier, user, }) => { debug!("Got appservice login type"); let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name(user_id.to_lowercase(), services.globals.server_name()) + UserId::parse_with_server_name( + user_id.to_lowercase(), + services.globals.server_name(), + ) } else if let Some(user) = user { UserId::parse(user) } else { @@ -143,18 +159,27 @@ pub(crate) async fn login_route( if let Some(ref info) = body.appservice_info { if !info.is_user_match(&user_id) { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); } } else { - return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing appservice token.")); + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing appservice token.", + )); } user_id }, - _ => { + | _ => { warn!("Unsupported or unknown login type: {:?}", &body.login_info); debug!("JSON body: {:?}", &body.json_body); - return Err(Error::BadRequest(ErrorKind::Unknown, "Unsupported or unknown login type.")); + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Unsupported or unknown login type.", + )); }, }; @@ -233,7 +258,9 @@ pub(crate) async fn login_route( /// - Triggers device list updates #[tracing::instrument(skip_all, fields(%client), name = "logout")] pub(crate) async fn logout_route( - 
State(services): State, InsecureClientIp(client): InsecureClientIp, body: Ruma, + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -261,7 +288,8 @@ pub(crate) async fn logout_route( /// user. #[tracing::instrument(skip_all, fields(%client), name = "logout")] pub(crate) async fn logout_all_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client/space.rs b/src/api/client/space.rs index a3031a3a..409c9083 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -13,7 +13,8 @@ use crate::{service::rooms::spaces::PaginationToken, Error, Result, Ruma}; /// Paginates over the space tree in a depth-first manner to locate child rooms /// of a given space. 
pub(crate) async fn get_hierarchy_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 881eca98..6a65f0f9 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -31,7 +31,8 @@ use crate::{Ruma, RumaResponse}; /// allowed /// - If event is new `canonical_alias`: Rejects if alias is incorrect pub(crate) async fn send_state_event_for_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -63,7 +64,8 @@ pub(crate) async fn send_state_event_for_key_route( /// allowed /// - If event is new `canonical_alias`: Rejects if alias is incorrect pub(crate) async fn send_state_event_for_empty_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result> { send_state_event_for_key_route(State(services), body) .await @@ -77,7 +79,8 @@ pub(crate) async fn send_state_event_for_empty_key_route( /// - If not joined: Only works if current room history visibility is world /// readable pub(crate) async fn get_state_events_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -111,7 +114,8 @@ pub(crate) async fn get_state_events_route( /// - If not joined: Only works if current room history visibility is world /// readable pub(crate) async fn get_state_events_for_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -157,7 +161,8 @@ pub(crate) async fn get_state_events_for_key_route( /// - If not joined: Only works if current room history visibility is world /// readable 
pub(crate) async fn get_state_events_for_empty_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result> { get_state_events_for_key_route(State(services), body) .await @@ -165,8 +170,13 @@ pub(crate) async fn get_state_events_for_empty_key_route( } async fn send_state_event_for_key_helper( - services: &Services, sender: &UserId, room_id: &RoomId, event_type: &StateEventType, - json: &Raw, state_key: String, timestamp: Option, + services: &Services, + sender: &UserId, + room_id: &RoomId, + event_type: &StateEventType, + json: &Raw, + state_key: String, + timestamp: Option, ) -> Result> { allowed_to_send_state_event(services, room_id, event_type, json).await?; let state_lock = services.rooms.state.mutex.lock(room_id).await; @@ -191,20 +201,27 @@ async fn send_state_event_for_key_helper( } async fn allowed_to_send_state_event( - services: &Services, room_id: &RoomId, event_type: &StateEventType, json: &Raw, + services: &Services, + room_id: &RoomId, + event_type: &StateEventType, + json: &Raw, ) -> Result { match event_type { // Forbid m.room.encryption if encryption is disabled - StateEventType::RoomEncryption => { + | StateEventType::RoomEncryption => if !services.globals.allow_encryption() { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Encryption has been disabled")); - } - }, + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Encryption has been disabled", + )); + }, // admin room is a sensitive room, it should not ever be made public - StateEventType::RoomJoinRules => { + | StateEventType::RoomJoinRules => { if let Ok(admin_room_id) = services.admin.get_admin_room().await { if admin_room_id == room_id { - if let Ok(join_rule) = serde_json::from_str::(json.json().get()) { + if let Ok(join_rule) = + serde_json::from_str::(json.json().get()) + { if join_rule.join_rule == JoinRule::Public { return Err(Error::BadRequest( ErrorKind::forbidden(), @@ -216,16 +233,20 @@ async fn allowed_to_send_state_event( 
} }, // admin room is a sensitive room, it should not ever be made world readable - StateEventType::RoomHistoryVisibility => { + | StateEventType::RoomHistoryVisibility => { if let Ok(admin_room_id) = services.admin.get_admin_room().await { if admin_room_id == room_id { - if let Ok(visibility_content) = - serde_json::from_str::(json.json().get()) + if let Ok(visibility_content) = serde_json::from_str::< + RoomHistoryVisibilityEventContent, + >(json.json().get()) { - if visibility_content.history_visibility == HistoryVisibility::WorldReadable { + if visibility_content.history_visibility + == HistoryVisibility::WorldReadable + { return Err(Error::BadRequest( ErrorKind::forbidden(), - "Admin room is not allowed to be made world readable (public room history).", + "Admin room is not allowed to be made world readable (public \ + room history).", )); } } @@ -233,8 +254,10 @@ async fn allowed_to_send_state_event( } }, // TODO: allow alias if it previously existed - StateEventType::RoomCanonicalAlias => { - if let Ok(canonical_alias) = serde_json::from_str::(json.json().get()) { + | StateEventType::RoomCanonicalAlias => { + if let Ok(canonical_alias) = + serde_json::from_str::(json.json().get()) + { let mut aliases = canonical_alias.alt_aliases.clone(); if let Some(alias) = canonical_alias.alias { @@ -243,7 +266,9 @@ async fn allowed_to_send_state_event( for alias in aliases { if !services.globals.server_is_ours(alias.server_name()) { - return Err!(Request(Forbidden("canonical_alias must be for this server"))); + return Err!(Request(Forbidden( + "canonical_alias must be for this server" + ))); } if !services @@ -255,13 +280,14 @@ async fn allowed_to_send_state_event( // Make sure it's the right room { return Err!(Request(Forbidden( - "You are only allowed to send canonical_alias events when its aliases already exist" + "You are only allowed to send canonical_alias events when its \ + aliases already exist" ))); } } } }, - _ => (), + | _ => (), } Ok(()) diff --git 
a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 2b8d478c..b772fbf1 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -12,8 +12,12 @@ pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route}; use crate::{service::Services, Error, PduEvent, Result}; async fn load_timeline( - services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, - next_batch: Option, limit: usize, + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + roomsincecount: PduCount, + next_batch: Option, + limit: usize, ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { let last_timeline_count = services .rooms @@ -51,7 +55,10 @@ async fn load_timeline( } async fn share_encrypted_room( - services: &Services, sender_user: &UserId, user_id: &UserId, ignore_room: Option<&RoomId>, + services: &Services, + sender_user: &UserId, + user_id: &UserId, + ignore_room: Option<&RoomId>, ) -> bool { services .rooms diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index c5ec3886..a05bcf98 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -32,8 +32,9 @@ use ruma::{ sync::sync_events::{ self, v3::{ - Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, Presence, - RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice, + Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, + LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, + Timeline, ToDevice, }, DeviceLists, UnreadNotificationsCount, }, @@ -107,7 +108,8 @@ type PresenceUpdates = HashMap; ) )] pub(crate) async fn sync_events_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result> { let (sender_user, sender_device) = body.sender(); @@ -127,9 +129,9 @@ pub(crate) async fn sync_events_route( // Load filter let filter = match body.body.filter.as_ref() { - None => 
FilterDefinition::default(), - Some(Filter::FilterDefinition(ref filter)) => filter.clone(), - Some(Filter::FilterId(ref filter_id)) => services + | None => FilterDefinition::default(), + | Some(Filter::FilterDefinition(ref filter)) => filter.clone(), + | Some(Filter::FilterId(ref filter_id)) => services .users .get_filter(sender_user, filter_id) .await @@ -138,11 +140,11 @@ pub(crate) async fn sync_events_route( // some clients, at least element, seem to require knowledge of redundant // members for "inline" profiles on the timeline to work properly - let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options { - LazyLoadOptions::Enabled { - include_redundant_members, - } => (true, include_redundant_members), - LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")), + let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options + { + | LazyLoadOptions::Enabled { include_redundant_members } => + (true, include_redundant_members), + | LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")), }; let full_state = body.body.full_state; @@ -230,9 +232,7 @@ pub(crate) async fn sync_events_route( } let invited_room = InvitedRoom { - invite_state: InviteState { - events: invite_state, - }, + invite_state: InviteState { events: invite_state }, }; invited_rooms.insert(room_id, invited_room); @@ -268,9 +268,10 @@ pub(crate) async fn sync_events_route( .count_one_time_keys(sender_user, sender_device); // Remove all to-device events the device received *last time* - let remove_to_device_events = services - .users - .remove_to_device_events(sender_user, sender_device, since); + let remove_to_device_events = + services + .users + .remove_to_device_events(sender_user, sender_device, since); let rooms = join3(joined_rooms, left_rooms, invited_rooms); let ephemeral = join3(remove_to_device_events, to_device_events, presence_updates); @@ -290,7 +291,8 @@ pub(crate) async fn 
sync_events_route( .into_iter() .stream() .broad_filter_map(|user_id| async move { - let no_shared_encrypted_room = !share_encrypted_room(&services, sender_user, &user_id, None).await; + let no_shared_encrypted_room = + !share_encrypted_room(&services, sender_user, &user_id, None).await; no_shared_encrypted_room.then_some(user_id) }) .ready_fold(HashSet::new(), |mut device_list_left, user_id| { @@ -300,9 +302,7 @@ pub(crate) async fn sync_events_route( .await; let response = sync_events::v3::Response { - account_data: GlobalAccountData { - events: account_data, - }, + account_data: GlobalAccountData { events: account_data }, device_lists: DeviceLists { changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), @@ -324,9 +324,7 @@ pub(crate) async fn sync_events_route( invite: invited_rooms, knock: BTreeMap::new(), // TODO }, - to_device: ToDevice { - events: to_device_events, - }, + to_device: ToDevice { events: to_device_events }, }; // TODO: Retry the endpoint instead of returning @@ -348,7 +346,11 @@ pub(crate) async fn sync_events_route( } #[tracing::instrument(name = "presence", level = "debug", skip_all)] -async fn process_presence_updates(services: &Services, since: u64, syncing_user: &UserId) -> PresenceUpdates { +async fn process_presence_updates( + services: &Services, + since: u64, + syncing_user: &UserId, +) -> PresenceUpdates { services .presence .presence_since(since) @@ -367,10 +369,10 @@ async fn process_presence_updates(services: &Services, since: u64, syncing_user: }) .ready_fold(PresenceUpdates::new(), |mut updates, (user_id, event)| { match updates.entry(user_id.into()) { - Entry::Vacant(slot) => { + | Entry::Vacant(slot) => { slot.insert(event); }, - Entry::Occupied(mut slot) => { + | Entry::Occupied(mut slot) => { let curr_event = slot.get_mut(); let curr_content = &mut curr_event.content; let new_content = event.content; @@ -380,7 +382,8 @@ async fn process_presence_updates(services: &Services, since: 
u64, syncing_user: curr_content.status_msg = new_content .status_msg .or_else(|| curr_content.status_msg.take()); - curr_content.last_active_ago = new_content.last_active_ago.or(curr_content.last_active_ago); + curr_content.last_active_ago = + new_content.last_active_ago.or(curr_content.last_active_ago); curr_content.displayname = new_content .displayname .or_else(|| curr_content.displayname.take()); @@ -410,8 +413,13 @@ async fn process_presence_updates(services: &Services, since: u64, syncing_user: )] #[allow(clippy::too_many_arguments)] async fn handle_left_room( - services: &Services, since: u64, ref room_id: OwnedRoomId, sender_user: &UserId, next_batch_string: &str, - full_state: bool, lazy_load_enabled: bool, + services: &Services, + since: u64, + ref room_id: OwnedRoomId, + sender_user: &UserId, + next_batch_string: &str, + full_state: bool, + lazy_load_enabled: bool, ) -> Result> { // Get and drop the lock to wait for remaining operations to finish let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; @@ -440,7 +448,8 @@ async fn handle_left_room( .try_into() .expect("Timestamp is valid js_int value"), kind: RoomMember, - content: serde_json::from_str(r#"{"membership":"leave"}"#).expect("this is valid JSON"), + content: serde_json::from_str(r#"{"membership":"leave"}"#) + .expect("this is valid JSON"), state_key: Some(sender_user.to_string()), unsigned: None, // The following keys are dropped on conversion @@ -449,16 +458,12 @@ async fn handle_left_room( depth: uint!(1), auth_events: vec![], redacts: None, - hashes: EventHash { - sha256: String::new(), - }, + hashes: EventHash { sha256: String::new() }, signatures: None, }; return Ok(Some(LeftRoom { - account_data: RoomAccountData { - events: Vec::new(), - }, + account_data: RoomAccountData { events: Vec::new() }, timeline: Timeline { limited: false, prev_batch: Some(next_batch_string.to_owned()), @@ -479,8 +484,8 @@ async fn handle_left_room( .await; let since_state_ids = match 
since_shortstatehash { - Ok(s) => services.rooms.state_accessor.state_full_ids(s).await?, - Err(_) => HashMap::new(), + | Ok(s) => services.rooms.state_accessor.state_full_ids(s).await?, + | Err(_) => HashMap::new(), }; let Ok(left_event_id): Result = services @@ -542,17 +547,14 @@ async fn handle_left_room( } Ok(Some(LeftRoom { - account_data: RoomAccountData { - events: Vec::new(), - }, + account_data: RoomAccountData { events: Vec::new() }, timeline: Timeline { - limited: true, // TODO: support left timeline events so we dont need to set this to true + limited: true, /* TODO: support left timeline events so we dont need to set this to + * true */ prev_batch: Some(next_batch_string.to_owned()), events: Vec::new(), // and so we dont need to set this to empty vec }, - state: RoomState { - events: left_state_events, - }, + state: RoomState { events: left_state_events }, })) } @@ -566,8 +568,15 @@ async fn handle_left_room( )] #[allow(clippy::too_many_arguments)] async fn load_joined_room( - services: &Services, sender_user: &UserId, sender_device: &DeviceId, ref room_id: OwnedRoomId, since: u64, - next_batch: u64, lazy_load_enabled: bool, lazy_load_send_redundant: bool, full_state: bool, + services: &Services, + sender_user: &UserId, + sender_device: &DeviceId, + ref room_id: OwnedRoomId, + since: u64, + next_batch: u64, + lazy_load_enabled: bool, + lazy_load_send_redundant: bool, + full_state: bool, ) -> Result<(JoinedRoom, HashSet, HashSet)> { // Get and drop the lock to wait for remaining operations to finish // This will make sure the we have all events until next_batch @@ -590,18 +599,26 @@ async fn load_joined_room( .ok() .map(Ok); - let timeline = load_timeline(services, sender_user, room_id, sincecount, Some(next_batchcount), 10_usize); + let timeline = load_timeline( + services, + sender_user, + room_id, + sincecount, + Some(next_batchcount), + 10_usize, + ); let (current_shortstatehash, since_shortstatehash, timeline) = try_join3(current_shortstatehash, 
since_shortstatehash, timeline).await?; let (timeline_pdus, limited) = timeline; - let timeline_users = timeline_pdus - .iter() - .fold(HashSet::new(), |mut timeline_users, (_, event)| { - timeline_users.insert(event.sender.as_str().to_owned()); - timeline_users - }); + let timeline_users = + timeline_pdus + .iter() + .fold(HashSet::new(), |mut timeline_users, (_, event)| { + timeline_users.insert(event.sender.as_str().to_owned()); + timeline_users + }); let last_notification_read: OptionFuture<_> = timeline_pdus .is_empty() @@ -617,13 +634,16 @@ async fn load_joined_room( .is_none_or(|&count| count > since) .await; - services - .rooms - .lazy_loading - .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount); + services.rooms.lazy_loading.lazy_load_confirm_delivery( + sender_user, + sender_device, + room_id, + sincecount, + ); let no_state_changes = timeline_pdus.is_empty() - && (since_shortstatehash.is_none() || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); + && (since_shortstatehash.is_none() + || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); let mut device_list_updates = HashSet::::new(); let mut left_encrypted_users = HashSet::::new(); @@ -732,9 +752,10 @@ async fn load_joined_room( let events = join4(room_events, account_data_events, receipt_events, typing_events); let unread_notifications = join(notification_count, highlight_count); - let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) - .boxed() - .await; + let (unread_notifications, events, device_updates) = + join3(unread_notifications, events, device_updates) + .boxed() + .await; let (room_events, account_data_events, receipt_events, typing_events) = events; let (notification_count, highlight_count) = unread_notifications; @@ -773,9 +794,7 @@ async fn load_joined_room( .await; let joined_room = JoinedRoom { - account_data: RoomAccountData { - events: account_data_events, - }, + 
account_data: RoomAccountData { events: account_data_events }, summary: RoomSummary { joined_member_count: joined_member_count.map(ruma_from_u64), invited_member_count: invited_member_count.map(ruma_from_u64), @@ -786,10 +805,7 @@ async fn load_joined_room( .filter_map(Result::ok) .collect(), }, - unread_notifications: UnreadNotificationsCount { - highlight_count, - notification_count, - }, + unread_notifications: UnreadNotificationsCount { highlight_count, notification_count }, timeline: Timeline { limited: limited || joined_since_last_sync, events: room_events, @@ -805,9 +821,7 @@ async fn load_joined_room( .map(PduEvent::to_sync_state_event) .collect(), }, - ephemeral: Ephemeral { - events: edus, - }, + ephemeral: Ephemeral { events: edus }, unread_thread_notifications: BTreeMap::new(), }; @@ -827,11 +841,20 @@ async fn load_joined_room( )] #[allow(clippy::too_many_arguments)] async fn calculate_state_changes( - services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, - lazy_load_enabled: bool, lazy_load_send_redundant: bool, full_state: bool, - device_list_updates: &mut HashSet, left_encrypted_users: &mut HashSet, - since_shortstatehash: Option, current_shortstatehash: ShortStateHash, - timeline_pdus: &Vec<(PduCount, PduEvent)>, timeline_users: &HashSet, + services: &Services, + sender_user: &UserId, + sender_device: &DeviceId, + room_id: &RoomId, + next_batchcount: PduCount, + lazy_load_enabled: bool, + lazy_load_send_redundant: bool, + full_state: bool, + device_list_updates: &mut HashSet, + left_encrypted_users: &mut HashSet, + since_shortstatehash: Option, + current_shortstatehash: ShortStateHash, + timeline_pdus: &Vec<(PduCount, PduEvent)>, + timeline_users: &HashSet, ) -> Result { let since_sender_member: OptionFuture<_> = since_shortstatehash .map(|short| { @@ -843,12 +866,13 @@ async fn calculate_state_changes( }) .into(); - let joined_since_last_sync = since_sender_member - .await - .flatten() - 
.map_or(true, |content: RoomMemberEventContent| { - content.membership != MembershipState::Join - }); + let joined_since_last_sync = + since_sender_member + .await + .flatten() + .is_none_or(|content: RoomMemberEventContent| { + content.membership != MembershipState::Join + }); if since_shortstatehash.is_none() || joined_since_last_sync { calculate_state_initial( @@ -886,8 +910,14 @@ async fn calculate_state_changes( #[tracing::instrument(name = "initial", level = "trace", skip_all)] #[allow(clippy::too_many_arguments)] async fn calculate_state_initial( - services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, - lazy_load_enabled: bool, full_state: bool, current_shortstatehash: ShortStateHash, + services: &Services, + sender_user: &UserId, + sender_device: &DeviceId, + room_id: &RoomId, + next_batchcount: PduCount, + lazy_load_enabled: bool, + full_state: bool, + current_shortstatehash: ShortStateHash, timeline_users: &HashSet, ) -> Result { // Probably since = 0, we will do an initial sync @@ -956,10 +986,13 @@ async fn calculate_state_initial( // The state_events above should contain all timeline_users, let's mark them as // lazy loaded. 
- services - .rooms - .lazy_loading - .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount); + services.rooms.lazy_loading.lazy_load_mark_sent( + sender_user, + sender_device, + room_id, + lazy_loaded, + next_batchcount, + ); Ok(StateChanges { heroes, @@ -973,13 +1006,23 @@ async fn calculate_state_initial( #[tracing::instrument(name = "incremental", level = "trace", skip_all)] #[allow(clippy::too_many_arguments)] async fn calculate_state_incremental( - services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, next_batchcount: PduCount, - lazy_load_send_redundant: bool, full_state: bool, device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, - current_shortstatehash: ShortStateHash, timeline_pdus: &Vec<(PduCount, PduEvent)>, joined_since_last_sync: bool, + services: &Services, + sender_user: &UserId, + sender_device: &DeviceId, + room_id: &RoomId, + next_batchcount: PduCount, + lazy_load_send_redundant: bool, + full_state: bool, + device_list_updates: &mut HashSet, + left_encrypted_users: &mut HashSet, + since_shortstatehash: Option, + current_shortstatehash: ShortStateHash, + timeline_pdus: &Vec<(PduCount, PduEvent)>, + joined_since_last_sync: bool, ) -> Result { // Incremental /sync - let since_shortstatehash = since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); + let since_shortstatehash = + since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); let mut delta_state_events = Vec::new(); @@ -994,8 +1037,10 @@ async fn calculate_state_incremental( .state_accessor .state_full_ids(since_shortstatehash); - let (current_state_ids, since_state_ids): (HashMap<_, OwnedEventId>, HashMap<_, OwnedEventId>) = - try_join(current_state_ids, since_state_ids).await?; + let (current_state_ids, since_state_ids): ( + HashMap<_, OwnedEventId>, + HashMap<_, OwnedEventId>, + ) = try_join(current_state_ids, 
since_state_ids).await?; current_state_ids .iter() @@ -1044,17 +1089,19 @@ async fn calculate_state_incremental( let content: RoomMemberEventContent = state_event.get_content()?; match content.membership { - MembershipState::Join => { + | MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(services, sender_user, &user_id, Some(room_id)).await { + if !share_encrypted_room(services, sender_user, &user_id, Some(room_id)) + .await + { device_list_updates.insert(user_id); } }, - MembershipState::Leave => { + | MembershipState::Leave => { // Write down users that have left encrypted rooms we are in left_encrypted_users.insert(user_id); }, - _ => {}, + | _ => {}, } } } @@ -1139,10 +1186,13 @@ async fn calculate_state_incremental( state_events.push(member_event); } - services - .rooms - .lazy_loading - .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount); + services.rooms.lazy_loading.lazy_load_mark_sent( + sender_user, + sender_device, + room_id, + lazy_loaded, + next_batchcount, + ); Ok(StateChanges { heroes, @@ -1154,7 +1204,9 @@ async fn calculate_state_incremental( } async fn calculate_counts( - services: &Services, room_id: &RoomId, sender_user: &UserId, + services: &Services, + room_id: &RoomId, + sender_user: &UserId, ) -> Result<(Option, Option, Option>)> { let joined_member_count = services .rooms @@ -1168,7 +1220,8 @@ async fn calculate_counts( .room_invited_count(room_id) .unwrap_or(0); - let (joined_member_count, invited_member_count) = join(joined_member_count, invited_member_count).await; + let (joined_member_count, invited_member_count) = + join(joined_member_count, invited_member_count).await; let small_room = joined_member_count.saturating_add(invited_member_count) > 5; @@ -1179,20 +1232,32 @@ async fn calculate_counts( Ok((Some(joined_member_count), Some(invited_member_count), heroes.await)) } -async fn calculate_heroes(services: &Services, room_id: &RoomId, sender_user: &UserId) 
-> Vec { +async fn calculate_heroes( + services: &Services, + room_id: &RoomId, + sender_user: &UserId, +) -> Vec { services .rooms .timeline .all_pdus(sender_user, room_id) .ready_filter(|(_, pdu)| pdu.kind == RoomMember) - .fold_default(|heroes: Vec<_>, (_, pdu)| fold_hero(heroes, services, room_id, sender_user, pdu)) + .fold_default(|heroes: Vec<_>, (_, pdu)| { + fold_hero(heroes, services, room_id, sender_user, pdu) + }) .await } async fn fold_hero( - mut heroes: Vec, services: &Services, room_id: &RoomId, sender_user: &UserId, pdu: PduEvent, + mut heroes: Vec, + services: &Services, + room_id: &RoomId, + sender_user: &UserId, + pdu: PduEvent, ) -> Vec { - let Some(user_id): Option<&UserId> = pdu.state_key.as_deref().map(TryInto::try_into).flat_ok() else { + let Some(user_id): Option<&UserId> = + pdu.state_key.as_deref().map(TryInto::try_into).flat_ok() + else { return heroes; }; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 140b647d..7e24adff 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -46,7 +46,8 @@ const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = /// /// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`) pub(crate) async fn sync_events_v4_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); @@ -81,16 +82,19 @@ pub(crate) async fn sync_events_v4_route( } if globalsince == 0 { - services - .sync - .forget_sync_request_connection(sender_user.clone(), sender_device.clone(), conn_id.clone()); + services.sync.forget_sync_request_connection( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ); } // Get sticky parameters from cache - let known_rooms = - services - .sync - .update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body); + let known_rooms = 
services.sync.update_sync_request_with_cache( + sender_user.clone(), + sender_device.clone(), + &mut body, + ); let all_joined_rooms: Vec<_> = services .rooms @@ -125,9 +129,7 @@ pub(crate) async fn sync_events_v4_route( let mut device_list_changes = HashSet::new(); let mut device_list_left = HashSet::new(); - let mut receipts = sync_events::v4::Receipts { - rooms: BTreeMap::new(), - }; + let mut receipts = sync_events::v4::Receipts { rooms: BTreeMap::new() }; let mut account_data = sync_events::v4::AccountData { global: Vec::new(), @@ -168,7 +170,9 @@ pub(crate) async fn sync_events_v4_route( ); for room_id in &all_joined_rooms { - let Ok(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id).await else { + let Ok(current_shortstatehash) = + services.rooms.state.get_room_shortstatehash(room_id).await + else { error!("Room {room_id} has no state"); continue; }; @@ -202,12 +206,17 @@ pub(crate) async fn sync_events_v4_route( let since_sender_member: Option = services .rooms .state_accessor - .state_get_content(since_shortstatehash, &StateEventType::RoomMember, sender_user.as_str()) + .state_get_content( + since_shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) .ok() .await; - let joined_since_last_sync = - since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + let joined_since_last_sync = since_sender_member + .as_ref() + .is_none_or(|member| member.membership != MembershipState::Join); let new_encrypted_room = encrypted_room && since_encryption.is_err(); @@ -232,8 +241,10 @@ pub(crate) async fn sync_events_v4_route( }; if pdu.kind == RoomMember { if let Some(state_key) = &pdu.state_key { - let user_id = UserId::parse(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + let user_id = + UserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; if user_id == *sender_user { continue; @@ -241,19 
+252,25 @@ pub(crate) async fn sync_events_v4_route( let content: RoomMemberEventContent = pdu.get_content()?; match content.membership { - MembershipState::Join => { + | MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(&services, sender_user, &user_id, Some(room_id)) - .await + if !share_encrypted_room( + &services, + sender_user, + &user_id, + Some(room_id), + ) + .await { device_list_changes.insert(user_id); } }, - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in + | MembershipState::Leave => { + // Write down users that have left encrypted rooms we + // are in left_encrypted_users.insert(user_id); }, - _ => {}, + | _ => {}, } } } @@ -293,7 +310,8 @@ pub(crate) async fn sync_events_v4_route( } for user_id in left_encrypted_users { - let dont_share_encrypted_room = !share_encrypted_room(&services, sender_user, &user_id, None).await; + let dont_share_encrypted_room = + !share_encrypted_room(&services, sender_user, &user_id, None).await; // If the user doesn't share an encrypted room with the target anymore, we need // to tell them @@ -308,85 +326,85 @@ pub(crate) async fn sync_events_v4_route( for (list_id, list) in &body.lists { let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { - Some(true) => &all_invited_rooms, - Some(false) => &all_joined_rooms, - None => &all_rooms, + | Some(true) => &all_invited_rooms, + | Some(false) => &all_joined_rooms, + | None => &all_rooms, }; let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { - Some(filter) if filter.is_empty() => active_rooms.clone(), - Some(value) => filter_rooms(&services, active_rooms, &value, true).await, - None => active_rooms.clone(), + | Some(filter) if filter.is_empty() => active_rooms.clone(), + | Some(value) => filter_rooms(&services, active_rooms, &value, true).await, + | None => active_rooms.clone(), }; let active_rooms = match list.filters.clone().map(|f| f.room_types) { - 
Some(filter) if filter.is_empty() => active_rooms.clone(), - Some(value) => filter_rooms(&services, &active_rooms, &value, false).await, - None => active_rooms, + | Some(filter) if filter.is_empty() => active_rooms.clone(), + | Some(value) => filter_rooms(&services, &active_rooms, &value, false).await, + | None => active_rooms, }; let mut new_known_rooms = BTreeSet::new(); let ranges = list.ranges.clone(); - lists.insert( - list_id.clone(), - sync_events::v4::SyncList { - ops: ranges - .into_iter() - .map(|mut r| { - r.0 = r.0.clamp( - uint!(0), - UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX), + lists.insert(list_id.clone(), sync_events::v4::SyncList { + ops: ranges + .into_iter() + .map(|mut r| { + r.0 = r.0.clamp( + uint!(0), + UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX), + ); + r.1 = r.1.clamp( + r.0, + UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX), + ); + + let room_ids = if !active_rooms.is_empty() { + active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec() + } else { + Vec::new() + }; + + new_known_rooms.extend(room_ids.iter().cloned()); + for room_id in &room_ids { + let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( + BTreeSet::new(), + 0_usize, + u64::MAX, + )); + + let limit: usize = list + .room_details + .timeline_limit + .map(u64::from) + .map_or(10, usize_from_u64_truncated) + .min(100); + + todo_room + .0 + .extend(list.room_details.required_state.iter().cloned()); + + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get(list_id.as_str()) + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), ); - r.1 = - r.1.clamp(r.0, UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX)); - - let room_ids = if !active_rooms.is_empty() { - active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec() - } else { - Vec::new() - }; - - 
new_known_rooms.extend(room_ids.iter().cloned()); - for room_id in &room_ids { - let todo_room = - todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); - - let limit: usize = list - .room_details - .timeline_limit - .map(u64::from) - .map_or(10, usize_from_u64_truncated) - .min(100); - - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); - - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get(list_id.as_str()) - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - } - sync_events::v4::SyncOp { - op: SlidingOp::Sync, - range: Some(r), - index: None, - room_ids, - room_id: None, - } - }) - .collect(), - count: ruma_from_usize(active_rooms.len()), - }, - ); + } + sync_events::v4::SyncOp { + op: SlidingOp::Sync, + range: Some(r), + index: None, + room_ids, + room_id: None, + } + }) + .collect(), + count: ruma_from_usize(active_rooms.len()), + }); if let Some(conn_id) = &body.conn_id { services.sync.update_sync_known_rooms( @@ -405,9 +423,10 @@ pub(crate) async fn sync_events_v4_route( if !services.rooms.metadata.exists(room_id).await { continue; } - let todo_room = todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); + let todo_room = + todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); let limit: usize = room .timeline_limit @@ -471,14 +490,22 @@ pub(crate) async fn sync_events_v4_route( (timeline_pdus, limited) = (Vec::new(), true); } else { - (timeline_pdus, limited) = - match load_timeline(&services, sender_user, room_id, roomsincecount, None, *timeline_limit).await { - Ok(value) => value, - Err(err) => { - warn!("Encountered missing timeline in {}, error {}", room_id, err); - continue; - }, - }; + (timeline_pdus, limited) = match load_timeline( + &services, + sender_user, + room_id, + roomsincecount, + None, + *timeline_limit, + ) + 
.await + { + | Ok(value) => value, + | Err(err) => { + warn!("Encountered missing timeline in {}, error {}", room_id, err); + continue; + }, + }; } account_data.rooms.insert( @@ -543,11 +570,11 @@ pub(crate) async fn sync_events_v4_route( .first() .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { + | PduCount::Backfilled(_) => { error!("timeline in backfill state?!"); "0".to_owned() }, - PduCount::Normal(c) => c.to_string(), + | PduCount::Normal(c) => c.to_string(), })) })? .or_else(|| { @@ -568,7 +595,9 @@ pub(crate) async fn sync_events_v4_route( for (_, pdu) in timeline_pdus { let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts); - if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) && timestamp.is_none_or(|time| time <= ts) { + if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) + && timestamp.is_none_or(|time| time <= ts) + { timestamp = Some(ts); } } @@ -611,7 +640,7 @@ pub(crate) async fn sync_events_v4_route( .await; let name = match heroes.len().cmp(&(1_usize)) { - Ordering::Greater => { + | Ordering::Greater => { let firsts = heroes[1..] 
.iter() .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) @@ -625,13 +654,13 @@ pub(crate) async fn sync_events_v4_route( Some(format!("{firsts} and {last}")) }, - Ordering::Equal => Some( + | Ordering::Equal => Some( heroes[0] .name .clone() .unwrap_or_else(|| heroes[0].user_id.to_string()), ), - Ordering::Less => None, + | Ordering::Less => None, }; let heroes_avatar = if heroes.len() == 1 { @@ -640,77 +669,74 @@ pub(crate) async fn sync_events_v4_route( None }; - rooms.insert( - room_id.clone(), - sync_events::v4::SlidingSyncRoom { - name: services - .rooms - .state_accessor - .get_name(room_id) - .await - .ok() - .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { - ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), - ruma::JsOption::Null => ruma::JsOption::Null, - ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } - }, - initial: Some(roomsince == &0), - is_dm: None, - invite_state, - unread_notifications: UnreadNotificationsCount { - highlight_count: Some( - services - .rooms - .user - .highlight_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), - ), - notification_count: Some( - services - .rooms - .user - .notification_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), - ), - }, - timeline: room_events, - required_state, - prev_batch, - limited, - joined_count: Some( - services - .rooms - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0) - .try_into() - .unwrap_or_else(|_| uint!(0)), - ), - invited_count: Some( - services - .rooms - .state_cache - .room_invited_count(room_id) - .await - .unwrap_or(0) - .try_into() - .unwrap_or_else(|_| uint!(0)), - ), - num_live: None, // Count events in timeline greater than global sync counter - timestamp, - heroes: 
Some(heroes), + rooms.insert(room_id.clone(), sync_events::v4::SlidingSyncRoom { + name: services + .rooms + .state_accessor + .get_name(room_id) + .await + .ok() + .or(name), + avatar: if let Some(heroes_avatar) = heroes_avatar { + ruma::JsOption::Some(heroes_avatar) + } else { + match services.rooms.state_accessor.get_avatar(room_id).await { + | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), + | ruma::JsOption::Null => ruma::JsOption::Null, + | ruma::JsOption::Undefined => ruma::JsOption::Undefined, + } }, - ); + initial: Some(roomsince == &0), + is_dm: None, + invite_state, + unread_notifications: UnreadNotificationsCount { + highlight_count: Some( + services + .rooms + .user + .highlight_count(sender_user, room_id) + .await + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services + .rooms + .user + .notification_count(sender_user, room_id) + .await + .try_into() + .expect("notification count can't go that high"), + ), + }, + timeline: room_events, + required_state, + prev_batch, + limited, + joined_count: Some( + services + .rooms + .state_cache + .room_joined_count(room_id) + .await + .unwrap_or(0) + .try_into() + .unwrap_or_else(|_| uint!(0)), + ), + invited_count: Some( + services + .rooms + .state_cache + .room_invited_count(room_id) + .await + .unwrap_or(0) + .try_into() + .unwrap_or_else(|_| uint!(0)), + ), + num_live: None, // Count events in timeline greater than global sync counter + timestamp, + heroes: Some(heroes), + }); } if rooms @@ -757,16 +783,17 @@ pub(crate) async fn sync_events_v4_route( }, account_data, receipts, - typing: sync_events::v4::Typing { - rooms: BTreeMap::new(), - }, + typing: sync_events::v4::Typing { rooms: BTreeMap::new() }, }, delta_token: None, }) } async fn filter_rooms( - services: &Services, rooms: &[OwnedRoomId], filter: &[RoomTypeFilter], negate: bool, + services: &Services, + rooms: &[OwnedRoomId], + filter: &[RoomTypeFilter], + negate: 
bool, ) -> Vec { rooms .iter() diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index b5fa19e3..820ee4a1 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -17,7 +17,8 @@ use crate::{Result, Ruma}; /// /// - Inserts the tag into the tag event of the room account data. pub(crate) async fn update_tag_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -26,9 +27,7 @@ pub(crate) async fn update_tag_route( .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) .await .unwrap_or(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + content: TagEventContent { tags: BTreeMap::new() }, }); tags_event @@ -55,7 +54,8 @@ pub(crate) async fn update_tag_route( /// /// - Removes the tag from the tag event of the room account data. pub(crate) async fn delete_tag_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -64,9 +64,7 @@ pub(crate) async fn delete_tag_route( .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) .await .unwrap_or(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + content: TagEventContent { tags: BTreeMap::new() }, }); tags_event.content.tags.remove(&body.tag.clone().into()); @@ -90,7 +88,8 @@ pub(crate) async fn delete_tag_route( /// /// - Gets the tag event of the room account data. 
pub(crate) async fn get_tags_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -99,12 +98,8 @@ pub(crate) async fn get_tags_route( .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) .await .unwrap_or(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + content: TagEventContent { tags: BTreeMap::new() }, }); - Ok(get_tags::v3::Response { - tags: tags_event.content.tags, - }) + Ok(get_tags::v3::Response { tags: tags_event.content.tags }) } diff --git a/src/api/client/thirdparty.rs b/src/api/client/thirdparty.rs index f6af8729..790b27d3 100644 --- a/src/api/client/thirdparty.rs +++ b/src/api/client/thirdparty.rs @@ -11,9 +11,7 @@ pub(crate) async fn get_protocols_route( _body: Ruma, ) -> Result { // TODO - Ok(get_protocols::v3::Response { - protocols: BTreeMap::new(), - }) + Ok(get_protocols::v3::Response { protocols: BTreeMap::new() }) } /// # `GET /_matrix/client/unstable/thirdparty/protocols` diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 07badaf7..f0cbf467 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -7,7 +7,8 @@ use crate::{Result, Ruma}; /// # `GET /_matrix/client/r0/rooms/{roomId}/threads` pub(crate) async fn get_threads_route( - State(services): State, ref body: Ruma, + State(services): State, + ref body: Ruma, ) -> Result { // Use limit or else 10, with maximum 100 let limit = body diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index aab59394..2ded04e7 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -17,7 +17,8 @@ use crate::Ruma; /// /// Send a to-device event to a set of client devices. 
pub(crate) async fn send_event_to_device_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -43,12 +44,14 @@ pub(crate) async fn send_event_to_device_route( services.sending.send_edu_server( target_user_id.server_name(), - serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(DirectDeviceContent { - sender: sender_user.clone(), - ev_type: body.event_type.clone(), - message_id: count.to_string().into(), - messages, - })) + serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( + DirectDeviceContent { + sender: sender_user.clone(), + ev_type: body.event_type.clone(), + message_id: count.to_string().into(), + messages, + }, + )) .expect("DirectToDevice EDU can be serialized"), )?; @@ -62,14 +65,20 @@ pub(crate) async fn send_event_to_device_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?; match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => { + | DeviceIdOrAllDevices::DeviceId(target_device_id) => { services .users - .add_to_device_event(sender_user, target_user_id, target_device_id, event_type, event) + .add_to_device_event( + sender_user, + target_user_id, + target_device_id, + event_type, + event, + ) .await; }, - DeviceIdOrAllDevices::AllDevices => { + | DeviceIdOrAllDevices::AllDevices => { let (event_type, event) = (&event_type, &event); services .users diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index 932d221e..4ae3ac5e 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -7,7 +7,8 @@ use crate::{utils, Error, Result, Ruma}; /// /// Sets the typing state of the sender user. 
pub(crate) async fn create_typing_event_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { use create_typing_event::v3::Typing; diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index c546d6a7..2c9add44 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -10,8 +10,8 @@ use ruma::{ error::ErrorKind, membership::mutual_rooms, profile::{ - delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key, set_profile_key, - set_timezone_key, + delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key, + set_profile_key, set_timezone_key, }, room::get_summary, }, @@ -34,7 +34,8 @@ use crate::{Error, Result, Ruma, RumaResponse}; /// An implementation of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) #[tracing::instrument(skip_all, fields(%client), name = "mutual_rooms")] pub(crate) async fn get_mutual_rooms_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -47,10 +48,7 @@ pub(crate) async fn get_mutual_rooms_route( } if !services.users.exists(&body.user_id).await { - return Ok(mutual_rooms::unstable::Response { - joined: vec![], - next_batch_token: None, - }); + return Ok(mutual_rooms::unstable::Response { joined: vec![], next_batch_token: None }); } let mutual_rooms: Vec = services @@ -77,7 +75,8 @@ pub(crate) async fn get_mutual_rooms_route( /// /// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) pub(crate) async fn get_room_summary_legacy( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result> { get_room_summary(State(services), InsecureClientIp(client), body) @@ -94,7 
+93,8 @@ pub(crate) async fn get_room_summary_legacy( /// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) #[tracing::instrument(skip_all, fields(%client), name = "room_summary")] pub(crate) async fn get_room_summary( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref(); @@ -194,7 +194,8 @@ pub(crate) async fn get_room_summary( /// /// - Also makes sure other users receive the update using presence EDUs pub(crate) async fn delete_timezone_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -221,7 +222,8 @@ pub(crate) async fn delete_timezone_key_route( /// /// - Also makes sure other users receive the update using presence EDUs pub(crate) async fn set_timezone_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -248,7 +250,8 @@ pub(crate) async fn set_timezone_key_route( /// /// This also handles the avatar_url and displayname being updated. 
pub(crate) async fn set_profile_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -264,7 +267,9 @@ pub(crate) async fn set_profile_key_route( if body.kv_pair.len() > 1 { // TODO: support PATCH or "recursively" adding keys in some sort - return Err!(Request(BadJson("This endpoint can only take one key-value pair at a time"))); + return Err!(Request(BadJson( + "This endpoint can only take one key-value pair at a time" + ))); } let Some(profile_key_value) = body.kv_pair.get(&body.key) else { @@ -294,7 +299,13 @@ pub(crate) async fn set_profile_key_route( .collect() .await; - update_displayname(&services, &body.user_id, Some(profile_key_value.to_string()), &all_joined_rooms).await; + update_displayname( + &services, + &body.user_id, + Some(profile_key_value.to_string()), + &all_joined_rooms, + ) + .await; } else if body.key == "avatar_url" { let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string()); @@ -330,7 +341,8 @@ pub(crate) async fn set_profile_key_route( /// /// This also handles the avatar_url and displayname being updated. 
pub(crate) async fn delete_profile_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -340,7 +352,9 @@ pub(crate) async fn delete_profile_key_route( if body.kv_pair.len() > 1 { // TODO: support PATCH or "recursively" adding keys in some sort - return Err!(Request(BadJson("This endpoint can only take one key-value pair at a time"))); + return Err!(Request(BadJson( + "This endpoint can only take one key-value pair at a time" + ))); } if body.key == "displayname" { @@ -387,7 +401,8 @@ pub(crate) async fn delete_profile_key_route( /// - If user is on another server and we do not have a local copy already fetch /// `timezone` over federation pub(crate) async fn get_timezone_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.globals.user_is_local(&body.user_id) { // Create and update our local copy of the user @@ -422,9 +437,7 @@ pub(crate) async fn get_timezone_key_route( .users .set_timezone(&body.user_id, response.tz.clone()); - return Ok(get_timezone_key::unstable::Response { - tz: response.tz, - }); + return Ok(get_timezone_key::unstable::Response { tz: response.tz }); } } @@ -446,7 +459,8 @@ pub(crate) async fn get_timezone_key_route( /// - If user is on another server and we do not have a local copy already fetch /// `timezone` over federation pub(crate) async fn get_profile_key_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let mut profile_key_value: BTreeMap = BTreeMap::new(); @@ -492,9 +506,7 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("The requested profile key does not exist."))); } - return Ok(get_profile_key::unstable::Response { - value: profile_key_value, - }); + return Ok(get_profile_key::unstable::Response { value: profile_key_value }); } } @@ -510,7 +522,5 @@ pub(crate) async 
fn get_profile_key_route( return Err!(Request(NotFound("The requested profile key does not exist."))); } - Ok(get_profile_key::unstable::Response { - value: profile_key_value, - }) + Ok(get_profile_key::unstable::Response { value: profile_key_value }) } diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 91fe5a7c..b4856d72 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -74,7 +74,9 @@ pub(crate) async fn conduwuit_server_version() -> Result { /// conduwuit-specific API to return the amount of users registered on this /// homeserver. Endpoint is disabled if federation is disabled for privacy. This /// only includes active users (not deactivated, no guests, etc) -pub(crate) async fn conduwuit_local_user_count(State(services): State) -> Result { +pub(crate) async fn conduwuit_local_user_count( + State(services): State, +) -> Result { let user_count = services.users.list_local_users().count().await; Ok(Json(serde_json::json!({ diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index bc7460f0..182e30db 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -18,7 +18,8 @@ use crate::{Result, Ruma}; /// - Hides any local users that aren't in any public rooms (i.e. 
those that /// have the join rule set to public) and don't share a room with the sender pub(crate) async fn search_users_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let limit = usize::try_from(body.limit).map_or(10, usize::from).min(100); // default limit is 10 @@ -61,7 +62,11 @@ pub(crate) async fn search_users_route( services .rooms .state_accessor - .room_state_get_content::(room, &StateEventType::RoomJoinRules, "") + .room_state_get_content::( + room, + &StateEventType::RoomJoinRules, + "", + ) .map_ok_or(false, |content| content.join_rule == JoinRule::Public) }) .await; @@ -89,8 +94,5 @@ pub(crate) async fn search_users_route( let results = users.take(limit).collect().await; - Ok(search_users::v3::Response { - results, - limited, - }) + Ok(search_users::v3::Response { results, limited }) } diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index b41cc8a1..ec804570 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -17,7 +17,8 @@ type HmacSha1 = Hmac; /// /// TODO: Returns information about the recommended turn server. 
pub(crate) async fn turn_server_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { // MSC4166: return M_NOT_FOUND 404 if no TURN URIs are specified in any way if services.server.config.turn_uris.is_empty() { @@ -44,7 +45,8 @@ pub(crate) async fn turn_server_route( let username: String = format!("{}:{}", expiry.get(), user); - let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes()).expect("HMAC can take key of any size"); + let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes()) + .expect("HMAC can take key of any size"); mac.update(username.as_bytes()); let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes()); diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index b66217e8..5c53d013 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -13,21 +13,18 @@ use crate::{Error, Result, Ruma}; /// /// Returns the .well-known URL if it is configured, otherwise returns 404. pub(crate) async fn well_known_client( - State(services): State, _body: Ruma, + State(services): State, + _body: Ruma, ) -> Result { let client_url = match services.server.config.well_known.client.as_ref() { - Some(url) => url.to_string(), - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + | Some(url) => url.to_string(), + | None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), }; Ok(discover_homeserver::Response { - homeserver: HomeserverInfo { - base_url: client_url.clone(), - }, + homeserver: HomeserverInfo { base_url: client_url.clone() }, identity_server: None, - sliding_sync_proxy: Some(SlidingSyncProxyInfo { - url: client_url, - }), + sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }), tile_server: None, }) } @@ -36,7 +33,8 @@ pub(crate) async fn well_known_client( /// /// Server support contact and support page of a homeserver's domain. 
pub(crate) async fn well_known_support( - State(services): State, _body: Ruma, + State(services): State, + _body: Ruma, ) -> Result { let support_page = services .server @@ -65,11 +63,7 @@ pub(crate) async fn well_known_support( let mut contacts: Vec = vec![]; if let Some(role) = role { - let contact = Contact { - role, - email_address, - matrix_id, - }; + let contact = Contact { role, email_address, matrix_id }; contacts.push(contact); } @@ -79,22 +73,21 @@ pub(crate) async fn well_known_support( return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); } - Ok(discover_support::Response { - contacts, - support_page, - }) + Ok(discover_support::Response { contacts, support_page }) } /// # `GET /client/server.json` /// /// Endpoint provided by sliding sync proxy used by some clients such as Element /// Web as a non-standard health check. -pub(crate) async fn syncv3_client_server_json(State(services): State) -> Result { +pub(crate) async fn syncv3_client_server_json( + State(services): State, +) -> Result { let server_url = match services.server.config.well_known.client.as_ref() { - Some(url) => url.to_string(), - None => match services.server.config.well_known.server.as_ref() { - Some(url) => url.to_string(), - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + | Some(url) => url.to_string(), + | None => match services.server.config.well_known.server.as_ref() { + | Some(url) => url.to_string(), + | None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), }, }; diff --git a/src/api/router.rs b/src/api/router.rs index 99e066e4..1b38670d 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -190,7 +190,10 @@ pub fn build(router: Router, server: &Server) -> Router { router = router .ruma_route(&server::get_server_version_route) .route("/_matrix/key/v2/server", get(server::get_server_keys_route)) - .route("/_matrix/key/v2/server/:key_id", get(server::get_server_keys_deprecated_route)) + .route( + 
"/_matrix/key/v2/server/:key_id", + get(server::get_server_keys_deprecated_route), + ) .ruma_route(&server::get_public_rooms_route) .ruma_route(&server::get_public_rooms_filtered_route) .ruma_route(&server::send_transaction_message_route) @@ -284,6 +287,10 @@ async fn redirect_legacy_preview(uri: Uri) -> impl IntoResponse { Redirect::temporary(&uri) } -async fn legacy_media_disabled() -> impl IntoResponse { err!(Request(Forbidden("Unauthenticated media is disabled."))) } +async fn legacy_media_disabled() -> impl IntoResponse { + err!(Request(Forbidden("Unauthenticated media is disabled."))) +} -async fn federation_disabled() -> impl IntoResponse { err!(Request(Forbidden("Federation is disabled."))) } +async fn federation_disabled() -> impl IntoResponse { + err!(Request(Forbidden("Federation is disabled."))) +} diff --git a/src/api/router/args.rs b/src/api/router/args.rs index f5666ed6..582f0c56 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -4,8 +4,8 @@ use axum::{async_trait, body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; use conduwuit::{debug, debug_warn, err, trace, utils::string::EMPTY, Error, Result}; use ruma::{ - api::IncomingRequest, CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, - OwnedUserId, ServerName, UserId, + api::IncomingRequest, CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, + OwnedServerName, OwnedUserId, ServerName, UserId, }; use service::Services; @@ -43,7 +43,9 @@ where T: IncomingRequest + Send + Sync + 'static, { #[inline] - pub(crate) fn sender(&self) -> (&UserId, &DeviceId) { (self.sender_user(), self.sender_device()) } + pub(crate) fn sender(&self) -> (&UserId, &DeviceId) { + (self.sender_user(), self.sender_device()) + } #[inline] pub(crate) fn sender_user(&self) -> &UserId { @@ -83,7 +85,10 @@ where { type Rejection = Error; - async fn from_request(request: hyper::Request, services: &State) -> Result { + async fn from_request( + 
request: hyper::Request, + services: &State, + ) -> Result { let mut request = request::from(services, request).await?; let mut json_body = serde_json::from_slice::(&request.body).ok(); @@ -96,7 +101,10 @@ where && !request.parts.uri.path().contains("/media/") { trace!("json_body from_request: {:?}", json_body.clone()); - debug_warn!("received a POST request with an empty body, defaulting/assuming to {{}} like Synapse does"); + debug_warn!( + "received a POST request with an empty body, defaulting/assuming to {{}} like \ + Synapse does" + ); json_body = Some(CanonicalJsonValue::Object(CanonicalJsonObject::new())); } let auth = auth::auth(services, &mut request, json_body.as_ref(), &T::METADATA).await?; @@ -112,14 +120,18 @@ where } fn make_body( - services: &Services, request: &mut Request, json_body: Option<&mut CanonicalJsonValue>, auth: &Auth, + services: &Services, + request: &mut Request, + json_body: Option<&mut CanonicalJsonValue>, + auth: &Auth, ) -> Result where T: IncomingRequest, { let body = take_body(services, request, json_body, auth); let http_request = into_http_request(request, body); - T::try_from_http_request(http_request, &request.path).map_err(|e| err!(Request(BadJson(debug_warn!("{e}"))))) + T::try_from_http_request(http_request, &request.path) + .map_err(|e| err!(Request(BadJson(debug_warn!("{e}"))))) } fn into_http_request(request: &Request, body: Bytes) -> hyper::Request { @@ -141,7 +153,10 @@ fn into_http_request(request: &Request, body: Bytes) -> hyper::Request { #[allow(clippy::needless_pass_by_value)] fn take_body( - services: &Services, request: &mut Request, json_body: Option<&mut CanonicalJsonValue>, auth: &Auth, + services: &Services, + request: &mut Request, + json_body: Option<&mut CanonicalJsonValue>, + auth: &Auth, ) -> Bytes { let Some(CanonicalJsonValue::Object(json_body)) = json_body else { return mem::take(&mut request.body); diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 243fd46b..c5b040e0 100644 --- 
a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -10,7 +10,9 @@ use ruma::{ client::{ directory::get_public_rooms, error::ErrorKind, - profile::{get_avatar_url, get_display_name, get_profile, get_profile_key, get_timezone_key}, + profile::{ + get_avatar_url, get_display_name, get_profile, get_profile_key, get_timezone_key, + }, voip::get_turn_server_info, }, federation::openid::get_openid_userinfo, @@ -42,12 +44,15 @@ pub(super) struct Auth { } pub(super) async fn auth( - services: &Services, request: &mut Request, json_body: Option<&CanonicalJsonValue>, metadata: &Metadata, + services: &Services, + request: &mut Request, + json_body: Option<&CanonicalJsonValue>, + metadata: &Metadata, ) -> Result { let bearer: Option>> = request.parts.extract().await?; let token = match &bearer { - Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), - None => request.query.access_token.as_deref(), + | Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), + | None => request.query.access_token.as_deref(), }; let token = if let Some(token) = token { @@ -64,56 +69,64 @@ pub(super) async fn auth( if metadata.authentication == AuthScheme::None { match metadata { - &get_public_rooms::v3::Request::METADATA => { + | &get_public_rooms::v3::Request::METADATA => { if !services .globals .config .allow_public_room_directory_without_auth { match token { - Token::Appservice(_) | Token::User(_) => { + | Token::Appservice(_) | Token::User(_) => { // we should have validated the token above // already }, - Token::None | Token::Invalid => { - return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing or invalid access token.")); + | Token::None | Token::Invalid => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing or invalid access token.", + )); }, } } }, - &get_profile::v3::Request::METADATA + | &get_profile::v3::Request::METADATA | &get_profile_key::unstable::Request::METADATA | &get_display_name::v3::Request::METADATA | 
&get_avatar_url::v3::Request::METADATA | &get_timezone_key::unstable::Request::METADATA => { if services.globals.config.require_auth_for_profile_requests { match token { - Token::Appservice(_) | Token::User(_) => { + | Token::Appservice(_) | Token::User(_) => { // we should have validated the token above // already }, - Token::None | Token::Invalid => { - return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing or invalid access token.")); + | Token::None | Token::Invalid => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing or invalid access token.", + )); }, } } }, - _ => {}, + | _ => {}, }; } match (metadata.authentication, token) { - (AuthScheme::AccessToken, Token::Appservice(info)) => Ok(auth_appservice(services, request, info).await?), - (AuthScheme::None | AuthScheme::AccessTokenOptional | AuthScheme::AppserviceToken, Token::Appservice(info)) => { - Ok(Auth { - origin: None, - sender_user: None, - sender_device: None, - appservice_info: Some(*info), - }) - }, - (AuthScheme::AccessToken, Token::None) => match metadata { - &get_turn_server_info::v3::Request::METADATA => { + | (AuthScheme::AccessToken, Token::Appservice(info)) => + Ok(auth_appservice(services, request, info).await?), + | ( + AuthScheme::None | AuthScheme::AccessTokenOptional | AuthScheme::AppserviceToken, + Token::Appservice(info), + ) => Ok(Auth { + origin: None, + sender_user: None, + sender_device: None, + appservice_info: Some(*info), + }), + | (AuthScheme::AccessToken, Token::None) => match metadata { + | &get_turn_server_info::v3::Request::METADATA => { if services.globals.config.turn_allow_guests { Ok(Auth { origin: None, @@ -125,9 +138,9 @@ pub(super) async fn auth( Err(Error::BadRequest(ErrorKind::MissingToken, "Missing access token.")) } }, - _ => Err(Error::BadRequest(ErrorKind::MissingToken, "Missing access token.")), + | _ => Err(Error::BadRequest(ErrorKind::MissingToken, "Missing access token.")), }, - ( + | ( AuthScheme::AccessToken | 
AuthScheme::AccessTokenOptional | AuthScheme::None, Token::User((user_id, device_id)), ) => Ok(Auth { @@ -136,26 +149,33 @@ pub(super) async fn auth( sender_device: Some(device_id), appservice_info: None, }), - (AuthScheme::ServerSignatures, Token::None) => Ok(auth_server(services, request, json_body).await?), - (AuthScheme::None | AuthScheme::AppserviceToken | AuthScheme::AccessTokenOptional, Token::None) => Ok(Auth { + | (AuthScheme::ServerSignatures, Token::None) => + Ok(auth_server(services, request, json_body).await?), + | ( + AuthScheme::None | AuthScheme::AppserviceToken | AuthScheme::AccessTokenOptional, + Token::None, + ) => Ok(Auth { sender_user: None, sender_device: None, origin: None, appservice_info: None, }), - (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => Err(Error::BadRequest( - ErrorKind::Unauthorized, - "Only server signatures should be used on this endpoint.", - )), - (AuthScheme::AppserviceToken, Token::User(_)) => Err(Error::BadRequest( + | (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => + Err(Error::BadRequest( + ErrorKind::Unauthorized, + "Only server signatures should be used on this endpoint.", + )), + | (AuthScheme::AppserviceToken, Token::User(_)) => Err(Error::BadRequest( ErrorKind::Unauthorized, "Only appservice access tokens should be used on this endpoint.", )), - (AuthScheme::None, Token::Invalid) => { + | (AuthScheme::None, Token::Invalid) => { // OpenID federation endpoint uses a query param with the same name, drop this // once query params for user auth are removed from the spec. This is // required to make integration manager work. 
- if request.query.access_token.is_some() && metadata == &get_openid_userinfo::v1::Request::METADATA { + if request.query.access_token.is_some() + && metadata == &get_openid_userinfo::v1::Request::METADATA + { Ok(Auth { origin: None, sender_user: None, @@ -164,25 +184,29 @@ pub(super) async fn auth( }) } else { Err(Error::BadRequest( - ErrorKind::UnknownToken { - soft_logout: false, - }, + ErrorKind::UnknownToken { soft_logout: false }, "Unknown access token.", )) } }, - (_, Token::Invalid) => Err(Error::BadRequest( - ErrorKind::UnknownToken { - soft_logout: false, - }, + | (_, Token::Invalid) => Err(Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, "Unknown access token.", )), } } -async fn auth_appservice(services: &Services, request: &Request, info: Box) -> Result { - let user_id_default = - || UserId::parse_with_server_name(info.registration.sender_localpart.as_str(), services.globals.server_name()); +async fn auth_appservice( + services: &Services, + request: &Request, + info: Box, +) -> Result { + let user_id_default = || { + UserId::parse_with_server_name( + info.registration.sender_localpart.as_str(), + services.globals.server_name(), + ) + }; let Ok(user_id) = request .query @@ -205,7 +229,11 @@ async fn auth_appservice(services: &Services, request: &Request, info: Box) -> Result { +async fn auth_server( + services: &Services, + request: &mut Request, + body: Option<&CanonicalJsonValue>, +) -> Result { type Member = (String, CanonicalJsonValue); type Object = CanonicalJsonObject; type Value = CanonicalJsonValue; @@ -222,7 +250,8 @@ async fn auth_server(services: &Services, request: &mut Request, body: Option<&C .expect("all requests have a path") .to_string(); - let signature: [Member; 1] = [(x_matrix.key.as_str().into(), Value::String(x_matrix.sig.to_string()))]; + let signature: [Member; 1] = + [(x_matrix.key.as_str().into(), Value::String(x_matrix.sig.to_string()))]; let signatures: [Member; 1] = [(origin.as_str().into(), 
Value::Object(signature.into()))]; @@ -261,8 +290,8 @@ async fn auth_server(services: &Services, request: &mut Request, body: Option<&C debug_error!("Failed to verify federation request from {origin}: {e}"); if request.parts.uri.to_string().contains('@') { warn!( - "Request uri contained '@' character. Make sure your reverse proxy gives conduwuit the raw uri \ - (apache: use nocanon)" + "Request uri contained '@' character. Make sure your reverse proxy gives \ + conduwuit the raw uri (apache: use nocanon)" ); } @@ -294,7 +323,9 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { .forbidden_remote_server_names .contains(origin) { - return Err!(Request(Forbidden(debug_warn!("Federation requests from {origin} denied.")))); + return Err!(Request(Forbidden(debug_warn!( + "Federation requests from {origin} denied." + )))); } Ok(()) @@ -307,9 +338,9 @@ async fn parse_x_matrix(request: &mut Request) -> Result { .await .map_err(|e| { let msg = match e.reason() { - TypedHeaderRejectionReason::Missing => "Missing Authorization header.", - TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.", - _ => "Unknown header-related error", + | TypedHeaderRejectionReason::Missing => "Missing Authorization header.", + | TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.", + | _ => "Unknown header-related error", }; err!(Request(Forbidden(warn!("{msg}: {e}")))) diff --git a/src/api/router/handler.rs b/src/api/router/handler.rs index 246dc843..cfb8fb6e 100644 --- a/src/api/router/handler.rs +++ b/src/api/router/handler.rs @@ -66,14 +66,14 @@ ruma_handler!(T1, T2, T3, T4); const fn method_to_filter(method: &Method) -> MethodFilter { match *method { - Method::DELETE => MethodFilter::DELETE, - Method::GET => MethodFilter::GET, - Method::HEAD => MethodFilter::HEAD, - Method::OPTIONS => MethodFilter::OPTIONS, - Method::PATCH => MethodFilter::PATCH, - Method::POST => MethodFilter::POST, - Method::PUT => MethodFilter::PUT, - 
Method::TRACE => MethodFilter::TRACE, - _ => panic!("Unsupported HTTP method"), + | Method::DELETE => MethodFilter::DELETE, + | Method::GET => MethodFilter::GET, + | Method::HEAD => MethodFilter::HEAD, + | Method::OPTIONS => MethodFilter::OPTIONS, + | Method::PATCH => MethodFilter::PATCH, + | Method::POST => MethodFilter::POST, + | Method::PUT => MethodFilter::PUT, + | Method::TRACE => MethodFilter::TRACE, + | _ => panic!("Unsupported HTTP method"), } } diff --git a/src/api/router/request.rs b/src/api/router/request.rs index fd57d841..627abd30 100644 --- a/src/api/router/request.rs +++ b/src/api/router/request.rs @@ -20,14 +20,17 @@ pub(super) struct Request { pub(super) parts: Parts, } -pub(super) async fn from(services: &Services, request: hyper::Request) -> Result { +pub(super) async fn from( + services: &Services, + request: hyper::Request, +) -> Result { let limited = request.with_limited_body(); let (mut parts, body) = limited.into_parts(); let path: Path> = parts.extract().await?; let query = parts.uri.query().unwrap_or_default(); - let query = - serde_html_form::from_str(query).map_err(|e| err!(Request(Unknown("Failed to read query parameters: {e}"))))?; + let query = serde_html_form::from_str(query) + .map_err(|e| err!(Request(Unknown("Failed to read query parameters: {e}"))))?; let max_body_size = services.globals.config.max_request_size; @@ -35,10 +38,5 @@ pub(super) async fn from(services: &Services, request: hyper::Request) -> (State, Guard) { services: Arc::into_raw(services.clone()), }; - let guard = Guard { - services, - }; + let guard = Guard { services }; (state, guard) } diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 22fb6a8e..fac0e540 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -16,7 +16,8 @@ use crate::Ruma; /// Retrieves events from before the sender joined the room, if the room's /// history visibility allows. 
pub(crate) async fn get_backfill_route( - State(services): State, ref body: Ruma, + State(services): State, + ref body: Ruma, ) -> Result { AccessCheck { services: &services, diff --git a/src/api/server/event.rs b/src/api/server/event.rs index cbd9100b..629dd6a2 100644 --- a/src/api/server/event.rs +++ b/src/api/server/event.rs @@ -12,7 +12,8 @@ use crate::Ruma; /// - Only works if a user of this server is currently invited or joined the /// room pub(crate) async fn get_event_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let event = services .rooms diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 2d0893eb..93e867a0 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -17,7 +17,8 @@ use crate::Ruma; /// /// - This does not include the event itself pub(crate) async fn get_event_authorization_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { AccessCheck { services: &services, @@ -40,8 +41,8 @@ pub(crate) async fn get_event_authorization_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database."))?; - let room_id = - <&RoomId>::try_from(room_id_str).map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; let auth_chain = services .rooms @@ -53,7 +54,5 @@ pub(crate) async fn get_event_authorization_route( .collect() .await; - Ok(get_event_authorization::v1::Response { - auth_chain, - }) + Ok(get_event_authorization::v1::Response { auth_chain }) } diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index a07a1a37..ea06015a 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -12,7 +12,8 @@ use crate::Ruma; /// /// Retrieves events that the sender is 
missing. pub(crate) async fn get_missing_events_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { AccessCheck { services: &services, @@ -87,7 +88,5 @@ pub(crate) async fn get_missing_events_route( i = i.saturating_add(1); } - Ok(get_missing_events::v1::Response { - events, - }) + Ok(get_missing_events::v1::Response { events }) } diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index e3ce7108..a10df6ac 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -8,7 +8,8 @@ use crate::{Error, Result, Ruma}; /// Gets the space tree in a depth-first manner to locate child rooms of a given /// space. pub(crate) async fn get_hierarchy_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if services.rooms.metadata.exists(&body.room_id).await { services diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index f76573c7..a5b4a11c 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -16,7 +16,8 @@ use crate::Ruma; /// Invites a remote user to a room. 
#[tracing::instrument(skip_all, fields(%client), name = "invite")] pub(crate) async fn create_invite_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { // ACL check origin @@ -28,9 +29,7 @@ pub(crate) async fn create_invite_route( if !services.server.supported_room_version(&body.room_version) { return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: body.room_version.clone(), - }, + ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone() }, "Server does not support this room version.", )); } @@ -102,11 +101,14 @@ pub(crate) async fn create_invite_route( .try_into() .map_err(|e| err!(Request(InvalidParam("Invalid sender property: {e}"))))?; - if services.rooms.metadata.is_banned(&body.room_id).await && !services.users.is_admin(&invited_user).await { + if services.rooms.metadata.is_banned(&body.room_id).await + && !services.users.is_admin(&invited_user).await + { return Err!(Request(Forbidden("This room is banned on this homeserver."))); } - if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await { + if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await + { return Err!(Request(Forbidden("This server does not allow room invites."))); } diff --git a/src/api/server/key.rs b/src/api/server/key.rs index fea79897..75801a7a 100644 --- a/src/api/server/key.rs +++ b/src/api/server/key.rs @@ -22,7 +22,9 @@ use ruma::{ /// this will be valid forever. 
// Response type for this endpoint is Json because we need to calculate a // signature for the response -pub(crate) async fn get_server_keys_route(State(services): State) -> Result { +pub(crate) async fn get_server_keys_route( + State(services): State, +) -> Result { let server_name = services.globals.server_name(); let active_key_id = services.server_keys.active_key_id(); let mut all_keys = services.server_keys.verify_keys_for(server_name).await; @@ -72,6 +74,8 @@ fn expires_ts() -> MilliSecondsSinceUnixEpoch { /// /// - Matrix does not support invalidating public keys, so the key returned by /// this will be valid forever. -pub(crate) async fn get_server_keys_deprecated_route(State(services): State) -> impl IntoResponse { +pub(crate) async fn get_server_keys_deprecated_route( + State(services): State, +) -> impl IntoResponse { get_server_keys_route(State(services)).await } diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index 3f62fbbe..3900c418 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -23,7 +23,8 @@ use crate::{ /// /// Creates a join template. pub(crate) async fn create_join_event_template_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { return Err!(Request(NotFound("Room is unknown to this server."))); @@ -47,8 +48,8 @@ pub(crate) async fn create_join_event_template_route( .contains(body.origin()) { warn!( - "Server {} for remote user {} tried joining room ID {} which has a server name that is globally \ - forbidden. Rejecting.", + "Server {} for remote user {} tried joining room ID {} which has a server name that \ + is globally forbidden. 
Rejecting.", body.origin(), &body.user_id, &body.room_id, @@ -72,9 +73,7 @@ pub(crate) async fn create_join_event_template_route( let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, + ErrorKind::IncompatibleRoomVersion { room_version: room_version_id }, "Room version not supported.", )); } @@ -86,16 +85,25 @@ pub(crate) async fn create_join_event_template_route( if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { // room version does not support restricted join rules None - } else if user_can_perform_restricted_join(&services, &body.user_id, &body.room_id, &room_version_id).await? { + } else if user_can_perform_restricted_join( + &services, + &body.user_id, + &body.room_id, + &room_version_id, + ) + .await? + { let Some(auth_user) = services .rooms .state_cache .local_users_in_room(&body.room_id) .filter(|user| { - services - .rooms - .state_accessor - .user_can_invite(&body.room_id, user, &body.user_id, &state_lock) + services.rooms.state_accessor.user_can_invite( + &body.room_id, + user, + &body.user_id, + &state_lock, + ) }) .boxed() .next() @@ -116,13 +124,10 @@ pub(crate) async fn create_join_event_template_route( .rooms .timeline .create_hash_and_sign_event( - PduBuilder::state( - body.user_id.to_string(), - &RoomMemberEventContent { - join_authorized_via_users_server, - ..RoomMemberEventContent::new(MembershipState::Join) - }, - ), + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + join_authorized_via_users_server, + ..RoomMemberEventContent::new(MembershipState::Join) + }), &body.user_id, &body.room_id, &state_lock, @@ -142,7 +147,10 @@ pub(crate) async fn create_join_event_template_route( /// Checks whether the given user can join the given room via a restricted join. 
pub(crate) async fn user_can_perform_restricted_join( - services: &Services, user_id: &UserId, room_id: &RoomId, room_version_id: &RoomVersionId, + services: &Services, + user_id: &UserId, + room_id: &RoomId, + room_version_id: &RoomVersionId, ) -> Result { use RoomVersionId::*; @@ -159,13 +167,19 @@ pub(crate) async fn user_can_perform_restricted_join( let Ok(join_rules_event_content) = services .rooms .state_accessor - .room_state_get_content::(room_id, &StateEventType::RoomJoinRules, "") + .room_state_get_content::( + room_id, + &StateEventType::RoomJoinRules, + "", + ) .await else { return Ok(false); }; - let (JoinRule::Restricted(r) | JoinRule::KnockRestricted(r)) = join_rules_event_content.join_rule else { + let (JoinRule::Restricted(r) | JoinRule::KnockRestricted(r)) = + join_rules_event_content.join_rule + else { return Ok(false); }; @@ -195,12 +209,15 @@ pub(crate) async fn user_can_perform_restricted_join( } } -pub(crate) fn maybe_strip_event_id(pdu_json: &mut CanonicalJsonObject, room_version_id: &RoomVersionId) -> Result { +pub(crate) fn maybe_strip_event_id( + pdu_json: &mut CanonicalJsonObject, + room_version_id: &RoomVersionId, +) -> Result { use RoomVersionId::*; match room_version_id { - V1 | V2 => Ok(()), - _ => { + | V1 | V2 => Ok(()), + | _ => { pdu_json.remove("event_id"); Ok(()) }, diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 2895d3bf..746a4858 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -13,7 +13,8 @@ use crate::{service::pdu::PduBuilder, Ruma}; /// /// Creates a leave template. 
pub(crate) async fn create_leave_event_template_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { return Err!(Request(NotFound("Room is unknown to this server."))); @@ -37,7 +38,10 @@ pub(crate) async fn create_leave_event_template_route( .rooms .timeline .create_hash_and_sign_event( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Leave)), + PduBuilder::state( + body.user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Leave), + ), &body.user_id, &body.room_id, &state_lock, diff --git a/src/api/server/media.rs b/src/api/server/media.rs index 8c3ff435..03ec7b51 100644 --- a/src/api/server/media.rs +++ b/src/api/server/media.rs @@ -16,7 +16,8 @@ use crate::Ruma; /// Load media from our server. #[tracing::instrument(skip_all, fields(%client), name = "media_get")] pub(crate) async fn get_content_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let mxc = Mxc { @@ -33,7 +34,8 @@ pub(crate) async fn get_content_route( return Err!(Request(NotFound("Media not found."))); }; - let content_disposition = make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + let content_disposition = + make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); let content = Content { file: content.expect("entire file contents"), content_type: content_type.map(Into::into), @@ -51,7 +53,8 @@ pub(crate) async fn get_content_route( /// Load media thumbnail from our server. 
#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")] pub(crate) async fn get_content_thumbnail_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; @@ -69,7 +72,8 @@ pub(crate) async fn get_content_thumbnail_route( return Err!(Request(NotFound("Media not found."))); }; - let content_disposition = make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + let content_disposition = + make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); let content = Content { file: content.expect("entire file contents"), content_type: content_type.map(Into::into), diff --git a/src/api/server/openid.rs b/src/api/server/openid.rs index 9b54807a..4833fbe1 100644 --- a/src/api/server/openid.rs +++ b/src/api/server/openid.rs @@ -7,7 +7,8 @@ use crate::{Result, Ruma}; /// /// Get information about the user that generated the OpenID token. pub(crate) async fn get_openid_userinfo_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { Ok(get_openid_userinfo::v1::Response::new( services diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index f6c41859..77cde15f 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -15,7 +15,8 @@ use crate::{Error, Result, Ruma}; /// Lists the public rooms on this server. 
#[tracing::instrument(skip_all, fields(%client), name = "publicrooms")] pub(crate) async fn get_public_rooms_filtered_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { if !services @@ -35,7 +36,9 @@ pub(crate) async fn get_public_rooms_filtered_route( &body.room_network, ) .await - .map_err(|_| Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list."))?; + .map_err(|_| { + Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.") + })?; Ok(get_public_rooms_filtered::v1::Response { chunk: response.chunk, @@ -50,7 +53,8 @@ pub(crate) async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. #[tracing::instrument(skip_all, fields(%client), "publicrooms")] pub(crate) async fn get_public_rooms_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { if !services @@ -69,7 +73,9 @@ pub(crate) async fn get_public_rooms_route( &body.room_network, ) .await - .map_err(|_| Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list."))?; + .map_err(|_| { + Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.") + })?; Ok(get_public_rooms::v1::Response { chunk: response.chunk, diff --git a/src/api/server/query.rs b/src/api/server/query.rs index 6536bb20..0e5f7e56 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -19,7 +19,8 @@ use crate::Ruma; /// /// Resolve a room alias to a room id. 
pub(crate) async fn get_room_information_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { let room_id = services .rooms @@ -50,10 +51,7 @@ pub(crate) async fn get_room_information_route( servers.insert(0, services.globals.server_name().to_owned()); } - Ok(get_room_information::v1::Response { - room_id, - servers, - }) + Ok(get_room_information::v1::Response { room_id, servers }) } /// # `GET /_matrix/federation/v1/query/profile` @@ -61,7 +59,8 @@ pub(crate) async fn get_room_information_route( /// /// Gets information on a profile. pub(crate) async fn get_profile_information_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services .globals @@ -88,14 +87,14 @@ pub(crate) async fn get_profile_information_route( let mut custom_profile_fields = BTreeMap::new(); match &body.field { - Some(ProfileField::DisplayName) => { + | Some(ProfileField::DisplayName) => { displayname = services.users.displayname(&body.user_id).await.ok(); }, - Some(ProfileField::AvatarUrl) => { + | Some(ProfileField::AvatarUrl) => { avatar_url = services.users.avatar_url(&body.user_id).await.ok(); blurhash = services.users.blurhash(&body.user_id).await.ok(); }, - Some(custom_field) => { + | Some(custom_field) => { if let Ok(value) = services .users .profile_key(&body.user_id, custom_field.as_str()) @@ -104,7 +103,7 @@ pub(crate) async fn get_profile_information_route( custom_profile_fields.insert(custom_field.to_string(), value); } }, - None => { + | None => { displayname = services.users.displayname(&body.user_id).await.ok(); avatar_url = services.users.avatar_url(&body.user_id).await.ok(); blurhash = services.users.blurhash(&body.user_id).await.ok(); diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 960c79b0..c5fc7118 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -2,15 +2,18 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use 
axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{debug, debug_warn, err, error, result::LogErr, trace, utils::ReadyExt, warn, Err, Error, Result}; +use conduwuit::{ + debug, debug_warn, err, error, result::LogErr, trace, utils::ReadyExt, warn, Err, Error, + Result, +}; use futures::StreamExt; use ruma::{ api::{ client::error::ErrorKind, federation::transactions::{ edu::{ - DeviceListUpdateContent, DirectDeviceContent, Edu, PresenceContent, ReceiptContent, - SigningKeyUpdateContent, TypingContent, + DeviceListUpdateContent, DirectDeviceContent, Edu, PresenceContent, + ReceiptContent, SigningKeyUpdateContent, TypingContent, }, send_transaction_message, }, @@ -38,7 +41,8 @@ type ResolvedMap = BTreeMap>; /// Push EDUs and PDUs to this server. #[tracing::instrument(skip_all, fields(%client, origin = body.origin().as_str()), name = "send")] pub(crate) async fn send_transaction_message_route( - State(services): State, InsecureClientIp(client): InsecureClientIp, + State(services): State, + InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { if body.origin() != body.body.origin { @@ -69,7 +73,8 @@ pub(crate) async fn send_transaction_message_route( "Starting txn", ); - let resolved_map = handle_pdus(&services, &client, &body.pdus, body.origin(), &txn_start_time).await?; + let resolved_map = + handle_pdus(&services, &client, &body.pdus, body.origin(), &txn_start_time).await?; handle_edus(&services, &client, &body.edus, body.origin()).await; debug!( @@ -90,13 +95,17 @@ pub(crate) async fn send_transaction_message_route( } async fn handle_pdus( - services: &Services, _client: &IpAddr, pdus: &[Box], origin: &ServerName, txn_start_time: &Instant, + services: &Services, + _client: &IpAddr, + pdus: &[Box], + origin: &ServerName, + txn_start_time: &Instant, ) -> Result { let mut parsed_pdus = Vec::with_capacity(pdus.len()); for pdu in pdus { parsed_pdus.push(match services.rooms.event_handler.parse_incoming_pdu(pdu).await { - Ok(t) => 
t, - Err(e) => { + | Ok(t) => t, + | Err(e) => { debug_warn!("Could not parse PDU: {e}"); continue; }, @@ -145,26 +154,45 @@ async fn handle_pdus( Ok(resolved_map) } -async fn handle_edus(services: &Services, client: &IpAddr, edus: &[Raw], origin: &ServerName) { +async fn handle_edus( + services: &Services, + client: &IpAddr, + edus: &[Raw], + origin: &ServerName, +) { for edu in edus .iter() .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) { match edu { - Edu::Presence(presence) => handle_edu_presence(services, client, origin, presence).await, - Edu::Receipt(receipt) => handle_edu_receipt(services, client, origin, receipt).await, - Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, - Edu::DeviceListUpdate(content) => handle_edu_device_list_update(services, client, origin, content).await, - Edu::DirectToDevice(content) => handle_edu_direct_to_device(services, client, origin, content).await, - Edu::SigningKeyUpdate(content) => handle_edu_signing_key_update(services, client, origin, content).await, - Edu::_Custom(ref _custom) => { + | Edu::Presence(presence) => { + handle_edu_presence(services, client, origin, presence).await; + }, + | Edu::Receipt(receipt) => + handle_edu_receipt(services, client, origin, receipt).await, + | Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, + | Edu::DeviceListUpdate(content) => { + handle_edu_device_list_update(services, client, origin, content).await; + }, + | Edu::DirectToDevice(content) => { + handle_edu_direct_to_device(services, client, origin, content).await; + }, + | Edu::SigningKeyUpdate(content) => { + handle_edu_signing_key_update(services, client, origin, content).await; + }, + | Edu::_Custom(ref _custom) => { debug_warn!(?edus, "received custom/unknown EDU"); }, } } } -async fn handle_edu_presence(services: &Services, _client: &IpAddr, origin: &ServerName, presence: PresenceContent) { +async fn handle_edu_presence( + services: &Services, + 
_client: &IpAddr, + origin: &ServerName, + presence: PresenceContent, +) { if !services.globals.allow_incoming_presence() { return; } @@ -193,7 +221,12 @@ async fn handle_edu_presence(services: &Services, _client: &IpAddr, origin: &Ser } } -async fn handle_edu_receipt(services: &Services, _client: &IpAddr, origin: &ServerName, receipt: ReceiptContent) { +async fn handle_edu_receipt( + services: &Services, + _client: &IpAddr, + origin: &ServerName, + receipt: ReceiptContent, +) { if !services.globals.allow_incoming_read_receipts() { return; } @@ -230,7 +263,8 @@ async fn handle_edu_receipt(services: &Services, _client: &IpAddr, origin: &Serv .await { for event_id in &user_updates.event_ids { - let user_receipts = BTreeMap::from([(user_id.clone(), user_updates.data.clone())]); + let user_receipts = + BTreeMap::from([(user_id.clone(), user_updates.data.clone())]); let receipts = BTreeMap::from([(ReceiptType::Read, user_receipts)]); let receipt_content = BTreeMap::from([(event_id.to_owned(), receipts)]); let event = ReceiptEvent { @@ -255,7 +289,12 @@ async fn handle_edu_receipt(services: &Services, _client: &IpAddr, origin: &Serv } } -async fn handle_edu_typing(services: &Services, _client: &IpAddr, origin: &ServerName, typing: TypingContent) { +async fn handle_edu_typing( + services: &Services, + _client: &IpAddr, + origin: &ServerName, + typing: TypingContent, +) { if !services.globals.config.allow_incoming_typing { return; } @@ -321,12 +360,12 @@ async fn handle_edu_typing(services: &Services, _client: &IpAddr, origin: &Serve } async fn handle_edu_device_list_update( - services: &Services, _client: &IpAddr, origin: &ServerName, content: DeviceListUpdateContent, + services: &Services, + _client: &IpAddr, + origin: &ServerName, + content: DeviceListUpdateContent, ) { - let DeviceListUpdateContent { - user_id, - .. - } = content; + let DeviceListUpdateContent { user_id, .. 
} = content; if user_id.server_name() != origin { debug_warn!( @@ -340,14 +379,12 @@ async fn handle_edu_device_list_update( } async fn handle_edu_direct_to_device( - services: &Services, _client: &IpAddr, origin: &ServerName, content: DirectDeviceContent, + services: &Services, + _client: &IpAddr, + origin: &ServerName, + content: DirectDeviceContent, ) { - let DirectDeviceContent { - sender, - ev_type, - message_id, - messages, - } = content; + let DirectDeviceContent { sender, ev_type, message_id, messages } = content; if sender.server_name() != origin { debug_warn!( @@ -369,23 +406,28 @@ async fn handle_edu_direct_to_device( for (target_user_id, map) in &messages { for (target_device_id_maybe, event) in map { - let Ok(event) = event - .deserialize_as() - .map_err(|e| err!(Request(InvalidParam(error!("To-Device event is invalid: {e}"))))) - else { + let Ok(event) = event.deserialize_as().map_err(|e| { + err!(Request(InvalidParam(error!("To-Device event is invalid: {e}")))) + }) else { continue; }; let ev_type = ev_type.to_string(); match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => { + | DeviceIdOrAllDevices::DeviceId(target_device_id) => { services .users - .add_to_device_event(&sender, target_user_id, target_device_id, &ev_type, event) + .add_to_device_event( + &sender, + target_user_id, + target_device_id, + &ev_type, + event, + ) .await; }, - DeviceIdOrAllDevices::AllDevices => { + | DeviceIdOrAllDevices::AllDevices => { let (sender, ev_type, event) = (&sender, &ev_type, &event); services .users @@ -412,13 +454,12 @@ async fn handle_edu_direct_to_device( } async fn handle_edu_signing_key_update( - services: &Services, _client: &IpAddr, origin: &ServerName, content: SigningKeyUpdateContent, + services: &Services, + _client: &IpAddr, + origin: &ServerName, + content: SigningKeyUpdateContent, ) { - let SigningKeyUpdateContent { - user_id, - master_key, - self_signing_key, - } = content; + let SigningKeyUpdateContent { user_id, 
master_key, self_signing_key } = content; if user_id.server_name() != origin { debug_warn!( diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 9c72de2c..6ac84907 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -16,7 +16,8 @@ use ruma::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, }, - CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, + CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, + ServerName, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use service::Services; @@ -25,7 +26,10 @@ use crate::Ruma; /// helper method for /send_join v1 and v2 async fn create_join_event( - services: &Services, origin: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, + services: &Services, + origin: &ServerName, + room_id: &RoomId, + pdu: &RawJsonValue, ) -> Result { if !services.rooms.metadata.exists(room_id).await { return Err!(Request(NotFound("Room is unknown to this server."))); @@ -146,7 +150,8 @@ async fn create_join_event( if !services.globals.user_is_local(&authorising_user) { return Err!(Request(InvalidParam( - "Cannot authorise membership event through {authorising_user} as they do not belong to this homeserver" + "Cannot authorise membership event through {authorising_user} as they do not \ + belong to this homeserver" ))); } @@ -157,12 +162,19 @@ async fn create_join_event( .await { return Err!(Request(InvalidParam( - "Authorising user {authorising_user} is not in the room you are trying to join, they cannot authorise \ - your join." + "Authorising user {authorising_user} is not in the room you are trying to join, \ + they cannot authorise your join." ))); } - if !super::user_can_perform_restricted_join(services, &state_key, room_id, &room_version_id).await? { + if !super::user_can_perform_restricted_join( + services, + &state_key, + room_id, + &room_version_id, + ) + .await? 
+ { return Err!(Request(UnableToAuthorizeJoin( "Joining user did not pass restricted room's rules." ))); @@ -228,7 +240,9 @@ async fn create_join_event( .event_ids_iter(room_id, starting_events) .await? .map(Ok) - .broad_and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) + .broad_and_then(|event_id| async move { + services.rooms.timeline.get_pdu_json(&event_id).await + }) .broad_and_then(|pdu| { services .sending @@ -252,7 +266,8 @@ async fn create_join_event( /// /// Submits a signed join event. pub(crate) async fn create_join_event_v1_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if services .globals @@ -261,8 +276,8 @@ pub(crate) async fn create_join_event_v1_route( .contains(body.origin()) { warn!( - "Server {} tried joining room ID {} through us who has a server name that is globally forbidden. \ - Rejecting.", + "Server {} tried joining room ID {} through us who has a server name that is \ + globally forbidden. Rejecting.", body.origin(), &body.room_id, ); @@ -277,8 +292,8 @@ pub(crate) async fn create_join_event_v1_route( .contains(&server.to_owned()) { warn!( - "Server {} tried joining room ID {} through us which has a server name that is globally forbidden. \ - Rejecting.", + "Server {} tried joining room ID {} through us which has a server name that is \ + globally forbidden. Rejecting.", body.origin(), &body.room_id, ); @@ -292,16 +307,15 @@ pub(crate) async fn create_join_event_v1_route( .boxed() .await?; - Ok(create_join_event::v1::Response { - room_state, - }) + Ok(create_join_event::v1::Response { room_state }) } /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. 
pub(crate) async fn create_join_event_v2_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if services .globals @@ -320,8 +334,8 @@ pub(crate) async fn create_join_event_v2_route( .contains(&server.to_owned()) { warn!( - "Server {} tried joining room ID {} through us which has a server name that is globally forbidden. \ - Rejecting.", + "Server {} tried joining room ID {} through us which has a server name that is \ + globally forbidden. Rejecting.", body.origin(), &body.room_id, ); @@ -331,13 +345,10 @@ pub(crate) async fn create_join_event_v2_route( } } - let create_join_event::v1::RoomState { - auth_chain, - state, - event, - } = create_join_event(&services, body.origin(), &body.room_id, &body.pdu) - .boxed() - .await?; + let create_join_event::v1::RoomState { auth_chain, state, event } = + create_join_event(&services, body.origin(), &body.room_id, &body.pdu) + .boxed() + .await?; let room_state = create_join_event::v2::RoomState { members_omitted: false, auth_chain, @@ -346,7 +357,5 @@ pub(crate) async fn create_join_event_v2_route( servers_in_room: None, }; - Ok(create_join_event::v2::Response { - room_state, - }) + Ok(create_join_event::v2::Response { room_state }) } diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index b58bae7e..016ed7fa 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -21,7 +21,8 @@ use crate::{ /// /// Submits a signed leave event. pub(crate) async fn create_leave_event_v1_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { create_leave_event(&services, body.origin(), &body.room_id, &body.pdu).await?; @@ -32,14 +33,20 @@ pub(crate) async fn create_leave_event_v1_route( /// /// Submits a signed leave event. 
pub(crate) async fn create_leave_event_v2_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { create_leave_event(&services, body.origin(), &body.room_id, &body.pdu).await?; Ok(create_leave_event::v2::Response::new()) } -async fn create_leave_event(services: &Services, origin: &ServerName, room_id: &RoomId, pdu: &RawJsonValue) -> Result { +async fn create_leave_event( + services: &Services, + origin: &ServerName, + room_id: &RoomId, + pdu: &RawJsonValue, +) -> Result { if !services.rooms.metadata.exists(room_id).await { return Err!(Request(NotFound("Room is unknown to this server."))); } diff --git a/src/api/server/state.rs b/src/api/server/state.rs index ba1c400f..42f7e538 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -12,7 +12,8 @@ use crate::Ruma; /// /// Retrieves a snapshot of a room's state at a given event. pub(crate) async fn get_room_state_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { AccessCheck { services: &services, @@ -69,8 +70,5 @@ pub(crate) async fn get_room_state_route( .try_collect() .await?; - Ok(get_room_state::v1::Response { - auth_chain, - pdus, - }) + Ok(get_room_state::v1::Response { auth_chain, pdus }) } diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 503104df..186ef399 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -13,7 +13,8 @@ use crate::Ruma; /// Retrieves a snapshot of a room's state at a given event, in the form of /// event IDs. 
pub(crate) async fn get_room_state_ids_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { AccessCheck { services: &services, @@ -49,8 +50,5 @@ pub(crate) async fn get_room_state_ids_route( .collect() .await; - Ok(get_room_state_ids::v1::Response { - auth_chain_ids, - pdu_ids, - }) + Ok(get_room_state_ids::v1::Response { auth_chain_ids, pdu_ids }) } diff --git a/src/api/server/user.rs b/src/api/server/user.rs index 5072ed2f..321d0b66 100644 --- a/src/api/server/user.rs +++ b/src/api/server/user.rs @@ -18,7 +18,8 @@ use crate::{ /// /// Gets information on all devices of the user. pub(crate) async fn get_devices_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.globals.user_is_local(&body.user_id) { return Err(Error::BadRequest( @@ -52,11 +53,7 @@ pub(crate) async fn get_devices_route( services .users .get_device_keys(user_id, &device_id_clone) - .map_ok(|keys| UserDevice { - device_id, - keys, - device_display_name, - }) + .map_ok(|keys| UserDevice { device_id, keys, device_display_name }) .map(Result::ok) .await }) @@ -79,7 +76,8 @@ pub(crate) async fn get_devices_route( /// /// Gets devices and identity keys for the given users. pub(crate) async fn get_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if body .device_keys @@ -112,7 +110,8 @@ pub(crate) async fn get_keys_route( /// /// Claims one-time keys. 
pub(crate) async fn claim_keys_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if body .one_time_keys @@ -127,7 +126,5 @@ pub(crate) async fn claim_keys_route( let result = claim_keys_helper(&services, &body.one_time_keys).await?; - Ok(claim_keys::v1::Response { - one_time_keys: result.one_time_keys, - }) + Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys }) } diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs index 3e768513..112cf858 100644 --- a/src/api/server/utils.rs +++ b/src/api/server/utils.rs @@ -34,10 +34,11 @@ pub(super) async fn check(&self) -> Result { let server_can_see: OptionFuture<_> = self .event_id .map(|event_id| { - self.services - .rooms - .state_accessor - .server_can_see_event(self.origin, self.room_id, event_id) + self.services.rooms.state_accessor.server_can_see_event( + self.origin, + self.room_id, + event_id, + ) }) .into(); diff --git a/src/api/server/well_known.rs b/src/api/server/well_known.rs index e6145aea..48caa7d6 100644 --- a/src/api/server/well_known.rs +++ b/src/api/server/well_known.rs @@ -7,12 +7,13 @@ use crate::{Error, Result, Ruma}; /// /// Returns the .well-known URL if it is configured, otherwise returns 404. 
pub(crate) async fn well_known_server( - State(services): State, _body: Ruma, + State(services): State, + _body: Ruma, ) -> Result { Ok(discover_homeserver::Response { server: match services.server.config.well_known.server.as_ref() { - Some(server_name) => server_name.to_owned(), - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + | Some(server_name) => server_name.to_owned(), + | None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), }, }) } diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index a3a7acc0..423f5408 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -29,8 +29,9 @@ pub fn memory_usage() -> Option { let resident = mibs(stats::resident::read()); let retained = mibs(stats::retained::read()); Some(format!( - "allocated: {allocated:.2} MiB\nactive: {active:.2} MiB\nmapped: {mapped:.2} MiB\nmetadata: {metadata:.2} \ - MiB\nresident: {resident:.2} MiB\nretained: {retained:.2} MiB\n" + "allocated: {allocated:.2} MiB\nactive: {active:.2} MiB\nmapped: {mapped:.2} \ + MiB\nmetadata: {metadata:.2} MiB\nresident: {resident:.2} MiB\nretained: {retained:.2} \ + MiB\n" )) } diff --git a/src/core/alloc/mod.rs b/src/core/alloc/mod.rs index 7941a99c..31eb033c 100644 --- a/src/core/alloc/mod.rs +++ b/src/core/alloc/mod.rs @@ -8,10 +8,20 @@ pub use je::{memory_stats, memory_usage}; #[cfg(all(not(target_env = "msvc"), feature = "hardened_malloc", not(feature = "jemalloc")))] pub mod hardened; -#[cfg(all(not(target_env = "msvc"), feature = "hardened_malloc", not(feature = "jemalloc")))] +#[cfg(all( + not(target_env = "msvc"), + feature = "hardened_malloc", + not(feature = "jemalloc") +))] pub use hardened::{memory_stats, memory_usage}; -#[cfg(any(target_env = "msvc", all(not(feature = "hardened_malloc"), not(feature = "jemalloc"))))] +#[cfg(any( + target_env = "msvc", + all(not(feature = "hardened_malloc"), not(feature = "jemalloc")) +))] pub mod default; -#[cfg(any(target_env = "msvc", all(not(feature = 
"hardened_malloc"), not(feature = "jemalloc"))))] +#[cfg(any( + target_env = "msvc", + all(not(feature = "hardened_malloc"), not(feature = "jemalloc")) +))] pub use default::{memory_stats, memory_usage}; diff --git a/src/core/config/check.rs b/src/core/config/check.rs index b8415281..c242e2fd 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -16,18 +16,24 @@ pub fn check(config: &Config) -> Result<()> { warn_unknown_key(config); if config.sentry && config.sentry_endpoint.is_none() { - return Err!(Config("sentry_endpoint", "Sentry cannot be enabled without an endpoint set")); + return Err!(Config( + "sentry_endpoint", + "Sentry cannot be enabled without an endpoint set" + )); } if cfg!(all(feature = "hardened_malloc", feature = "jemalloc")) { - info!("hardened_malloc and jemalloc compile-time features are both enabled, this causes jemalloc to be used."); + info!( + "hardened_malloc and jemalloc compile-time features are both enabled, this causes \ + jemalloc to be used." + ); } if cfg!(not(unix)) && config.unix_socket_path.is_some() { return Err!(Config( "unix_socket_path", - "UNIX socket support is only available on *nix platforms. Please remove 'unix_socket_path' from your \ - config." + "UNIX socket support is only available on *nix platforms. Please remove \ + 'unix_socket_path' from your config." )); } @@ -44,30 +50,36 @@ pub fn check(config: &Config) -> Result<()> { use std::path::Path; if addr.ip().is_loopback() { - debug_info!("Found loopback listening address {addr}, running checks if we're in a container."); + debug_info!( + "Found loopback listening address {addr}, running checks if we're in a \ + container." + ); if Path::new("/proc/vz").exists() /* Guest */ && !Path::new("/proc/bz").exists() /* Host */ { error!( - "You are detected using OpenVZ with a loopback/localhost listening address of {addr}. 
If you \ - are using OpenVZ for containers and you use NAT-based networking to communicate with the \ - host and guest, this will NOT work. Please change this to \"0.0.0.0\". If this is expected, \ - you can ignore.", + "You are detected using OpenVZ with a loopback/localhost listening \ + address of {addr}. If you are using OpenVZ for containers and you use \ + NAT-based networking to communicate with the host and guest, this will \ + NOT work. Please change this to \"0.0.0.0\". If this is expected, you \ + can ignore.", ); } else if Path::new("/.dockerenv").exists() { error!( - "You are detected using Docker with a loopback/localhost listening address of {addr}. If you \ - are using a reverse proxy on the host and require communication to conduwuit in the Docker \ - container via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". \ - If this is expected, you can ignore.", + "You are detected using Docker with a loopback/localhost listening \ + address of {addr}. If you are using a reverse proxy on the host and \ + require communication to conduwuit in the Docker container via \ + NAT-based networking, this will NOT work. Please change this to \ + \"0.0.0.0\". If this is expected, you can ignore.", ); } else if Path::new("/run/.containerenv").exists() { error!( - "You are detected using Podman with a loopback/localhost listening address of {addr}. If you \ - are using a reverse proxy on the host and require communication to conduwuit in the Podman \ - container via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". \ - If this is expected, you can ignore.", + "You are detected using Podman with a loopback/localhost listening \ + address of {addr}. If you are using a reverse proxy on the host and \ + require communication to conduwuit in the Podman container via \ + NAT-based networking, this will NOT work. Please change this to \ + \"0.0.0.0\". 
If this is expected, you can ignore.", ); } } @@ -93,7 +105,8 @@ pub fn check(config: &Config) -> Result<()> { if config.emergency_password == Some(String::from("F670$2CP@Hw8mG7RY1$%!#Ic7YA")) { return Err!(Config( "emergency_password", - "The public example emergency password is being used, this is insecure. Please change this." + "The public example emergency password is being used, this is insecure. Please \ + change this." )); } @@ -124,7 +137,8 @@ pub fn check(config: &Config) -> Result<()> { if config.max_request_size < 10_000_000 { return Err!(Config( "max_request_size", - "Max request size is less than 10MB. Please increase it as this is too low for operable federation." + "Max request size is less than 10MB. Please increase it as this is too low for \ + operable federation." )); } @@ -145,11 +159,12 @@ pub fn check(config: &Config) -> Result<()> { { return Err!(Config( "registration_token", - "!! You have `allow_registration` enabled without a token configured in your config which means you are \ - allowing ANYONE to register on your conduwuit instance without any 2nd-step (e.g. registration token). \ - If this is not the intended behaviour, please set a registration token. For security and safety reasons, \ - conduwuit will shut down. If you are extra sure this is the desired behaviour you want, please set the \ - following config option to true: + "!! You have `allow_registration` enabled without a token configured in your config \ + which means you are allowing ANYONE to register on your conduwuit instance without \ + any 2nd-step (e.g. registration token). If this is not the intended behaviour, \ + please set a registration token. For security and safety reasons, conduwuit will \ + shut down. 
If you are extra sure this is the desired behaviour you want, please \ + set the following config option to true: `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`" )); } @@ -161,17 +176,18 @@ pub fn check(config: &Config) -> Result<()> { { warn!( "Open registration is enabled via setting \ - `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` and `allow_registration` to \ - true without a registration token configured. You are expected to be aware of the risks now. If this is \ - not the desired behaviour, please set a registration token." + `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` and \ + `allow_registration` to true without a registration token configured. You are \ + expected to be aware of the risks now. If this is not the desired behaviour, \ + please set a registration token." ); } if config.allow_outgoing_presence && !config.allow_local_presence { return Err!(Config( "allow_local_presence", - "Outgoing presence requires allowing local presence. Please enable 'allow_local_presence' or disable \ - outgoing presence." + "Outgoing presence requires allowing local presence. Please enable \ + 'allow_local_presence' or disable outgoing presence." )); } @@ -180,9 +196,10 @@ pub fn check(config: &Config) -> Result<()> { .contains(&"*".to_owned()) { warn!( - "All URLs are allowed for URL previews via setting \"url_preview_domain_contains_allowlist\" to \"*\". \ - This opens up significant attack surface to your server. You are expected to be aware of the risks by \ - doing this." + "All URLs are allowed for URL previews via setting \ + \"url_preview_domain_contains_allowlist\" to \"*\". This opens up significant \ + attack surface to your server. You are expected to be aware of the risks by doing \ + this." 
); } if config @@ -190,9 +207,10 @@ pub fn check(config: &Config) -> Result<()> { .contains(&"*".to_owned()) { warn!( - "All URLs are allowed for URL previews via setting \"url_preview_domain_explicit_allowlist\" to \"*\". \ - This opens up significant attack surface to your server. You are expected to be aware of the risks by \ - doing this." + "All URLs are allowed for URL previews via setting \ + \"url_preview_domain_explicit_allowlist\" to \"*\". This opens up significant \ + attack surface to your server. You are expected to be aware of the risks by doing \ + this." ); } if config @@ -200,9 +218,9 @@ pub fn check(config: &Config) -> Result<()> { .contains(&"*".to_owned()) { warn!( - "All URLs are allowed for URL previews via setting \"url_preview_url_contains_allowlist\" to \"*\". This \ - opens up significant attack surface to your server. You are expected to be aware of the risks by doing \ - this." + "All URLs are allowed for URL previews via setting \ + \"url_preview_url_contains_allowlist\" to \"*\". This opens up significant attack \ + surface to your server. You are expected to be aware of the risks by doing this." ); } @@ -260,7 +278,8 @@ pub(super) fn is_dual_listening(raw_config: &Figment) -> Result<()> { let contains_unix_socket = raw_config.contains("unix_socket_path"); if contains_address && contains_unix_socket { return Err!( - "TOML keys \"address\" and \"unix_socket_path\" were both defined. Please specify only one option." + "TOML keys \"address\" and \"unix_socket_path\" were both defined. Please specify \ + only one option." 
); } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a34514de..44b66f41 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -18,8 +18,8 @@ pub use figment::{value::Value as FigmentValue, Figment}; use itertools::Itertools; use regex::RegexSet; use ruma::{ - api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, - RoomVersionId, + api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, + OwnedUserId, RoomVersionId, }; use serde::{de::IgnoredAny, Deserialize}; use url::Url; @@ -181,7 +181,10 @@ pub struct Config { /// are scaled by your CPU core count. /// /// default: 1.0 - #[serde(default = "default_cache_capacity_modifier", alias = "conduit_cache_capacity_modifier")] + #[serde( + default = "default_cache_capacity_modifier", + alias = "conduit_cache_capacity_modifier" + )] pub cache_capacity_modifier: f64, /// default: varies by system @@ -1555,7 +1558,8 @@ pub struct Config { pub db_pool_queue_size: usize, #[serde(flatten)] - #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime + #[allow(clippy::zero_sized_map_values)] + // this is a catchall, the map shouldn't be zero at runtime catchall: BTreeMap, } @@ -1676,15 +1680,15 @@ impl Config { fn get_bind_hosts(&self) -> Vec { match &self.address.addrs { - Left(addr) => vec![*addr], - Right(addrs) => addrs.clone(), + | Left(addr) => vec![*addr], + | Right(addrs) => addrs.clone(), } } fn get_bind_ports(&self) -> Vec { match &self.port.ports { - Left(port) => vec![*port], - Right(ports) => ports.clone(), + | Left(port) => vec![*port], + | Right(ports) => ports.clone(), } } @@ -1756,9 +1760,13 @@ impl fmt::Display for Config { line("Allow registration", &self.allow_registration.to_string()); line( "Registration token", - if self.registration_token.is_none() && self.registration_token_file.is_none() && self.allow_registration { + if 
self.registration_token.is_none() + && self.registration_token_file.is_none() + && self.allow_registration + { "not set (⚠️ open registration!)" - } else if self.registration_token.is_none() && self.registration_token_file.is_none() { + } else if self.registration_token.is_none() && self.registration_token_file.is_none() + { "not set" } else { "set" @@ -1811,7 +1819,8 @@ impl fmt::Display for Config { &self.allow_outgoing_read_receipts.to_string(), ); line( - "Block non-admin room invites (local and remote, admins can still send and receive invites)", + "Block non-admin room invites (local and remote, admins can still send and receive \ + invites)", &self.block_non_admin_invites.to_string(), ); line("Enable admin escape commands", &self.admin_escape_commands.to_string()); @@ -1859,13 +1868,10 @@ impl fmt::Display for Config { "Lockdown public room directory (only allow admins to publish)", &self.lockdown_public_room_directory.to_string(), ); - line( - "JWT secret", - match self.jwt_secret { - Some(_) => "set", - None => "not set", - }, - ); + line("JWT secret", match self.jwt_secret { + | Some(_) => "set", + | None => "not set", + }); line( "Trusted key servers", &self @@ -1979,7 +1985,8 @@ impl fmt::Display for Config { &lst.join(", ") }); line("Forbidden Remote Room Directory Server Names", { - let mut lst = Vec::with_capacity(self.forbidden_remote_room_directory_server_names.len()); + let mut lst = + Vec::with_capacity(self.forbidden_remote_room_directory_server_names.len()); for domain in &self.forbidden_remote_room_directory_server_names { lst.push(domain.host()); } @@ -2099,11 +2106,7 @@ fn default_address() -> ListeningAddr { } } -fn default_port() -> ListeningPort { - ListeningPort { - ports: Left(8008), - } -} +fn default_port() -> ListeningPort { ListeningPort { ports: Left(8008) } } fn default_unix_socket_perms() -> u32 { 660 } @@ -2115,19 +2118,33 @@ fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturati fn 
default_cache_capacity_modifier() -> f64 { 1.0 } -fn default_auth_chain_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } +fn default_auth_chain_cache_capacity() -> u32 { + parallelism_scaled_u32(10_000).saturating_add(100_000) +} -fn default_shorteventid_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) } +fn default_shorteventid_cache_capacity() -> u32 { + parallelism_scaled_u32(50_000).saturating_add(100_000) +} -fn default_eventidshort_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(100_000) } +fn default_eventidshort_cache_capacity() -> u32 { + parallelism_scaled_u32(25_000).saturating_add(100_000) +} -fn default_eventid_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(100_000) } +fn default_eventid_pdu_cache_capacity() -> u32 { + parallelism_scaled_u32(25_000).saturating_add(100_000) +} -fn default_shortstatekey_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } +fn default_shortstatekey_cache_capacity() -> u32 { + parallelism_scaled_u32(10_000).saturating_add(100_000) +} -fn default_statekeyshort_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } +fn default_statekeyshort_cache_capacity() -> u32 { + parallelism_scaled_u32(10_000).saturating_add(100_000) +} -fn default_servernameevent_data_cache_capacity() -> u32 { parallelism_scaled_u32(100_000).saturating_add(500_000) } +fn default_servernameevent_data_cache_capacity() -> u32 { + parallelism_scaled_u32(100_000).saturating_add(500_000) +} fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(500) } @@ -2203,7 +2220,9 @@ fn default_jaeger_filter() -> String { fn default_tracing_flame_output_path() -> String { "./tracing.folded".to_owned() } -fn default_trusted_servers() -> Vec { vec![OwnedServerName::try_from("matrix.org").unwrap()] } +fn default_trusted_servers() -> Vec { + 
vec![OwnedServerName::try_from("matrix.org").unwrap()] +} /// do debug logging by default for debug builds #[must_use] @@ -2332,4 +2351,6 @@ fn default_trusted_server_batch_size() -> usize { 256 } fn default_db_pool_workers() -> usize { sys::available_parallelism().saturating_mul(4).max(32) } -fn default_db_pool_queue_size() -> usize { sys::available_parallelism().saturating_mul(8).max(256) } +fn default_db_pool_queue_size() -> usize { + sys::available_parallelism().saturating_mul(8).max(256) +} diff --git a/src/core/config/proxy.rs b/src/core/config/proxy.rs index 48f883c6..ea388f24 100644 --- a/src/core/config/proxy.rs +++ b/src/core/config/proxy.rs @@ -42,11 +42,9 @@ pub enum ProxyConfig { impl ProxyConfig { pub fn to_proxy(&self) -> Result> { Ok(match self.clone() { - Self::None => None, - Self::Global { - url, - } => Some(Proxy::all(url)?), - Self::ByDomain(proxies) => Some(Proxy::custom(move |url| { + | Self::None => None, + | Self::Global { url } => Some(Proxy::all(url)?), + | Self::ByDomain(proxies) => Some(Proxy::custom(move |url| { // first matching proxy proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() })), @@ -76,24 +74,26 @@ impl PartialProxyConfig { for wc_domain in &self.include { if wc_domain.matches(domain) { match included_because { - Some(prev) if !wc_domain.more_specific_than(prev) => (), - _ => included_because = Some(wc_domain), + | Some(prev) if !wc_domain.more_specific_than(prev) => (), + | _ => included_because = Some(wc_domain), } } } for wc_domain in &self.exclude { if wc_domain.matches(domain) { match excluded_because { - Some(prev) if !wc_domain.more_specific_than(prev) => (), - _ => excluded_because = Some(wc_domain), + | Some(prev) if !wc_domain.more_specific_than(prev) => (), + | _ => excluded_because = Some(wc_domain), } } } match (included_because, excluded_because) { - (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), /* included for a more specific reason */ + | (Some(a), Some(b)) if 
a.more_specific_than(b) => Some(&self.url), /* included for + * a more specific + * reason */ // than excluded - (Some(_), None) => Some(&self.url), - _ => None, + | (Some(_), None) => Some(&self.url), + | _ => None, } } } @@ -108,19 +108,19 @@ enum WildCardedDomain { impl WildCardedDomain { fn matches(&self, domain: &str) -> bool { match self { - Self::WildCard => true, - Self::WildCarded(d) => domain.ends_with(d), - Self::Exact(d) => domain == d, + | Self::WildCard => true, + | Self::WildCarded(d) => domain.ends_with(d), + | Self::Exact(d) => domain == d, } } fn more_specific_than(&self, other: &Self) -> bool { match (self, other) { - (Self::WildCard, Self::WildCard) => false, - (_, Self::WildCard) => true, - (Self::Exact(a), Self::WildCarded(_)) => other.matches(a), - (Self::WildCarded(a), Self::WildCarded(b)) => a != b && a.ends_with(b), - _ => false, + | (Self::WildCard, Self::WildCard) => false, + | (_, Self::WildCard) => true, + | (Self::Exact(a), Self::WildCarded(_)) => other.matches(a), + | (Self::WildCarded(a), Self::WildCarded(b)) => a != b && a.ends_with(b), + | _ => false, } } } diff --git a/src/core/debug.rs b/src/core/debug.rs index e1980234..123cf820 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -84,7 +84,9 @@ pub fn trap() { } #[must_use] -pub fn panic_str(p: &Box) -> &'static str { p.downcast_ref::<&str>().copied().unwrap_or_default() } +pub fn panic_str(p: &Box) -> &'static str { + p.downcast_ref::<&str>().copied().unwrap_or_default() +} #[inline(always)] #[must_use] @@ -96,4 +98,6 @@ pub fn type_name() -> &'static str { std::any::type_name::() } #[must_use] #[inline] -pub const fn logging() -> bool { cfg!(debug_assertions) && cfg!(not(feature = "dev_release_log_level")) } +pub const fn logging() -> bool { + cfg!(debug_assertions) && cfg!(not(feature = "dev_release_log_level")) +} diff --git a/src/core/error/err.rs b/src/core/error/err.rs index a24441e0..60fa5bff 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -184,7 
+184,12 @@ impl Visit for Visitor<'_> { } } -pub fn visit(out: &mut String, level: Level, __callsite: &'static DefaultCallsite, vs: &mut ValueSet<'_>) { +pub fn visit( + out: &mut String, + level: Level, + __callsite: &'static DefaultCallsite, + vs: &mut ValueSet<'_>, +) { let meta = __callsite.metadata(); let enabled = level_enabled!(level) && { let interest = __callsite.interest(); diff --git a/src/core/error/log.rs b/src/core/error/log.rs index 60bd7014..18c3a680 100644 --- a/src/core/error/log.rs +++ b/src/core/error/log.rs @@ -68,18 +68,20 @@ where pub fn inspect_log(error: &E) { inspect_log_level(error, Level::ERROR); } #[inline] -pub fn inspect_debug_log(error: &E) { inspect_debug_log_level(error, Level::ERROR); } +pub fn inspect_debug_log(error: &E) { + inspect_debug_log_level(error, Level::ERROR); +} #[inline] pub fn inspect_log_level(error: &E, level: Level) { use crate::{debug, error, info, trace, warn}; match level { - Level::ERROR => error!("{error}"), - Level::WARN => warn!("{error}"), - Level::INFO => info!("{error}"), - Level::DEBUG => debug!("{error}"), - Level::TRACE => trace!("{error}"), + | Level::ERROR => error!("{error}"), + | Level::WARN => warn!("{error}"), + | Level::INFO => info!("{error}"), + | Level::DEBUG => debug!("{error}"), + | Level::TRACE => trace!("{error}"), } } @@ -88,10 +90,10 @@ pub fn inspect_debug_log_level(error: &E, level: Level) { use crate::{debug, debug_error, debug_info, debug_warn, trace}; match level { - Level::ERROR => debug_error!("{error:?}"), - Level::WARN => debug_warn!("{error:?}"), - Level::INFO => debug_info!("{error:?}"), - Level::DEBUG => debug!("{error:?}"), - Level::TRACE => trace!("{error:?}"), + | Level::ERROR => debug_error!("{error:?}"), + | Level::WARN => debug_warn!("{error:?}"), + | Level::INFO => debug_info!("{error:?}"), + | Level::DEBUG => debug!("{error:?}"), + | Level::TRACE => trace!("{error:?}"), } } diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index b84f1b46..12ba0797 100644 
--- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -128,23 +128,25 @@ pub enum Error { impl Error { //#[deprecated] - pub fn bad_database(message: &'static str) -> Self { crate::err!(Database(error!("{message}"))) } + pub fn bad_database(message: &'static str) -> Self { + crate::err!(Database(error!("{message}"))) + } /// Sanitizes public-facing errors that can leak sensitive information. pub fn sanitized_message(&self) -> String { match self { - Self::Database(..) => String::from("Database error occurred."), - Self::Io(..) => String::from("I/O error occurred."), - _ => self.message(), + | Self::Database(..) => String::from("Database error occurred."), + | Self::Io(..) => String::from("I/O error occurred."), + | _ => self.message(), } } /// Generate the error message string. pub fn message(&self) -> String { match self { - Self::Federation(ref origin, ref error) => format!("Answer from {origin}: {error}"), - Self::Ruma(ref error) => response::ruma_error_message(error), - _ => format!("{self}"), + | Self::Federation(ref origin, ref error) => format!("Answer from {origin}: {error}"), + | Self::Ruma(ref error) => response::ruma_error_message(error), + | _ => format!("{self}"), } } @@ -154,9 +156,10 @@ impl Error { use ruma::api::client::error::ErrorKind::Unknown; match self { - Self::Federation(_, error) | Self::Ruma(error) => response::ruma_error_kind(error).clone(), - Self::BadRequest(kind, ..) | Self::Request(kind, ..) => kind.clone(), - _ => Unknown, + | Self::Federation(_, error) | Self::Ruma(error) => + response::ruma_error_kind(error).clone(), + | Self::BadRequest(kind, ..) | Self::Request(kind, ..) => kind.clone(), + | _ => Unknown, } } @@ -166,12 +169,12 @@ impl Error { use http::StatusCode; match self { - Self::Federation(_, error) | Self::Ruma(error) => error.status_code, - Self::Request(kind, _, code) => response::status_code(kind, *code), - Self::BadRequest(kind, ..) 
=> response::bad_request_code(kind), - Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), - Self::Conflict(_) => StatusCode::CONFLICT, - _ => StatusCode::INTERNAL_SERVER_ERROR, + | Self::Federation(_, error) | Self::Ruma(error) => error.status_code, + | Self::Request(kind, _, code) => response::status_code(kind, *code), + | Self::BadRequest(kind, ..) => response::bad_request_code(kind), + | Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), + | Self::Conflict(_) => StatusCode::CONFLICT, + | _ => StatusCode::INTERNAL_SERVER_ERROR, } } diff --git a/src/core/error/panic.rs b/src/core/error/panic.rs index bec25132..c6a83ae0 100644 --- a/src/core/error/panic.rs +++ b/src/core/error/panic.rs @@ -20,9 +20,9 @@ impl Error { #[inline] pub fn into_panic(self) -> Box { match self { - Self::Panic(_, e) | Self::PanicAny(e) => e, - Self::JoinError(e) => e.into_panic(), - _ => Box::new(self), + | Self::Panic(_, e) | Self::PanicAny(e) => e, + | Self::JoinError(e) => e.into_panic(), + | _ => Box::new(self), } } @@ -37,9 +37,9 @@ impl Error { #[inline] pub fn is_panic(&self) -> bool { match &self { - Self::Panic(..) | Self::PanicAny(..) => true, - Self::JoinError(e) => e.is_panic(), - _ => false, + | Self::Panic(..) | Self::PanicAny(..) => true, + | Self::JoinError(e) => e.is_panic(), + | _ => false, } } } diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 21fbdcf2..568238c3 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -57,49 +57,35 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode { match kind { // 429 - LimitExceeded { - .. - } => StatusCode::TOO_MANY_REQUESTS, + | LimitExceeded { .. 
} => StatusCode::TOO_MANY_REQUESTS, // 413 - TooLarge => StatusCode::PAYLOAD_TOO_LARGE, + | TooLarge => StatusCode::PAYLOAD_TOO_LARGE, // 405 - Unrecognized => StatusCode::METHOD_NOT_ALLOWED, + | Unrecognized => StatusCode::METHOD_NOT_ALLOWED, // 404 - NotFound => StatusCode::NOT_FOUND, + | NotFound => StatusCode::NOT_FOUND, // 403 - GuestAccessForbidden + | GuestAccessForbidden | ThreepidAuthFailed | UserDeactivated | ThreepidDenied - | WrongRoomKeysVersion { - .. - } - | Forbidden { - .. - } => StatusCode::FORBIDDEN, + | WrongRoomKeysVersion { .. } + | Forbidden { .. } => StatusCode::FORBIDDEN, // 401 - UnknownToken { - .. - } - | MissingToken - | Unauthorized => StatusCode::UNAUTHORIZED, + | UnknownToken { .. } | MissingToken | Unauthorized => StatusCode::UNAUTHORIZED, // 400 - _ => StatusCode::BAD_REQUEST, + | _ => StatusCode::BAD_REQUEST, } } pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> String { - if let ErrorBody::Standard { - message, - .. - } = &error.body - { + if let ErrorBody::Standard { message, .. } = &error.body { return message.to_string(); } diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs index 735ccee6..c5a1d167 100644 --- a/src/core/info/cargo.rs +++ b/src/core/info/cargo.rs @@ -41,17 +41,22 @@ static FEATURES: OnceLock> = OnceLock::new(); static DEPENDENCIES: OnceLock = OnceLock::new(); #[must_use] -pub fn dependencies_names() -> Vec<&'static str> { dependencies().keys().map(String::as_str).collect() } +pub fn dependencies_names() -> Vec<&'static str> { + dependencies().keys().map(String::as_str).collect() +} pub fn dependencies() -> &'static DepsSet { - DEPENDENCIES - .get_or_init(|| init_dependencies().unwrap_or_else(|e| panic!("Failed to initialize dependencies: {e}"))) + DEPENDENCIES.get_or_init(|| { + init_dependencies().unwrap_or_else(|e| panic!("Failed to initialize dependencies: {e}")) + }) } /// List of all possible features for the project. 
For *enabled* features in /// this build see the companion function in info::rustc. pub fn features() -> &'static Vec { - FEATURES.get_or_init(|| init_features().unwrap_or_else(|e| panic!("Failed initialize features: {e}"))) + FEATURES.get_or_init(|| { + init_features().unwrap_or_else(|e| panic!("Failed initialize features: {e}")) + }) } fn init_features() -> Result> { diff --git a/src/core/info/room_version.rs b/src/core/info/room_version.rs index 59685b5f..40f0cf0a 100644 --- a/src/core/info/room_version.rs +++ b/src/core/info/room_version.rs @@ -34,7 +34,9 @@ impl crate::Server { } #[inline] - pub fn available_room_versions(&self) -> impl Iterator { + pub fn available_room_versions( + &self, + ) -> impl Iterator { available_room_versions() } diff --git a/src/core/log/capture/layer.rs b/src/core/log/capture/layer.rs index 674bc379..381a652f 100644 --- a/src/core/log/capture/layer.rs +++ b/src/core/log/capture/layer.rs @@ -22,11 +22,7 @@ type ScopeNames = ArrayVec<&'static str, 32>; impl Layer { #[inline] - pub fn new(state: &Arc) -> Self { - Self { - state: state.clone(), - } - } + pub fn new(state: &Arc) -> Self { Self { state: state.clone() } } } impl fmt::Debug for Layer { @@ -56,9 +52,7 @@ where S: Subscriber + for<'a> LookupSpan<'a>, { let names = ScopeNames::new(); - let mut visitor = Visitor { - values: Values::new(), - }; + let mut visitor = Visitor { values: Values::new() }; event.record(&mut visitor); let mut closure = capture.closure.lock().expect("exclusive lock"); @@ -83,7 +77,7 @@ where } } - capture.filter.as_ref().map_or(true, |filter| { + capture.filter.as_ref().is_none_or(|filter| { filter(Data { layer, event, @@ -95,7 +89,9 @@ where } impl Visit for Visitor { - fn record_debug(&mut self, f: &Field, v: &dyn fmt::Debug) { self.values.push((f.name(), format!("{v:?}"))); } + fn record_debug(&mut self, f: &Field, v: &dyn fmt::Debug) { + self.values.push((f.name(), format!("{v:?}"))); + } fn record_str(&mut self, f: &Field, v: &str) { 
self.values.push((f.name(), v.to_owned())); } } diff --git a/src/core/log/capture/mod.rs b/src/core/log/capture/mod.rs index c0fcd31e..20f70091 100644 --- a/src/core/log/capture/mod.rs +++ b/src/core/log/capture/mod.rs @@ -41,9 +41,7 @@ impl Capture { #[must_use] pub fn start(self: &Arc) -> Guard { self.state.add(self); - Guard { - capture: self.clone(), - } + Guard { capture: self.clone() } } pub fn stop(self: &Arc) { self.state.del(self); } diff --git a/src/core/log/capture/state.rs b/src/core/log/capture/state.rs index f2c401cd..dad6c8d8 100644 --- a/src/core/log/capture/state.rs +++ b/src/core/log/capture/state.rs @@ -13,11 +13,7 @@ impl Default for State { impl State { #[must_use] - pub fn new() -> Self { - Self { - active: RwLock::new(Vec::new()), - } - } + pub fn new() -> Self { Self { active: RwLock::new(Vec::new()) } } pub(super) fn add(&self, capture: &Arc) { self.active diff --git a/src/core/log/color.rs b/src/core/log/color.rs index 9baa99c2..23905d14 100644 --- a/src/core/log/color.rs +++ b/src/core/log/color.rs @@ -5,11 +5,11 @@ use super::Level; #[must_use] pub fn html(level: &Level) -> (&'static str, &'static str) { match *level { - Level::TRACE => ("#000000", "#A0A0A0"), - Level::DEBUG => ("#000000", "#FFFFFF"), - Level::ERROR => ("#000000", "#FF0000"), - Level::WARN => ("#000000", "#FFFF00"), - Level::INFO => ("#FFFFFF", "#008E00"), + | Level::TRACE => ("#000000", "#A0A0A0"), + | Level::DEBUG => ("#000000", "#FFFFFF"), + | Level::ERROR => ("#000000", "#FF0000"), + | Level::WARN => ("#000000", "#FFFF00"), + | Level::INFO => ("#FFFFFF", "#008E00"), } } @@ -18,10 +18,10 @@ pub fn html(level: &Level) -> (&'static str, &'static str) { #[must_use] pub fn code_tag(level: &Level) -> &'static str { match *level { - Level::TRACE => "#888888", - Level::DEBUG => "#C8C8C8", - Level::ERROR => "#FF0000", - Level::WARN => "#FFFF00", - Level::INFO => "#00FF00", + | Level::TRACE => "#888888", + | Level::DEBUG => "#C8C8C8", + | Level::ERROR => "#FF0000", + | 
Level::WARN => "#FFFF00", + | Level::INFO => "#00FF00", } } diff --git a/src/core/log/fmt.rs b/src/core/log/fmt.rs index 8766eb8d..353d4442 100644 --- a/src/core/log/fmt.rs +++ b/src/core/log/fmt.rs @@ -11,7 +11,8 @@ where let level = level.as_str().to_uppercase(); write!( out, - "{level:>5} {span:^12} {msg}
    " + "{level:>5} {span:^12} \ + {msg}
    " )?; Ok(()) diff --git a/src/core/log/fmt_span.rs b/src/core/log/fmt_span.rs index 5a340d0f..7ba86c4c 100644 --- a/src/core/log/fmt_span.rs +++ b/src/core/log/fmt_span.rs @@ -5,13 +5,13 @@ use crate::Result; #[inline] pub fn from_str(str: &str) -> Result { match str.to_uppercase().as_str() { - "ENTER" => Ok(FmtSpan::ENTER), - "EXIT" => Ok(FmtSpan::EXIT), - "NEW" => Ok(FmtSpan::NEW), - "CLOSE" => Ok(FmtSpan::CLOSE), - "ACTIVE" => Ok(FmtSpan::ACTIVE), - "FULL" => Ok(FmtSpan::FULL), - "NONE" => Ok(FmtSpan::NONE), - _ => Err(FmtSpan::NONE), + | "ENTER" => Ok(FmtSpan::ENTER), + | "EXIT" => Ok(FmtSpan::EXIT), + | "NEW" => Ok(FmtSpan::NEW), + | "CLOSE" => Ok(FmtSpan::CLOSE), + | "ACTIVE" => Ok(FmtSpan::ACTIVE), + | "FULL" => Ok(FmtSpan::FULL), + | "NONE" => Ok(FmtSpan::NONE), + | _ => Err(FmtSpan::NONE), } } diff --git a/src/core/log/suppress.rs b/src/core/log/suppress.rs index b13ee99e..55428d15 100644 --- a/src/core/log/suppress.rs +++ b/src/core/log/suppress.rs @@ -25,10 +25,7 @@ impl Suppress { .reload(&suppress, Some(&[handle])) .expect("log filter reloaded"); - Self { - server: server.clone(), - restore, - } + Self { server: server.clone(), restore } } } diff --git a/src/core/metrics/mod.rs b/src/core/metrics/mod.rs index 3ae139a8..d5f937d7 100644 --- a/src/core/metrics/mod.rs +++ b/src/core/metrics/mod.rs @@ -68,5 +68,7 @@ impl Metrics { pub fn task_root(&self) -> Option<&TaskMonitor> { self.task_monitor.as_ref() } - pub fn runtime_metrics(&self) -> Option<&runtime::RuntimeMetrics> { self.runtime_metrics.as_ref() } + pub fn runtime_metrics(&self) -> Option<&runtime::RuntimeMetrics> { + self.runtime_metrics.as_ref() + } } diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index 80ff0713..db3bb20a 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -35,7 +35,8 @@ impl Builder { { Self { event_type: content.event_type().into(), - content: to_raw_value(content).expect("Builder failed to serialize state event content to RawValue"), + 
content: to_raw_value(content) + .expect("Builder failed to serialize state event content to RawValue"), state_key: Some(state_key), ..Self::default() } @@ -47,7 +48,8 @@ impl Builder { { Self { event_type: content.event_type().into(), - content: to_raw_value(content).expect("Builder failed to serialize timeline event content to RawValue"), + content: to_raw_value(content) + .expect("Builder failed to serialize timeline event content to RawValue"), ..Self::default() } } diff --git a/src/core/pdu/count.rs b/src/core/pdu/count.rs index 85222382..0135cf28 100644 --- a/src/core/pdu/count.rs +++ b/src/core/pdu/count.rs @@ -21,8 +21,8 @@ impl Count { #[must_use] pub fn from_signed(signed: i64) -> Self { match signed { - i64::MIN..=0 => Self::Backfilled(signed), - _ => Self::Normal(signed as u64), + | i64::MIN..=0 => Self::Backfilled(signed), + | _ => Self::Normal(signed as u64), } } @@ -31,8 +31,8 @@ impl Count { pub fn into_unsigned(self) -> u64 { self.debug_assert_valid(); match self { - Self::Normal(i) => i, - Self::Backfilled(i) => i as u64, + | Self::Normal(i) => i, + | Self::Backfilled(i) => i as u64, } } @@ -41,8 +41,8 @@ impl Count { pub fn into_signed(self) -> i64 { self.debug_assert_valid(); match self { - Self::Normal(i) => i as i64, - Self::Backfilled(i) => i, + | Self::Normal(i) => i as i64, + | Self::Backfilled(i) => i, } } @@ -51,27 +51,27 @@ impl Count { pub fn into_normal(self) -> Self { self.debug_assert_valid(); match self { - Self::Normal(i) => Self::Normal(i), - Self::Backfilled(_) => Self::Normal(0), + | Self::Normal(i) => Self::Normal(i), + | Self::Backfilled(_) => Self::Normal(0), } } #[inline] pub fn checked_inc(self, dir: Direction) -> Result { match dir { - Direction::Forward => self.checked_add(1), - Direction::Backward => self.checked_sub(1), + | Direction::Forward => self.checked_add(1), + | Direction::Backward => self.checked_sub(1), } } #[inline] pub fn checked_add(self, add: u64) -> Result { Ok(match self { - Self::Normal(i) => 
Self::Normal( + | Self::Normal(i) => Self::Normal( i.checked_add(add) .ok_or_else(|| err!(Arithmetic("Count::Normal overflow")))?, ), - Self::Backfilled(i) => Self::Backfilled( + | Self::Backfilled(i) => Self::Backfilled( i.checked_add(add as i64) .ok_or_else(|| err!(Arithmetic("Count::Backfilled overflow")))?, ), @@ -81,11 +81,11 @@ impl Count { #[inline] pub fn checked_sub(self, sub: u64) -> Result { Ok(match self { - Self::Normal(i) => Self::Normal( + | Self::Normal(i) => Self::Normal( i.checked_sub(sub) .ok_or_else(|| err!(Arithmetic("Count::Normal underflow")))?, ), - Self::Backfilled(i) => Self::Backfilled( + | Self::Backfilled(i) => Self::Backfilled( i.checked_sub(sub as i64) .ok_or_else(|| err!(Arithmetic("Count::Backfilled underflow")))?, ), @@ -96,8 +96,8 @@ impl Count { #[must_use] pub fn saturating_inc(self, dir: Direction) -> Self { match dir { - Direction::Forward => self.saturating_add(1), - Direction::Backward => self.saturating_sub(1), + | Direction::Forward => self.saturating_add(1), + | Direction::Backward => self.saturating_sub(1), } } @@ -105,8 +105,8 @@ impl Count { #[must_use] pub fn saturating_add(self, add: u64) -> Self { match self { - Self::Normal(i) => Self::Normal(i.saturating_add(add)), - Self::Backfilled(i) => Self::Backfilled(i.saturating_add(add as i64)), + | Self::Normal(i) => Self::Normal(i.saturating_add(add)), + | Self::Backfilled(i) => Self::Backfilled(i.saturating_add(add as i64)), } } @@ -114,8 +114,8 @@ impl Count { #[must_use] pub fn saturating_sub(self, sub: u64) -> Self { match self { - Self::Normal(i) => Self::Normal(i.saturating_sub(sub)), - Self::Backfilled(i) => Self::Backfilled(i.saturating_sub(sub as i64)), + | Self::Normal(i) => Self::Normal(i.saturating_sub(sub)), + | Self::Backfilled(i) => Self::Backfilled(i.saturating_sub(sub as i64)), } } @@ -139,8 +139,8 @@ impl Display for Count { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { self.debug_assert_valid(); match self { - Self::Normal(i) => 
write!(f, "{i}"), - Self::Backfilled(i) => write!(f, "{i}"), + | Self::Normal(i) => write!(f, "{i}"), + | Self::Backfilled(i) => write!(f, "{i}"), } } } diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index 96a1e4ba..fb98d686 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -19,13 +19,19 @@ impl Event for Pdu { fn content(&self) -> &RawJsonValue { &self.content } - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { MilliSecondsSinceUnixEpoch(self.origin_server_ts) } + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) + } fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { self.prev_events.iter() } + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.prev_events.iter() + } - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { self.auth_events.iter() } + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.auth_events.iter() + } fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } } diff --git a/src/core/pdu/event_id.rs b/src/core/pdu/event_id.rs index ae5b85f9..09b33edc 100644 --- a/src/core/pdu/event_id.rs +++ b/src/core/pdu/event_id.rs @@ -8,7 +8,8 @@ use crate::{err, Result}; /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub fn gen_event_id_canonical_json( - pdu: &RawJsonValue, room_version_id: &RoomVersionId, + pdu: &RawJsonValue, + room_version_id: &RoomVersionId, ) -> Result<(OwnedEventId, CanonicalJsonObject)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) .map_err(|e| err!(BadServerResponse(warn!("Error parsing incoming event: {e:?}"))))?; @@ -19,7 +20,10 @@ pub fn gen_event_id_canonical_json( } /// Generates a correct eventId for the incoming pdu. 
-pub fn gen_event_id(value: &CanonicalJsonObject, room_version_id: &RoomVersionId) -> Result { +pub fn gen_event_id( + value: &CanonicalJsonObject, + room_version_id: &RoomVersionId, +) -> Result { let reference_hash = ruma::signatures::reference_hash(value, room_version_id)?; let event_id: OwnedEventId = format!("${reference_hash}").try_into()?; diff --git a/src/core/pdu/filter.rs b/src/core/pdu/filter.rs index c7c7316d..aabf13db 100644 --- a/src/core/pdu/filter.rs +++ b/src/core/pdu/filter.rs @@ -84,7 +84,7 @@ fn matches_url(&self, filter: &RoomEventFilter) -> bool { .is_some_and(Value::is_string); match url_filter { - UrlFilter::EventsWithUrl => url, - UrlFilter::EventsWithoutUrl => !url, + | UrlFilter::EventsWithUrl => url, + | UrlFilter::EventsWithoutUrl => !url, } } diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 2aa60ed1..c90e174e 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -15,7 +15,8 @@ mod unsigned; use std::{cmp::Ordering, sync::Arc}; use ruma::{ - events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedRoomId, OwnedUserId, UInt, + events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedRoomId, + OwnedUserId, UInt, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/core/pdu/raw_id.rs b/src/core/pdu/raw_id.rs index ef8502f6..e1fd2381 100644 --- a/src/core/pdu/raw_id.rs +++ b/src/core/pdu/raw_id.rs @@ -29,10 +29,10 @@ impl RawId { #[must_use] pub fn shortroomid(self) -> [u8; INT_LEN] { match self { - Self::Normal(raw) => raw[0..INT_LEN] + | Self::Normal(raw) => raw[0..INT_LEN] .try_into() .expect("normal raw shortroomid array from slice"), - Self::Backfilled(raw) => raw[0..INT_LEN] + | Self::Backfilled(raw) => raw[0..INT_LEN] .try_into() .expect("backfilled raw shortroomid array from slice"), } @@ -42,10 +42,10 @@ impl RawId { #[must_use] pub fn shorteventid(self) -> [u8; INT_LEN] { match self { - Self::Normal(raw) => 
raw[INT_LEN..INT_LEN * 2] + | Self::Normal(raw) => raw[INT_LEN..INT_LEN * 2] .try_into() .expect("normal raw shorteventid array from slice"), - Self::Backfilled(raw) => raw[INT_LEN * 2..INT_LEN * 3] + | Self::Backfilled(raw) => raw[INT_LEN * 2..INT_LEN * 3] .try_into() .expect("backfilled raw shorteventid array from slice"), } @@ -55,8 +55,8 @@ impl RawId { #[must_use] pub fn as_bytes(&self) -> &[u8] { match self { - Self::Normal(ref raw) => raw, - Self::Backfilled(ref raw) => raw, + | Self::Normal(ref raw) => raw, + | Self::Backfilled(ref raw) => raw, } } } @@ -70,17 +70,17 @@ impl From<&[u8]> for RawId { #[inline] fn from(id: &[u8]) -> Self { match id.len() { - Self::NORMAL_LEN => Self::Normal( + | Self::NORMAL_LEN => Self::Normal( id[0..Self::NORMAL_LEN] .try_into() .expect("normal RawId from [u8]"), ), - Self::BACKFILLED_LEN => Self::Backfilled( + | Self::BACKFILLED_LEN => Self::Backfilled( id[0..Self::BACKFILLED_LEN] .try_into() .expect("backfilled RawId from [u8]"), ), - _ => unimplemented!("unrecognized RawId length"), + | _ => unimplemented!("unrecognized RawId length"), } } } @@ -95,11 +95,11 @@ impl From for RawId { vec.extend(id.shortroomid.to_be_bytes()); id.shorteventid.debug_assert_valid(); match id.shorteventid { - Count::Normal(shorteventid) => { + | Count::Normal(shorteventid) => { vec.extend(shorteventid.to_be_bytes()); Self::Normal(vec.as_ref().try_into().expect("RawVec into RawId::Normal")) }, - Count::Backfilled(shorteventid) => { + | Count::Backfilled(shorteventid) => { vec.extend(0_u64.to_be_bytes()); vec.extend(shorteventid.to_be_bytes()); Self::Backfilled( diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs index 01d9147c..dc97bfa8 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -22,8 +22,8 @@ struct ExtractRedactedBecause { pub fn redact(&mut self, room_version_id: &RoomVersionId, reason: &Self) -> Result { self.unsigned = None; - let mut content = - serde_json::from_str(self.content.get()).map_err(|_| 
Error::bad_database("PDU in db has invalid content."))?; + let mut content = serde_json::from_str(self.content.get()) + .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; redact_content_in_place(&mut content, room_version_id, self.kind.to_string()) .map_err(|e| Error::Redaction(self.sender.server_name().to_owned(), e))?; @@ -75,7 +75,9 @@ pub fn is_redacted(&self) -> bool { #[must_use] pub fn copy_redacts(&self) -> (Option>, Box) { if self.kind == TimelineEventType::RoomRedaction { - if let Ok(mut content) = serde_json::from_str::(self.content.get()) { + if let Ok(mut content) = + serde_json::from_str::(self.content.get()) + { if let Some(redacts) = content.redacts { return (Some(redacts.into()), self.content.clone()); } else if let Some(redacts) = self.redacts.clone() { diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 59457749..8e1045db 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -1,8 +1,8 @@ use ruma::{ events::{ - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, - AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, - AnyTimelineEvent, StateEvent, + room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, + AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, + AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, }, serde::Raw, }; diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index 6f3e4401..fa305d71 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -13,8 +13,8 @@ pub fn remove_transaction_id(&mut self) -> Result { return Ok(()); }; - let mut unsigned: BTreeMap> = - serde_json::from_str(unsigned.get()).map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; + let mut unsigned: BTreeMap> = serde_json::from_str(unsigned.get()) + .map_err(|e| err!(Database("Invalid unsigned in pdu event: 
{e}")))?; unsigned.remove("transaction_id"); self.unsigned = to_raw_value(&unsigned) @@ -97,7 +97,9 @@ where #[implement(Pdu)] #[must_use] -pub fn get_unsigned_as_value(&self) -> JsonValue { self.get_unsigned::().unwrap_or_default() } +pub fn get_unsigned_as_value(&self) -> JsonValue { + self.get_unsigned::().unwrap_or_default() +} #[implement(Pdu)] pub fn get_unsigned(&self) -> Result { diff --git a/src/core/server.rs b/src/core/server.rs index 1e721517..2a70ae4b 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -79,8 +79,9 @@ impl Server { return Err!("Restart already in progress"); } - self.shutdown() - .inspect_err(|_| self.restarting.store(false, Ordering::Release)) + self.shutdown().inspect_err(|_| { + self.restarting.store(false, Ordering::Release); + }) } pub fn shutdown(&self) -> Result<()> { @@ -88,8 +89,9 @@ impl Server { return Err!("Shutdown already in progress"); } - self.signal("SIGTERM") - .inspect_err(|_| self.stopping.store(false, Ordering::Release)) + self.signal("SIGTERM").inspect_err(|_| { + self.stopping.store(false, Ordering::Release); + }) } pub fn signal(&self, sig: &'static str) -> Result<()> { diff --git a/src/core/utils/bool.rs b/src/core/utils/bool.rs index e9f399d4..b4192daf 100644 --- a/src/core/utils/bool.rs +++ b/src/core/utils/bool.rs @@ -66,19 +66,25 @@ impl BoolExt for bool { } #[inline] - fn map_ok_or T>(self, err: E, f: F) -> Result { self.ok_or(err).map(|()| f()) } + fn map_ok_or T>(self, err: E, f: F) -> Result { + self.ok_or(err).map(|()| f()) + } #[inline] fn map_or T>(self, err: T, f: F) -> T { self.then(f).unwrap_or(err) } #[inline] - fn map_or_else T>(self, err: F, f: F) -> T { self.then(f).unwrap_or_else(err) } + fn map_or_else T>(self, err: F, f: F) -> T { + self.then(f).unwrap_or_else(err) + } #[inline] fn ok_or(self, err: E) -> Result<(), E> { self.into_option().ok_or(err) } #[inline] - fn ok_or_else E>(self, err: F) -> Result<(), E> { self.into_option().ok_or_else(err) } + fn ok_or_else E>(self, err: F) -> 
Result<(), E> { + self.into_option().ok_or_else(err) + } #[inline] fn or T>(self, f: F) -> Option { (!self).then(f) } diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index 441ba422..40316440 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -39,7 +39,9 @@ pub fn increment(old: Option<&[u8]>) -> [u8; 8] { /// Parses 8 big-endian bytes into an u64; panic on invalid argument #[inline] #[must_use] -pub fn u64_from_u8(bytes: &[u8]) -> u64 { u64_from_bytes(bytes).expect("must slice at least 8 bytes") } +pub fn u64_from_u8(bytes: &[u8]) -> u64 { + u64_from_bytes(bytes).expect("must slice at least 8 bytes") +} /// Parses the big-endian bytes into an u64. #[inline] diff --git a/src/core/utils/content_disposition.rs b/src/core/utils/content_disposition.rs index 3a264a74..82f11732 100644 --- a/src/core/utils/content_disposition.rs +++ b/src/core/utils/content_disposition.rs @@ -71,13 +71,10 @@ pub fn content_disposition_type(content_type: Option<&str>) -> ContentDispositio /// `sanitize_filename` crate #[tracing::instrument(level = "debug")] pub fn sanitise_filename(filename: &str) -> String { - sanitize_filename::sanitize_with_options( - filename, - sanitize_filename::Options { - truncate: false, - ..Default::default() - }, - ) + sanitize_filename::sanitize_with_options(filename, sanitize_filename::Options { + truncate: false, + ..Default::default() + }) } /// creates the final Content-Disposition based on whether the filename exists @@ -89,11 +86,16 @@ pub fn sanitise_filename(filename: &str) -> String { /// /// else: `Content-Disposition: attachment/inline` pub fn make_content_disposition( - content_disposition: Option<&ContentDisposition>, content_type: Option<&str>, filename: Option<&str>, + content_disposition: Option<&ContentDisposition>, + content_type: Option<&str>, + filename: Option<&str>, ) -> ContentDisposition { ContentDisposition::new(content_disposition_type(content_type)).with_filename( filename - .or_else(|| 
content_disposition.and_then(|content_disposition| content_disposition.filename.as_deref())) + .or_else(|| { + content_disposition + .and_then(|content_disposition| content_disposition.filename.as_deref()) + }) .map(sanitise_filename), ) } @@ -102,8 +104,8 @@ pub fn make_content_disposition( mod tests { #[test] fn string_sanitisation() { - const SAMPLE: &str = - "🏳️‍⚧️this\\r\\n įs \r\\n ä \\r\nstrïng 🥴that\n\r ../../../../../../../may be\r\n malicious🏳️‍⚧️"; + const SAMPLE: &str = "🏳️‍⚧️this\\r\\n įs \r\\n ä \\r\nstrïng 🥴that\n\r \ + ../../../../../../../may be\r\n malicious🏳️‍⚧️"; const SANITISED: &str = "🏳️‍⚧️thisrn įs n ä rstrïng 🥴that ..............may be malicious🏳️‍⚧️"; let options = sanitize_filename::Options { @@ -125,14 +127,12 @@ mod tests { fn empty_sanitisation() { use crate::utils::string::EMPTY; - let result = sanitize_filename::sanitize_with_options( - EMPTY, - sanitize_filename::Options { + let result = + sanitize_filename::sanitize_with_options(EMPTY, sanitize_filename::Options { windows: true, truncate: true, replacement: "", - }, - ); + }); assert_eq!(EMPTY, result); } diff --git a/src/core/utils/debug.rs b/src/core/utils/debug.rs index cb9d7bab..b16ae754 100644 --- a/src/core/utils/debug.rs +++ b/src/core/utils/debug.rs @@ -31,10 +31,8 @@ impl fmt::Debug for TruncatedSlice<'_, T> { /// fn bar(foos: &[&str]); /// ``` pub fn slice_truncated( - slice: &[T], max_len: usize, + slice: &[T], + max_len: usize, ) -> tracing::field::DebugValue> { - tracing::field::debug(TruncatedSlice { - inner: slice, - max_len, - }) + tracing::field::debug(TruncatedSlice { inner: slice, max_len }) } diff --git a/src/core/utils/defer.rs b/src/core/utils/defer.rs index 29199700..60243e97 100644 --- a/src/core/utils/defer.rs +++ b/src/core/utils/defer.rs @@ -9,9 +9,7 @@ macro_rules! 
defer { fn drop(&mut self) { (self.closure)(); } } - let _defer_ = _Defer_ { - closure: || $body, - }; + let _defer_ = _Defer_ { closure: || $body }; }; ($body:expr) => { diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index 977f74d2..19761309 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -14,17 +14,23 @@ pub trait TryExtExt where Self: TryFuture + Send, { - fn is_err(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> + fn is_err( + self, + ) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> where Self: Sized; #[allow(clippy::wrong_self_convention)] - fn is_ok(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> + fn is_ok( + self, + ) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> where Self: Sized; fn map_ok_or( - self, default: U, f: F, + self, + default: U, + f: F, ) -> MapOkOrElse U, impl FnOnce(Self::Error) -> U> where F: FnOnce(Self::Ok) -> U, @@ -32,11 +38,18 @@ where fn ok( self, - ) -> MapOkOrElse Option, impl FnOnce(Self::Error) -> Option> + ) -> MapOkOrElse< + Self, + impl FnOnce(Self::Ok) -> Option, + impl FnOnce(Self::Error) -> Option, + > where Self: Sized; - fn unwrap_or(self, default: Self::Ok) -> UnwrapOrElse Self::Ok> + fn unwrap_or( + self, + default: Self::Ok, + ) -> UnwrapOrElse Self::Ok> where Self: Sized; @@ -51,7 +64,9 @@ where Fut: TryFuture + Send, { #[inline] - fn is_err(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> + fn is_err( + self, + ) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> where Self: Sized, { @@ -59,7 +74,9 @@ where } #[inline] - fn is_ok(self) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> + fn is_ok( + self, + ) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> where Self: Sized, { @@ -68,7 +85,9 @@ where #[inline] fn map_ok_or( - self, default: U, f: F, + self, + default: U, + f: F, ) -> MapOkOrElse U, impl FnOnce(Self::Error) -> U> where F: FnOnce(Self::Ok) -> U, 
@@ -80,7 +99,11 @@ where #[inline] fn ok( self, - ) -> MapOkOrElse Option, impl FnOnce(Self::Error) -> Option> + ) -> MapOkOrElse< + Self, + impl FnOnce(Self::Ok) -> Option, + impl FnOnce(Self::Error) -> Option, + > where Self: Sized, { @@ -88,7 +111,10 @@ where } #[inline] - fn unwrap_or(self, default: Self::Ok) -> UnwrapOrElse Self::Ok> + fn unwrap_or( + self, + default: Self::Ok, + ) -> UnwrapOrElse Self::Ok> where Self: Sized, { diff --git a/src/core/utils/hash/argon.rs b/src/core/utils/hash/argon.rs index 0a1e1e14..18146b47 100644 --- a/src/core/utils/hash/argon.rs +++ b/src/core/utils/hash/argon.rs @@ -1,8 +1,8 @@ use std::sync::OnceLock; use argon2::{ - password_hash, password_hash::SaltString, Algorithm, Argon2, Params, PasswordHash, PasswordHasher, - PasswordVerifier, Version, + password_hash, password_hash::SaltString, Algorithm, Argon2, Params, PasswordHash, + PasswordHasher, PasswordVerifier, Version, }; use crate::{err, Error, Result}; diff --git a/src/core/utils/html.rs b/src/core/utils/html.rs index fe07b2dd..f2b6d861 100644 --- a/src/core/utils/html.rs +++ b/src/core/utils/html.rs @@ -16,12 +16,12 @@ impl fmt::Display for Escape<'_> { let mut last = 0; for (i, ch) in s.char_indices() { let s = match ch { - '>' => ">", - '<' => "<", - '&' => "&", - '\'' => "'", - '"' => """, - _ => continue, + | '>' => ">", + | '<' => "<", + | '&' => "&", + | '\'' => "'", + | '"' => """, + | _ => continue, }; fmt.write_str(&pile_o_bits[last..i])?; fmt.write_str(s)?; diff --git a/src/core/utils/json.rs b/src/core/utils/json.rs index a9adad54..4a3fec8f 100644 --- a/src/core/utils/json.rs +++ b/src/core/utils/json.rs @@ -8,16 +8,24 @@ use crate::Result; /// `CanonicalJsonObject`. /// /// `value` must serialize to an `serde_json::Value::Object`. -pub fn to_canonical_object(value: T) -> Result { +pub fn to_canonical_object( + value: T, +) -> Result { use serde::ser::Error; match serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? 
{ - serde_json::Value::Object(map) => try_from_json_map(map), - _ => Err(CanonicalJsonError::SerDe(serde_json::Error::custom("Value must be an object"))), + | serde_json::Value::Object(map) => try_from_json_map(map), + | _ => + Err(CanonicalJsonError::SerDe(serde_json::Error::custom("Value must be an object"))), } } -pub fn deserialize_from_str<'de, D: serde::de::Deserializer<'de>, T: FromStr, E: fmt::Display>( +pub fn deserialize_from_str< + 'de, + D: serde::de::Deserializer<'de>, + T: FromStr, + E: fmt::Display, +>( deserializer: D, ) -> Result { struct Visitor, E>(std::marker::PhantomData); diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index ccff6400..da2357d5 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -56,7 +56,12 @@ macro_rules! validated { /// Returns false if the exponential backoff has expired based on the inputs #[inline] #[must_use] -pub fn continue_exponential_backoff_secs(min: u64, max: u64, elapsed: Duration, tries: u32) -> bool { +pub fn continue_exponential_backoff_secs( + min: u64, + max: u64, + elapsed: Duration, + tries: u32, +) -> bool { let min = Duration::from_secs(min); let max = Duration::from_secs(max); continue_exponential_backoff(min, max, elapsed, tries) @@ -65,7 +70,12 @@ pub fn continue_exponential_backoff_secs(min: u64, max: u64, elapsed: Duration, /// Returns false if the exponential backoff has expired based on the inputs #[inline] #[must_use] -pub fn continue_exponential_backoff(min: Duration, max: Duration, elapsed: Duration, tries: u32) -> bool { +pub fn continue_exponential_backoff( + min: Duration, + max: Duration, + elapsed: Duration, + tries: u32, +) -> bool { let min = min.saturating_mul(tries).saturating_mul(tries); let min = cmp::min(min, max); elapsed < min diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index ad53bc42..a9b73fb6 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -47,8 +47,8 @@ pub fn exchange(state: &mut T, source: T) -> T { 
std::mem::replace(state, sou macro_rules! extract_variant { ($e:expr, $variant:path) => { match $e { - $variant(value) => Some(value), - _ => None, + | $variant(value) => Some(value), + | _ => None, } }; } diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index c3c51798..152a75d1 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -70,10 +70,9 @@ where impl Drop for Guard { fn drop(&mut self) { if Arc::strong_count(Omg::mutex(&self.val)) <= 2 { - self.map - .lock() - .expect("locked") - .retain(|_, val| !Arc::ptr_eq(val, Omg::mutex(&self.val)) || Arc::strong_count(val) > 2); + self.map.lock().expect("locked").retain(|_, val| { + !Arc::ptr_eq(val, Omg::mutex(&self.val)) || Arc::strong_count(val) > 2 + }); } } } diff --git a/src/core/utils/result.rs b/src/core/utils/result.rs index 6b11ea66..1ad5371b 100644 --- a/src/core/utils/result.rs +++ b/src/core/utils/result.rs @@ -10,9 +10,9 @@ mod unwrap_infallible; mod unwrap_or_err; pub use self::{ - debug_inspect::DebugInspect, filter::Filter, flat_ok::FlatOk, into_is_ok::IntoIsOk, log_debug_err::LogDebugErr, - log_err::LogErr, map_expect::MapExpect, not_found::NotFound, unwrap_infallible::UnwrapInfallible, - unwrap_or_err::UnwrapOrErr, + debug_inspect::DebugInspect, filter::Filter, flat_ok::FlatOk, into_is_ok::IntoIsOk, + log_debug_err::LogDebugErr, log_err::LogErr, map_expect::MapExpect, not_found::NotFound, + unwrap_infallible::UnwrapInfallible, unwrap_or_err::UnwrapOrErr, }; pub type Result = std::result::Result; diff --git a/src/core/utils/result/flat_ok.rs b/src/core/utils/result/flat_ok.rs index e378e5d0..8e7bb968 100644 --- a/src/core/utils/result/flat_ok.rs +++ b/src/core/utils/result/flat_ok.rs @@ -19,7 +19,9 @@ impl FlatOk for Option> { fn flat_ok_or(self, err: Ep) -> Result { self.flat_ok().ok_or(err) } #[inline] - fn flat_ok_or_else Ep>(self, err: F) -> Result { self.flat_ok().ok_or_else(err) } + fn flat_ok_or_else Ep>(self, err: F) -> Result { + 
self.flat_ok().ok_or_else(err) + } } impl FlatOk for Result, E> { @@ -30,5 +32,7 @@ impl FlatOk for Result, E> { fn flat_ok_or(self, err: Ep) -> Result { self.flat_ok().ok_or(err) } #[inline] - fn flat_ok_or_else Ep>(self, err: F) -> Result { self.flat_ok().ok_or_else(err) } + fn flat_ok_or_else Ep>(self, err: F) -> Result { + self.flat_ok().ok_or_else(err) + } } diff --git a/src/core/utils/result/log_err.rs b/src/core/utils/result/log_err.rs index 374a5e59..a1ce891f 100644 --- a/src/core/utils/result/log_err.rs +++ b/src/core/utils/result/log_err.rs @@ -20,5 +20,7 @@ pub trait LogErr { impl LogErr for Result { #[inline] - fn err_log(self, level: Level) -> Self { self.inspect_err(|error| error::inspect_log_level(&error, level)) } + fn err_log(self, level: Level) -> Self { + self.inspect_err(|error| error::inspect_log_level(&error, level)) + } } diff --git a/src/core/utils/set.rs b/src/core/utils/set.rs index ddcf05ff..8eac7157 100644 --- a/src/core/utils/set.rs +++ b/src/core/utils/set.rs @@ -32,7 +32,9 @@ where /// Intersection of sets /// /// Outputs the set of elements common to all input sets. Inputs must be sorted. 
-pub fn intersection_sorted(mut input: Iters) -> impl Iterator + Send +pub fn intersection_sorted( + mut input: Iters, +) -> impl Iterator + Send where Iters: Iterator + Clone + Send, Iter: Iterator + Send, diff --git a/src/core/utils/stream/expect.rs b/src/core/utils/stream/expect.rs index 68ac24ce..3509bb83 100644 --- a/src/core/utils/stream/expect.rs +++ b/src/core/utils/stream/expect.rs @@ -14,9 +14,13 @@ where Item: 'a, { #[inline] - fn expect_ok(self: T) -> impl Stream + Send + 'a { self.map_expect("stream expectation failure") } + fn expect_ok(self: T) -> impl Stream + Send + 'a { + self.map_expect("stream expectation failure") + } //TODO: move to impl MapExpect #[inline] - fn map_expect(self, msg: &'a str) -> impl Stream + Send + 'a { self.map(|res| res.expect(msg)) } + fn map_expect(self, msg: &'a str) -> impl Stream + Send + 'a { + self.map(|res| res.expect(msg)) + } } diff --git a/src/core/utils/stream/ignore.rs b/src/core/utils/stream/ignore.rs index 4e426557..9baa00f3 100644 --- a/src/core/utils/stream/ignore.rs +++ b/src/core/utils/stream/ignore.rs @@ -23,8 +23,12 @@ where #[cfg(not(debug_assertions))] #[inline] - fn ignore_err(self: T) -> impl Stream + Send + 'a { self.filter_map(|res| ready(res.ok())) } + fn ignore_err(self: T) -> impl Stream + Send + 'a { + self.filter_map(|res| ready(res.ok())) + } #[inline] - fn ignore_ok(self: T) -> impl Stream + Send + 'a { self.filter_map(|res| ready(res.err())) } + fn ignore_ok(self: T) -> impl Stream + Send + 'a { + self.filter_map(|res| ready(res.err())) + } } diff --git a/src/core/utils/stream/iter_stream.rs b/src/core/utils/stream/iter_stream.rs index 2face4b0..9077deac 100644 --- a/src/core/utils/stream/iter_stream.rs +++ b/src/core/utils/stream/iter_stream.rs @@ -13,7 +13,11 @@ pub trait IterStream { /// Convert an Iterator into a TryStream fn try_stream( self, - ) -> impl TryStream::Item, Error = Error, Item = Result<::Item, Error>> + Send; + ) -> impl TryStream< + Ok = ::Item, + Error = Error, + Item 
= Result<::Item, Error>, + > + Send; } impl IterStream for I @@ -27,8 +31,11 @@ where #[inline] fn try_stream( self, - ) -> impl TryStream::Item, Error = Error, Item = Result<::Item, Error>> + Send - { + ) -> impl TryStream< + Ok = ::Item, + Error = Error, + Item = Result<::Item, Error>, + > + Send { self.stream().map(Ok) } } diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index 9bba589e..d93187e9 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -3,7 +3,9 @@ use futures::{ future::{ready, Ready}, - stream::{All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile}, + stream::{ + All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile, + }, }; /// Synchronous combinators to augment futures::StreamExt. Most Stream @@ -24,19 +26,32 @@ where where F: Fn(Item) -> bool; - fn ready_filter<'a, F>(self, f: F) -> Filter, impl FnMut(&Item) -> Ready + 'a> + fn ready_filter<'a, F>( + self, + f: F, + ) -> Filter, impl FnMut(&Item) -> Ready + 'a> where F: Fn(&Item) -> bool + 'a; - fn ready_filter_map(self, f: F) -> FilterMap>, impl FnMut(Item) -> Ready>> + fn ready_filter_map( + self, + f: F, + ) -> FilterMap>, impl FnMut(Item) -> Ready>> where F: Fn(Item) -> Option; - fn ready_fold(self, init: T, f: F) -> Fold, T, impl FnMut(T, Item) -> Ready> + fn ready_fold( + self, + init: T, + f: F, + ) -> Fold, T, impl FnMut(T, Item) -> Ready> where F: Fn(T, Item) -> T; - fn ready_fold_default(self, f: F) -> Fold, T, impl FnMut(T, Item) -> Ready> + fn ready_fold_default( + self, + f: F, + ) -> Fold, T, impl FnMut(T, Item) -> Ready> where F: Fn(T, Item) -> T, T: Default; @@ -45,23 +60,33 @@ where where F: FnMut(Item); - fn ready_take_while<'a, F>(self, f: F) -> TakeWhile, impl FnMut(&Item) -> Ready + 'a> + fn ready_take_while<'a, F>( + self, + f: F, + ) -> TakeWhile, impl FnMut(&Item) -> Ready + 'a> where F: Fn(&Item) -> bool + 'a; fn ready_scan( - self, init: T, 
f: F, + self, + init: T, + f: F, ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> where F: Fn(&mut T, Item) -> Option; fn ready_scan_each( - self, init: T, f: F, + self, + init: T, + f: F, ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> where F: Fn(&mut T, &Item); - fn ready_skip_while<'a, F>(self, f: F) -> SkipWhile, impl FnMut(&Item) -> Ready + 'a> + fn ready_skip_while<'a, F>( + self, + f: F, + ) -> SkipWhile, impl FnMut(&Item) -> Ready + 'a> where F: Fn(&Item) -> bool + 'a; } @@ -87,7 +112,10 @@ where } #[inline] - fn ready_filter<'a, F>(self, f: F) -> Filter, impl FnMut(&Item) -> Ready + 'a> + fn ready_filter<'a, F>( + self, + f: F, + ) -> Filter, impl FnMut(&Item) -> Ready + 'a> where F: Fn(&Item) -> bool + 'a, { @@ -95,7 +123,10 @@ where } #[inline] - fn ready_filter_map(self, f: F) -> FilterMap>, impl FnMut(Item) -> Ready>> + fn ready_filter_map( + self, + f: F, + ) -> FilterMap>, impl FnMut(Item) -> Ready>> where F: Fn(Item) -> Option, { @@ -103,7 +134,11 @@ where } #[inline] - fn ready_fold(self, init: T, f: F) -> Fold, T, impl FnMut(T, Item) -> Ready> + fn ready_fold( + self, + init: T, + f: F, + ) -> Fold, T, impl FnMut(T, Item) -> Ready> where F: Fn(T, Item) -> T, { @@ -111,7 +146,10 @@ where } #[inline] - fn ready_fold_default(self, f: F) -> Fold, T, impl FnMut(T, Item) -> Ready> + fn ready_fold_default( + self, + f: F, + ) -> Fold, T, impl FnMut(T, Item) -> Ready> where F: Fn(T, Item) -> T, T: Default, @@ -121,7 +159,10 @@ where #[inline] #[allow(clippy::unit_arg)] - fn ready_for_each(self, mut f: F) -> ForEach, impl FnMut(Item) -> Ready<()>> + fn ready_for_each( + self, + mut f: F, + ) -> ForEach, impl FnMut(Item) -> Ready<()>> where F: FnMut(Item), { @@ -129,7 +170,10 @@ where } #[inline] - fn ready_take_while<'a, F>(self, f: F) -> TakeWhile, impl FnMut(&Item) -> Ready + 'a> + fn ready_take_while<'a, F>( + self, + f: F, + ) -> TakeWhile, impl FnMut(&Item) -> Ready + 'a> where F: Fn(&Item) -> bool + 'a, { @@ -138,7 +182,9 @@ where #[inline] fn 
ready_scan( - self, init: T, f: F, + self, + init: T, + f: F, ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> where F: Fn(&mut T, Item) -> Option, @@ -148,7 +194,9 @@ where #[inline] fn ready_scan_each( - self, init: T, f: F, + self, + init: T, + f: F, ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> where F: Fn(&mut T, &Item), @@ -160,7 +208,10 @@ where } #[inline] - fn ready_skip_while<'a, F>(self, f: F) -> SkipWhile, impl FnMut(&Item) -> Ready + 'a> + fn ready_skip_while<'a, F>( + self, + f: F, + ) -> SkipWhile, impl FnMut(&Item) -> Ready + 'a> where F: Fn(&Item) -> bool + 'a, { diff --git a/src/core/utils/stream/tools.rs b/src/core/utils/stream/tools.rs index b5b036cc..7b24642e 100644 --- a/src/core/utils/stream/tools.rs +++ b/src/core/utils/stream/tools.rs @@ -24,12 +24,17 @@ where F: Fn(Item) -> K + Send, K: Eq + Hash + Send; - fn counts_by_with_cap(self, f: F) -> impl Future> + Send + fn counts_by_with_cap( + self, + f: F, + ) -> impl Future> + Send where F: Fn(Item) -> K + Send, K: Eq + Hash + Send; - fn counts_with_cap(self) -> impl Future> + Send + fn counts_with_cap( + self, + ) -> impl Future> + Send where ::Item: Eq + Hash; @@ -63,7 +68,10 @@ where } #[inline] - fn counts_by_with_cap(self, f: F) -> impl Future> + Send + fn counts_by_with_cap( + self, + f: F, + ) -> impl Future> + Send where F: Fn(Item) -> K + Send, K: Eq + Hash + Send, @@ -72,7 +80,9 @@ where } #[inline] - fn counts_with_cap(self) -> impl Future> + Send + fn counts_with_cap( + self, + ) -> impl Future> + Send where ::Item: Eq + Hash, { diff --git a/src/core/utils/stream/try_broadband.rs b/src/core/utils/stream/try_broadband.rs index 59c488e0..d1213174 100644 --- a/src/core/utils/stream/try_broadband.rs +++ b/src/core/utils/stream/try_broadband.rs @@ -12,13 +12,20 @@ pub trait TryBroadbandExt where Self: TryStream> + Send + Sized, { - fn broadn_and_then(self, n: N, f: F) -> impl TryStream> + Send + fn broadn_and_then( + self, + n: N, + f: F, + ) -> impl TryStream> + Send where N: Into>, 
F: Fn(Self::Ok) -> Fut + Send + Sync, Fut: TryFuture> + Send; - fn broad_and_then(self, f: F) -> impl TryStream> + Send + fn broad_and_then( + self, + f: F, + ) -> impl TryStream> + Send where F: Fn(Self::Ok) -> Fut + Send + Sync, Fut: TryFuture> + Send, @@ -31,7 +38,11 @@ impl TryBroadbandExt for S where S: TryStream> + Send + Sized, { - fn broadn_and_then(self, n: N, f: F) -> impl TryStream> + Send + fn broadn_and_then( + self, + n: N, + f: F, + ) -> impl TryStream> + Send where N: Into>, F: Fn(Self::Ok) -> Fut + Send + Sync, diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index b0a2b6c5..d8da04ec 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -16,31 +16,43 @@ where S: TryStream> + Send + ?Sized, Self: TryStream + Send + Sized, { - fn ready_and_then(self, f: F) -> AndThen>, impl FnMut(S::Ok) -> Ready>> + fn ready_and_then( + self, + f: F, + ) -> AndThen>, impl FnMut(S::Ok) -> Ready>> where F: Fn(S::Ok) -> Result; fn ready_try_filter_map( - self, f: F, - ) -> TryFilterMap, E>>, impl FnMut(S::Ok) -> Ready, E>>> + self, + f: F, + ) -> TryFilterMap< + Self, + Ready, E>>, + impl FnMut(S::Ok) -> Ready, E>>, + > where F: Fn(S::Ok) -> Result, E>; fn ready_try_fold( - self, init: U, f: F, + self, + init: U, + f: F, ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> where F: Fn(U, S::Ok) -> Result; fn ready_try_fold_default( - self, f: F, + self, + f: F, ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> where F: Fn(U, S::Ok) -> Result, U: Default; fn ready_try_for_each( - self, f: F, + self, + f: F, ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> where F: FnMut(S::Ok) -> Result<(), E>; @@ -52,7 +64,10 @@ where Self: TryStream + Send + Sized, { #[inline] - fn ready_and_then(self, f: F) -> AndThen>, impl FnMut(S::Ok) -> Ready>> + fn ready_and_then( + self, + f: F, + ) -> AndThen>, impl FnMut(S::Ok) -> Ready>> where F: Fn(S::Ok) -> Result, { @@ -60,8 +75,13 @@ where } fn ready_try_filter_map( - 
self, f: F, - ) -> TryFilterMap, E>>, impl FnMut(S::Ok) -> Ready, E>>> + self, + f: F, + ) -> TryFilterMap< + Self, + Ready, E>>, + impl FnMut(S::Ok) -> Ready, E>>, + > where F: Fn(S::Ok) -> Result, E>, { @@ -70,7 +90,9 @@ where #[inline] fn ready_try_fold( - self, init: U, f: F, + self, + init: U, + f: F, ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> where F: Fn(U, S::Ok) -> Result, @@ -80,7 +102,8 @@ where #[inline] fn ready_try_fold_default( - self, f: F, + self, + f: F, ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> where F: Fn(U, S::Ok) -> Result, @@ -91,7 +114,8 @@ where #[inline] fn ready_try_for_each( - self, mut f: F, + self, + mut f: F, ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> where F: FnMut(S::Ok) -> Result<(), E>, diff --git a/src/core/utils/string/between.rs b/src/core/utils/string/between.rs index 209a9dab..05c137b4 100644 --- a/src/core/utils/string/between.rs +++ b/src/core/utils/string/between.rs @@ -15,7 +15,9 @@ pub trait Between<'a> { impl<'a> Between<'a> for &'a str { #[inline] - fn between_infallible(&self, delim: Delim<'_>) -> &'a str { self.between(delim).unwrap_or(self) } + fn between_infallible(&self, delim: Delim<'_>) -> &'a str { + self.between(delim).unwrap_or(self) + } #[inline] fn between(&self, delim: Delim<'_>) -> Option<&'a str> { diff --git a/src/core/utils/string/split.rs b/src/core/utils/string/split.rs index 96de28df..e643f51b 100644 --- a/src/core/utils/string/split.rs +++ b/src/core/utils/string/split.rs @@ -15,8 +15,12 @@ pub trait SplitInfallible<'a> { impl<'a> SplitInfallible<'a> for &'a str { #[inline] - fn rsplit_once_infallible(&self, delim: &str) -> Pair<'a> { self.rsplit_once(delim).unwrap_or((self, EMPTY)) } + fn rsplit_once_infallible(&self, delim: &str) -> Pair<'a> { + self.rsplit_once(delim).unwrap_or((self, EMPTY)) + } #[inline] - fn split_once_infallible(&self, delim: &str) -> Pair<'a> { self.split_once(delim).unwrap_or((self, EMPTY)) } + fn split_once_infallible(&self, delim: &str) -> Pair<'a> { + 
self.split_once(delim).unwrap_or((self, EMPTY)) + } } diff --git a/src/core/utils/string/unquote.rs b/src/core/utils/string/unquote.rs index eeded610..ea7ddbf9 100644 --- a/src/core/utils/string/unquote.rs +++ b/src/core/utils/string/unquote.rs @@ -26,7 +26,9 @@ impl<'a> Unquote<'a> for &'a str { } #[inline] - fn unquote(&self) -> Option<&'a str> { self.strip_prefix(QUOTE).and_then(|s| s.strip_suffix(QUOTE)) } + fn unquote(&self) -> Option<&'a str> { + self.strip_prefix(QUOTE).and_then(|s| s.strip_suffix(QUOTE)) + } #[inline] fn is_quoted(&self) -> bool { self.starts_with(QUOTE) && self.ends_with(QUOTE) } diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index f3ff08b6..05ef12ca 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -45,8 +45,8 @@ pub unsafe fn current_exe() -> Result { let exe = std::env::current_exe()?; match exe.to_str() { - None => Ok(exe), - Some(str) => Ok(str + | None => Ok(exe), + | Some(str) => Ok(str .strip_suffix(" (deleted)") .map(PathBuf::from) .unwrap_or(exe)), @@ -58,5 +58,6 @@ pub unsafe fn current_exe() -> Result { /// accurate on all platforms; defaults to false. #[must_use] pub fn current_exe_deleted() -> bool { - std::env::current_exe().is_ok_and(|exe| exe.to_str().is_some_and(|exe| exe.ends_with(" (deleted)"))) + std::env::current_exe() + .is_ok_and(|exe| exe.to_str().is_some_and(|exe| exe.ends_with(" (deleted)"))) } diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index f4f78b02..5c870730 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -1,4 +1,5 @@ #![cfg(test)] +#![allow(clippy::disallowed_methods)] use crate::utils; diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index f96a27d0..d65eb2d4 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -13,7 +13,9 @@ pub fn now_millis() -> u64 { } #[inline] -pub fn parse_timepoint_ago(ago: &str) -> Result { timepoint_ago(parse_duration(ago)?) 
} +pub fn parse_timepoint_ago(ago: &str) -> Result { + timepoint_ago(parse_duration(ago)?) +} #[inline] pub fn timepoint_ago(duration: Duration) -> Result { @@ -61,13 +63,13 @@ pub fn pretty(d: Duration) -> String { let gen64 = |w, f, u| fmt(w, (f * 100.0) as u32, u); let gen128 = |w, f, u| gen64(u64::try_from(w).expect("u128 to u64"), f, u); match whole_and_frac(d) { - (Days(whole), frac) => gen64(whole, frac, "days"), - (Hours(whole), frac) => gen64(whole, frac, "hours"), - (Mins(whole), frac) => gen64(whole, frac, "minutes"), - (Secs(whole), frac) => gen64(whole, frac, "seconds"), - (Millis(whole), frac) => gen128(whole, frac, "milliseconds"), - (Micros(whole), frac) => gen128(whole, frac, "microseconds"), - (Nanos(whole), frac) => gen128(whole, frac, "nanoseconds"), + | (Days(whole), frac) => gen64(whole, frac, "days"), + | (Hours(whole), frac) => gen64(whole, frac, "hours"), + | (Mins(whole), frac) => gen64(whole, frac, "minutes"), + | (Secs(whole), frac) => gen64(whole, frac, "seconds"), + | (Millis(whole), frac) => gen128(whole, frac, "milliseconds"), + | (Micros(whole), frac) => gen128(whole, frac, "microseconds"), + | (Nanos(whole), frac) => gen128(whole, frac, "nanoseconds"), } } @@ -80,18 +82,15 @@ pub fn whole_and_frac(d: Duration) -> (Unit, f64) { use Unit::*; let whole = whole_unit(d); - ( - whole, - match whole { - Days(_) => (d.as_secs() % 86_400) as f64 / 86_400.0, - Hours(_) => (d.as_secs() % 3_600) as f64 / 3_600.0, - Mins(_) => (d.as_secs() % 60) as f64 / 60.0, - Secs(_) => f64::from(d.subsec_millis()) / 1000.0, - Millis(_) => f64::from(d.subsec_micros()) / 1000.0, - Micros(_) => f64::from(d.subsec_nanos()) / 1000.0, - Nanos(_) => 0.0, - }, - ) + (whole, match whole { + | Days(_) => (d.as_secs() % 86_400) as f64 / 86_400.0, + | Hours(_) => (d.as_secs() % 3_600) as f64 / 3_600.0, + | Mins(_) => (d.as_secs() % 60) as f64 / 60.0, + | Secs(_) => f64::from(d.subsec_millis()) / 1000.0, + | Millis(_) => f64::from(d.subsec_micros()) / 1000.0, + | 
Micros(_) => f64::from(d.subsec_nanos()) / 1000.0, + | Nanos(_) => 0.0, + }) } /// Return the largest Unit which represents the duration. The value is @@ -101,18 +100,18 @@ pub fn whole_unit(d: Duration) -> Unit { use Unit::*; match d.as_secs() { - 86_400.. => Days(d.as_secs() / 86_400), - 3_600..=86_399 => Hours(d.as_secs() / 3_600), - 60..=3_599 => Mins(d.as_secs() / 60), + | 86_400.. => Days(d.as_secs() / 86_400), + | 3_600..=86_399 => Hours(d.as_secs() / 3_600), + | 60..=3_599 => Mins(d.as_secs() / 60), - _ => match d.as_micros() { - 1_000_000.. => Secs(d.as_secs()), - 1_000..=999_999 => Millis(d.subsec_millis().into()), + | _ => match d.as_micros() { + | 1_000_000.. => Secs(d.as_secs()), + | 1_000..=999_999 => Millis(d.subsec_millis().into()), - _ => match d.as_nanos() { - 1_000.. => Micros(d.subsec_micros().into()), + | _ => match d.as_nanos() { + | 1_000.. => Micros(d.subsec_micros().into()), - _ => Nanos(d.subsec_nanos().into()), + | _ => Nanos(d.subsec_nanos().into()), }, }, } diff --git a/src/database/cork.rs b/src/database/cork.rs index 5fe5fd7a..11b6efd7 100644 --- a/src/database/cork.rs +++ b/src/database/cork.rs @@ -26,11 +26,7 @@ impl Cork { #[inline] pub(super) fn new(db: &Arc, flush: bool, sync: bool) -> Self { db.cork(); - Self { - db: db.clone(), - flush, - sync, - } + Self { db: db.clone(), flush, sync } } } diff --git a/src/database/database.rs b/src/database/database.rs index bd8dcb1a..83d2c201 100644 --- a/src/database/database.rs +++ b/src/database/database.rs @@ -17,10 +17,7 @@ impl Database { /// Load an existing database or create a new one. pub async fn open(server: &Arc) -> Result> { let db = Engine::open(server).await?; - Ok(Arc::new(Self { - db: db.clone(), - maps: maps::open(&db)?, - })) + Ok(Arc::new(Self { db: db.clone(), maps: maps::open(&db)? 
})) } #[inline] @@ -31,7 +28,9 @@ impl Database { } #[inline] - pub fn iter(&self) -> impl Iterator + Send + '_ { self.maps.iter() } + pub fn iter(&self) -> impl Iterator + Send + '_ { + self.maps.iter() + } #[inline] pub fn keys(&self) -> impl Iterator + Send + '_ { self.maps.keys() } diff --git a/src/database/de.rs b/src/database/de.rs index ac8c021f..f648b89a 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -13,11 +13,7 @@ pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result where T: Deserialize<'a>, { - let mut deserializer = Deserializer { - buf, - pos: 0, - seq: false, - }; + let mut deserializer = Deserializer { buf, pos: 0, seq: false }; T::deserialize(&mut deserializer).debug_inspect(|_| { deserializer @@ -169,7 +165,12 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_seq(self) } - fn deserialize_tuple_struct(self, _name: &'static str, _len: usize, visitor: V) -> Result + fn deserialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + visitor: V, + ) -> Result where V: Visitor<'de>, { @@ -186,7 +187,12 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { d.deserialize_map(visitor).map_err(Into::into) } - fn deserialize_struct(self, name: &'static str, fields: &'static [&'static str], visitor: V) -> Result + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result where V: Visitor<'de>, { @@ -201,9 +207,9 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { V: Visitor<'de>, { match name { - "Ignore" => self.record_ignore(), - "IgnoreAll" => self.record_ignore_all(), - _ => unhandled!("Unrecognized deserialization Directive {name:?}"), + | "Ignore" => self.record_ignore(), + | "IgnoreAll" => self.record_ignore_all(), + | _ => unhandled!("Unrecognized deserialization Directive {name:?}"), }; visitor.visit_unit() @@ -214,13 +220,16 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut 
Deserializer<'de> { V: Visitor<'de>, { match name { - "$serde_json::private::RawValue" => visitor.visit_map(self), - _ => visitor.visit_newtype_struct(self), + | "$serde_json::private::RawValue" => visitor.visit_map(self), + | _ => visitor.visit_newtype_struct(self), } } fn deserialize_enum( - self, _name: &'static str, _variants: &'static [&'static str], _visitor: V, + self, + _name: &'static str, + _variants: &'static [&'static str], + _visitor: V, ) -> Result where V: Visitor<'de>, @@ -260,7 +269,10 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } fn deserialize_u8>(self, _visitor: V) -> Result { - unhandled!("deserialize u8 not implemented; try dereferencing the Handle for [u8] access instead") + unhandled!( + "deserialize u8 not implemented; try dereferencing the Handle for [u8] access \ + instead" + ) } fn deserialize_u16>(self, _visitor: V) -> Result { @@ -338,8 +350,8 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { ); match self.record_peek_byte() { - Some(b'{') => self.deserialize_map(visitor), - _ => self.deserialize_str(visitor), + | Some(b'{') => self.deserialize_map(visitor), + | _ => self.deserialize_str(visitor), } } } diff --git a/src/database/engine.rs b/src/database/engine.rs index 72fe11e6..63a6087d 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -6,12 +6,14 @@ use std::{ sync::{atomic::AtomicU32, Arc, Mutex, RwLock}, }; -use conduwuit::{debug, error, info, utils::time::rfc2822_from_seconds, warn, Err, Result, Server}; +use conduwuit::{ + debug, error, info, utils::time::rfc2822_from_seconds, warn, Err, Result, Server, +}; use rocksdb::{ backup::{BackupEngine, BackupEngineOptions}, perf::get_memory_usage_stats, - AsColumnFamilyRef, BoundColumnFamily, Cache, ColumnFamilyDescriptor, DBCommon, DBWithThreadMode, Env, LogLevel, - MultiThreaded, Options, + AsColumnFamilyRef, BoundColumnFamily, Cache, ColumnFamilyDescriptor, DBCommon, + DBWithThreadMode, Env, LogLevel, 
MultiThreaded, Options, }; use crate::{ @@ -169,11 +171,13 @@ impl Engine { pub fn memory_usage(&self) -> Result { let mut res = String::new(); - let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&self.row_cache])).or_else(or_else)?; + let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&self.row_cache])) + .or_else(or_else)?; let mibs = |input| f64::from(u32::try_from(input / 1024).unwrap_or(0)) / 1024.0; writeln!( res, - "Memory buffers: {:.2} MiB\nPending write: {:.2} MiB\nTable readers: {:.2} MiB\nRow cache: {:.2} MiB", + "Memory buffers: {:.2} MiB\nPending write: {:.2} MiB\nTable readers: {:.2} MiB\nRow \ + cache: {:.2} MiB", mibs(stats.mem_table_total), mibs(stats.mem_table_unflushed), mibs(stats.mem_table_readers_total), @@ -202,7 +206,8 @@ impl Engine { return Ok(()); } - let options = BackupEngineOptions::new(path.expect("valid database backup path")).map_err(map_err)?; + let options = BackupEngineOptions::new(path.expect("valid database backup path")) + .map_err(map_err)?; let mut engine = BackupEngine::open(&options, &self.env).map_err(map_err)?; if config.database_backups_to_keep > 0 { let flush = !self.is_read_only(); @@ -232,13 +237,14 @@ impl Engine { let config = &self.server.config; let path = config.database_backup_path.as_ref(); if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok( - "Configure database_backup_path to enable backups, or the path specified is not valid".to_owned(), - ); + return Ok("Configure database_backup_path to enable backups, or the path \ + specified is not valid" + .to_owned()); } let mut res = String::new(); - let options = BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?; + let options = BackupEngineOptions::new(path.expect("valid database backup path")) + .or_else(or_else)?; let engine = BackupEngine::open(&options, &self.env).or_else(or_else)?; for info in engine.get_backup_info() { writeln!( @@ -256,8 +262,8 @@ impl Engine { 
pub fn file_list(&self) -> Result { match self.db.live_files() { - Err(e) => Ok(String::from(e)), - Ok(files) => { + | Err(e) => Ok(String::from(e)), + | Ok(files) => { let mut res = String::new(); writeln!(res, "| lev | sst | keys | dels | size | column |")?; writeln!(res, "| ---: | :--- | ---: | ---: | ---: | :--- |")?; @@ -265,7 +271,12 @@ impl Engine { writeln!( res, "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", - file.level, file.name, file.num_entries, file.num_deletions, file.size, file.column_family_name, + file.level, + file.name, + file.num_entries, + file.num_deletions, + file.size, + file.column_family_name, )?; } @@ -277,7 +288,11 @@ impl Engine { /// Query for database property by null-terminated name which is expected to /// have a result with an integer representation. This is intended for /// low-overhead programmatic use. - pub(crate) fn property_integer(&self, cf: &impl AsColumnFamilyRef, name: &CStr) -> Result { + pub(crate) fn property_integer( + &self, + cf: &impl AsColumnFamilyRef, + name: &CStr, + ) -> Result { result(self.db.property_int_value_cf(cf, name)) .and_then(|val| val.map_or_else(|| Err!("Property {name:?} not found."), Ok)) } @@ -300,8 +315,8 @@ impl Engine { pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result<()> { warn!("Starting database repair. 
This may take a long time..."); match Db::repair(db_opts, path) { - Ok(()) => info!("Database repair successful."), - Err(e) => return Err!("Repair failed: {e:?}"), + | Ok(()) => info!("Database repair successful."), + | Err(e) => return Err!("Repair failed: {e:?}"), } Ok(()) @@ -320,10 +335,10 @@ pub(crate) fn handle_log(level: LogLevel, msg: &str) { } match level { - LogLevel::Header | LogLevel::Debug => debug!("{msg}"), - LogLevel::Error | LogLevel::Fatal => error!("{msg}"), - LogLevel::Info => debug!("{msg}"), - LogLevel::Warn => warn!("{msg}"), + | LogLevel::Header | LogLevel::Debug => debug!("{msg}"), + | LogLevel::Error | LogLevel::Fatal => error!("{msg}"), + | LogLevel::Info => debug!("{msg}"), + | LogLevel::Warn => warn!("{msg}"), }; } diff --git a/src/database/handle.rs b/src/database/handle.rs index b4d34b85..43b57839 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -11,11 +11,7 @@ pub struct Handle<'a> { } impl<'a> From> for Handle<'a> { - fn from(val: DBPinnableSlice<'a>) -> Self { - Self { - val, - } - } + fn from(val: DBPinnableSlice<'a>) -> Self { Self { val } } } impl Debug for Handle<'_> { diff --git a/src/database/map.rs b/src/database/map.rs index 09721b2a..af22a43c 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -56,7 +56,10 @@ impl Map { } #[inline] - pub fn watch_prefix<'a, K>(&'a self, prefix: &K) -> Pin + Send + 'a>> + pub fn watch_prefix<'a, K>( + &'a self, + prefix: &K, + ) -> Pin + Send + 'a>> where K: AsRef<[u8]> + ?Sized + Debug, { @@ -64,7 +67,9 @@ impl Map { } #[inline] - pub fn property_integer(&self, name: &CStr) -> Result { self.db.property_integer(&self.cf(), name) } + pub fn property_integer(&self, name: &CStr) -> Result { + self.db.property_integer(&self.cf(), name) + } #[inline] pub fn property(&self, name: &str) -> Result { self.db.property(&self.cf(), name) } @@ -76,7 +81,9 @@ impl Map { } impl Debug for Map { - fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result { write!(out, "Map 
{{name: {0}}}", self.name) } + fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(out, "Map {{name: {0}}}", self.name) + } } impl Display for Map { diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index aa3ea676..424f8970 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -29,7 +29,10 @@ where /// - harder errors will panic #[inline] #[implement(super::Map)] -pub fn acontains(self: &Arc, key: &K) -> impl Future + Send + '_ +pub fn acontains( + self: &Arc, + key: &K, +) -> impl Future + Send + '_ where K: Serialize + ?Sized + Debug, { @@ -42,7 +45,11 @@ where /// - harder errors will panic #[implement(super::Map)] #[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] -pub fn bcontains(self: &Arc, key: &K, buf: &mut B) -> impl Future + Send + '_ +pub fn bcontains( + self: &Arc, + key: &K, + buf: &mut B, +) -> impl Future + Send + '_ where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, diff --git a/src/database/map/count.rs b/src/database/map/count.rs index 7b632bb3..b9b34613 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -26,7 +26,10 @@ where /// - From is a raw #[implement(super::Map)] #[inline] -pub fn raw_count_from<'a, P>(self: &'a Arc, from: &'a P) -> impl Future + Send + 'a +pub fn raw_count_from<'a, P>( + self: &'a Arc, + from: &'a P, +) -> impl Future + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { @@ -38,7 +41,10 @@ where /// - Prefix is structured key #[implement(super::Map)] #[inline] -pub fn count_prefix<'a, P>(self: &'a Arc, prefix: &P) -> impl Future + Send + 'a +pub fn count_prefix<'a, P>( + self: &'a Arc, + prefix: &P, +) -> impl Future + Send + 'a where P: Serialize + ?Sized + Debug + 'a, { @@ -50,7 +56,10 @@ where /// - Prefix is raw #[implement(super::Map)] #[inline] -pub fn raw_count_prefix<'a, P>(self: &'a Arc, prefix: &'a P) -> impl Future + Send + 'a +pub fn raw_count_prefix<'a, P>( + self: &'a Arc, + 
prefix: &'a P, +) -> impl Future + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 88023b1c..94a6b727 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -31,7 +31,10 @@ where /// the query. The maximum size is supplied as const generic parameter. #[implement(super::Map)] #[inline] -pub fn aqry(self: &Arc, key: &K) -> impl Future>> + Send +pub fn aqry( + self: &Arc, + key: &K, +) -> impl Future>> + Send where K: Serialize + ?Sized + Debug, { @@ -43,7 +46,11 @@ where /// asynchronously. The key is serialized into a user-supplied Writer. #[implement(super::Map)] #[tracing::instrument(skip(self, buf), level = "trace")] -pub fn bqry(self: &Arc, key: &K, buf: &mut B) -> impl Future>> + Send +pub fn bqry( + self: &Arc, + key: &K, + buf: &mut B, +) -> impl Future>> + Send where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, @@ -110,15 +117,15 @@ where match res { // cache hit; not found - Ok(None) => Err!(Request(NotFound("Not found in database"))), + | Ok(None) => Err!(Request(NotFound("Not found in database"))), // cache hit; value found - Ok(Some(res)) => Ok(Some(Handle::from(res))), + | Ok(Some(res)) => Ok(Some(Handle::from(res))), // cache miss; unknown - Err(e) if is_incomplete(&e) => Ok(None), + | Err(e) if is_incomplete(&e) => Ok(None), // some other error occurred - Err(e) => or_else(e), + | Err(e) => or_else(e), } } diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index 5f444cce..49cd5920 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -9,7 +9,8 @@ use crate::{util::map_err, Handle}; #[implement(super::Map)] #[tracing::instrument(skip(self, keys), level = "trace")] pub fn aqry_batch<'b, 'a: 'b, const MAX: usize, I, K>( - self: &'a Arc, keys: I, + self: &'a Arc, + keys: I, ) -> impl Stream>> + Send + 'a where I: Iterator + Send + 'a, @@ -22,7 +23,10 @@ where #[implement(super::Map)] 
#[tracing::instrument(skip(self, keys), level = "trace")] -pub fn get_batch<'a, I, K>(self: &'a Arc, keys: I) -> impl Stream>> + Send + 'a +pub fn get_batch<'a, I, K>( + self: &'a Arc, + keys: I, +) -> impl Stream>> + Send + 'a where I: Iterator + Debug + Send + 'a, K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, @@ -34,7 +38,10 @@ where #[implement(super::Map)] #[tracing::instrument(name = "batch_blocking", level = "trace", skip_all)] -pub(crate) fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator>> + Send +pub(crate) fn get_batch_blocking<'a, I, K>( + &self, + keys: I, +) -> impl Iterator>> + Send where I: Iterator + ExactSizeIterator + Debug + Send, K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 2f6d2020..2ffc68df 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -11,7 +11,10 @@ use crate::{ }; #[implement(super::Map)] -pub fn keys_from<'a, K, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send +pub fn keys_from<'a, K, P>( + self: &'a Arc, + from: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -30,7 +33,10 @@ where } #[implement(super::Map)] -pub fn keys_raw_from<'a, K, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send +pub fn keys_raw_from<'a, K, P>( + self: &'a Arc, + from: &P, +) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 24112ac1..32a1f04c 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -11,7 +11,10 @@ use serde::{Deserialize, Serialize}; use crate::keyval::{result_deserialize_key, serialize_key, Key}; #[implement(super::Map)] -pub fn keys_prefix<'a, K, P>(self: &'a Arc, prefix: &P) -> impl Stream>> + Send +pub fn keys_prefix<'a, K, P>( + self: &'a Arc, + prefix: &P, +) -> impl Stream>> + Send where 
P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -22,7 +25,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn keys_prefix_raw

    (self: &Arc, prefix: &P) -> impl Stream>> + Send +pub fn keys_prefix_raw

    ( + self: &Arc, + prefix: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -33,7 +39,8 @@ where #[implement(super::Map)] pub fn keys_raw_prefix<'a, K, P>( - self: &'a Arc, prefix: &'a P, + self: &'a Arc, + prefix: &'a P, ) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, @@ -44,7 +51,10 @@ where } #[implement(super::Map)] -pub fn raw_keys_prefix<'a, P>(self: &'a Arc, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn raw_keys_prefix<'a, P>( + self: &'a Arc, + prefix: &'a P, +) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index c895105c..a398f315 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -11,7 +11,10 @@ use crate::{ }; #[implement(super::Map)] -pub fn rev_keys_from<'a, K, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send +pub fn rev_keys_from<'a, K, P>( + self: &'a Arc, + from: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -22,7 +25,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn rev_keys_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn rev_keys_from_raw

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -31,7 +37,10 @@ where } #[implement(super::Map)] -pub fn rev_keys_raw_from<'a, K, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send +pub fn rev_keys_raw_from<'a, K, P>( + self: &'a Arc, + from: &P, +) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -42,7 +51,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn rev_raw_keys_from

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn rev_raw_keys_from

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index a5ed35ec..9fda49a0 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -11,7 +11,10 @@ use serde::{Deserialize, Serialize}; use crate::keyval::{result_deserialize_key, serialize_key, Key}; #[implement(super::Map)] -pub fn rev_keys_prefix<'a, K, P>(self: &'a Arc, prefix: &P) -> impl Stream>> + Send +pub fn rev_keys_prefix<'a, K, P>( + self: &'a Arc, + prefix: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -22,7 +25,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn rev_keys_prefix_raw

    (self: &Arc, prefix: &P) -> impl Stream>> + Send +pub fn rev_keys_prefix_raw

    ( + self: &Arc, + prefix: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -33,7 +39,8 @@ where #[implement(super::Map)] pub fn rev_keys_raw_prefix<'a, K, P>( - self: &'a Arc, prefix: &'a P, + self: &'a Arc, + prefix: &'a P, ) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, @@ -44,7 +51,10 @@ where } #[implement(super::Map)] -pub fn rev_raw_keys_prefix<'a, P>(self: &'a Arc, prefix: &'a P) -> impl Stream>> + Send + 'a +pub fn rev_raw_keys_prefix<'a, P>( + self: &'a Arc, + prefix: &'a P, +) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, { diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 2d6a76b7..6ddb9bc7 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -19,7 +19,8 @@ use crate::{ /// - Result is deserialized #[implement(super::Map)] pub fn rev_stream_from<'a, K, V, P>( - self: &'a Arc, from: &P, + self: &'a Arc, + from: &P, ) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -36,7 +37,10 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn rev_stream_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn rev_stream_from_raw

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -50,7 +54,8 @@ where /// - Result is deserialized #[implement(super::Map)] pub fn rev_stream_raw_from<'a, K, V, P>( - self: &'a Arc, from: &P, + self: &'a Arc, + from: &P, ) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, @@ -67,7 +72,10 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn rev_raw_stream_from

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn rev_raw_stream_from

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 13aa40f2..857aa3a5 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -16,7 +16,8 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; /// - Result is deserialized #[implement(super::Map)] pub fn rev_stream_prefix<'a, K, V, P>( - self: &'a Arc, prefix: &P, + self: &'a Arc, + prefix: &P, ) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -33,7 +34,10 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn rev_stream_prefix_raw

    (self: &Arc, prefix: &P) -> impl Stream>> + Send +pub fn rev_stream_prefix_raw

    ( + self: &Arc, + prefix: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -48,7 +52,8 @@ where /// - Result is deserialized #[implement(super::Map)] pub fn rev_stream_raw_prefix<'a, K, V, P>( - self: &'a Arc, prefix: &'a P, + self: &'a Arc, + prefix: &'a P, ) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, @@ -65,7 +70,8 @@ where /// - Result is raw #[implement(super::Map)] pub fn rev_raw_stream_prefix<'a, P>( - self: &'a Arc, prefix: &'a P, + self: &'a Arc, + prefix: &'a P, ) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index c6fe439a..1dae9d78 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -18,7 +18,10 @@ use crate::{ /// - Query is serialized /// - Result is deserialized #[implement(super::Map)] -pub fn stream_from<'a, K, V, P>(self: &'a Arc, from: &P) -> impl Stream>> + Send +pub fn stream_from<'a, K, V, P>( + self: &'a Arc, + from: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +36,10 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn stream_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn stream_from_raw

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -47,7 +53,8 @@ where /// - Result is deserialized #[implement(super::Map)] pub fn stream_raw_from<'a, K, V, P>( - self: &'a Arc, from: &P, + self: &'a Arc, + from: &P, ) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug + Sync, @@ -63,7 +70,10 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_stream_from

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn raw_stream_from

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index e7dad211..a05e2fc5 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -16,7 +16,8 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; /// - Result is deserialized #[implement(super::Map)] pub fn stream_prefix<'a, K, V, P>( - self: &'a Arc, prefix: &P, + self: &'a Arc, + prefix: &P, ) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, @@ -33,7 +34,10 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn stream_prefix_raw

    (self: &Arc, prefix: &P) -> impl Stream>> + Send +pub fn stream_prefix_raw

    ( + self: &Arc, + prefix: &P, +) -> impl Stream>> + Send where P: Serialize + ?Sized + Debug, { @@ -48,7 +52,8 @@ where /// - Result is deserialized #[implement(super::Map)] pub fn stream_raw_prefix<'a, K, V, P>( - self: &'a Arc, prefix: &'a P, + self: &'a Arc, + prefix: &'a P, ) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, @@ -65,7 +70,8 @@ where /// - Result is raw #[implement(super::Map)] pub fn raw_stream_prefix<'a, P>( - self: &'a Arc, prefix: &'a P, + self: &'a Arc, + prefix: &'a P, ) -> impl Stream>> + Send + 'a where P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, diff --git a/src/database/opts.rs b/src/database/opts.rs index 52ed4818..933830bd 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -2,8 +2,9 @@ use std::{cmp, collections::HashMap, convert::TryFrom}; use conduwuit::{err, utils, Config, Result}; use rocksdb::{ - statistics::StatsLevel, BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, DBRecoveryMode, Env, - LogLevel, LruCacheOptions, Options, UniversalCompactOptions, UniversalCompactionStopStyle, + statistics::StatsLevel, BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, + DBRecoveryMode, Env, LogLevel, LruCacheOptions, Options, UniversalCompactOptions, + UniversalCompactionStopStyle, }; /// Create database-wide options suitable for opening the database. This also @@ -11,7 +12,12 @@ use rocksdb::{ /// resulting value. Note that we require special per-column options on some /// columns, therefor columns should only be opened after passing this result /// through cf_options(). 
-pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_cache: &Cache) -> Result { +pub(crate) fn db_options( + config: &Config, + env: &mut Env, + row_cache: &Cache, + col_cache: &Cache, +) -> Result { const DEFAULT_STATS_LEVEL: StatsLevel = if cfg!(debug_assertions) { StatsLevel::ExceptDetailedTimers } else { @@ -73,13 +79,13 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ opts.set_disable_auto_compactions(!config.rocksdb_compaction); opts.set_statistics_level(match config.rocksdb_stats_level { - 0 => StatsLevel::DisableAll, - 1 => DEFAULT_STATS_LEVEL, - 2 => StatsLevel::ExceptHistogramOrTimers, - 3 => StatsLevel::ExceptTimers, - 4 => StatsLevel::ExceptDetailedTimers, - 5 => StatsLevel::ExceptTimeForMutex, - 6_u8..=u8::MAX => StatsLevel::All, + | 0 => StatsLevel::DisableAll, + | 1 => DEFAULT_STATS_LEVEL, + | 2 => StatsLevel::ExceptHistogramOrTimers, + | 3 => StatsLevel::ExceptTimers, + | 4 => StatsLevel::ExceptDetailedTimers, + | 5 => StatsLevel::ExceptTimeForMutex, + | 6_u8..=u8::MAX => StatsLevel::All, }); // Default: https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords @@ -88,11 +94,11 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ // recovered in this manner as it's likely any lost information will be // restored via federation. 
opts.set_wal_recovery_mode(match config.rocksdb_recovery_mode { - 0 => DBRecoveryMode::AbsoluteConsistency, - 1 => DBRecoveryMode::TolerateCorruptedTailRecords, - 2 => DBRecoveryMode::PointInTime, - 3 => DBRecoveryMode::SkipAnyCorruptedRecord, - 4_u8..=u8::MAX => unimplemented!(), + | 0 => DBRecoveryMode::AbsoluteConsistency, + | 1 => DBRecoveryMode::TolerateCorruptedTailRecords, + | 2 => DBRecoveryMode::PointInTime, + | 3 => DBRecoveryMode::SkipAnyCorruptedRecord, + | 4_u8..=u8::MAX => unimplemented!(), }); // @@ -111,11 +117,14 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ /// db_options() as the argument to this function and use the return value in /// the arguments to open the specific column. pub(crate) fn cf_options( - cfg: &Config, name: &str, mut opts: Options, cache: &mut HashMap, + cfg: &Config, + name: &str, + mut opts: Options, + cache: &mut HashMap, ) -> Result { // Columns with non-default compaction options match name { - "backupid_algorithm" + | "backupid_algorithm" | "backupid_etag" | "backupkeyid_backup" | "roomid_shortroomid" @@ -125,12 +134,12 @@ pub(crate) fn cf_options( | "shortstatehash_statediff" | "userdevicetxnid_response" | "userfilterid_filter" => set_for_sequential_small_uc(&mut opts, cfg), - &_ => {}, + | &_ => {}, } // Columns with non-default table/cache configs match name { - "shorteventid_eventid" => set_table_with_new_cache( + | "shorteventid_eventid" => set_table_with_new_cache( &mut opts, cfg, cache, @@ -138,7 +147,7 @@ pub(crate) fn cf_options( cache_size(cfg, cfg.shorteventid_cache_capacity, 64)?, ), - "eventid_shorteventid" => set_table_with_new_cache( + | "eventid_shorteventid" => set_table_with_new_cache( &mut opts, cfg, cache, @@ -146,7 +155,7 @@ pub(crate) fn cf_options( cache_size(cfg, cfg.eventidshort_cache_capacity, 64)?, ), - "eventid_pduid" => set_table_with_new_cache( + | "eventid_pduid" => set_table_with_new_cache( &mut opts, cfg, cache, @@ -154,7 +163,7 @@ pub(crate) fn 
cf_options( cache_size(cfg, cfg.eventid_pdu_cache_capacity, 64)?, ), - "shorteventid_authchain" => { + | "shorteventid_authchain" => { set_table_with_new_cache( &mut opts, cfg, @@ -164,7 +173,7 @@ pub(crate) fn cf_options( ); }, - "shortstatekey_statekey" => set_table_with_new_cache( + | "shortstatekey_statekey" => set_table_with_new_cache( &mut opts, cfg, cache, @@ -172,7 +181,7 @@ pub(crate) fn cf_options( cache_size(cfg, cfg.shortstatekey_cache_capacity, 1024)?, ), - "statekey_shortstatekey" => set_table_with_new_cache( + | "statekey_shortstatekey" => set_table_with_new_cache( &mut opts, cfg, cache, @@ -180,22 +189,32 @@ pub(crate) fn cf_options( cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024)?, ), - "servernameevent_data" => set_table_with_new_cache( + | "servernameevent_data" => set_table_with_new_cache( &mut opts, cfg, cache, name, - cache_size(cfg, cfg.servernameevent_data_cache_capacity, 128)?, /* Raw average value size = 102, key + cache_size(cfg, cfg.servernameevent_data_cache_capacity, 128)?, /* Raw average + * value size = + * 102, key * size = 34 */ ), - "eventid_outlierpdu" => { - set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.pdu_cache_capacity, 1536)?); + | "eventid_outlierpdu" => { + set_table_with_new_cache( + &mut opts, + cfg, + cache, + name, + cache_size(cfg, cfg.pdu_cache_capacity, 1536)?, + ); }, - "pduid_pdu" => set_table_with_shared_cache(&mut opts, cfg, cache, name, "eventid_outlierpdu"), + | "pduid_pdu" => { + set_table_with_shared_cache(&mut opts, cfg, cache, name, "eventid_outlierpdu"); + }, - &_ => {}, + | &_ => {}, } Ok(opts) @@ -203,11 +222,11 @@ pub(crate) fn cf_options( fn set_logging_defaults(opts: &mut Options, config: &Config) { let rocksdb_log_level = match config.rocksdb_log_level.as_ref() { - "debug" => LogLevel::Debug, - "info" => LogLevel::Info, - "warn" => LogLevel::Warn, - "fatal" => LogLevel::Fatal, - _ => LogLevel::Error, + | "debug" => LogLevel::Debug, + | "info" => LogLevel::Info, + 
| "warn" => LogLevel::Warn, + | "fatal" => LogLevel::Fatal, + | _ => LogLevel::Error, }; opts.set_log_level(rocksdb_log_level); @@ -225,13 +244,13 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) { fn set_compression_defaults(opts: &mut Options, config: &Config) { let rocksdb_compression_algo = match config.rocksdb_compression_algo.as_ref() { - "snappy" => DBCompressionType::Snappy, - "zlib" => DBCompressionType::Zlib, - "bz2" => DBCompressionType::Bz2, - "lz4" => DBCompressionType::Lz4, - "lz4hc" => DBCompressionType::Lz4hc, - "none" => DBCompressionType::None, - _ => DBCompressionType::Zstd, + | "snappy" => DBCompressionType::Snappy, + | "zlib" => DBCompressionType::Zlib, + | "bz2" => DBCompressionType::Bz2, + | "lz4" => DBCompressionType::Lz4, + | "lz4hc" => DBCompressionType::Lz4hc, + | "none" => DBCompressionType::None, + | _ => DBCompressionType::Zstd, }; if config.rocksdb_bottommost_compression { @@ -239,7 +258,13 @@ fn set_compression_defaults(opts: &mut Options, config: &Config) { opts.set_bottommost_zstd_max_train_bytes(0, true); // -14 w_bits is only read by zlib. - opts.set_bottommost_compression_options(-14, config.rocksdb_bottommost_compression_level, 0, 0, true); + opts.set_bottommost_compression_options( + -14, + config.rocksdb_bottommost_compression_level, + 0, + 0, + true, + ); } // -14 w_bits is only read by zlib. 
@@ -338,7 +363,11 @@ fn uc_options(_config: &Config) -> UniversalCompactOptions { } fn set_table_with_new_cache( - opts: &mut Options, config: &Config, caches: &mut HashMap, name: &str, size: usize, + opts: &mut Options, + config: &Config, + caches: &mut HashMap, + name: &str, + size: usize, ) { let mut cache_opts = LruCacheOptions::default(); cache_opts.set_capacity(size); @@ -351,7 +380,11 @@ fn set_table_with_new_cache( } fn set_table_with_shared_cache( - opts: &mut Options, config: &Config, cache: &HashMap, _name: &str, cache_name: &str, + opts: &mut Options, + config: &Config, + cache: &HashMap, + _name: &str, + cache_name: &str, ) { let mut table = table_options(config); table.set_block_cache( diff --git a/src/database/pool.rs b/src/database/pool.rs index 3301b821..e4d78897 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -230,8 +230,8 @@ fn worker_wait(&self, recv: &Receiver) -> Result { #[implement(Pool)] fn worker_handle(&self, cmd: Cmd) { match cmd { - Cmd::Get(cmd) => self.handle_get(cmd), - Cmd::Iter(cmd) => self.handle_iter(cmd), + | Cmd::Get(cmd) => self.handle_get(cmd), + | Cmd::Iter(cmd) => self.handle_iter(cmd), } } @@ -251,8 +251,8 @@ fn handle_iter(&self, mut cmd: Seek) { let from = cmd.key.as_deref().map(Into::into); let result = match cmd.dir { - Direction::Forward => cmd.state.init_fwd(from), - Direction::Reverse => cmd.state.init_rev(from), + | Direction::Forward => cmd.state.init_fwd(from), + | Direction::Reverse => cmd.state.init_rev(from), }; let chan_result = chan.send(into_send_seek(result)); @@ -274,8 +274,8 @@ fn _handle_seek(&self, mut cmd: Seek) { } match cmd.dir { - Direction::Forward => cmd.state.seek_fwd(), - Direction::Reverse => cmd.state.seek_rev(), + | Direction::Forward => cmd.state.seek_fwd(), + | Direction::Reverse => cmd.state.seek_rev(), }; let chan_result = chan.send(into_send_seek(cmd.state)); diff --git a/src/database/ser.rs b/src/database/ser.rs index 8abe5521..878319fe 100644 --- a/src/database/ser.rs 
+++ b/src/database/ser.rs @@ -6,7 +6,9 @@ use serde::{ser, Serialize}; use crate::util::unhandled; #[inline] -pub fn serialize_to_vec(val: T) -> Result> { serialize_to::, T>(val) } +pub fn serialize_to_vec(val: T) -> Result> { + serialize_to::, T>(val) +} #[inline] pub fn serialize_to(val: T) -> Result @@ -26,17 +28,15 @@ where W: Write + AsRef<[u8]> + 'a, T: Serialize, { - let mut serializer = Serializer { - out, - depth: 0, - sep: false, - fin: false, - }; + let mut serializer = Serializer { out, depth: 0, sep: false, fin: false }; val.serialize(&mut serializer) .map_err(|error| err!(SerdeSer("{error}"))) .debug_inspect(|()| { - debug_assert_eq!(serializer.depth, 0, "Serialization completed at non-zero recursion level"); + debug_assert_eq!( + serializer.depth, 0, + "Serialization completed at non-zero recursion level" + ); })?; Ok((*out).as_ref()) @@ -132,29 +132,45 @@ impl ser::Serializer for &mut Serializer<'_, W> { Ok(self) } - fn serialize_tuple_struct(self, _name: &'static str, _len: usize) -> Result { + fn serialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { self.tuple_start(); Ok(self) } fn serialize_tuple_variant( - self, _name: &'static str, _idx: u32, _var: &'static str, _len: usize, + self, + _name: &'static str, + _idx: u32, + _var: &'static str, + _len: usize, ) -> Result { unhandled!("serialize Tuple Variant not implemented") } fn serialize_map(self, _len: Option) -> Result { - unhandled!("serialize Map not implemented; did you mean to use database::Json() around your serde_json::Value?") + unhandled!( + "serialize Map not implemented; did you mean to use database::Json() around your \ + serde_json::Value?" + ) } fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { unhandled!( - "serialize Struct not implemented at this time; did you mean to use database::Json() around your struct?" + "serialize Struct not implemented at this time; did you mean to use \ + database::Json() around your struct?" 
) } fn serialize_struct_variant( - self, _name: &'static str, _idx: u32, _var: &'static str, _len: usize, + self, + _name: &'static str, + _idx: u32, + _var: &'static str, + _len: usize, ) -> Result { unhandled!("serialize Struct Variant not implemented") } @@ -170,36 +186,47 @@ impl ser::Serializer for &mut Serializer<'_, W> { ); match name { - "Json" => serde_json::to_writer(&mut self.out, value).map_err(Into::into), - _ => unhandled!("Unrecognized serialization Newtype {name:?}"), + | "Json" => serde_json::to_writer(&mut self.out, value).map_err(Into::into), + | _ => unhandled!("Unrecognized serialization Newtype {name:?}"), } } fn serialize_newtype_variant( - self, _name: &'static str, _idx: u32, _var: &'static str, _value: &T, + self, + _name: &'static str, + _idx: u32, + _var: &'static str, + _value: &T, ) -> Result { unhandled!("serialize Newtype Variant not implemented") } fn serialize_unit_struct(self, name: &'static str) -> Result { match name { - "Interfix" => { + | "Interfix" => { self.set_finalized(); }, - "Separator" => { + | "Separator" => { self.separator()?; }, - _ => unhandled!("Unrecognized serialization directive: {name:?}"), + | _ => unhandled!("Unrecognized serialization directive: {name:?}"), }; Ok(()) } - fn serialize_unit_variant(self, _name: &'static str, _idx: u32, _var: &'static str) -> Result { + fn serialize_unit_variant( + self, + _name: &'static str, + _idx: u32, + _var: &'static str, + ) -> Result { unhandled!("serialize Unit Variant not implemented") } - fn serialize_some(self, val: &T) -> Result { val.serialize(self) } + fn serialize_some(self, val: &T) -> Result { + val.serialize(self) + } fn serialize_none(self) -> Result { Ok(()) } @@ -226,27 +253,39 @@ impl ser::Serializer for &mut Serializer<'_, W> { self.write(v) } - fn serialize_f64(self, _v: f64) -> Result { unhandled!("serialize f64 not implemented") } + fn serialize_f64(self, _v: f64) -> Result { + unhandled!("serialize f64 not implemented") + } - fn serialize_f32(self, 
_v: f32) -> Result { unhandled!("serialize f32 not implemented") } + fn serialize_f32(self, _v: f32) -> Result { + unhandled!("serialize f32 not implemented") + } fn serialize_i64(self, v: i64) -> Result { self.write(&v.to_be_bytes()) } fn serialize_i32(self, v: i32) -> Result { self.write(&v.to_be_bytes()) } - fn serialize_i16(self, _v: i16) -> Result { unhandled!("serialize i16 not implemented") } + fn serialize_i16(self, _v: i16) -> Result { + unhandled!("serialize i16 not implemented") + } - fn serialize_i8(self, _v: i8) -> Result { unhandled!("serialize i8 not implemented") } + fn serialize_i8(self, _v: i8) -> Result { + unhandled!("serialize i8 not implemented") + } fn serialize_u64(self, v: u64) -> Result { self.write(&v.to_be_bytes()) } fn serialize_u32(self, v: u32) -> Result { self.write(&v.to_be_bytes()) } - fn serialize_u16(self, _v: u16) -> Result { unhandled!("serialize u16 not implemented") } + fn serialize_u16(self, _v: u16) -> Result { + unhandled!("serialize u16 not implemented") + } fn serialize_u8(self, v: u8) -> Result { self.write(&[v]) } - fn serialize_bool(self, _v: bool) -> Result { unhandled!("serialize bool not implemented") } + fn serialize_bool(self, _v: bool) -> Result { + unhandled!("serialize bool not implemented") + } fn serialize_unit(self) -> Result { unhandled!("serialize unit not implemented") } } @@ -255,7 +294,9 @@ impl ser::SerializeSeq for &mut Serializer<'_, W> { type Error = Error; type Ok = (); - fn serialize_element(&mut self, val: &T) -> Result { val.serialize(&mut **self) } + fn serialize_element(&mut self, val: &T) -> Result { + val.serialize(&mut **self) + } fn end(self) -> Result { self.sequence_end() } } @@ -315,7 +356,11 @@ impl ser::SerializeStruct for &mut Serializer<'_, W> { type Error = Error; type Ok = (); - fn serialize_field(&mut self, _key: &'static str, _val: &T) -> Result { + fn serialize_field( + &mut self, + _key: &'static str, + _val: &T, + ) -> Result { unhandled!("serialize Struct Field not 
implemented") } @@ -326,9 +371,15 @@ impl ser::SerializeStructVariant for &mut Serializer<'_, W> { type Error = Error; type Ok = (); - fn serialize_field(&mut self, _key: &'static str, _val: &T) -> Result { + fn serialize_field( + &mut self, + _key: &'static str, + _val: &T, + ) -> Result { unhandled!("serialize Struct Variant Field not implemented") } - fn end(self) -> Result { unhandled!("serialize Struct Variant End not implemented") } + fn end(self) -> Result { + unhandled!("serialize Struct Variant End not implemented") + } } diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index 06cb6de9..2a38d97e 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -17,18 +17,12 @@ pub(crate) struct Items<'a> { impl<'a> Items<'a> { pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { - Self { - state: State::new(db, cf, opts), - } + Self { state: State::new(db, cf, opts) } } } impl<'a> convert::From> for Items<'a> { - fn from(state: State<'a>) -> Self { - Self { - state, - } - } + fn from(state: State<'a>) -> Self { Self { state } } } impl<'a> Cursor<'a, KeyVal<'a>> for Items<'a> { @@ -40,11 +34,7 @@ impl<'a> Cursor<'a, KeyVal<'a>> for Items<'a> { fn seek(&mut self) { self.state.seek_fwd(); } #[inline] - fn init(self, from: From<'a>) -> Self { - Self { - state: self.state.init_fwd(from), - } - } + fn init(self, from: From<'a>) -> Self { Self { state: self.state.init_fwd(from) } } } impl<'a> Stream for Items<'a> { diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index 2d0c4639..c3a6cc7f 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -17,18 +17,12 @@ pub(crate) struct ItemsRev<'a> { impl<'a> ItemsRev<'a> { pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { - Self { - state: State::new(db, cf, opts), - } + Self { state: State::new(db, cf, opts) } } } impl<'a> convert::From> for ItemsRev<'a> { - fn from(state: 
State<'a>) -> Self { - Self { - state, - } - } + fn from(state: State<'a>) -> Self { Self { state } } } impl<'a> Cursor<'a, KeyVal<'a>> for ItemsRev<'a> { @@ -40,11 +34,7 @@ impl<'a> Cursor<'a, KeyVal<'a>> for ItemsRev<'a> { fn seek(&mut self) { self.state.seek_rev(); } #[inline] - fn init(self, from: From<'a>) -> Self { - Self { - state: self.state.init_rev(from), - } - } + fn init(self, from: From<'a>) -> Self { Self { state: self.state.init_rev(from) } } } impl<'a> Stream for ItemsRev<'a> { diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index a901b342..0696781d 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -17,18 +17,12 @@ pub(crate) struct Keys<'a> { impl<'a> Keys<'a> { pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { - Self { - state: State::new(db, cf, opts), - } + Self { state: State::new(db, cf, opts) } } } impl<'a> convert::From> for Keys<'a> { - fn from(state: State<'a>) -> Self { - Self { - state, - } - } + fn from(state: State<'a>) -> Self { Self { state } } } impl<'a> Cursor<'a, Key<'a>> for Keys<'a> { @@ -41,11 +35,7 @@ impl<'a> Cursor<'a, Key<'a>> for Keys<'a> { fn seek(&mut self) { self.state.seek_fwd(); } #[inline] - fn init(self, from: From<'a>) -> Self { - Self { - state: self.state.init_fwd(from), - } - } + fn init(self, from: From<'a>) -> Self { Self { state: self.state.init_fwd(from) } } } impl<'a> Stream for Keys<'a> { diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index 73758a85..42706d9f 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -17,18 +17,12 @@ pub(crate) struct KeysRev<'a> { impl<'a> KeysRev<'a> { pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { - Self { - state: State::new(db, cf, opts), - } + Self { state: State::new(db, cf, opts) } } } impl<'a> convert::From> for KeysRev<'a> { - fn from(state: State<'a>) -> Self { - Self { - state, - } - } + fn from(state: 
State<'a>) -> Self { Self { state } } } impl<'a> Cursor<'a, Key<'a>> for KeysRev<'a> { @@ -41,11 +35,7 @@ impl<'a> Cursor<'a, Key<'a>> for KeysRev<'a> { fn seek(&mut self) { self.state.seek_rev(); } #[inline] - fn init(self, from: From<'a>) -> Self { - Self { - state: self.state.init_rev(from), - } - } + fn init(self, from: From<'a>) -> Self { Self { state: self.state.init_rev(from) } } } impl<'a> Stream for KeysRev<'a> { diff --git a/src/database/tests.rs b/src/database/tests.rs index 7f486966..3d41a544 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -66,10 +66,7 @@ fn ser_complex() { media_id: "AbCdEfGhIjK", }; - let dim = Dim { - width: 123, - height: 456, - }; + let dim = Dim { width: 123, height: 456 }; let mut a = Vec::new(); a.extend_from_slice(b"mxc://"); @@ -128,9 +125,7 @@ fn ser_json_macro() { foo: String, } - let content = Foo { - foo: "bar".to_owned(), - }; + let content = Foo { foo: "bar".to_owned() }; let content = serde_json::to_value(content).expect("failed to serialize content"); let sender: &UserId = "@foo:example.com".try_into().unwrap(); let serialized = serialize_to_vec(Json(json!({ @@ -153,7 +148,8 @@ fn ser_json_raw() { ..Default::default() }; - let value = serde_json::value::to_raw_value(&filter).expect("failed to serialize to raw value"); + let value = + serde_json::value::to_raw_value(&filter).expect("failed to serialize to raw value"); let a = serialize_to_vec(value.get()).expect("failed to serialize raw value"); let s = String::from_utf8_lossy(&a); assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); @@ -169,7 +165,8 @@ fn ser_json_raw_json() { ..Default::default() }; - let value = serde_json::value::to_raw_value(&filter).expect("failed to serialize to raw value"); + let value = + serde_json::value::to_raw_value(&filter).expect("failed to serialize to raw value"); let a = serialize_to_vec(Json(value)).expect("failed to serialize json value"); let s = String::from_utf8_lossy(&a); assert_eq!(&s, 
r#"{"event_fields":["content.body"]}"#); @@ -241,7 +238,8 @@ fn de_tuple_ignore() { let room_id: &RoomId = "!room:example.com".try_into().unwrap(); let raw: &[u8] = b"@user:example.com\xFF@user2:example.net\xFF!room:example.com"; - let (a, _, c): (&UserId, Ignore, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); + let (a, _, c): (&UserId, Ignore, &RoomId) = + de::from_slice(raw).expect("failed to deserialize"); assert_eq!(a, user_id, "deserialized user_id does not match"); assert_eq!(c, room_id, "deserialized room_id does not match"); @@ -254,7 +252,8 @@ fn de_json_array() { let b: Raw>> = de::from_slice(&s).expect("failed to deserialize"); - let d: Vec = serde_json::from_str(b.json().get()).expect("failed to deserialize JSON"); + let d: Vec = + serde_json::from_str(b.json().get()).expect("failed to deserialize JSON"); for (i, a) in a.iter().enumerate() { assert_eq!(*a, d[i]); @@ -268,7 +267,8 @@ fn de_json_raw_array() { let b: Raw>> = de::from_slice(&s).expect("failed to deserialize"); - let c: Vec> = serde_json::from_str(b.json().get()).expect("failed to deserialize JSON"); + let c: Vec> = + serde_json::from_str(b.json().get()).expect("failed to deserialize JSON"); for (i, a) in a.iter().enumerate() { let c = serde_json::to_value(c[i].json()).expect("failed to deserialize JSON to string"); diff --git a/src/database/util.rs b/src/database/util.rs index 0fca3b68..c2a020e3 100644 --- a/src/database/util.rs +++ b/src/database/util.rs @@ -30,13 +30,15 @@ pub(crate) fn _into_direction(mode: &IteratorMode<'_>) -> Direction { use IteratorMode::{End, From, Start}; match mode { - Start | From(_, Forward) => Forward, - End | From(_, Reverse) => Reverse, + | Start | From(_, Forward) => Forward, + | End | From(_, Reverse) => Reverse, } } #[inline] -pub(crate) fn result(r: std::result::Result) -> Result { +pub(crate) fn result( + r: std::result::Result, +) -> Result { r.map_or_else(or_else, and_then) } diff --git a/src/database/watchers.rs 
b/src/database/watchers.rs index 93c82b44..9ce6f74c 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -15,10 +15,13 @@ pub(crate) struct Watchers { } impl Watchers { - pub(crate) fn watch<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + pub(crate) fn watch<'a>( + &'a self, + prefix: &[u8], + ) -> Pin + Send + 'a>> { let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { - hash_map::Entry::Occupied(o) => o.get().1.clone(), - hash_map::Entry::Vacant(v) => { + | hash_map::Entry::Occupied(o) => o.get().1.clone(), + | hash_map::Entry::Vacant(v) => { let (tx, rx) = watch::channel(()); v.insert((tx, rx.clone())); rx diff --git a/src/macros/admin.rs b/src/macros/admin.rs index b0dc1956..e98e914c 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -42,14 +42,14 @@ fn dispatch_arm(v: &Variant) -> Result { let target = camel_to_snake_string(&format!("{name}")); let handler = Ident::new(&target, Span::call_site().into()); let res = match &v.fields { - Fields::Named(fields) => { + | Fields::Named(fields) => { let field = fields.named.iter().filter_map(|f| f.ident.as_ref()); let arg = field.clone(); quote! { #name { #( #field ),* } => Box::pin(context.#handler(#( #arg ),*)).await?, } }, - Fields::Unnamed(fields) => { + | Fields::Unnamed(fields) => { let Some(ref field) = fields.unnamed.first() else { return Err(Error::new(Span::call_site().into(), "One unnamed field required")); }; @@ -57,7 +57,7 @@ fn dispatch_arm(v: &Variant) -> Result { #name ( #field ) => Box::pin(#handler::process(#field, context)).await?, } }, - Fields::Unit => { + | Fields::Unit => { quote! 
{ #name => Box::pin(context.#handler()).await?, } diff --git a/src/macros/config.rs b/src/macros/config.rs index 2934a0b2..eb269e1e 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -4,8 +4,8 @@ use proc_macro::TokenStream; use proc_macro2::Span; use quote::ToTokens; use syn::{ - parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, FieldsNamed, - ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, + parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, + FieldsNamed, ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, }; use crate::{ @@ -29,9 +29,9 @@ pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result Result<()> { let settings = get_simple_settings(args); - let filename = settings - .get("filename") - .ok_or_else(|| Error::new(args[0].span(), "missing required 'filename' attribute argument"))?; + let filename = settings.get("filename").ok_or_else(|| { + Error::new(args[0].span(), "missing required 'filename' attribute argument") + })?; let undocumented = settings .get("undocumented") @@ -43,9 +43,9 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { .split(' ') .collect(); - let section = settings - .get("section") - .ok_or_else(|| Error::new(args[0].span(), "missing required 'section' attribute argument"))?; + let section = settings.get("section").ok_or_else(|| { + Error::new(args[0].span(), "missing required 'section' attribute argument") + })?; let mut file = OpenOptions::new() .write(true) @@ -53,7 +53,12 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { .truncate(section == "global") .append(section != "global") .open(filename) - .map_err(|e| Error::new(Span::call_site(), format!("Failed to open config file for generation: {e}")))?; + .map_err(|e| { + Error::new( + Span::call_site(), + format!("Failed to open config file for generation: {e}"), + ) + })?; if let Some(header) 
= settings.get("header") { file.write_all(header.as_bytes()) @@ -63,11 +68,7 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { file.write_fmt(format_args!("\n[{section}]\n")) .expect("written to config file"); - if let Fields::Named(FieldsNamed { - named, - .. - }) = &input.fields - { + if let Fields::Named(FieldsNamed { named, .. }) = &input.fields { for field in named { let Some(ident) = &field.ident else { continue; @@ -120,12 +121,7 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { fn get_default(field: &Field) -> Option { for attr in &field.attrs { - let Meta::List(MetaList { - path, - tokens, - .. - }) = &attr.meta - else { + let Meta::List(MetaList { path, tokens, .. }) = &attr.meta else { continue; }; @@ -149,23 +145,18 @@ fn get_default(field: &Field) -> Option { }; match arg { - Meta::NameValue(MetaNameValue { - value: Expr::Lit(ExprLit { - lit: Lit::Str(str), - .. - }), + | Meta::NameValue(MetaNameValue { + value: Expr::Lit(ExprLit { lit: Lit::Str(str), .. }), .. }) => { match str.value().as_str() { - "HashSet::new" | "Vec::new" | "RegexSet::empty" => Some("[]".to_owned()), - "true_fn" => return Some("true".to_owned()), - _ => return None, + | "HashSet::new" | "Vec::new" | "RegexSet::empty" => Some("[]".to_owned()), + | "true_fn" => return Some("true".to_owned()), + | _ => return None, }; }, - Meta::Path { - .. - } => return Some("false".to_owned()), - _ => return None, + | Meta::Path { .. } => return Some("false".to_owned()), + | _ => return None, }; } @@ -174,12 +165,7 @@ fn get_default(field: &Field) -> Option { fn get_doc_default(field: &Field) -> Option { for attr in &field.attrs { - let Meta::NameValue(MetaNameValue { - path, - value, - .. - }) = &attr.meta - else { + let Meta::NameValue(MetaNameValue { path, value, .. }) = &attr.meta else { continue; }; @@ -187,11 +173,7 @@ fn get_doc_default(field: &Field) -> Option { continue; } - let Expr::Lit(ExprLit { - lit, - .. 
- }) = &value - else { + let Expr::Lit(ExprLit { lit, .. }) = &value else { continue; }; @@ -217,12 +199,7 @@ fn get_doc_default(field: &Field) -> Option { fn get_doc_comment(field: &Field) -> Option { let mut out = String::new(); for attr in &field.attrs { - let Meta::NameValue(MetaNameValue { - path, - value, - .. - }) = &attr.meta - else { + let Meta::NameValue(MetaNameValue { path, value, .. }) = &attr.meta else { continue; }; @@ -230,11 +207,7 @@ fn get_doc_comment(field: &Field) -> Option { continue; } - let Expr::Lit(ExprLit { - lit, - .. - }) = &value - else { + let Expr::Lit(ExprLit { lit, .. }) = &value else { continue; }; @@ -254,11 +227,7 @@ fn get_doc_comment(field: &Field) -> Option { } fn get_type_name(field: &Field) -> Option { - let Type::Path(TypePath { - path, - .. - }) = &field.ty - else { + let Type::Path(TypePath { path, .. }) = &field.ty else { return None; }; diff --git a/src/macros/implement.rs b/src/macros/implement.rs index b798bae5..8d18f243 100644 --- a/src/macros/implement.rs +++ b/src/macros/implement.rs @@ -20,12 +20,15 @@ pub(super) fn implement(item: ItemFn, args: &[Meta]) -> Result { } fn get_receiver(args: &[Meta]) -> Result { - let receiver = &args - .first() - .ok_or_else(|| Error::new(Span::call_site().into(), "Missing required argument to receiver"))?; + let receiver = &args.first().ok_or_else(|| { + Error::new(Span::call_site().into(), "Missing required argument to receiver") + })?; let Meta::Path(receiver) = receiver else { - return Err(Error::new(Span::call_site().into(), "First argument is not path to receiver")); + return Err(Error::new( + Span::call_site().into(), + "First argument is not path to receiver", + )); }; Ok(receiver.clone()) diff --git a/src/macros/refutable.rs b/src/macros/refutable.rs index facb4729..66e0ebc3 100644 --- a/src/macros/refutable.rs +++ b/src/macros/refutable.rs @@ -9,11 +9,7 @@ pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result let stmt = &mut item.block.stmts; let sig = &mut 
item.sig; for (i, input) in inputs.iter().enumerate() { - let Typed(PatType { - pat, - .. - }) = input - else { + let Typed(PatType { pat, .. }) = input else { continue; }; @@ -24,11 +20,7 @@ pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result let variant = &pat.path; let fields = &pat.fields; - let Some(Typed(PatType { - ref mut pat, - .. - })) = sig.inputs.get_mut(i) - else { + let Some(Typed(PatType { ref mut pat, .. })) = sig.inputs.get_mut(i) else { continue; }; diff --git a/src/macros/rustc.rs b/src/macros/rustc.rs index f484e5f5..1220c8d4 100644 --- a/src/macros/rustc.rs +++ b/src/macros/rustc.rs @@ -4,8 +4,8 @@ use quote::quote; pub(super) fn flags_capture(args: TokenStream) -> TokenStream { let cargo_crate_name = std::env::var("CARGO_CRATE_NAME"); let crate_name = match cargo_crate_name.as_ref() { - Err(_) => return args, - Ok(crate_name) => crate_name.trim_start_matches("conduwuit_"), + | Err(_) => return args, + | Ok(crate_name) => crate_name.trim_start_matches("conduwuit_"), }; let flag = std::env::args().collect::>(); diff --git a/src/macros/utils.rs b/src/macros/utils.rs index 23c4c16f..9f799f7b 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -6,23 +6,11 @@ use crate::Result; pub(crate) fn get_simple_settings(args: &[Meta]) -> HashMap { args.iter().fold(HashMap::new(), |mut map, arg| { - let Meta::NameValue(MetaNameValue { - path, - value, - .. - }) = arg - else { + let Meta::NameValue(MetaNameValue { path, value, .. }) = arg else { return map; }; - let Expr::Lit( - ExprLit { - lit: Lit::Str(str), - .. - }, - .., - ) = value - else { + let Expr::Lit(ExprLit { lit: Lit::Str(str), .. }, ..) 
= value else { return map; }; diff --git a/src/main/logging.rs b/src/main/logging.rs index 999265e7..e8a18b10 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -10,12 +10,15 @@ use conduwuit::{ use tracing_subscriber::{layer::SubscriberExt, reload, EnvFilter, Layer, Registry}; #[cfg(feature = "perf_measurements")] -pub(crate) type TracingFlameGuard = Option>>; +pub(crate) type TracingFlameGuard = + Option>>; #[cfg(not(feature = "perf_measurements"))] pub(crate) type TracingFlameGuard = (); #[allow(clippy::redundant_clone)] -pub(crate) fn init(config: &Config) -> Result<(LogLevelReloadHandles, TracingFlameGuard, Arc)> { +pub(crate) fn init( + config: &Config, +) -> Result<(LogLevelReloadHandles, TracingFlameGuard, Arc)> { let reload_handles = LogLevelReloadHandles::default(); let console_span_events = fmt_span::from_str(&config.log_span_events).unwrap_or_err(); @@ -27,7 +30,8 @@ pub(crate) fn init(config: &Config) -> Result<(LogLevelReloadHandles, TracingFla .with_ansi(config.log_colors) .with_span_events(console_span_events) .with_thread_ids(config.log_thread_ids); - let (console_reload_filter, console_reload_handle) = reload::Layer::new(console_filter.clone()); + let (console_reload_filter, console_reload_handle) = + reload::Layer::new(console_filter.clone()); reload_handles.add("console", Box::new(console_reload_handle)); let cap_state = Arc::new(capture::State::new()); @@ -39,8 +43,8 @@ pub(crate) fn init(config: &Config) -> Result<(LogLevelReloadHandles, TracingFla #[cfg(feature = "sentry_telemetry")] let subscriber = { - let sentry_filter = - EnvFilter::try_new(&config.sentry_filter).map_err(|e| err!(Config("sentry_filter", "{e}.")))?; + let sentry_filter = EnvFilter::try_new(&config.sentry_filter) + .map_err(|e| err!(Config("sentry_filter", "{e}.")))?; let sentry_layer = sentry_tracing::layer(); let (sentry_reload_filter, sentry_reload_handle) = reload::Layer::new(sentry_filter); reload_handles.add("sentry", Box::new(sentry_reload_handle)); @@ 
-52,8 +56,9 @@ pub(crate) fn init(config: &Config) -> Result<(LogLevelReloadHandles, TracingFla let (flame_layer, flame_guard) = if config.tracing_flame { let flame_filter = EnvFilter::try_new(&config.tracing_flame_filter) .map_err(|e| err!(Config("tracing_flame_filter", "{e}.")))?; - let (flame_layer, flame_guard) = tracing_flame::FlameLayer::with_file(&config.tracing_flame_output_path) - .map_err(|e| err!(Config("tracing_flame_output_path", "{e}.")))?; + let (flame_layer, flame_guard) = + tracing_flame::FlameLayer::with_file(&config.tracing_flame_output_path) + .map_err(|e| err!(Config("tracing_flame_output_path", "{e}.")))?; let flame_layer = flame_layer .with_empty_samples(false) .with_filter(flame_filter); @@ -62,17 +67,20 @@ pub(crate) fn init(config: &Config) -> Result<(LogLevelReloadHandles, TracingFla (None, None) }; - let jaeger_filter = - EnvFilter::try_new(&config.jaeger_filter).map_err(|e| err!(Config("jaeger_filter", "{e}.")))?; + let jaeger_filter = EnvFilter::try_new(&config.jaeger_filter) + .map_err(|e| err!(Config("jaeger_filter", "{e}.")))?; let jaeger_layer = config.allow_jaeger.then(|| { - opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); + opentelemetry::global::set_text_map_propagator( + opentelemetry_jaeger::Propagator::new(), + ); let tracer = opentelemetry_jaeger::new_agent_pipeline() .with_auto_split_batch(true) .with_service_name("conduwuit") .install_batch(opentelemetry_sdk::runtime::Tokio) .expect("jaeger agent pipeline"); let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - let (jaeger_reload_filter, jaeger_reload_handle) = reload::Layer::new(jaeger_filter.clone()); + let (jaeger_reload_filter, jaeger_reload_handle) = + reload::Layer::new(jaeger_filter.clone()); reload_handles.add("jaeger", Box::new(jaeger_reload_handle)); Some(telemetry.with_filter(jaeger_reload_filter)) }); diff --git a/src/main/main.rs b/src/main/main.rs index 5c066584..0946e835 100644 --- a/src/main/main.rs 
+++ b/src/main/main.rs @@ -68,8 +68,8 @@ async fn async_main(server: &Arc) -> Result<(), Error> { extern crate conduwuit_router as router; match router::start(&server.server).await { - Ok(services) => server.services.lock().await.insert(services), - Err(error) => { + | Ok(services) => server.services.lock().await.insert(services), + | Err(error) => { error!("Critical error starting server: {error}"); return Err(error); }, diff --git a/src/main/mods.rs b/src/main/mods.rs index ca984a64..ca799b90 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -44,8 +44,8 @@ pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, boo if starts { let start = main_mod.get::("start")?; match start(&server.server).await { - Ok(services) => server.services.lock().await.insert(services), - Err(error) => { + | Ok(services) => server.services.lock().await.insert(services), + | Err(error) => { error!("Starting server: {error}"); return Err(error); }, diff --git a/src/main/server.rs b/src/main/server.rs index 7c3eec1f..00c7a6cc 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -23,7 +23,10 @@ pub(crate) struct Server { } impl Server { - pub(crate) fn build(args: &Args, runtime: Option<&runtime::Handle>) -> Result, Error> { + pub(crate) fn build( + args: &Args, + runtime: Option<&runtime::Handle>, + ) -> Result, Error> { let _runtime_guard = runtime.map(runtime::Handle::enter); let raw_config = Config::load(args.config.as_deref())?; @@ -33,12 +36,14 @@ impl Server { #[cfg(feature = "sentry_telemetry")] let sentry_guard = crate::sentry::init(&config); - let (tracing_reload_handle, tracing_flame_guard, capture) = crate::logging::init(&config)?; + let (tracing_reload_handle, tracing_flame_guard, capture) = + crate::logging::init(&config)?; config.check()?; #[cfg(unix)] - sys::maximize_fd_limit().expect("Unable to increase maximum soft and hard file descriptor limit"); + sys::maximize_fd_limit() + .expect("Unable to increase maximum soft and hard file descriptor 
limit"); info!( server_name = %config.server_name, @@ -49,14 +54,10 @@ impl Server { ); Ok(Arc::new(Self { - server: Arc::new(conduwuit::Server::new( - config, - runtime.cloned(), - Log { - reload: tracing_reload_handle, - capture, - }, - )), + server: Arc::new(conduwuit::Server::new(config, runtime.cloned(), Log { + reload: tracing_reload_handle, + capture, + })), services: None.into(), diff --git a/src/router/layers.rs b/src/router/layers.rs index 1c5beace..96bca4fd 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -41,7 +41,11 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { #[cfg(feature = "sentry_telemetry")] let layers = layers.layer(sentry_tower::NewSentryLayer::>::new_from_top()); - #[cfg(any(feature = "zstd_compression", feature = "gzip_compression", feature = "brotli_compression"))] + #[cfg(any( + feature = "zstd_compression", + feature = "gzip_compression", + feature = "brotli_compression" + ))] let layers = layers.layer(compression_layer(server)); let layers = layers @@ -88,7 +92,11 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { Ok((router.layer(layers), guard)) } -#[cfg(any(feature = "zstd_compression", feature = "gzip_compression", feature = "brotli_compression"))] +#[cfg(any( + feature = "zstd_compression", + feature = "gzip_compression", + feature = "brotli_compression" +))] fn compression_layer(server: &Server) -> tower_http::compression::CompressionLayer { let mut compression_layer = tower_http::compression::CompressionLayer::new(); @@ -148,11 +156,15 @@ fn cors_layer(_server: &Server) -> CorsLayer { .max_age(Duration::from_secs(86400)) } -fn body_limit_layer(server: &Server) -> DefaultBodyLimit { DefaultBodyLimit::max(server.config.max_request_size) } +fn body_limit_layer(server: &Server) -> DefaultBodyLimit { + DefaultBodyLimit::max(server.config.max_request_size) +} #[tracing::instrument(name = "panic", level = "error", skip_all)] #[allow(clippy::needless_pass_by_value)] -fn catch_panic(err: 
Box) -> http::Response> { +fn catch_panic( + err: Box, +) -> http::Response> { //TODO: XXX /* conduwuit_service::services() diff --git a/src/router/mod.rs b/src/router/mod.rs index aab0b185..f64dcb67 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -17,7 +17,9 @@ conduwuit::mod_dtor! {} conduwuit::rustc_flags_capture! {} #[unsafe(no_mangle)] -pub extern "Rust" fn start(server: &Arc) -> Pin>> + Send>> { +pub extern "Rust" fn start( + server: &Arc, +) -> Pin>> + Send>> { AssertUnwindSafe(run::start(server.clone())) .catch_unwind() .map_err(Error::from_panic) @@ -26,7 +28,9 @@ pub extern "Rust" fn start(server: &Arc) -> Pin) -> Pin> + Send>> { +pub extern "Rust" fn stop( + services: Arc, +) -> Pin> + Send>> { AssertUnwindSafe(run::stop(services)) .catch_unwind() .map_err(Error::from_panic) @@ -35,7 +39,9 @@ pub extern "Rust" fn stop(services: Arc) -> Pin) -> Pin> + Send>> { +pub extern "Rust" fn run( + services: &Arc, +) -> Pin> + Send>> { AssertUnwindSafe(run::run(services.clone())) .catch_unwind() .map_err(Error::from_panic) diff --git a/src/router/request.rs b/src/router/request.rs index b17e9c4f..559c7f88 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -14,7 +14,9 @@ use http::{Method, StatusCode, Uri}; skip_all, )] pub(crate) async fn spawn( - State(services): State>, req: http::Request, next: axum::middleware::Next, + State(services): State>, + req: http::Request, + next: axum::middleware::Next, ) -> Result { let server = &services.server; if !server.running() { @@ -40,7 +42,9 @@ pub(crate) async fn spawn( #[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn handle( - State(services): State>, req: http::Request, next: axum::middleware::Next, + State(services): State>, + req: http::Request, + next: axum::middleware::Next, ) -> Result { let server = &services.server; if !server.running() { diff --git a/src/router/run.rs b/src/router/run.rs index 6d5c8923..248f7052 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ 
-33,9 +33,10 @@ pub(crate) async fn run(services: Arc) -> Result<()> { .runtime() .spawn(signal(server.clone(), tx.clone(), handle.clone())); - let mut listener = server - .runtime() - .spawn(serve::serve(services.clone(), handle.clone(), tx.subscribe())); + let mut listener = + server + .runtime() + .spawn(serve::serve(services.clone(), handle.clone(), tx.subscribe())); // Focal point debug!("Running"); @@ -63,7 +64,8 @@ pub(crate) async fn start(server: Arc) -> Result> { let services = Services::build(server).await?.start().await?; #[cfg(feature = "systemd")] - sd_notify::notify(true, &[sd_notify::NotifyState::Ready]).expect("failed to notify systemd of ready state"); + sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) + .expect("failed to notify systemd of ready state"); debug!("Started"); Ok(services) @@ -98,7 +100,8 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { } #[cfg(feature = "systemd")] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]).expect("failed to notify systemd of stopping state"); + sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) + .expect("failed to notify systemd of stopping state"); info!("Shutdown complete."); Ok(()) @@ -121,7 +124,12 @@ async fn signal(server: Arc, tx: Sender<()>, handle: axum_server::Handle } } -async fn handle_shutdown(server: &Arc, tx: &Sender<()>, handle: &axum_server::Handle, sig: &str) { +async fn handle_shutdown( + server: &Arc, + tx: &Sender<()>, + handle: &axum_server::Handle, + sig: &str, +) { debug!("Received signal {sig}"); if let Err(e) = tx.send(()) { error!("failed sending shutdown transaction to channel: {e}"); @@ -139,7 +147,9 @@ async fn handle_shutdown(server: &Arc, tx: &Sender<()>, handle: &axum_se } async fn handle_services_poll( - server: &Arc, result: Result<()>, listener: JoinHandle>, + server: &Arc, + result: Result<()>, + listener: JoinHandle>, ) -> Result<()> { debug!("Service manager finished: {result:?}"); diff --git a/src/router/serve/mod.rs 
b/src/router/serve/mod.rs index 35792359..f6262202 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -14,7 +14,9 @@ use super::layers; /// Serve clients pub(super) async fn serve( - services: Arc, handle: ServerHandle, shutdown: broadcast::Receiver<()>, + services: Arc, + handle: ServerHandle, + shutdown: broadcast::Receiver<()>, ) -> Result<()> { let server = &services.server; let config = &server.config; diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index f6b6fba4..0e971f3c 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -9,12 +9,16 @@ use conduwuit::{debug_info, info, Result, Server}; use tokio::task::JoinSet; pub(super) async fn serve( - server: &Arc, app: Router, handle: ServerHandle, addrs: Vec, + server: &Arc, + app: Router, + handle: ServerHandle, + addrs: Vec, ) -> Result<()> { let app = app.into_make_service_with_connect_info::(); let mut join_set = JoinSet::new(); for addr in &addrs { - join_set.spawn_on(bind(*addr).handle(handle.clone()).serve(app.clone()), server.runtime()); + join_set + .spawn_on(bind(*addr).handle(handle.clone()).serve(app.clone()), server.runtime()); } info!("Listening on {addrs:?}"); diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index 7f54cfcc..9d3fbd3b 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -10,7 +10,12 @@ use conduwuit::{err, Result, Server}; use tokio::task::JoinSet; use tracing::{debug, info, warn}; -pub(super) async fn serve(server: &Arc, app: Router, handle: ServerHandle, addrs: Vec) -> Result { +pub(super) async fn serve( + server: &Arc, + app: Router, + handle: ServerHandle, + addrs: Vec, +) -> Result { let tls = &server.config.tls; let certs = tls .certs @@ -29,7 +34,8 @@ pub(super) async fn serve(server: &Arc, app: Router, handle: ServerHandl debug!("Using direct TLS. 
Certificate path {certs} and certificate private key path {key}",); info!( - "Note: It is strongly recommended that you use a reverse proxy instead of running conduwuit directly with TLS." + "Note: It is strongly recommended that you use a reverse proxy instead of running \ + conduwuit directly with TLS." ); let conf = RustlsConfig::from_pem_file(certs, key).await?; @@ -58,8 +64,8 @@ pub(super) async fn serve(server: &Arc, app: Router, handle: ServerHandl if tls.dual_protocol { warn!( - "Listening on {addrs:?} with TLS certificate {certs} and supporting plain text (HTTP) connections too \ - (insecure!)", + "Listening on {addrs:?} with TLS certificate {certs} and supporting plain text \ + (HTTP) connections too (insecure!)", ); } else { info!("Listening on {addrs:?} with TLS certificate {certs}"); diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index fb37e125..bee3c8c7 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -10,7 +10,9 @@ use axum::{ extract::{connect_info::IntoMakeServiceWithConnectInfo, Request}, Router, }; -use conduwuit::{debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server}; +use conduwuit::{ + debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server, +}; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ rt::{TokioExecutor, TokioIo}, @@ -31,7 +33,11 @@ const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::new const FINI_POLL_INTERVAL: Duration = Duration::from_millis(750); #[tracing::instrument(skip_all, level = "debug")] -pub(super) async fn serve(server: &Arc, app: Router, mut shutdown: broadcast::Receiver<()>) -> Result<()> { +pub(super) async fn serve( + server: &Arc, + app: Router, + mut shutdown: broadcast::Receiver<()>, +) -> Result<()> { let mut tasks = JoinSet::<()>::new(); let executor = TokioExecutor::new(); let app = app.into_make_service_with_connect_info::(); @@ -55,8 +61,12 @@ pub(super) async fn 
serve(server: &Arc, app: Router, mut shutdown: broad } async fn accept( - server: &Arc, listener: &UnixListener, tasks: &mut JoinSet<()>, mut app: MakeService, - builder: server::conn::auto::Builder, conn: (UnixStream, SocketAddr), + server: &Arc, + listener: &UnixListener, + tasks: &mut JoinSet<()>, + mut app: MakeService, + builder: server::conn::auto::Builder, + conn: (UnixStream, SocketAddr), ) { let (socket, remote) = conn; let socket = TokioIo::new(socket); @@ -103,7 +113,8 @@ async fn init(server: &Arc) -> Result { } let socket_perms = config.unix_socket_perms.to_string(); - let octal_perms = u32::from_str_radix(&socket_perms, 8).expect("failed to convert octal permissions"); + let octal_perms = + u32::from_str_radix(&socket_perms, 8).expect("failed to convert octal permissions"); let perms = std::fs::Permissions::from_mode(octal_perms); if let Err(e) = fs::set_permissions(&path, perms).await { return Err!("Failed to set socket {path:?} permissions: {e}"); diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 536a24e8..ddbc15a4 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -9,8 +9,8 @@ use database::{Deserialized, Handle, Interfix, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ events::{ - AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, + AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, + GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, RoomId, UserId, @@ -54,7 +54,11 @@ impl crate::Service for Service { #[allow(clippy::needless_pass_by_value)] #[implement(Service)] pub async fn update( - &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, data: &serde_json::Value, + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + event_type: RoomAccountDataEventType, + data: &serde_json::Value, ) -> 
Result<()> { if data.get("type").is_none() || data.get("content").is_none() { return Err!(Request(InvalidParam("Account data doesn't have all required fields."))); @@ -91,7 +95,12 @@ where /// Searches the global account data for a specific kind. #[implement(Service)] -pub async fn get_room(&self, room_id: &RoomId, user_id: &UserId, kind: RoomAccountDataEventType) -> Result +pub async fn get_room( + &self, + room_id: &RoomId, + user_id: &UserId, + kind: RoomAccountDataEventType, +) -> Result where T: for<'de> Deserialize<'de>, { @@ -101,7 +110,12 @@ where } #[implement(Service)] -pub async fn get_raw(&self, room_id: Option<&RoomId>, user_id: &UserId, kind: &str) -> Result> { +pub async fn get_raw( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + kind: &str, +) -> Result> { let key = (room_id, user_id, kind.to_owned()); self.db .roomusertype_roomuserdataid @@ -113,7 +127,10 @@ pub async fn get_raw(&self, room_id: Option<&RoomId>, user_id: &UserId, kind: &s /// Returns all changes to the account data that happened after `since`. 
#[implement(Service)] pub fn changes_since<'a>( - &'a self, room_id: Option<&'a RoomId>, user_id: &'a UserId, since: u64, + &'a self, + room_id: Option<&'a RoomId>, + user_id: &'a UserId, + since: u64, ) -> impl Stream + Send + 'a { let prefix = (room_id, user_id, Interfix); let prefix = database::serialize_key(prefix).expect("failed to serialize prefix"); @@ -128,8 +145,10 @@ pub fn changes_since<'a>( .ready_take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(_, v)| { match room_id { - Some(_) => serde_json::from_slice::>(v).map(AnyRawAccountDataEvent::Room), - None => serde_json::from_slice::>(v).map(AnyRawAccountDataEvent::Global), + | Some(_) => serde_json::from_slice::>(v) + .map(AnyRawAccountDataEvent::Room), + | None => serde_json::from_slice::>(v) + .map(AnyRawAccountDataEvent::Global), } .map_err(|e| err!(Database("Database contains invalid account data: {e}"))) .log_err() diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index c25eb6c6..0edcd2f3 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -94,15 +94,16 @@ impl Console { debug!("session starting"); while self.server.running() { match self.readline().await { - Ok(event) => match event { - ReadlineEvent::Line(string) => self.clone().handle(string).await, - ReadlineEvent::Interrupted => continue, - ReadlineEvent::Eof => break, - ReadlineEvent::Quit => self.server.shutdown().unwrap_or_else(error::default_log), + | Ok(event) => match event { + | ReadlineEvent::Line(string) => self.clone().handle(string).await, + | ReadlineEvent::Interrupted => continue, + | ReadlineEvent::Eof => break, + | ReadlineEvent::Quit => + self.server.shutdown().unwrap_or_else(error::default_log), }, - Err(error) => match error { - ReadlineError::Closed => break, - ReadlineError::IO(error) => { + | Err(error) => match error { + | ReadlineError::Closed => break, + | ReadlineError::IO(error) => { error!("console I/O: {error:?}"); break; }, @@ -158,9 +159,9 @@ impl Console 
{ async fn process(self: Arc, line: String) { match self.admin.command_in_place(line, None).await { - Ok(Some(ref content)) => self.output(content), - Err(ref content) => self.output_err(content), - _ => unreachable!(), + | Ok(Some(ref content)) => self.output(content), + | Err(ref content) => self.output_err(content), + | _ => unreachable!(), } } diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 971fdf67..7b691fb1 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -42,8 +42,9 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { let create_content = { use RoomVersionId::*; match room_version { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => RoomCreateEventContent::new_v1(server_user.clone()), - _ => RoomCreateEventContent::new_v11(), + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => + RoomCreateEventContent::new_v1(server_user.clone()), + | _ => RoomCreateEventContent::new_v11(), } }; @@ -52,15 +53,12 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomCreateEventContent { - federate: true, - predecessor: None, - room_version: room_version.clone(), - ..create_content - }, - ), + PduBuilder::state(String::new(), &RoomCreateEventContent { + federate: true, + predecessor: None, + room_version: room_version.clone(), + ..create_content + }), server_user, &room_id, &state_lock, @@ -72,7 +70,10 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder::state(server_user.to_string(), &RoomMemberEventContent::new(MembershipState::Join)), + PduBuilder::state( + server_user.to_string(), + &RoomMemberEventContent::new(MembershipState::Join), + ), server_user, &room_id, &state_lock, @@ -86,13 +87,10 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder::state( 
- String::new(), - &RoomPowerLevelsEventContent { - users, - ..Default::default() - }, - ), + PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { + users, + ..Default::default() + }), server_user, &room_id, &state_lock, @@ -131,7 +129,10 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomGuestAccessEventContent::new(GuestAccess::Forbidden)), + PduBuilder::state( + String::new(), + &RoomGuestAccessEventContent::new(GuestAccess::Forbidden), + ), server_user, &room_id, &state_lock, @@ -155,12 +156,9 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomTopicEventContent { - topic: format!("Manage {}", services.globals.server_name()), - }, - ), + PduBuilder::state(String::new(), &RoomTopicEventContent { + topic: format!("Manage {}", services.globals.server_name()), + }), server_user, &room_id, &state_lock, @@ -174,13 +172,10 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }, - ), + PduBuilder::state(String::new(), &RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }), server_user, &room_id, &state_lock, @@ -197,12 +192,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .rooms .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomPreviewUrlsEventContent { - disabled: true, - }, - ), + PduBuilder::state(String::new(), &RoomPreviewUrlsEventContent { disabled: true }), server_user, &room_id, &state_lock, diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 2b05bfc7..3ad9283f 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -34,7 +34,10 
@@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { self.services .timeline .build_and_append_pdu( - PduBuilder::state(user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Invite)), + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), server_user, &room_id, &state_lock, @@ -43,7 +46,10 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { self.services .timeline .build_and_append_pdu( - PduBuilder::state(user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Join)), + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Join), + ), user_id, &room_id, &state_lock, @@ -51,18 +57,18 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { .await?; // Set power level - let users = BTreeMap::from_iter([(server_user.clone(), 100.into()), (user_id.to_owned(), 100.into())]); + let users = BTreeMap::from_iter([ + (server_user.clone(), 100.into()), + (user_id.to_owned(), 100.into()), + ]); self.services .timeline .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomPowerLevelsEventContent { - users, - ..Default::default() - }, - ), + PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { + users, + ..Default::default() + }), server_user, &room_id, &state_lock, @@ -103,9 +109,7 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R .get_room(room_id, user_id, RoomAccountDataEventType::Tag) .await .unwrap_or_else(|_| TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + content: TagEventContent { tags: BTreeMap::new() }, }); event diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index c4783565..59639e58 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -10,7 +10,9 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, 
Result, Server}; +use conduwuit::{ + debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server, +}; pub use create::create_admin_room; use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; @@ -158,21 +160,19 @@ impl Service { /// the queue is full. pub fn command(&self, command: String, reply_id: Option) -> Result<()> { self.sender - .send(CommandInput { - command, - reply_id, - }) + .send(CommandInput { command, reply_id }) .map_err(|e| err!("Failed to enqueue admin command: {e:?}")) } /// Dispatches a comamnd to the processor on the current task and waits for /// completion. - pub async fn command_in_place(&self, command: String, reply_id: Option) -> ProcessorResult { - self.process_command(CommandInput { - command, - reply_id, - }) - .await + pub async fn command_in_place( + &self, + command: String, + reply_id: Option, + ) -> ProcessorResult { + self.process_command(CommandInput { command, reply_id }) + .await } /// Invokes the tab-completer to complete the command. 
When unavailable, @@ -191,8 +191,8 @@ impl Service { async fn handle_command(&self, command: CommandInput) { match self.process_command(command).await { - Ok(None) => debug!("Command successful with no response"), - Ok(Some(output)) | Err(output) => self + | Ok(None) => debug!("Command successful with no response"), + | Ok(Some(output)) | Err(output) => self .handle_response(output) .await .unwrap_or_else(default_log), @@ -250,10 +250,7 @@ impl Service { } async fn handle_response(&self, content: RoomMessageEventContent) -> Result<()> { - let Some(Relation::Reply { - in_reply_to, - }) = content.relates_to.as_ref() - else { + let Some(Relation::Reply { in_reply_to }) = content.relates_to.as_ref() else { return Ok(()); }; @@ -277,7 +274,10 @@ impl Service { } async fn respond_to_room( - &self, content: RoomMessageEventContent, room_id: &RoomId, user_id: &UserId, + &self, + content: RoomMessageEventContent, + room_id: &RoomId, + user_id: &UserId, ) -> Result<()> { assert!(self.user_is_admin(user_id).await, "sender is not admin"); @@ -298,12 +298,16 @@ impl Service { } async fn handle_response_error( - &self, e: Error, room_id: &RoomId, user_id: &UserId, state_lock: &RoomMutexGuard, + &self, + e: Error, + room_id: &RoomId, + user_id: &UserId, + state_lock: &RoomMutexGuard, ) -> Result<()> { error!("Failed to build and append admin room response PDU: \"{e}\""); let content = RoomMessageEventContent::text_plain(format!( - "Failed to build and append admin room PDU: \"{e}\"\n\nThe original admin command may have finished \ - successfully, but we could not return the output." + "Failed to build and append admin room PDU: \"{e}\"\n\nThe original admin command \ + may have finished successfully, but we could not return the output." 
)); self.services @@ -321,7 +325,8 @@ impl Service { // Admin command with public echo (in admin room) let server_user = &self.services.globals.server_user; - let is_public_prefix = body.starts_with("!admin") || body.starts_with(server_user.as_str()); + let is_public_prefix = + body.starts_with("!admin") || body.starts_with(server_user.as_str()); // Expected backward branch if !is_public_escape && !is_public_prefix { diff --git a/src/service/admin/startup.rs b/src/service/admin/startup.rs index 68ad4be1..582e863d 100644 --- a/src/service/admin/startup.rs +++ b/src/service/admin/startup.rs @@ -65,9 +65,9 @@ async fn startup_execute_command(&self, i: usize, command: String) -> Result<()> debug!("Startup command #{i}: executing {command:?}"); match self.command_in_place(command, None).await { - Ok(Some(output)) => Self::startup_command_output(i, &output), - Err(output) => Self::startup_command_error(i, &output), - Ok(None) => { + | Ok(Some(output)) => Self::startup_command_output(i, &output), + | Err(output) => Self::startup_command_error(i, &output), + | Ok(None) => { info!("Startup command #{i} completed (no output)."); Ok(()) }, diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index cf2921a7..2a54ee09 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -61,7 +61,11 @@ impl crate::Service for Service { impl Service { /// Registers an appservice and returns the ID to the caller - pub async fn register_appservice(&self, registration: &Registration, appservice_config_body: &str) -> Result { + pub async fn register_appservice( + &self, + registration: &Registration, + appservice_config_body: &str, + ) -> Result { //TODO: Check for collisions between exclusive appservice namespaces self.registration_info .write() @@ -152,7 +156,10 @@ impl Service { .any(|info| info.rooms.is_exclusive_match(room_id.as_str())) } - pub fn read(&self) -> impl Future>> { + pub fn read( + &self, + ) -> impl Future>> + { 
self.registration_info.read() } diff --git a/src/service/appservice/registration_info.rs b/src/service/appservice/registration_info.rs index 612933be..9758e186 100644 --- a/src/service/appservice/registration_info.rs +++ b/src/service/appservice/registration_info.rs @@ -15,13 +15,15 @@ pub struct RegistrationInfo { impl RegistrationInfo { #[must_use] pub fn is_user_match(&self, user_id: &UserId) -> bool { - self.users.is_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() + self.users.is_match(user_id.as_str()) + || self.registration.sender_localpart == user_id.localpart() } #[inline] #[must_use] pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { - self.users.is_exclusive_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() + self.users.is_exclusive_match(user_id.as_str()) + || self.registration.sender_localpart == user_id.localpart() } } diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index f2bc8256..f63d78b8 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -42,7 +42,9 @@ impl crate::Service for Service { .build()?, url_preview: base(config) - .and_then(|builder| builder_interface(builder, url_preview_bind_iface.as_deref()))? + .and_then(|builder| { + builder_interface(builder, url_preview_bind_iface.as_deref()) + })? 
.local_address(url_preview_bind_addr) .dns_resolver(resolver.resolver.clone()) .redirect(redirect::Policy::limited(3)) @@ -178,7 +180,10 @@ fn base(config: &Config) -> Result { } #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] -fn builder_interface(builder: reqwest::ClientBuilder, config: Option<&str>) -> Result { +fn builder_interface( + builder: reqwest::ClientBuilder, + config: Option<&str>, +) -> Result { if let Some(iface) = config { Ok(builder.interface(iface)) } else { @@ -187,7 +192,10 @@ fn builder_interface(builder: reqwest::ClientBuilder, config: Option<&str>) -> R } #[cfg(not(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))] -fn builder_interface(builder: reqwest::ClientBuilder, config: Option<&str>) -> Result { +fn builder_interface( + builder: reqwest::ClientBuilder, + config: Option<&str>, +) -> Result { use conduwuit::Err; if let Some(iface) = config { diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 5063fbd4..9b2e4025 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -3,7 +3,9 @@ use std::sync::Arc; use async_trait::async_trait; use conduwuit::{error, warn, Result}; use ruma::{ - events::{push_rules::PushRulesEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType}, + events::{ + push_rules::PushRulesEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, + }, push::Ruleset, }; @@ -31,16 +33,14 @@ impl crate::Service for Service { })) } - async fn worker(self: Arc) -> Result<()> { + async fn worker(self: Arc) -> Result { if self.services.globals.is_read_only() { return Ok(()); } - self.set_emergency_access() - .await - .inspect_err(|e| error!("Could not set the configured emergency password for the server user: {e}"))?; - - Ok(()) + self.set_emergency_access().await.inspect_err(|e| { + error!("Could not set the configured emergency password for the server user: {e}"); + }) } fn name(&self) -> &str { 
crate::service::make_name(std::module_path!()) } @@ -49,7 +49,7 @@ impl crate::Service for Service { impl Service { /// Sets the emergency password and push rules for the server user account /// in case emergency password is set - async fn set_emergency_access(&self) -> Result { + async fn set_emergency_access(&self) -> Result { let server_user = &self.services.globals.server_user; self.services @@ -57,8 +57,8 @@ impl Service { .set_password(server_user, self.services.globals.emergency_password().as_deref())?; let (ruleset, pwd_set) = match self.services.globals.emergency_password() { - Some(_) => (Ruleset::server_default(server_user), true), - None => (Ruleset::new(), false), + | Some(_) => (Ruleset::server_default(server_user), true), + | None => (Ruleset::new(), false), }; self.services @@ -68,9 +68,7 @@ impl Service { server_user, GlobalAccountDataEventType::PushRules.to_string().into(), &serde_json::to_value(&GlobalAccountDataEvent { - content: PushRulesEventContent { - global: ruleset, - }, + content: PushRulesEventContent { global: ruleset }, }) .expect("to json value always works"), ) @@ -78,14 +76,14 @@ impl Service { if pwd_set { warn!( - "The server account emergency password is set! Please unset it as soon as you finish admin account \ - recovery! You will be logged out of the server service account when you finish." + "The server account emergency password is set! Please unset it as soon as you \ + finish admin account recovery! You will be logged out of the server service \ + account when you finish." 
); + Ok(()) } else { // logs out any users still in the server service account and removes sessions - self.services.users.deactivate_account(server_user).await?; + self.services.users.deactivate_account(server_user).await } - - Ok(pwd_set) } } diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 5edd1f30..07b4ac2c 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -16,7 +16,9 @@ impl Data { let db = &args.db; Self { global: db["global"].clone(), - counter: RwLock::new(Self::stored_count(&db["global"]).expect("initialized global counter")), + counter: RwLock::new( + Self::stored_count(&db["global"]).expect("initialized global counter"), + ), db: args.db.clone(), } } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 2403b703..88199f5f 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -10,7 +10,9 @@ use std::{ use conduwuit::{error, Config, Result}; use data::Data; use regex::RegexSet; -use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, ServerName, UserId}; +use ruma::{ + OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, ServerName, UserId, +}; use tokio::sync::Mutex; use crate::service; @@ -40,31 +42,31 @@ impl crate::Service for Service { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); - let turn_secret = config - .turn_secret_file - .as_ref() - .map_or(config.turn_secret.clone(), |path| { - std::fs::read_to_string(path).unwrap_or_else(|e| { - error!("Failed to read the TURN secret file: {e}"); - - config.turn_secret.clone() - }) - }); - - let registration_token = + let turn_secret = config - .registration_token_file + .turn_secret_file .as_ref() - .map_or(config.registration_token.clone(), |path| { - let Ok(token) = std::fs::read_to_string(path).inspect_err(|e| { - error!("Failed to read the registration token file: {e}"); - }) else { - return 
config.registration_token.clone(); - }; + .map_or(config.turn_secret.clone(), |path| { + std::fs::read_to_string(path).unwrap_or_else(|e| { + error!("Failed to read the TURN secret file: {e}"); - Some(token) + config.turn_secret.clone() + }) }); + let registration_token = config.registration_token_file.as_ref().map_or( + config.registration_token.clone(), + |path| { + let Ok(token) = std::fs::read_to_string(path).inspect_err(|e| { + error!("Failed to read the registration token file: {e}"); + }) else { + return config.registration_token.clone(); + }; + + Some(token) + }, + ); + let mut s = Self { db, config: config.clone(), @@ -73,8 +75,11 @@ impl crate::Service for Service { stateres_mutex: Arc::new(Mutex::new(())), admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name)) .expect("#admins:server_name is valid alias name"), - server_user: UserId::parse_with_server_name(String::from("conduit"), &config.server_name) - .expect("@conduit:server_name is valid"), + server_user: UserId::parse_with_server_name( + String::from("conduit"), + &config.server_name, + ) + .expect("@conduit:server_name is valid"), turn_secret, registration_token, }; @@ -125,7 +130,9 @@ impl Service { pub fn allow_guest_registration(&self) -> bool { self.config.allow_guest_registration } - pub fn allow_guests_auto_join_rooms(&self) -> bool { self.config.allow_guests_auto_join_rooms } + pub fn allow_guests_auto_join_rooms(&self) -> bool { + self.config.allow_guests_auto_join_rooms + } pub fn log_guest_registrations(&self) -> bool { self.config.log_guest_registrations } @@ -137,17 +144,23 @@ impl Service { self.config.allow_public_room_directory_over_federation } - pub fn allow_device_name_federation(&self) -> bool { self.config.allow_device_name_federation } + pub fn allow_device_name_federation(&self) -> bool { + self.config.allow_device_name_federation + } pub fn allow_room_creation(&self) -> bool { self.config.allow_room_creation } - pub fn new_user_displayname_suffix(&self) 
-> &String { &self.config.new_user_displayname_suffix } + pub fn new_user_displayname_suffix(&self) -> &String { + &self.config.new_user_displayname_suffix + } pub fn allow_check_for_updates(&self) -> bool { self.config.allow_check_for_updates } pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { self.jwt_decoding_key.as_ref() } + pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { + self.jwt_decoding_key.as_ref() + } pub fn turn_password(&self) -> &String { &self.config.turn_password } @@ -173,11 +186,15 @@ impl Service { &self.config.url_preview_domain_explicit_denylist } - pub fn url_preview_url_contains_allowlist(&self) -> &Vec { &self.config.url_preview_url_contains_allowlist } + pub fn url_preview_url_contains_allowlist(&self) -> &Vec { + &self.config.url_preview_url_contains_allowlist + } pub fn url_preview_max_spider_size(&self) -> usize { self.config.url_preview_max_spider_size } - pub fn url_preview_check_root_domain(&self) -> bool { self.config.url_preview_check_root_domain } + pub fn url_preview_check_root_domain(&self) -> bool { + self.config.url_preview_check_root_domain + } pub fn forbidden_alias_names(&self) -> &RegexSet { &self.config.forbidden_alias_names } @@ -189,18 +206,26 @@ impl Service { pub fn allow_outgoing_presence(&self) -> bool { self.config.allow_outgoing_presence } - pub fn allow_incoming_read_receipts(&self) -> bool { self.config.allow_incoming_read_receipts } + pub fn allow_incoming_read_receipts(&self) -> bool { + self.config.allow_incoming_read_receipts + } - pub fn allow_outgoing_read_receipts(&self) -> bool { self.config.allow_outgoing_read_receipts } + pub fn allow_outgoing_read_receipts(&self) -> bool { + self.config.allow_outgoing_read_receipts + } pub fn block_non_admin_invites(&self) -> bool { self.config.block_non_admin_invites } /// checks if `user_id` is local to us via server_name comparison 
#[inline] - pub fn user_is_local(&self, user_id: &UserId) -> bool { self.server_is_ours(user_id.server_name()) } + pub fn user_is_local(&self, user_id: &UserId) -> bool { + self.server_is_ours(user_id.server_name()) + } #[inline] - pub fn server_is_ours(&self, server_name: &ServerName) -> bool { server_name == self.config.server_name } + pub fn server_is_ours(&self, server_name: &ServerName) -> bool { + server_name == self.config.server_name + } #[inline] pub fn is_read_only(&self) -> bool { self.db.db.is_read_only() } diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 140fc701..1165c3ed 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -48,7 +48,11 @@ impl crate::Service for Service { } #[implement(Service)] -pub fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw) -> Result { +pub fn create_backup( + &self, + user_id: &UserId, + backup_metadata: &Raw, +) -> Result { let version = self.services.globals.next_count()?.to_string(); let count = self.services.globals.next_count()?; @@ -71,13 +75,18 @@ pub async fn delete_backup(&self, user_id: &UserId, version: &str) { .backupkeyid_backup .keys_prefix_raw(&key) .ignore_err() - .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) + .ready_for_each(|outdated_key| { + self.db.backupkeyid_backup.remove(outdated_key); + }) .await; } #[implement(Service)] pub async fn update_backup<'a>( - &self, user_id: &UserId, version: &'a str, backup_metadata: &Raw, + &self, + user_id: &UserId, + version: &'a str, + backup_metadata: &Raw, ) -> Result<&'a str> { let key = (user_id, version); if self.db.backupid_algorithm.qry(&key).await.is_err() { @@ -110,7 +119,10 @@ pub async fn get_latest_backup_version(&self, user_id: &UserId) -> Result Result<(String, Raw)> { +pub async fn get_latest_backup( + &self, + user_id: &UserId, +) -> Result<(String, Raw)> { type Key<'a> = (&'a UserId, &'a str); type KeyVal<'a> = (Key<'a>, Raw); @@ -134,7 
+146,12 @@ pub async fn get_backup(&self, user_id: &UserId, version: &str) -> Result, + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + key_data: &Raw, ) -> Result<()> { let key = (user_id, version); if self.db.backupid_algorithm.qry(&key).await.is_err() { @@ -176,14 +193,16 @@ pub async fn get_etag(&self, user_id: &UserId, version: &str) -> String { } #[implement(Service)] -pub async fn get_all(&self, user_id: &UserId, version: &str) -> BTreeMap { +pub async fn get_all( + &self, + user_id: &UserId, + version: &str, +) -> BTreeMap { type Key<'a> = (Ignore, Ignore, &'a RoomId, &'a str); type KeyVal<'a> = (Key<'a>, Raw); let mut rooms = BTreeMap::::new(); - let default = || RoomKeyBackup { - sessions: BTreeMap::new(), - }; + let default = || RoomKeyBackup { sessions: BTreeMap::new() }; let prefix = (user_id, version, Interfix); self.db @@ -204,7 +223,10 @@ pub async fn get_all(&self, user_id: &UserId, version: &str) -> BTreeMap BTreeMap> { type KeyVal<'a> = ((Ignore, Ignore, Ignore, &'a str), Raw); @@ -213,14 +235,20 @@ pub async fn get_room( .backupkeyid_backup .stream_prefix(&prefix) .ignore_err() - .map(|((.., session_id), key_backup_data): KeyVal<'_>| (session_id.to_owned(), key_backup_data)) + .map(|((.., session_id), key_backup_data): KeyVal<'_>| { + (session_id.to_owned(), key_backup_data) + }) .collect() .await } #[implement(Service)] pub async fn get_session( - &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, ) -> Result> { let key = (user_id, version, room_id, session_id); @@ -245,17 +273,27 @@ pub async fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: & .backupkeyid_backup .keys_prefix_raw(&key) .ignore_err() - .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) + .ready_for_each(|outdated_key| { + self.db.backupkeyid_backup.remove(outdated_key); + }) .await; } 
#[implement(Service)] -pub async fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str) { +pub async fn delete_room_key( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, +) { let key = (user_id, version, room_id, session_id); self.db .backupkeyid_backup .keys_prefix_raw(&key) .ignore_err() - .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) + .ready_for_each(|outdated_key| { + self.db.backupkeyid_backup.remove(outdated_key); + }) .await; } diff --git a/src/service/manager.rs b/src/service/manager.rs index 3ce2e0a5..ea33d285 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -102,21 +102,32 @@ impl Manager { unimplemented!("unexpected worker task abort {error:?}"); } - async fn handle_result(&self, workers: &mut WorkersLocked<'_>, result: WorkerResult) -> Result<()> { + async fn handle_result( + &self, + workers: &mut WorkersLocked<'_>, + result: WorkerResult, + ) -> Result<()> { let (service, result) = result; match result { - Ok(()) => self.handle_finished(workers, &service).await, - Err(error) => self.handle_error(workers, &service, error).await, + | Ok(()) => self.handle_finished(workers, &service).await, + | Err(error) => self.handle_error(workers, &service, error).await, } } - async fn handle_finished(&self, _workers: &mut WorkersLocked<'_>, service: &Arc) -> Result<()> { + async fn handle_finished( + &self, + _workers: &mut WorkersLocked<'_>, + service: &Arc, + ) -> Result<()> { debug!("service {:?} worker finished", service.name()); Ok(()) } async fn handle_error( - &self, workers: &mut WorkersLocked<'_>, service: &Arc, error: Error, + &self, + workers: &mut WorkersLocked<'_>, + service: &Arc, + error: Error, ) -> Result<()> { let name = service.name(); error!("service {name:?} aborted: {error}"); @@ -138,9 +149,16 @@ impl Manager { } /// Start the worker in a task for the service. 
- async fn start_worker(&self, workers: &mut WorkersLocked<'_>, service: &Arc) -> Result<()> { + async fn start_worker( + &self, + workers: &mut WorkersLocked<'_>, + service: &Arc, + ) -> Result<()> { if !self.server.running() { - return Err!("Service {:?} worker not starting during server shutdown.", service.name()); + return Err!( + "Service {:?} worker not starting during server shutdown.", + service.name() + ); } debug!("Service {:?} worker starting...", service.name()); diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 71fb1cdb..43310515 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -34,7 +34,11 @@ impl Data { } pub(super) fn create_file_metadata( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, dim: &Dim, content_disposition: Option<&ContentDisposition>, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + dim: &Dim, + content_disposition: Option<&ContentDisposition>, content_type: Option<&str>, ) -> Result> { let dim: &[u32] = &[dim.width, dim.height]; @@ -63,7 +67,10 @@ impl Data { .stream_prefix_raw(&prefix) .ignore_err() .ready_for_each(|(key, val)| { - debug_assert!(key.starts_with(mxc.to_string().as_bytes()), "key should start with the mxc"); + debug_assert!( + key.starts_with(mxc.to_string().as_bytes()), + "key should start with the mxc" + ); let user = str_from_bytes(val).unwrap_or_default(); debug_info!("Deleting key {key:?} which was uploaded by user {user}"); @@ -95,7 +102,11 @@ impl Data { Ok(keys) } - pub(super) async fn search_file_metadata(&self, mxc: &Mxc<'_>, dim: &Dim) -> Result { + pub(super) async fn search_file_metadata( + &self, + mxc: &Mxc<'_>, + dim: &Dim, + ) -> Result { let dim: &[u32] = &[dim.width, dim.height]; let prefix = (mxc, dim, Interfix); @@ -113,8 +124,9 @@ impl Data { let content_type = parts .next() .map(|bytes| { - string_from_bytes(bytes) - .map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode.")) + string_from_bytes(bytes).map_err(|_| { + 
Error::bad_database("Content type in mediaid_file is invalid unicode.") + }) }) .transpose()?; @@ -127,16 +139,16 @@ impl Data { } else { Some( string_from_bytes(content_disposition_bytes) - .map_err(|_| Error::bad_database("Content Disposition in mediaid_file is invalid unicode."))? + .map_err(|_| { + Error::bad_database( + "Content Disposition in mediaid_file is invalid unicode.", + ) + })? .parse()?, ) }; - Ok(Metadata { - content_disposition, - content_type, - key, - }) + Ok(Metadata { content_disposition, content_type, key }) } /// Gets all the MXCs associated with a user @@ -144,7 +156,9 @@ impl Data { self.mediaid_user .stream() .ignore_err() - .ready_filter_map(|(key, user): (&str, &UserId)| (user == user_id).then(|| key.into())) + .ready_filter_map(|(key, user): (&str, &UserId)| { + (user == user_id).then(|| key.into()) + }) .collect() .await } @@ -166,7 +180,12 @@ impl Data { Ok(()) } - pub(super) fn set_url_preview(&self, url: &str, data: &UrlPreviewData, timestamp: Duration) -> Result<()> { + pub(super) fn set_url_preview( + &self, + url: &str, + data: &UrlPreviewData, + timestamp: Duration, + ) -> Result<()> { let mut value = Vec::::new(); value.extend_from_slice(×tamp.as_secs().to_be_bytes()); value.push(0xFF); @@ -218,43 +237,43 @@ impl Data { .next() .and_then(|b| String::from_utf8(b.to_vec()).ok()) { - Some(s) if s.is_empty() => None, - x => x, + | Some(s) if s.is_empty() => None, + | x => x, }; let description = match values .next() .and_then(|b| String::from_utf8(b.to_vec()).ok()) { - Some(s) if s.is_empty() => None, - x => x, + | Some(s) if s.is_empty() => None, + | x => x, }; let image = match values .next() .and_then(|b| String::from_utf8(b.to_vec()).ok()) { - Some(s) if s.is_empty() => None, - x => x, + | Some(s) if s.is_empty() => None, + | x => x, }; let image_size = match values .next() .map(|b| usize::from_be_bytes(b.try_into().unwrap_or_default())) { - Some(0) => None, - x => x, + | Some(0) => None, + | x => x, }; let image_width = match 
values .next() .map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default())) { - Some(0) => None, - x => x, + | Some(0) => None, + | x => x, }; let image_height = match values .next() .map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default())) { - Some(0) => None, - x => x, + | Some(0) => None, + | x => x, }; Ok(UrlPreviewData { diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 5932643b..9555edd7 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -83,7 +83,8 @@ pub(crate) async fn checkup_sha256_media(services: &Services) -> Result<()> { for key in media.db.get_all_media_keys().await { let new_path = media.get_media_file_sha256(&key).into_os_string(); let old_path = media.get_media_file_b64(&key).into_os_string(); - if let Err(e) = handle_media_check(&dbs, config, &files, &key, &new_path, &old_path).await { + if let Err(e) = handle_media_check(&dbs, config, &files, &key, &new_path, &old_path).await + { error!( media_id = ?encode_key(&key), ?new_path, ?old_path, "Failed to resolve media check failure: {e}" @@ -100,8 +101,12 @@ pub(crate) async fn checkup_sha256_media(services: &Services) -> Result<()> { } async fn handle_media_check( - dbs: &(&Arc, &Arc), config: &Config, files: &HashSet, key: &[u8], - new_path: &OsStr, old_path: &OsStr, + dbs: &(&Arc, &Arc), + config: &Config, + files: &HashSet, + key: &[u8], + new_path: &OsStr, + old_path: &OsStr, ) -> Result<()> { use crate::media::encode_key; diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 989e1c27..7e77090c 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -80,13 +80,21 @@ impl crate::Service for Service { impl Service { /// Uploads a file. 
pub async fn create( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, content_disposition: Option<&ContentDisposition>, - content_type: Option<&str>, file: &[u8], + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + content_disposition: Option<&ContentDisposition>, + content_type: Option<&str>, + file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail - let key = self - .db - .create_file_metadata(mxc, user, &Dim::default(), content_disposition, content_type)?; + let key = self.db.create_file_metadata( + mxc, + user, + &Dim::default(), + content_disposition, + content_type, + )?; //TODO: Dangling metadata in database if creation fails let mut f = self.create_media_file(&key).await?; @@ -132,10 +140,10 @@ impl Service { debug_info!(%deletion_count, "Deleting MXC {mxc} by user {user} from database and filesystem"); match self.delete(&mxc).await { - Ok(()) => { + | Ok(()) => { deletion_count = deletion_count.saturating_add(1); }, - Err(e) => { + | Err(e) => { debug_error!(%deletion_count, "Failed to delete {mxc} from user {user}, ignoring error: {e}"); }, } @@ -146,11 +154,8 @@ impl Service { /// Downloads a file. 
pub async fn get(&self, mxc: &Mxc<'_>) -> Result> { - if let Ok(Metadata { - content_disposition, - content_type, - key, - }) = self.db.search_file_metadata(mxc, &Dim::default()).await + if let Ok(Metadata { content_disposition, content_type, key }) = + self.db.search_file_metadata(mxc, &Dim::default()).await { let mut content = Vec::new(); let path = self.get_media_file(&key); @@ -181,13 +186,19 @@ impl Service { let mxc = parts .next() .map(|bytes| { - utils::string_from_bytes(bytes) - .map_err(|e| err!(Database(error!("Failed to parse MXC unicode bytes from our database: {e}")))) + utils::string_from_bytes(bytes).map_err(|e| { + err!(Database(error!( + "Failed to parse MXC unicode bytes from our database: {e}" + ))) + }) }) .transpose()?; let Some(mxc_s) = mxc else { - debug_warn!(?mxc, "Parsed MXC URL unicode bytes from database but is still invalid"); + debug_warn!( + ?mxc, + "Parsed MXC URL unicode bytes from database but is still invalid" + ); continue; }; @@ -207,7 +218,11 @@ impl Service { /// Deletes all remote only media files in the given at or after /// time/duration. Returns a usize with the amount of media files deleted. 
pub async fn delete_all_remote_media_at_after_time( - &self, time: SystemTime, before: bool, after: bool, yes_i_want_to_delete_local_media: bool, + &self, + time: SystemTime, + before: bool, + after: bool, + yes_i_want_to_delete_local_media: bool, ) -> Result { let all_keys = self.db.get_all_media_keys().await; let mut remote_mxcs = Vec::with_capacity(all_keys.len()); @@ -218,19 +233,26 @@ impl Service { let mxc = parts .next() .map(|bytes| { - utils::string_from_bytes(bytes) - .map_err(|e| err!(Database(error!("Failed to parse MXC unicode bytes from our database: {e}")))) + utils::string_from_bytes(bytes).map_err(|e| { + err!(Database(error!( + "Failed to parse MXC unicode bytes from our database: {e}" + ))) + }) }) .transpose()?; let Some(mxc_s) = mxc else { - debug_warn!(?mxc, "Parsed MXC URL unicode bytes from database but is still invalid"); + debug_warn!( + ?mxc, + "Parsed MXC URL unicode bytes from database but is still invalid" + ); continue; }; trace!("Parsed MXC key to URL: {mxc_s}"); let mxc = OwnedMxcUri::from(mxc_s); - if (mxc.server_name() == Ok(self.services.globals.server_name()) && !yes_i_want_to_delete_local_media) + if (mxc.server_name() == Ok(self.services.globals.server_name()) + && !yes_i_want_to_delete_local_media) || !mxc.is_valid() { debug!("Ignoring local or broken media MXC: {mxc}"); @@ -240,9 +262,12 @@ impl Service { let path = self.get_media_file(&key); let file_metadata = match fs::metadata(path.clone()).await { - Ok(file_metadata) => file_metadata, - Err(e) => { - error!("Failed to obtain file metadata for MXC {mxc} at file path \"{path:?}\", skipping: {e}"); + | Ok(file_metadata) => file_metadata, + | Err(e) => { + error!( + "Failed to obtain file metadata for MXC {mxc} at file path \ + \"{path:?}\", skipping: {e}" + ); continue; }, }; @@ -250,12 +275,12 @@ impl Service { trace!(%mxc, ?path, "File metadata: {file_metadata:?}"); let file_created_at = match file_metadata.created() { - Ok(value) => value, - Err(err) if err.kind() == 
std::io::ErrorKind::Unsupported => { + | Ok(value) => value, + | Err(err) if err.kind() == std::io::ErrorKind::Unsupported => { debug!("btime is unsupported, using mtime instead"); file_metadata.modified()? }, - Err(err) => { + | Err(err) => { error!("Could not delete MXC {mxc} at path {path:?}: {err:?}. Skipping..."); continue; }, @@ -264,10 +289,16 @@ impl Service { debug!("File created at: {file_created_at:?}"); if file_created_at >= time && before { - debug!("File is within (before) user duration, pushing to list of file paths and keys to delete."); + debug!( + "File is within (before) user duration, pushing to list of file paths and \ + keys to delete." + ); remote_mxcs.push(mxc.to_string()); } else if file_created_at <= time && after { - debug!("File is not within (after) user duration, pushing to list of file paths and keys to delete."); + debug!( + "File is not within (after) user duration, pushing to list of file paths \ + and keys to delete." + ); remote_mxcs.push(mxc.to_string()); } } @@ -289,10 +320,10 @@ impl Service { debug_info!("Deleting MXC {mxc} from database and filesystem"); match self.delete(&mxc).await { - Ok(()) => { + | Ok(()) => { deletion_count = deletion_count.saturating_add(1); }, - Err(e) => { + | Err(e) => { warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); continue; }, diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index d571ac56..b1c53305 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -53,10 +53,10 @@ pub async fn download_image(&self, url: &str) -> Result { self.create(&mxc, None, None, None, &image).await?; let (width, height) = match ImgReader::new(Cursor::new(&image)).with_guessed_format() { - Err(_) => (None, None), - Ok(reader) => match reader.into_dimensions() { - Err(_) => (None, None), - Ok((width, height)) => (Some(width), Some(height)), + | Err(_) => (None, None), + | Ok(reader) => match reader.into_dimensions() { + | Err(_) => (None, None), + | 
Ok((width, height)) => (Some(width), Some(height)), }, }; @@ -79,8 +79,8 @@ pub async fn get_url_preview(&self, url: &Url) -> Result { let _request_lock = self.url_preview_mutex.lock(url.as_str()).await; match self.db.get_url_preview(url.as_str()).await { - Ok(preview) => Ok(preview), - Err(_) => self.request_url_preview(url).await, + | Ok(preview) => Ok(preview), + | Err(_) => self.request_url_preview(url).await, } } @@ -111,9 +111,9 @@ async fn request_url_preview(&self, url: &Url) -> Result { return Err!(Request(Unknown("Unknown Content-Type"))); }; let data = match content_type { - html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, - img if img.starts_with("image/") => self.download_image(url.as_str()).await?, - _ => return Err!(Request(Unknown("Unsupported Content-Type"))), + | html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, + | img if img.starts_with("image/") => self.download_image(url.as_str()).await?, + | _ => return Err!(Request(Unknown("Unsupported Content-Type"))), }; self.set_url_preview(url.as_str(), &data).await?; @@ -131,8 +131,9 @@ async fn download_html(&self, url: &str) -> Result { bytes.extend_from_slice(&chunk); if bytes.len() > self.services.globals.url_preview_max_spider_size() { debug!( - "Response body from URL {} exceeds url_preview_max_spider_size ({}), not processing the rest of the \ - response body and assuming our necessary data is in this range.", + "Response body from URL {} exceeds url_preview_max_spider_size ({}), not \ + processing the rest of the response body and assuming our necessary data is in \ + this range.", url, self.services.globals.url_preview_max_spider_size() ); @@ -145,8 +146,8 @@ async fn download_html(&self, url: &str) -> Result { }; let mut data = match html.opengraph.images.first() { - None => UrlPreviewData::default(), - Some(obj) => self.download_image(&obj.url).await?, + | None => UrlPreviewData::default(), + | Some(obj) => 
self.download_image(&obj.url).await?, }; let props = html.opengraph.properties; @@ -169,11 +170,11 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { } let host = match url.host_str() { - None => { + | None => { debug!("Ignoring URL preview for a URL that does not have a host (?): {}", url); return false; }, - Some(h) => h.to_owned(), + | Some(h) => h.to_owned(), }; let allowlist_domain_contains = self @@ -205,7 +206,10 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { } if allowlist_domain_explicit.contains(&host) { - debug!("Host {} is allowed by url_preview_domain_explicit_allowlist (check 2/4)", &host); + debug!( + "Host {} is allowed by url_preview_domain_explicit_allowlist (check 2/4)", + &host + ); return true; } @@ -213,7 +217,10 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { .iter() .any(|domain_s| domain_s.contains(&host.clone())) { - debug!("Host {} is allowed by url_preview_domain_contains_allowlist (check 3/4)", &host); + debug!( + "Host {} is allowed by url_preview_domain_contains_allowlist (check 3/4)", + &host + ); return true; } @@ -229,11 +236,12 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { if self.services.globals.url_preview_check_root_domain() { debug!("Checking root domain"); match host.split_once('.') { - None => return false, - Some((_, root_domain)) => { + | None => return false, + | Some((_, root_domain)) => { if denylist_domain_explicit.contains(&root_domain.to_owned()) { debug!( - "Root domain {} is not allowed by url_preview_domain_explicit_denylist (check 1/3)", + "Root domain {} is not allowed by \ + url_preview_domain_explicit_denylist (check 1/3)", &root_domain ); return true; @@ -241,7 +249,8 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { if allowlist_domain_explicit.contains(&root_domain.to_owned()) { debug!( - "Root domain {} is allowed by url_preview_domain_explicit_allowlist (check 2/3)", + "Root domain {} is allowed by url_preview_domain_explicit_allowlist \ + (check 2/3)", 
&root_domain ); return true; @@ -252,7 +261,8 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { .any(|domain_s| domain_s.contains(&root_domain.to_owned())) { debug!( - "Root domain {} is allowed by url_preview_domain_contains_allowlist (check 3/3)", + "Root domain {} is allowed by url_preview_domain_contains_allowlist \ + (check 3/3)", &root_domain ); return true; diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 8ec917b7..d5ad5391 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -1,6 +1,9 @@ use std::{fmt::Debug, time::Duration}; -use conduwuit::{debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, Result}; +use conduwuit::{ + debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, + Result, +}; use http::header::{HeaderValue, CONTENT_DISPOSITION, CONTENT_TYPE}; use ruma::{ api::{ @@ -19,7 +22,12 @@ use super::{Dim, FileMeta}; #[implement(super::Service)] pub async fn fetch_remote_thumbnail( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, dim: &Dim, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + server: Option<&ServerName>, + timeout_ms: Duration, + dim: &Dim, ) -> Result { self.check_fetch_authorized(mxc)?; @@ -38,7 +46,11 @@ pub async fn fetch_remote_thumbnail( #[implement(super::Service)] pub async fn fetch_remote_content( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + server: Option<&ServerName>, + timeout_ms: Duration, ) -> Result { self.check_fetch_authorized(mxc)?; @@ -57,7 +69,12 @@ pub async fn fetch_remote_content( #[implement(super::Service)] async fn fetch_thumbnail_authenticated( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, dim: &Dim, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + server: 
Option<&ServerName>, + timeout_ms: Duration, + dim: &Dim, ) -> Result { use federation::authenticated_media::get_content_thumbnail::v1::{Request, Response}; @@ -70,20 +87,22 @@ async fn fetch_thumbnail_authenticated( timeout_ms, }; - let Response { - content, - .. - } = self.federation_request(mxc, user, server, request).await?; + let Response { content, .. } = self.federation_request(mxc, user, server, request).await?; match content { - FileOrLocation::File(content) => self.handle_thumbnail_file(mxc, user, dim, content).await, - FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await, + | FileOrLocation::File(content) => + self.handle_thumbnail_file(mxc, user, dim, content).await, + | FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await, } } #[implement(super::Service)] async fn fetch_content_authenticated( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + server: Option<&ServerName>, + timeout_ms: Duration, ) -> Result { use federation::authenticated_media::get_content::v1::{Request, Response}; @@ -92,21 +111,23 @@ async fn fetch_content_authenticated( timeout_ms, }; - let Response { - content, - .. - } = self.federation_request(mxc, user, server, request).await?; + let Response { content, .. 
} = self.federation_request(mxc, user, server, request).await?; match content { - FileOrLocation::File(content) => self.handle_content_file(mxc, user, content).await, - FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await, + | FileOrLocation::File(content) => self.handle_content_file(mxc, user, content).await, + | FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await, } } #[allow(deprecated)] #[implement(super::Service)] async fn fetch_thumbnail_unauthenticated( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, dim: &Dim, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + server: Option<&ServerName>, + timeout_ms: Duration, + dim: &Dim, ) -> Result { use media::get_content_thumbnail::v3::{Request, Response}; @@ -123,17 +144,10 @@ async fn fetch_thumbnail_unauthenticated( }; let Response { - file, - content_type, - content_disposition, - .. + file, content_type, content_disposition, .. } = self.federation_request(mxc, user, server, request).await?; - let content = Content { - file, - content_type, - content_disposition, - }; + let content = Content { file, content_type, content_disposition }; self.handle_thumbnail_file(mxc, user, dim, content).await } @@ -141,7 +155,11 @@ async fn fetch_thumbnail_unauthenticated( #[allow(deprecated)] #[implement(super::Service)] async fn fetch_content_unauthenticated( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + server: Option<&ServerName>, + timeout_ms: Duration, ) -> Result { use media::get_content::v3::{Request, Response}; @@ -154,27 +172,27 @@ async fn fetch_content_unauthenticated( }; let Response { - file, - content_type, - content_disposition, - .. + file, content_type, content_disposition, .. 
} = self.federation_request(mxc, user, server, request).await?; - let content = Content { - file, - content_type, - content_disposition, - }; + let content = Content { file, content_type, content_disposition }; self.handle_content_file(mxc, user, content).await } #[implement(super::Service)] async fn handle_thumbnail_file( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, dim: &Dim, content: Content, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + dim: &Dim, + content: Content, ) -> Result { - let content_disposition = - make_content_disposition(content.content_disposition.as_ref(), content.content_type.as_deref(), None); + let content_disposition = make_content_disposition( + content.content_disposition.as_ref(), + content.content_type.as_deref(), + None, + ); self.upload_thumbnail( mxc, @@ -193,9 +211,17 @@ async fn handle_thumbnail_file( } #[implement(super::Service)] -async fn handle_content_file(&self, mxc: &Mxc<'_>, user: Option<&UserId>, content: Content) -> Result { - let content_disposition = - make_content_disposition(content.content_disposition.as_ref(), content.content_type.as_deref(), None); +async fn handle_content_file( + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + content: Content, +) -> Result { + let content_disposition = make_content_disposition( + content.content_disposition.as_ref(), + content.content_type.as_deref(), + None, + ); self.create( mxc, @@ -213,7 +239,12 @@ async fn handle_content_file(&self, mxc: &Mxc<'_>, user: Option<&UserId>, conten } #[implement(super::Service)] -async fn handle_location(&self, mxc: &Mxc<'_>, user: Option<&UserId>, location: &str) -> Result { +async fn handle_location( + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + location: &str, +) -> Result { self.location_request(location).await.map_err(|error| { err!(Request(NotFound( debug_warn!(%mxc, ?user, ?location, ?error, "Fetching media from location failed") @@ -263,7 +294,11 @@ async fn location_request(&self, location: &str) -> Result { 
#[implement(super::Service)] async fn federation_request( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, request: Request, + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + server: Option<&ServerName>, + request: Request, ) -> Result where Request: OutgoingRequest + Send + Debug, @@ -277,7 +312,12 @@ where // Handles and adjusts the error for the caller to determine if they should // request the fallback endpoint or give up. -fn handle_federation_error(mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, error: Error) -> Error { +fn handle_federation_error( + mxc: &Mxc<'_>, + user: Option<&UserId>, + server: Option<&ServerName>, + error: Error, +) -> Error { let fallback = || { err!(Request(NotFound( debug_error!(%mxc, ?user, ?server, ?error, "Remote media not found") @@ -303,7 +343,8 @@ fn handle_federation_error(mxc: &Mxc<'_>, user: Option<&UserId>, server: Option< #[implement(super::Service)] #[allow(deprecated)] pub async fn fetch_remote_thumbnail_legacy( - &self, body: &media::get_content_thumbnail::v3::Request, + &self, + body: &media::get_content_thumbnail::v3::Request, ) -> Result { let mxc = Mxc { server_name: &body.server_name, @@ -315,20 +356,17 @@ pub async fn fetch_remote_thumbnail_legacy( let reponse = self .services .sending - .send_federation_request( - mxc.server_name, - media::get_content_thumbnail::v3::Request { - allow_remote: body.allow_remote, - height: body.height, - width: body.width, - method: body.method.clone(), - server_name: body.server_name.clone(), - media_id: body.media_id.clone(), - timeout_ms: body.timeout_ms, - allow_redirect: body.allow_redirect, - animated: body.animated, - }, - ) + .send_federation_request(mxc.server_name, media::get_content_thumbnail::v3::Request { + allow_remote: body.allow_remote, + height: body.height, + width: body.width, + method: body.method.clone(), + server_name: body.server_name.clone(), + media_id: body.media_id.clone(), + timeout_ms: body.timeout_ms, + 
allow_redirect: body.allow_redirect, + animated: body.animated, + }) .await?; let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; @@ -341,27 +379,30 @@ pub async fn fetch_remote_thumbnail_legacy( #[implement(super::Service)] #[allow(deprecated)] pub async fn fetch_remote_content_legacy( - &self, mxc: &Mxc<'_>, allow_redirect: bool, timeout_ms: Duration, + &self, + mxc: &Mxc<'_>, + allow_redirect: bool, + timeout_ms: Duration, ) -> Result { self.check_legacy_freeze()?; self.check_fetch_authorized(mxc)?; let response = self .services .sending - .send_federation_request( - mxc.server_name, - media::get_content::v3::Request { - allow_remote: true, - server_name: mxc.server_name.into(), - media_id: mxc.media_id.into(), - timeout_ms, - allow_redirect, - }, - ) + .send_federation_request(mxc.server_name, media::get_content::v3::Request { + allow_remote: true, + server_name: mxc.server_name.into(), + media_id: mxc.media_id.into(), + timeout_ms, + allow_redirect, + }) .await?; - let content_disposition = - make_content_disposition(response.content_disposition.as_ref(), response.content_type.as_deref(), None); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); self.create( mxc, diff --git a/src/service/media/tests.rs b/src/service/media/tests.rs index b2f31e6f..1d6dce30 100644 --- a/src/service/media/tests.rs +++ b/src/service/media/tests.rs @@ -13,7 +13,12 @@ async fn long_file_names_works() { impl Data for MockedKVDatabase { fn create_file_metadata( - &self, _sender_user: Option<&str>, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, + &self, + _sender_user: Option<&str>, + mxc: String, + width: u32, + height: u32, + content_disposition: Option<&str>, content_type: Option<&str>, ) -> Result> { // copied from src/database/key_value/media.rs @@ -46,14 +51,22 @@ async fn long_file_names_works() { fn get_all_media_keys(&self) -> Vec> { 
todo!() } fn search_file_metadata( - &self, _mxc: String, _width: u32, _height: u32, + &self, + _mxc: String, + _width: u32, + _height: u32, ) -> Result<(Option, Option, Vec)> { todo!() } fn remove_url_preview(&self, _url: &str) -> Result<()> { todo!() } - fn set_url_preview(&self, _url: &str, _data: &UrlPreviewData, _timestamp: std::time::Duration) -> Result<()> { + fn set_url_preview( + &self, + _url: &str, + _data: &UrlPreviewData, + _timestamp: std::time::Duration, + ) -> Result<()> { todo!() } @@ -64,11 +77,18 @@ async fn long_file_names_works() { let mxc = "mxc://example.com/ascERGshawAWawugaAcauga".to_owned(); let width = 100; let height = 100; - let content_disposition = "attachment; filename=\"this is a very long file name with spaces and special \ - characters like äöüß and even emoji like 🦀.png\""; + let content_disposition = "attachment; filename=\"this is a very long file name with spaces \ + and special characters like äöüß and even emoji like 🦀.png\""; let content_type = "image/png"; let key = db - .create_file_metadata(None, mxc, width, height, Some(content_disposition), Some(content_type)) + .create_file_metadata( + None, + mxc, + width, + height, + Some(content_disposition), + Some(content_type), + ) .unwrap(); let mut r = PathBuf::from("/tmp/media"); // r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 42fc40e7..5c8063cb 100644 --- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -22,12 +22,17 @@ impl super::Service { /// Uploads or replaces a file thumbnail. 
#[allow(clippy::too_many_arguments)] pub async fn upload_thumbnail( - &self, mxc: &Mxc<'_>, user: Option<&UserId>, content_disposition: Option<&ContentDisposition>, - content_type: Option<&str>, dim: &Dim, file: &[u8], + &self, + mxc: &Mxc<'_>, + user: Option<&UserId>, + content_disposition: Option<&ContentDisposition>, + content_type: Option<&str>, + dim: &Dim, + file: &[u8], ) -> Result<()> { - let key = self - .db - .create_file_metadata(mxc, user, dim, content_disposition, content_type)?; + let key = + self.db + .create_file_metadata(mxc, user, dim, content_disposition, content_type)?; //TODO: Dangling metadata in database if creation fails let mut f = self.create_media_file(&key).await?; @@ -78,7 +83,12 @@ impl super::Service { /// Generate a thumbnail #[tracing::instrument(skip(self), name = "generate", level = "debug")] - async fn get_thumbnail_generate(&self, mxc: &Mxc<'_>, dim: &Dim, data: Metadata) -> Result> { + async fn get_thumbnail_generate( + &self, + mxc: &Mxc<'_>, + dim: &Dim, + data: Metadata, + ) -> Result> { let mut content = Vec::new(); let path = self.get_media_file(&data.key); fs::File::open(path) @@ -117,11 +127,7 @@ impl super::Service { fn thumbnail_generate(image: &DynamicImage, requested: &Dim) -> Result { let thumbnail = if !requested.crop() { - let Dim { - width, - height, - .. - } = requested.scaled(&Dim { + let Dim { width, height, .. 
} = requested.scaled(&Dim { width: image.width(), height: image.height(), ..Dim::default() @@ -202,12 +208,12 @@ impl Dim { #[must_use] pub fn normalized(&self) -> Self { match (self.width, self.height) { - (0..=32, 0..=32) => Self::new(32, 32, Some(Method::Crop)), - (0..=96, 0..=96) => Self::new(96, 96, Some(Method::Crop)), - (0..=320, 0..=240) => Self::new(320, 240, Some(Method::Scale)), - (0..=640, 0..=480) => Self::new(640, 480, Some(Method::Scale)), - (0..=800, 0..=600) => Self::new(800, 600, Some(Method::Scale)), - _ => Self::default(), + | (0..=32, 0..=32) => Self::new(32, 32, Some(Method::Crop)), + | (0..=96, 0..=96) => Self::new(96, 96, Some(Method::Crop)), + | (0..=320, 0..=240) => Self::new(320, 240, Some(Method::Scale)), + | (0..=640, 0..=480) => Self::new(640, 480, Some(Method::Scale)), + | (0..=800, 0..=600) => Self::new(800, 600, Some(Method::Scale)), + | _ => Self::default(), } } diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 102ac7d4..adf75c0b 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -12,7 +12,9 @@ use conduwuit::{ use futures::{FutureExt, StreamExt}; use itertools::Itertools; use ruma::{ - events::{push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType}, + events::{ + push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType, + }, push::Ruleset, OwnedUserId, RoomId, UserId, }; @@ -45,7 +47,8 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { if !services.users.exists(server_user).await { error!("The {server_user} server user does not exist, and the database is not new."); return Err!(Database( - "Cannot reuse an existing database after changing the server name, please delete the old one first.", + "Cannot reuse an existing database after changing the server name, please \ + delete the old one first.", )); } } @@ -144,7 +147,8 @@ async fn migrate(services: &Services) -> Result<()> { assert!( version_match, - 
"Failed asserting local database version {} is equal to known latest conduwuit database version {}", + "Failed asserting local database version {} is equal to known latest conduwuit database \ + version {}", services.globals.db.database_version().await, DATABASE_VERSION, ); @@ -192,7 +196,8 @@ async fn migrate(services: &Services) -> Result<()> { let matches = patterns.matches(room_alias.alias()); if matches.matched_any() { warn!( - "Room with alias {} ({}) matches the following forbidden room name patterns: {}", + "Room with alias {} ({}) matches the following forbidden room \ + name patterns: {}", room_alias, &room_id, matches @@ -223,8 +228,8 @@ async fn db_lt_12(services: &Services) -> Result<()> { .await { let user = match UserId::parse_with_server_name(username.as_str(), &config.server_name) { - Ok(u) => u, - Err(e) => { + | Ok(u) => u, + | Err(e) => { warn!("Invalid username {username}: {e}"); continue; }, @@ -240,7 +245,8 @@ async fn db_lt_12(services: &Services) -> Result<()> { //content rule { - let content_rule_transformation = [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; + let content_rule_transformation = + [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; let rule = rules_list.content.get(content_rule_transformation[0]); if rule.is_some() { @@ -301,8 +307,8 @@ async fn db_lt_13(services: &Services) -> Result<()> { .await { let user = match UserId::parse_with_server_name(username.as_str(), &config.server_name) { - Ok(u) => u, - Err(e) => { + | Ok(u) => u, + | Err(e) => { warn!("Invalid username {username}: {e}"); continue; }, @@ -413,7 +419,9 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) .rooms .state_accessor .get_member(room_id, user_id) - .map(|member| member.is_ok_and(|member| member.membership == MembershipState::Join)) + .map(|member| { + member.is_ok_and(|member| member.membership == MembershipState::Join) + }) }) .collect::>() .await; @@ -426,7 +434,9 @@ async fn 
retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) .rooms .state_accessor .get_member(room_id, user_id) - .map(|member| member.is_ok_and(|member| member.membership == MembershipState::Join)) + .map(|member| { + member.is_ok_and(|member| member.membership == MembershipState::Join) + }) }) .collect::>() .await; @@ -444,7 +454,8 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) for room_id in &room_ids { debug_info!( - "Updating joined count for room {room_id} to fix servers in room after correcting membership states" + "Updating joined count for room {room_id} to fix servers in room after correcting \ + membership states" ); services diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index dd0d8e72..3d614333 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -53,18 +53,22 @@ impl Data { } pub(super) async fn set_presence( - &self, user_id: &UserId, presence_state: &PresenceState, currently_active: Option, - last_active_ago: Option, status_msg: Option, + &self, + user_id: &UserId, + presence_state: &PresenceState, + currently_active: Option, + last_active_ago: Option, + status_msg: Option, ) -> Result<()> { let last_presence = self.get_presence(user_id).await; let state_changed = match last_presence { - Err(_) => true, - Ok(ref presence) => presence.1.content.presence != *presence_state, + | Err(_) => true, + | Ok(ref presence) => presence.1.content.presence != *presence_state, }; let status_msg_changed = match last_presence { - Err(_) => true, - Ok(ref last_presence) => { + | Err(_) => true, + | Ok(ref last_presence) => { let old_msg = last_presence .1 .content @@ -80,18 +84,22 @@ impl Data { let now = utils::millis_since_unix_epoch(); let last_last_active_ts = match last_presence { - Err(_) => 0, - Ok((_, ref presence)) => now.saturating_sub(presence.content.last_active_ago.unwrap_or_default().into()), + | Err(_) => 0, + | Ok((_, ref presence)) => + 
now.saturating_sub(presence.content.last_active_ago.unwrap_or_default().into()), }; let last_active_ts = match last_active_ago { - None => now, - Some(last_active_ago) => now.saturating_sub(last_active_ago.into()), + | None => now, + | Some(last_active_ago) => now.saturating_sub(last_active_ago.into()), }; // TODO: tighten for state flicker? if !status_msg_changed && !state_changed && last_active_ts < last_last_active_ts { - debug_warn!("presence spam {user_id:?} last_active_ts:{last_active_ts:?} < {last_last_active_ts:?}",); + debug_warn!( + "presence spam {user_id:?} last_active_ts:{last_active_ts:?} < \ + {last_last_active_ts:?}", + ); return Ok(()); } @@ -138,7 +146,10 @@ impl Data { } #[inline] - pub(super) fn presence_since(&self, since: u64) -> impl Stream + Send + '_ { + pub(super) fn presence_since( + &self, + since: u64, + ) -> impl Stream + Send + '_ { self.presenceid_presence .raw_stream() .ignore_err() diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index 7e80e05e..1f9f63d9 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -99,13 +99,14 @@ impl Service { let last_presence = self.db.get_presence(user_id).await; let state_changed = match last_presence { - Err(_) => true, - Ok((_, ref presence)) => presence.content.presence != *new_state, + | Err(_) => true, + | Ok((_, ref presence)) => presence.content.presence != *new_state, }; let last_last_active_ago = match last_presence { - Err(_) => 0_u64, - Ok((_, ref presence)) => presence.content.last_active_ago.unwrap_or_default().into(), + | Err(_) => 0_u64, + | Ok((_, ref presence)) => + presence.content.last_active_ago.unwrap_or_default().into(), }; if !state_changed && last_last_active_ago < REFRESH_TIMEOUT { @@ -113,8 +114,8 @@ impl Service { } let status_msg = match last_presence { - Ok((_, ref presence)) => presence.content.status_msg.clone(), - Err(_) => Some(String::new()), + | Ok((_, ref presence)) => presence.content.status_msg.clone(), + | Err(_) => 
Some(String::new()), }; let last_active_ago = UInt::new(0); @@ -125,12 +126,16 @@ impl Service { /// Adds a presence event which will be saved until a new event replaces it. pub async fn set_presence( - &self, user_id: &UserId, state: &PresenceState, currently_active: Option, last_active_ago: Option, + &self, + user_id: &UserId, + state: &PresenceState, + currently_active: Option, + last_active_ago: Option, status_msg: Option, ) -> Result<()> { let presence_state = match state.as_str() { - "" => &PresenceState::Offline, // default an empty string to 'offline' - &_ => state, + | "" => &PresenceState::Offline, // default an empty string to 'offline' + | &_ => state, }; self.db @@ -141,8 +146,8 @@ impl Service { && user_id != self.services.globals.server_user { let timeout = match presence_state { - PresenceState::Online => self.services.server.config.presence_idle_timeout_s, - _ => self.services.server.config.presence_offline_timeout_s, + | PresenceState::Online => self.services.server.config.presence_idle_timeout_s, + | _ => self.services.server.config.presence_offline_timeout_s, }; self.timer_sender @@ -160,16 +165,25 @@ impl Service { /// /// TODO: Why is this not used? #[allow(dead_code)] - pub async fn remove_presence(&self, user_id: &UserId) { self.db.remove_presence(user_id).await } + pub async fn remove_presence(&self, user_id: &UserId) { + self.db.remove_presence(user_id).await; + } /// Returns the most recent presence updates that happened after the event /// with id `since`. 
- pub fn presence_since(&self, since: u64) -> impl Stream + Send + '_ { + pub fn presence_since( + &self, + since: u64, + ) -> impl Stream + Send + '_ { self.db.presence_since(since) } #[inline] - pub async fn from_json_bytes_to_event(&self, bytes: &[u8], user_id: &UserId) -> Result { + pub async fn from_json_bytes_to_event( + &self, + bytes: &[u8], + user_id: &UserId, + ) -> Result { let presence = Presence::from_json_bytes(bytes)?; let event = presence .to_presence_event(user_id, &self.services.users) @@ -192,13 +206,16 @@ impl Service { } let new_state = match (&presence_state, last_active_ago.map(u64::from)) { - (PresenceState::Online, Some(ago)) if ago >= self.idle_timeout => Some(PresenceState::Unavailable), - (PresenceState::Unavailable, Some(ago)) if ago >= self.offline_timeout => Some(PresenceState::Offline), - _ => None, + | (PresenceState::Online, Some(ago)) if ago >= self.idle_timeout => + Some(PresenceState::Unavailable), + | (PresenceState::Unavailable, Some(ago)) if ago >= self.offline_timeout => + Some(PresenceState::Offline), + | _ => None, }; debug!( - "Processed presence timer for user '{user_id}': Old state = {presence_state}, New state = {new_state:?}" + "Processed presence timer for user '{user_id}': Old state = {presence_state}, New \ + state = {new_state:?}" ); if let Some(new_state) = new_state { diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index aed4a3f8..b88a004b 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -21,7 +21,10 @@ pub(super) struct Presence { impl Presence { #[must_use] pub(super) fn new( - state: PresenceState, currently_active: bool, last_active_ts: u64, status_msg: Option, + state: PresenceState, + currently_active: bool, + last_active_ts: u64, + status_msg: Option, ) -> Self { Self { state, @@ -32,11 +35,16 @@ impl Presence { } pub(super) fn from_json_bytes(bytes: &[u8]) -> Result { - serde_json::from_slice(bytes).map_err(|_| 
Error::bad_database("Invalid presence data in database")) + serde_json::from_slice(bytes) + .map_err(|_| Error::bad_database("Invalid presence data in database")) } /// Creates a PresenceEvent from available data. - pub(super) async fn to_presence_event(&self, user_id: &UserId, users: &users::Service) -> PresenceEvent { + pub(super) async fn to_presence_event( + &self, + user_id: &UserId, + users: &users::Service, + ) -> PresenceEvent { let now = utils::millis_since_unix_epoch(); let last_active_ago = if self.currently_active { None diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index ffe822b7..cea3ba35 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -19,9 +19,12 @@ use ruma::{ IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ - room::power_levels::RoomPowerLevelsEventContent, AnySyncTimelineEvent, StateEventType, TimelineEventType, + room::power_levels::RoomPowerLevelsEventContent, AnySyncTimelineEvent, StateEventType, + TimelineEventType, + }, + push::{ + Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak, }, - push::{Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, uint, RoomId, UInt, UserId, }; @@ -55,7 +58,8 @@ impl crate::Service for Service { services: Services { globals: args.depend::("globals"), client: args.depend::("client"), - state_accessor: args.depend::("rooms::state_accessor"), + state_accessor: args + .depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), users: args.depend::("users"), sending: args.depend::("sending"), @@ -67,23 +71,31 @@ impl crate::Service for Service { } impl Service { - pub async fn set_pusher(&self, sender: &UserId, pusher: &set_pusher::v3::PusherAction) -> Result { + pub async fn set_pusher( + &self, + sender: &UserId, + pusher: &set_pusher::v3::PusherAction, + ) -> Result { match pusher { - set_pusher::v3::PusherAction::Post(data) => 
{ + | set_pusher::v3::PusherAction::Post(data) => { let pushkey = data.pusher.ids.pushkey.as_str(); if pushkey.len() > 512 { - return Err!(Request(InvalidParam("Push key length cannot be greater than 512 bytes."))); + return Err!(Request(InvalidParam( + "Push key length cannot be greater than 512 bytes." + ))); } if data.pusher.ids.app_id.as_str().len() > 64 { - return Err!(Request(InvalidParam("App ID length cannot be greater than 64 bytes."))); + return Err!(Request(InvalidParam( + "App ID length cannot be greater than 64 bytes." + ))); } let key = (sender, data.pusher.ids.pushkey.as_str()); self.db.senderkey_pusher.put(key, Json(pusher)); }, - set_pusher::v3::PusherAction::Delete(ids) => { + | set_pusher::v3::PusherAction::Delete(ids) => { let key = (sender, ids.pushkey.as_str()); self.db.senderkey_pusher.del(key); @@ -118,7 +130,10 @@ impl Service { .await } - pub fn get_pushkeys<'a>(&'a self, sender: &'a UserId) -> impl Stream + Send + 'a { + pub fn get_pushkeys<'a>( + &'a self, + sender: &'a UserId, + ) -> impl Stream + Send + 'a { let prefix = (sender, Interfix); self.db .senderkey_pusher @@ -160,14 +175,16 @@ impl Service { let response = self.services.client.pusher.execute(reqwest_request).await; match response { - Ok(mut response) => { + | Ok(mut response) => { // reqwest::Response -> http::Response conversion trace!("Checking response destination's IP"); if let Some(remote_addr) = response.remote_addr() { if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Not allowed to send requests to this IP")); + return Err!(BadServerResponse( + "Not allowed to send requests to this IP" + )); } } } @@ -197,10 +214,13 @@ impl Service { .body(body) .expect("reqwest body is valid http body"), ); - response - .map_err(|e| err!(BadServerResponse(warn!("Push gateway {dest} returned invalid response: {e}")))) + response.map_err(|e| { + err!(BadServerResponse(warn!( + "Push 
gateway {dest} returned invalid response: {e}" + ))) + }) }, - Err(e) => { + | Err(e) => { warn!("Could not send request to pusher {dest}: {e}"); Err(e.into()) }, @@ -209,7 +229,12 @@ impl Service { #[tracing::instrument(skip(self, user, unread, pusher, ruleset, pdu))] pub async fn send_push_notice( - &self, user: &UserId, unread: UInt, pusher: &Pusher, ruleset: Ruleset, pdu: &PduEvent, + &self, + user: &UserId, + unread: UInt, + pusher: &Pusher, + ruleset: Ruleset, + pdu: &PduEvent, ) -> Result<()> { let mut notify = None; let mut tweaks = Vec::new(); @@ -220,8 +245,9 @@ impl Service { .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "") .await .and_then(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|e| err!(Database(error!("invalid m.room.power_levels event: {e:?}")))) + serde_json::from_str(ev.content.get()).map_err(|e| { + err!(Database(error!("invalid m.room.power_levels event: {e:?}"))) + }) }) .unwrap_or_default(); @@ -230,12 +256,12 @@ impl Service { .await { let n = match action { - Action::Notify => true, - Action::SetTweak(tweak) => { + | Action::Notify => true, + | Action::SetTweak(tweak) => { tweaks.push(tweak.clone()); continue; }, - _ => false, + | _ => false, }; if notify.is_some() { @@ -257,8 +283,12 @@ impl Service { #[tracing::instrument(skip(self, user, ruleset, pdu), level = "debug")] pub async fn get_actions<'a>( - &self, user: &UserId, ruleset: &'a Ruleset, power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, room_id: &RoomId, + &self, + user: &UserId, + ruleset: &'a Ruleset, + power_levels: &RoomPowerLevelsEventContent, + pdu: &Raw, + room_id: &RoomId, ) -> &'a [Action] { let power_levels = PushConditionPowerLevelsCtx { users: power_levels.users.clone(), @@ -294,14 +324,21 @@ impl Service { } #[tracing::instrument(skip(self, unread, pusher, tweaks, event))] - async fn send_notice(&self, unread: UInt, pusher: &Pusher, tweaks: Vec, event: &PduEvent) -> Result<()> { + async fn send_notice( + &self, + unread: 
UInt, + pusher: &Pusher, + tweaks: Vec, + event: &PduEvent, + ) -> Result<()> { // TODO: email match &pusher.kind { - PusherKind::Http(http) => { + | PusherKind::Http(http) => { // TODO (timo): can pusher/devices have conflicting formats let event_id_only = http.format == Some(PushFormat::EventIdOnly); - let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); + let mut device = + Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); device.data.default_payload = http.default_payload.clone(); device.data.format.clone_from(&http.format); @@ -319,8 +356,11 @@ impl Service { notifi.counts = NotificationCounts::new(unread, uint!(0)); if event_id_only { - self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) - .await?; + self.send_request( + &http.url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } else { if event.kind == TimelineEventType::RoomEncrypted || tweaks @@ -336,10 +376,12 @@ impl Service { notifi.content = serde_json::value::to_raw_value(&event.content).ok(); if event.kind == TimelineEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + notifi.user_is_target = + event.state_key.as_deref() == Some(event.sender.as_str()); } - notifi.sender_display_name = self.services.users.displayname(&event.sender).await.ok(); + notifi.sender_display_name = + self.services.users.displayname(&event.sender).await.ok(); notifi.room_name = self .services @@ -355,15 +397,18 @@ impl Service { .await .ok(); - self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) - .await?; + self.send_request( + &http.url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } Ok(()) }, // TODO: Handle email //PusherKind::Email(_) => Ok(()), - _ => Ok(()), + | _ => Ok(()), } } } diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 4d2da695..63de6539 100644 --- 
a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -35,17 +35,9 @@ impl super::Service { (self.resolve_actual_dest(server_name, true).await?, false) }; - let CachedDest { - dest, - host, - .. - } = result; + let CachedDest { dest, host, .. } = result; - Ok(ActualDest { - dest, - host, - cached, - }) + Ok(ActualDest { dest, host, cached }) } /// Returns: `actual_destination`, host header @@ -53,12 +45,16 @@ impl super::Service { /// Numbers in comments below refer to bullet points in linked section of /// specification #[tracing::instrument(skip_all, name = "actual")] - pub async fn resolve_actual_dest(&self, dest: &ServerName, cache: bool) -> Result { + pub async fn resolve_actual_dest( + &self, + dest: &ServerName, + cache: bool, + ) -> Result { trace!("Finding actual destination for {dest}"); let mut host = dest.as_str().to_owned(); let actual_dest = match get_ip_with_port(dest.as_str()) { - Some(host_port) => Self::actual_dest_1(host_port)?, - None => { + | Some(host_port) => Self::actual_dest_1(host_port)?, + | None => if let Some(pos) = dest.as_str().find(':') { self.actual_dest_2(dest, cache, pos).await? } else if let Some(delegated) = self.request_well_known(dest.as_str()).await? { @@ -67,8 +63,7 @@ impl super::Service { self.actual_dest_4(&host, cache, overrider).await? } else { self.actual_dest_5(dest, cache).await? 
- } - }, + }, }; // Can't use get_ip_with_port here because we don't want to add a port @@ -79,7 +74,10 @@ impl super::Service { FedDest::Named(addr.to_string(), FedDest::default_port()) } else if let Some(pos) = host.find(':') { let (host, port) = host.split_at(pos); - FedDest::Named(host.to_owned(), port.try_into().unwrap_or_else(|_| FedDest::default_port())) + FedDest::Named( + host.to_owned(), + port.try_into().unwrap_or_else(|_| FedDest::default_port()), + ) } else { FedDest::Named(host, FedDest::default_port()) }; @@ -100,20 +98,30 @@ impl super::Service { async fn actual_dest_2(&self, dest: &ServerName, cache: bool, pos: usize) -> Result { debug!("2: Hostname with included port"); let (host, port) = dest.as_str().split_at(pos); - self.conditional_query_and_cache_override(host, host, port.parse::().unwrap_or(8448), cache) - .await?; + self.conditional_query_and_cache_override( + host, + host, + port.parse::().unwrap_or(8448), + cache, + ) + .await?; Ok(FedDest::Named( host.to_owned(), port.try_into().unwrap_or_else(|_| FedDest::default_port()), )) } - async fn actual_dest_3(&self, host: &mut String, cache: bool, delegated: String) -> Result { + async fn actual_dest_3( + &self, + host: &mut String, + cache: bool, + delegated: String, + ) -> Result { debug!("3: A .well-known file is available"); *host = add_port_to_hostname(&delegated).uri_string(); match get_ip_with_port(&delegated) { - Some(host_and_port) => Self::actual_dest_3_1(host_and_port), - None => { + | Some(host_and_port) => Self::actual_dest_3_1(host_and_port), + | None => if let Some(pos) = delegated.find(':') { self.actual_dest_3_2(cache, delegated, pos).await } else { @@ -123,8 +131,7 @@ impl super::Service { } else { self.actual_dest_3_4(cache, delegated).await } - } - }, + }, } } @@ -133,22 +140,42 @@ impl super::Service { Ok(host_and_port) } - async fn actual_dest_3_2(&self, cache: bool, delegated: String, pos: usize) -> Result { + async fn actual_dest_3_2( + &self, + cache: bool, + delegated: 
String, + pos: usize, + ) -> Result { debug!("3.2: Hostname with port in .well-known file"); let (host, port) = delegated.split_at(pos); - self.conditional_query_and_cache_override(host, host, port.parse::().unwrap_or(8448), cache) - .await?; + self.conditional_query_and_cache_override( + host, + host, + port.parse::().unwrap_or(8448), + cache, + ) + .await?; Ok(FedDest::Named( host.to_owned(), port.try_into().unwrap_or_else(|_| FedDest::default_port()), )) } - async fn actual_dest_3_3(&self, cache: bool, delegated: String, overrider: FedDest) -> Result { + async fn actual_dest_3_3( + &self, + cache: bool, + delegated: String, + overrider: FedDest, + ) -> Result { debug!("3.3: SRV lookup successful"); let force_port = overrider.port(); - self.conditional_query_and_cache_override(&delegated, &overrider.hostname(), force_port.unwrap_or(8448), cache) - .await?; + self.conditional_query_and_cache_override( + &delegated, + &overrider.hostname(), + force_port.unwrap_or(8448), + cache, + ) + .await?; if let Some(port) = force_port { Ok(FedDest::Named( delegated, @@ -169,11 +196,21 @@ impl super::Service { Ok(add_port_to_hostname(&delegated)) } - async fn actual_dest_4(&self, host: &str, cache: bool, overrider: FedDest) -> Result { + async fn actual_dest_4( + &self, + host: &str, + cache: bool, + overrider: FedDest, + ) -> Result { debug!("4: No .well-known; SRV record found"); let force_port = overrider.port(); - self.conditional_query_and_cache_override(host, &overrider.hostname(), force_port.unwrap_or(8448), cache) - .await?; + self.conditional_query_and_cache_override( + host, + &overrider.hostname(), + force_port.unwrap_or(8448), + cache, + ) + .await?; if let Some(port) = force_port { let port = format!(":{port}"); Ok(FedDest::Named( @@ -245,7 +282,11 @@ impl super::Service { #[inline] async fn conditional_query_and_cache_override( - &self, overname: &str, hostname: &str, port: u16, cache: bool, + &self, + overname: &str, + hostname: &str, + port: u16, + cache: bool, 
) -> Result<()> { if cache { self.query_and_cache_override(overname, hostname, port) @@ -256,22 +297,24 @@ impl super::Service { } #[tracing::instrument(skip_all, name = "ip")] - async fn query_and_cache_override(&self, overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> { + async fn query_and_cache_override( + &self, + overname: &'_ str, + hostname: &'_ str, + port: u16, + ) -> Result<()> { match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { - Err(e) => Self::handle_resolve_error(&e, hostname), - Ok(override_ip) => { + | Err(e) => Self::handle_resolve_error(&e, hostname), + | Ok(override_ip) => { if hostname != overname { debug_info!("{overname:?} overriden by {hostname:?}"); } - self.set_cached_override( - overname, - CachedOverride { - ips: override_ip.into_iter().take(MAX_IPS).collect(), - port, - expire: CachedOverride::default_expire(), - }, - ); + self.set_cached_override(overname, CachedOverride { + ips: override_ip.into_iter().take(MAX_IPS).collect(), + port, + expire: CachedOverride::default_expire(), + }); Ok(()) }, @@ -280,14 +323,15 @@ impl super::Service { #[tracing::instrument(skip_all, name = "srv")] async fn query_srv_record(&self, hostname: &'_ str) -> Result> { - let hostnames = [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; + let hostnames = + [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; for hostname in hostnames { debug!("querying SRV for {hostname:?}"); let hostname = hostname.trim_end_matches('.'); match self.resolver.resolver.srv_lookup(hostname).await { - Err(e) => Self::handle_resolve_error(&e, hostname)?, - Ok(result) => { + | Err(e) => Self::handle_resolve_error(&e, hostname)?, + | Ok(result) => return Ok(result.iter().next().map(|result| { FedDest::Named( result.target().to_string().trim_end_matches('.').to_owned(), @@ -296,8 +340,7 @@ impl super::Service { .try_into() .unwrap_or_else(|_| FedDest::default_port()), ) - })) - }, + })), } } @@ 
-308,25 +351,24 @@ impl super::Service { use hickory_resolver::error::ResolveErrorKind; match *e.kind() { - ResolveErrorKind::NoRecordsFound { - .. - } => { + | ResolveErrorKind::NoRecordsFound { .. } => { // Raise to debug_warn if we can find out the result wasn't from cache debug!(%host, "No DNS records found: {e}"); Ok(()) }, - ResolveErrorKind::Timeout => { + | ResolveErrorKind::Timeout => { Err!(warn!(%host, "DNS {e}")) }, - ResolveErrorKind::NoConnections => { + | ResolveErrorKind::NoConnections => { error!( - "Your DNS server is overloaded and has ran out of connections. It is strongly recommended you \ - remediate this issue to ensure proper federation connectivity." + "Your DNS server is overloaded and has ran out of connections. It is \ + strongly recommended you remediate this issue to ensure proper federation \ + connectivity." ); Err!(error!(%host, "DNS error: {e}")) }, - _ => Err!(error!(%host, "DNS error: {e}")), + | _ => Err!(error!(%host, "DNS error: {e}")), } } @@ -349,8 +391,9 @@ impl super::Service { dest.is_ip_literal() || !IPAddress::is_valid(dest.host()), "Destination is not an IP literal." 
); - let ip = IPAddress::parse(dest.host()) - .map_err(|e| err!(BadServerResponse(debug_error!("Failed to parse IP literal from string: {e}"))))?; + let ip = IPAddress::parse(dest.host()).map_err(|e| { + err!(BadServerResponse(debug_error!("Failed to parse IP literal from string: {e}"))) + })?; self.validate_ip(&ip)?; diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index ca65db35..3e961f4c 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -46,7 +46,11 @@ impl Cache { } impl super::Service { - pub fn set_cached_destination(&self, name: OwnedServerName, dest: CachedDest) -> Option { + pub fn set_cached_destination( + &self, + name: OwnedServerName, + dest: CachedDest, + ) -> Option { trace!(?name, ?dest, "set cached destination"); self.cache .destinations @@ -65,7 +69,11 @@ impl super::Service { .cloned() } - pub fn set_cached_override(&self, name: &str, over: CachedOverride) -> Option { + pub fn set_cached_override( + &self, + name: &str, + over: CachedOverride, + ) -> Option { trace!(?name, ?over, "set cached override"); self.cache .overrides @@ -102,7 +110,9 @@ impl CachedDest { //pub fn valid(&self) -> bool { self.expire > SystemTime::now() } #[must_use] - pub(crate) fn default_expire() -> SystemTime { rand::timepoint_secs(60 * 60 * 18..60 * 60 * 36) } + pub(crate) fn default_expire() -> SystemTime { + rand::timepoint_secs(60 * 60 * 18..60 * 60 * 36) + } } impl CachedOverride { @@ -113,5 +123,7 @@ impl CachedOverride { //pub fn valid(&self) -> bool { self.expire > SystemTime::now() } #[must_use] - pub(crate) fn default_expire() -> SystemTime { rand::timepoint_secs(60 * 60 * 6..60 * 60 * 12) } + pub(crate) fn default_expire() -> SystemTime { + rand::timepoint_secs(60 * 60 * 6..60 * 60 * 12) + } } diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index 54905647..633b397a 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -60,27 +60,26 @@ impl Resolver { 
opts.shuffle_dns_servers = true; opts.rotate = true; opts.ip_strategy = match config.ip_lookup_strategy { - 1 => hickory_resolver::config::LookupIpStrategy::Ipv4Only, - 2 => hickory_resolver::config::LookupIpStrategy::Ipv6Only, - 3 => hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6, - 4 => hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4, - _ => hickory_resolver::config::LookupIpStrategy::Ipv4thenIpv6, + | 1 => hickory_resolver::config::LookupIpStrategy::Ipv4Only, + | 2 => hickory_resolver::config::LookupIpStrategy::Ipv6Only, + | 3 => hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6, + | 4 => hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4, + | _ => hickory_resolver::config::LookupIpStrategy::Ipv4thenIpv6, }; opts.authentic_data = false; let resolver = Arc::new(TokioAsyncResolver::tokio(conf, opts)); Ok(Arc::new(Self { resolver: resolver.clone(), - hooked: Arc::new(Hooked { - resolver, - cache, - }), + hooked: Arc::new(Hooked { resolver, cache }), })) } } impl Resolve for Resolver { - fn resolve(&self, name: Name) -> Resolving { resolve_to_reqwest(self.resolver.clone(), name).boxed() } + fn resolve(&self, name: Name) -> Resolving { + resolve_to_reqwest(self.resolver.clone(), name).boxed() + } } impl Resolve for Hooked { diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index 9c348b47..3986db8e 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -29,8 +29,8 @@ pub(crate) fn get_ip_with_port(dest_str: &str) -> Option { pub(crate) fn add_port_to_hostname(dest: &str) -> FedDest { let (host, port) = match dest.find(':') { - None => (dest, DEFAULT_PORT), - Some(pos) => dest.split_at(pos), + | None => (dest, DEFAULT_PORT), + | Some(pos) => dest.split_at(pos), }; FedDest::Named( @@ -42,23 +42,23 @@ pub(crate) fn add_port_to_hostname(dest: &str) -> FedDest { impl FedDest { pub(crate) fn https_string(&self) -> String { match self { - Self::Literal(addr) => format!("https://{addr}"), - 
Self::Named(host, port) => format!("https://{host}{port}"), + | Self::Literal(addr) => format!("https://{addr}"), + | Self::Named(host, port) => format!("https://{host}{port}"), } } pub(crate) fn uri_string(&self) -> String { match self { - Self::Literal(addr) => addr.to_string(), - Self::Named(host, port) => format!("{host}{port}"), + | Self::Literal(addr) => addr.to_string(), + | Self::Named(host, port) => format!("{host}{port}"), } } #[inline] pub(crate) fn hostname(&self) -> Cow<'_, str> { match &self { - Self::Literal(addr) => addr.ip().to_string().into(), - Self::Named(host, _) => host.into(), + | Self::Literal(addr) => addr.ip().to_string().into(), + | Self::Named(host, _) => host.into(), } } @@ -66,16 +66,20 @@ impl FedDest { #[allow(clippy::string_slice)] pub(crate) fn port(&self) -> Option { match &self { - Self::Literal(addr) => Some(addr.port()), - Self::Named(_, port) => port[1..].parse().ok(), + | Self::Literal(addr) => Some(addr.port()), + | Self::Named(_, port) => port[1..].parse().ok(), } } #[inline] #[must_use] - pub fn default_port() -> PortString { PortString::from(DEFAULT_PORT).expect("default port string") } + pub fn default_port() -> PortString { + PortString::from(DEFAULT_PORT).expect("default port string") + } } impl fmt::Display for FedDest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.uri_string().as_str()) } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.uri_string().as_str()) + } } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 57db0e15..0790c376 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -52,7 +52,8 @@ impl crate::Service for Service { appservice: args.depend::("appservice"), globals: args.depend::("globals"), sending: args.depend::("sending"), - state_accessor: args.depend::("rooms::state_accessor"), + state_accessor: args + .depend::("rooms::state_accessor"), }, })) } @@ -62,8 +63,15 @@ impl 
crate::Service for Service { impl Service { #[tracing::instrument(skip(self))] - pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { - if alias == self.services.globals.admin_alias && user_id != self.services.globals.server_user { + pub fn set_alias( + &self, + alias: &RoomAliasId, + room_id: &RoomId, + user_id: &UserId, + ) -> Result<()> { + if alias == self.services.globals.admin_alias + && user_id != self.services.globals.server_user + { return Err(Error::BadRequest( ErrorKind::forbidden(), "Only the server user can set this alias", @@ -120,7 +128,9 @@ impl Service { } pub async fn resolve_with_servers( - &self, room: &RoomOrAliasId, servers: Option>, + &self, + room: &RoomOrAliasId, + servers: Option>, ) -> Result<(OwnedRoomId, Vec)> { if room.is_room_id() { let room_id = RoomId::parse(room).expect("valid RoomId"); @@ -133,14 +143,16 @@ impl Service { #[tracing::instrument(skip(self), name = "resolve")] pub async fn resolve_alias( - &self, room_alias: &RoomAliasId, servers: Option>, + &self, + room_alias: &RoomAliasId, + servers: Option>, ) -> Result<(OwnedRoomId, Vec)> { let server_name = room_alias.server_name(); let server_is_ours = self.services.globals.server_is_ours(server_name); let servers_contains_ours = || { - servers - .as_ref() - .is_some_and(|servers| servers.contains(&self.services.globals.config.server_name)) + servers.as_ref().is_some_and(|servers| { + servers.contains(&self.services.globals.config.server_name) + }) }; if !server_is_ours && !servers_contains_ours() { @@ -150,8 +162,8 @@ impl Service { } let room_id = match self.resolve_local_alias(room_alias).await { - Ok(r) => Some(r), - Err(_) => self.resolve_appservice_alias(room_alias).await?, + | Ok(r) => Some(r), + | Err(_) => self.resolve_appservice_alias(room_alias).await?, }; room_id.map_or_else( @@ -166,7 +178,10 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub fn local_aliases_for_room<'a>(&'a self, room_id: &'a 
RoomId) -> impl Stream + Send + 'a { + pub fn local_aliases_for_room<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { let prefix = (room_id, Interfix); self.db .aliasid_alias @@ -208,10 +223,15 @@ impl Service { if let Ok(content) = self .services .state_accessor - .room_state_get_content::(&room_id, &StateEventType::RoomPowerLevels, "") + .room_state_get_content::( + &room_id, + &StateEventType::RoomPowerLevels, + "", + ) .await { - return Ok(RoomPowerLevels::from(content).user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)); + return Ok(RoomPowerLevels::from(content) + .user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)); } // If there is no power levels event, only the room creator can change @@ -232,7 +252,10 @@ impl Service { self.db.alias_userid.get(alias.alias()).await.deserialized() } - async fn resolve_appservice_alias(&self, room_alias: &RoomAliasId) -> Result> { + async fn resolve_appservice_alias( + &self, + room_alias: &RoomAliasId, + ) -> Result> { use ruma::api::appservice::query::query_room_alias; for appservice in self.services.appservice.read().await.values() { @@ -242,9 +265,7 @@ impl Service { .sending .send_appservice_request( appservice.registration.clone(), - query_room_alias::v1::Request { - room_alias: room_alias.to_owned(), - }, + query_room_alias::v1::Request { room_alias: room_alias.to_owned() }, ) .await, Ok(Some(_opt_result)) @@ -261,19 +282,27 @@ impl Service { } pub async fn appservice_checks( - &self, room_alias: &RoomAliasId, appservice_info: &Option, + &self, + room_alias: &RoomAliasId, + appservice_info: &Option, ) -> Result<()> { if !self .services .globals .server_is_ours(room_alias.server_name()) { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Alias is from another server.")); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Alias is from another server.", + )); } if let Some(ref info) = appservice_info { if !info.aliases.is_match(room_alias.as_str()) { 
- return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias is not in namespace.")); + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "Room alias is not in namespace.", + )); } } else if self .services @@ -281,7 +310,10 @@ impl Service { .is_exclusive_alias(room_alias) .await { - return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias reserved by appservice.")); + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "Room alias reserved by appservice.", + )); } Ok(()) diff --git a/src/service/rooms/alias/remote.rs b/src/service/rooms/alias/remote.rs index 00661da2..7744bee2 100644 --- a/src/service/rooms/alias/remote.rs +++ b/src/service/rooms/alias/remote.rs @@ -6,7 +6,9 @@ use ruma::{api::federation, OwnedRoomId, OwnedServerName, RoomAliasId, ServerNam #[implement(super::Service)] pub(super) async fn remote_resolve( - &self, room_alias: &RoomAliasId, servers: Vec, + &self, + room_alias: &RoomAliasId, + servers: Vec, ) -> Result<(OwnedRoomId, Vec)> { debug!(?room_alias, servers = ?servers, "resolve"); let servers = once(room_alias.server_name()) @@ -17,12 +19,12 @@ pub(super) async fn remote_resolve( let mut resolved_room_id: Option = None; for server in servers { match self.remote_request(room_alias, &server).await { - Err(e) => debug_error!("Failed to query for {room_alias:?} from {server}: {e}"), - Ok(Response { - room_id, - servers, - }) => { - debug!("Server {server} answered with {room_id:?} for {room_alias:?} servers: {servers:?}"); + | Err(e) => debug_error!("Failed to query for {room_alias:?} from {server}: {e}"), + | Ok(Response { room_id, servers }) => { + debug!( + "Server {server} answered with {room_id:?} for {room_alias:?} servers: \ + {servers:?}" + ); resolved_room_id.get_or_insert(room_id); add_server(&mut resolved_servers, server); @@ -37,16 +39,20 @@ pub(super) async fn remote_resolve( resolved_room_id .map(|room_id| (room_id, resolved_servers)) - .ok_or_else(|| err!(Request(NotFound("No servers could assist in resolving 
the room alias")))) + .ok_or_else(|| { + err!(Request(NotFound("No servers could assist in resolving the room alias"))) + }) } #[implement(super::Service)] -async fn remote_request(&self, room_alias: &RoomAliasId, server: &ServerName) -> Result { +async fn remote_request( + &self, + room_alias: &RoomAliasId, + server: &ServerName, +) -> Result { use federation::query::get_room_information::v1::Request; - let request = Request { - room_alias: room_alias.to_owned(), - }; + let request = Request { room_alias: room_alias.to_owned() }; self.services .sending diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 1548cd55..af8ae364 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -19,14 +19,18 @@ impl Data { let db = &args.db; let config = &args.server.config; let cache_size = f64::from(config.auth_chain_cache_capacity); - let cache_size = usize_from_f64(cache_size * config.cache_capacity_modifier).expect("valid cache size"); + let cache_size = usize_from_f64(cache_size * config.cache_capacity_modifier) + .expect("valid cache size"); Self { shorteventid_authchain: db["shorteventid_authchain"].clone(), auth_chain_cache: Mutex::new(LruCache::new(cache_size)), } } - pub(super) async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { + pub(super) async fn get_cached_eventid_authchain( + &self, + key: &[u64], + ) -> Result> { debug_assert!(!key.is_empty(), "auth_chain key must not be empty"); // Check RAM cache diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index b875bf9c..87992f2d 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -43,7 +43,9 @@ impl crate::Service for Service { impl Service { pub async fn event_ids_iter<'a, I>( - &'a self, room_id: &RoomId, starting_events: I, + &'a self, + room_id: &RoomId, + starting_events: I, ) -> Result> + Send + '_> where I: Iterator + Clone + Debug + 
ExactSizeIterator + Send + 'a, @@ -57,7 +59,11 @@ impl Service { Ok(stream) } - pub async fn get_event_ids<'a, I>(&'a self, room_id: &RoomId, starting_events: I) -> Result>> + pub async fn get_event_ids<'a, I>( + &'a self, + room_id: &RoomId, + starting_events: I, + ) -> Result>> where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { @@ -74,7 +80,11 @@ impl Service { } #[tracing::instrument(skip_all, name = "auth_chain")] - pub async fn get_auth_chain<'a, I>(&'a self, room_id: &RoomId, starting_events: I) -> Result> + pub async fn get_auth_chain<'a, I>( + &'a self, + room_id: &RoomId, + starting_events: I, + ) -> Result> where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { @@ -110,7 +120,8 @@ impl Service { continue; } - let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); + let chunk_key: Vec = + chunk.iter().map(|(short, _)| short).copied().collect(); if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { trace!("Found cache entry for whole chunk"); full_auth_chain.extend(cached.iter().copied()); @@ -169,7 +180,11 @@ impl Service { } #[tracing::instrument(skip(self, room_id))] - async fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + async fn get_auth_chain_inner( + &self, + room_id: &RoomId, + event_id: &EventId, + ) -> Result> { let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); @@ -177,8 +192,10 @@ impl Service { trace!(?event_id, "processing auth event"); match self.services.timeline.get_pdu(&event_id).await { - Err(e) => debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"), - Ok(pdu) => { + | Err(e) => { + debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"); + }, + | Ok(pdu) => { if pdu.room_id != room_id { return Err!(Request(Forbidden(error!( ?event_id, @@ -196,7 +213,11 @@ impl Service { .await; if found.insert(sauthevent) { - trace!(?event_id, ?auth_event, "adding auth event to 
processing queue"); + trace!( + ?event_id, + ?auth_event, + "adding auth event to processing queue" + ); todo.push(auth_event.clone()); } } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 3046a328..039efca7 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -32,10 +32,14 @@ pub fn set_public(&self, room_id: &RoomId) { self.db.publicroomids.insert(room_i pub fn set_not_public(&self, room_id: &RoomId) { self.db.publicroomids.remove(room_id); } #[implement(Service)] -pub fn public_rooms(&self) -> impl Stream + Send { self.db.publicroomids.keys().ignore_err() } +pub fn public_rooms(&self) -> impl Stream + Send { + self.db.publicroomids.keys().ignore_err() +} #[implement(Service)] -pub async fn is_public_room(&self, room_id: &RoomId) -> bool { self.visibility(room_id).await == Visibility::Public } +pub async fn is_public_room(&self, room_id: &RoomId) -> bool { + self.visibility(room_id).await == Visibility::Public +} #[implement(Service)] pub async fn visibility(&self, room_id: &RoomId) -> Visibility { diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 316a1722..af0ef67d 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -5,10 +5,14 @@ use std::{ }; use conduwuit::{ - debug, debug_error, implement, info, pdu, trace, utils::math::continue_exponential_backoff_secs, warn, PduEvent, + debug, debug_error, implement, info, pdu, trace, + utils::math::continue_exponential_backoff_secs, warn, PduEvent, }; use futures::TryFutureExt; -use ruma::{api::federation::event::get_event, CanonicalJsonValue, EventId, RoomId, RoomVersionId, ServerName}; +use ruma::{ + api::federation::event::get_event, CanonicalJsonValue, EventId, RoomId, RoomVersionId, + ServerName, +}; /// Find the event and auth it. 
Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. @@ -21,7 +25,11 @@ use ruma::{api::federation::event::get_event, CanonicalJsonValue, EventId, RoomI /// d. TODO: Ask other servers over federation? #[implement(super::Service)] pub(super) async fn fetch_and_handle_outliers<'a>( - &self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, + &self, + origin: &'a ServerName, + events: &'a [Arc], + create_event: &'a PduEvent, + room_id: &'a RoomId, room_version_id: &'a RoomVersionId, ) -> Vec<(Arc, Option>)> { let back_off = |id| match self @@ -32,10 +40,12 @@ pub(super) async fn fetch_and_handle_outliers<'a>( .expect("locked") .entry(id) { - hash_map::Entry::Vacant(e) => { + | hash_map::Entry::Vacant(e) => { e.insert((Instant::now(), 1)); }, - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)), + | hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)); + }, }; let mut events_with_auth_events = Vec::with_capacity(events.len()); @@ -67,7 +77,12 @@ pub(super) async fn fetch_and_handle_outliers<'a>( // Exponential backoff const MIN_DURATION: u64 = 5 * 60; const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { + if continue_exponential_backoff_secs( + MIN_DURATION, + MAX_DURATION, + time.elapsed(), + *tries, + ) { info!("Backing off from {next_id}"); continue; } @@ -86,18 +101,16 @@ pub(super) async fn fetch_and_handle_outliers<'a>( match self .services .sending - .send_federation_request( - origin, - get_event::v1::Request { - event_id: (*next_id).to_owned(), - include_unredacted_content: None, - }, - ) + .send_federation_request(origin, get_event::v1::Request { + event_id: (*next_id).to_owned(), + include_unredacted_content: None, + }) .await { - Ok(res) => { + | Ok(res) => { debug!("Got {next_id} over federation"); - let 
Ok((calculated_event_id, value)) = pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) + let Ok((calculated_event_id, value)) = + pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) else { back_off((*next_id).to_owned()); continue; @@ -105,15 +118,18 @@ pub(super) async fn fetch_and_handle_outliers<'a>( if calculated_event_id != *next_id { warn!( - "Server didn't return event id we requested: requested: {next_id}, we got \ - {calculated_event_id}. Event: {:?}", + "Server didn't return event id we requested: requested: {next_id}, \ + we got {calculated_event_id}. Event: {:?}", &res.pdu ); } - if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { + if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) + { for auth_event in auth_events { - if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { let a: Arc = auth_event; todo_auth_events.push(a); } else { @@ -127,7 +143,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( events_in_reverse_order.push((next_id.clone(), value)); events_all.insert(next_id); }, - Err(e) => { + | Err(e) => { debug_error!("Failed to fetch event {next_id}: {e}"); back_off((*next_id).to_owned()); }, @@ -158,20 +174,32 @@ pub(super) async fn fetch_and_handle_outliers<'a>( // Exponential backoff const MIN_DURATION: u64 = 5 * 60; const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { + if continue_exponential_backoff_secs( + MIN_DURATION, + MAX_DURATION, + time.elapsed(), + *tries, + ) { debug!("Backing off from {next_id}"); continue; } } - match Box::pin(self.handle_outlier_pdu(origin, create_event, &next_id, room_id, value.clone(), true)).await + match Box::pin(self.handle_outlier_pdu( + origin, + create_event, + &next_id, + room_id, + value.clone(), + true, + )) + .await { - Ok((pdu, json)) => { + 
| Ok((pdu, json)) => if next_id == *id { pdus.push((pdu, Some(json))); - } - }, - Err(e) => { + }, + | Err(e) => { warn!("Authentication of event {next_id} failed: {e:?}"); back_off(next_id.into()); }, diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 577b3ff2..b271958f 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -8,7 +8,8 @@ use futures::{future, FutureExt}; use ruma::{ int, state_res::{self}, - uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, + ServerName, }; use super::check_room_id; @@ -17,7 +18,11 @@ use super::check_room_id; #[allow(clippy::type_complexity)] #[tracing::instrument(skip_all)] pub(super) async fn fetch_prev( - &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, + &self, + origin: &ServerName, + create_event: &PduEvent, + room_id: &RoomId, + room_version_id: &RoomVersionId, initial_set: Vec>, ) -> Result<( Vec>, @@ -35,7 +40,13 @@ pub(super) async fn fetch_prev( self.services.server.check_running()?; if let Some((pdu, mut json_opt)) = self - .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id, room_version_id) + .fetch_and_handle_outliers( + origin, + &[prev_event_id.clone()], + create_event, + room_id, + room_version_id, + ) .boxed() .await .pop() @@ -67,7 +78,8 @@ pub(super) async fn fetch_prev( } } - graph.insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); + graph + .insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); } else { // Time based check failed graph.insert(prev_event_id.clone(), HashSet::new()); diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 9c7bc65e..9ea3e081 100644 --- 
a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -6,7 +6,8 @@ use std::{ use conduwuit::{debug, implement, warn, Err, Error, PduEvent, Result}; use futures::FutureExt; use ruma::{ - api::federation::event::get_room_state_ids, events::StateEventType, EventId, RoomId, RoomVersionId, ServerName, + api::federation::event::get_room_state_ids, events::StateEventType, EventId, RoomId, + RoomVersionId, ServerName, }; /// Call /state_ids to find out what the state at this pdu is. We trust the @@ -15,20 +16,21 @@ use ruma::{ #[implement(super::Service)] #[tracing::instrument(skip(self, create_event, room_version_id))] pub(super) async fn fetch_state( - &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, + &self, + origin: &ServerName, + create_event: &PduEvent, + room_id: &RoomId, + room_version_id: &RoomVersionId, event_id: &EventId, ) -> Result>>> { debug!("Fetching state ids"); let res = self .services .sending - .send_federation_request( - origin, - get_room_state_ids::v1::Request { - room_id: room_id.to_owned(), - event_id: (*event_id).to_owned(), - }, - ) + .send_federation_request(origin, get_room_state_ids::v1::Request { + room_id: room_id.to_owned(), + event_id: (*event_id).to_owned(), + }) .await .inspect_err(|e| warn!("Fetching state for event failed: {e}"))?; @@ -58,14 +60,13 @@ pub(super) async fn fetch_state( .await; match state.entry(shortstatekey) { - hash_map::Entry::Vacant(v) => { + | hash_map::Entry::Vacant(v) => { v.insert(Arc::from(&*pdu.event_id)); }, - hash_map::Entry::Occupied(_) => { + | hash_map::Entry::Occupied(_) => return Err(Error::bad_database( "State event's type and state_key combination exists multiple times.", - )) - }, + )), } } diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index d63f96f9..ca56228d 100644 --- 
a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -7,7 +7,8 @@ use std::{ use conduwuit::{debug, err, implement, warn, Error, Result}; use futures::{FutureExt, TryFutureExt}; use ruma::{ - api::client::error::ErrorKind, events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId, + api::client::error::ErrorKind, events::StateEventType, CanonicalJsonValue, EventId, RoomId, + ServerName, UserId, }; use super::{check_room_id, get_room_version_id}; @@ -43,8 +44,12 @@ use crate::rooms::timeline::RawPduId; #[implement(super::Service)] #[tracing::instrument(skip(self, origin, value, is_timeline_event), name = "pdu")] pub async fn handle_incoming_pdu<'a>( - &self, origin: &'a ServerName, room_id: &'a RoomId, event_id: &'a EventId, - value: BTreeMap, is_timeline_event: bool, + &self, + origin: &'a ServerName, + room_id: &'a RoomId, + event_id: &'a EventId, + value: BTreeMap, + is_timeline_event: bool, ) -> Result> { // 1. 
Skip the PDU if we already have it as a timeline event if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await { @@ -144,10 +149,10 @@ pub async fn handle_incoming_pdu<'a>( .expect("locked") .entry(prev_id.into()) { - Entry::Vacant(e) => { + | Entry::Vacant(e) => { e.insert((now, 1)); }, - Entry::Occupied(mut e) => { + | Entry::Occupied(mut e) => { *e.get_mut() = (now, e.get().1.saturating_add(1)); }, }; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 9391ebf3..59ef27ba 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -17,8 +17,13 @@ use super::{check_room_id, get_room_version_id, to_room_version}; #[implement(super::Service)] #[allow(clippy::too_many_arguments)] pub(super) async fn handle_outlier_pdu<'a>( - &self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, - mut value: CanonicalJsonObject, auth_events_known: bool, + &self, + origin: &'a ServerName, + create_event: &'a PduEvent, + event_id: &'a EventId, + room_id: &'a RoomId, + mut value: CanonicalJsonObject, + auth_events_known: bool, ) -> Result<(Arc, BTreeMap)> { // 1. 
Remove unsigned field value.remove("unsigned"); @@ -34,8 +39,8 @@ pub(super) async fn handle_outlier_pdu<'a>( .verify_event(&value, Some(&room_version_id)) .await { - Ok(ruma::signatures::Verified::All) => value, - Ok(ruma::signatures::Verified::Signatures) => { + | Ok(ruma::signatures::Verified::All) => value, + | Ok(ruma::signatures::Verified::Signatures) => { // Redact debug_info!("Calculated hash does not match (redaction): {event_id}"); let Ok(obj) = ruma::canonical_json::redact(value, &room_version_id, None) else { @@ -44,24 +49,26 @@ pub(super) async fn handle_outlier_pdu<'a>( // Skip the PDU if it is redacted and we already have it as an outlier event if self.services.timeline.pdu_exists(event_id).await { - return Err!(Request(InvalidParam("Event was redacted and we already knew about it"))); + return Err!(Request(InvalidParam( + "Event was redacted and we already knew about it" + ))); } obj }, - Err(e) => { + | Err(e) => return Err!(Request(InvalidParam(debug_error!( "Signature verification failed for {event_id}: {e}" - )))) - }, + )))), }; // Now that we have checked the signature and hashes we can add the eventID and // convert to our PduEvent type val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); - let incoming_pdu = - serde_json::from_value::(serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue")) - .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; + let incoming_pdu = serde_json::from_value::( + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; check_room_id(room_id, &incoming_pdu)?; @@ -108,10 +115,10 @@ pub(super) async fn handle_outlier_pdu<'a>( .clone() .expect("all auth events have state keys"), )) { - hash_map::Entry::Vacant(v) => { + | hash_map::Entry::Vacant(v) => { v.insert(auth_event); }, - hash_map::Entry::Occupied(_) => { + | hash_map::Entry::Occupied(_) => { return 
Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth event's type and state_key combination exists multiple times.", diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 1b4e9fe2..becaeb17 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -4,7 +4,9 @@ use std::{ time::Instant, }; -use conduwuit::{debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result}; +use conduwuit::{ + debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result, +}; use ruma::{api::client::error::ErrorKind, CanonicalJsonValue, EventId, RoomId, ServerName}; #[implement(super::Service)] @@ -15,15 +17,23 @@ use ruma::{api::client::error::ErrorKind, CanonicalJsonValue, EventId, RoomId, S name = "prev" )] pub(super) async fn handle_prev_pdu<'a>( - &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - eventid_info: &mut HashMap, (Arc, BTreeMap)>, - create_event: &Arc, first_pdu_in_room: &Arc, prev_id: &EventId, + &self, + origin: &'a ServerName, + event_id: &'a EventId, + room_id: &'a RoomId, + eventid_info: &mut HashMap< + Arc, + (Arc, BTreeMap), + >, + create_event: &Arc, + first_pdu_in_room: &Arc, + prev_id: &EventId, ) -> Result { // Check for disabled again because it might have changed if self.services.metadata.is_disabled(room_id).await { debug!( - "Federaton of room {room_id} is currently disabled on this server. Request by origin {origin} and event \ - ID {event_id}" + "Federaton of room {room_id} is currently disabled on this server. 
Request by \ + origin {origin} and event ID {event_id}" ); return Err(Error::BadRequest( ErrorKind::forbidden(), diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index de3d2f49..bfc5a014 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -23,8 +23,8 @@ use conduwuit::{ }; use futures::TryFutureExt; use ruma::{ - events::room::create::RoomCreateEventContent, state_res::RoomVersion, EventId, OwnedEventId, OwnedRoomId, RoomId, - RoomVersionId, + events::room::create::RoomCreateEventContent, state_res::RoomVersion, EventId, OwnedEventId, + OwnedRoomId, RoomId, RoomVersionId, }; use crate::{globals, rooms, sending, server_keys, Dep}; @@ -69,8 +69,10 @@ impl crate::Service for Service { pdu_metadata: args.depend::("rooms::pdu_metadata"), short: args.depend::("rooms::short"), state: args.depend::("rooms::state"), - state_accessor: args.depend::("rooms::state_accessor"), - state_compressor: args.depend::("rooms::state_compressor"), + state_accessor: args + .depend::("rooms::state_accessor"), + state_compressor: args + .depend::("rooms::state_compressor"), timeline: args.depend::("rooms::timeline"), server: args.server.clone(), }, @@ -95,7 +97,9 @@ impl crate::Service for Service { } impl Service { - async fn event_exists(&self, event_id: Arc) -> bool { self.services.timeline.pdu_exists(&event_id).await } + async fn event_exists(&self, event_id: Arc) -> bool { + self.services.timeline.pdu_exists(&event_id).await + } async fn event_fetch(&self, event_id: Arc) -> Option> { self.services diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 6c19f43f..f3c75f36 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -3,9 +3,13 @@ use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, R use serde_json::value::RawValue 
as RawJsonValue; #[implement(super::Service)] -pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { - let value = serde_json::from_str::(pdu.get()) - .map_err(|e| err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))))?; +pub async fn parse_incoming_pdu( + &self, + pdu: &RawJsonValue, +) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { + let value = serde_json::from_str::(pdu.get()).map_err(|e| { + err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))) + })?; let room_id: OwnedRoomId = value .get("room_id") @@ -20,8 +24,9 @@ pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<(OwnedEvent .await .map_err(|_| err!("Server is not in room {room_id}"))?; - let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id) - .map_err(|e| err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))))?; + let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id).map_err(|e| { + err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))) + })?; Ok((event_id, value, room_id)) } diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 8ba4e4f4..d507c9c3 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -20,7 +20,10 @@ use crate::rooms::state_compressor::CompressedStateEvent; #[implement(super::Service)] #[tracing::instrument(skip_all, name = "resolve")] pub async fn resolve_state( - &self, room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap>, + &self, + room_id: &RoomId, + room_version_id: &RoomVersionId, + incoming_state: HashMap>, ) -> Result>> { debug!("Loading current room state ids"); let current_sstatehash = self @@ -76,10 +79,16 @@ pub async fn resolve_state( let event_fetch = |event_id| self.event_fetch(event_id); let 
event_exists = |event_id| self.event_exists(event_id); - let state = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) - .boxed() - .await - .map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?; + let state = state_res::resolve( + room_version_id, + &fork_states, + &auth_chain_sets, + &event_fetch, + &event_exists, + ) + .boxed() + .await + .map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?; drop(lock); diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 6c76d9b5..51879d0d 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -21,7 +21,8 @@ use ruma::{ // request and build the state from a known point and resolve if > 1 prev_event #[tracing::instrument(skip_all, name = "state")] pub(super) async fn state_at_incoming_degree_one( - &self, incoming_pdu: &Arc, + &self, + incoming_pdu: &Arc, ) -> Result>>> { let prev_event = &*incoming_pdu.prev_events[0]; let Ok(prev_event_sstatehash) = self @@ -70,7 +71,10 @@ pub(super) async fn state_at_incoming_degree_one( #[implement(super::Service)] #[tracing::instrument(skip_all, name = "state")] pub(super) async fn state_at_incoming_resolved( - &self, incoming_pdu: &Arc, room_id: &RoomId, room_version_id: &RoomVersionId, + &self, + incoming_pdu: &Arc, + room_id: &RoomId, + room_version_id: &RoomVersionId, ) -> Result>>> { debug!("Calculating state at event using state res"); let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len()); @@ -157,10 +161,16 @@ pub(super) async fn state_at_incoming_resolved( let event_fetch = |event_id| self.event_fetch(event_id); let event_exists = |event_id| self.event_exists(event_id); - let result = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists) - .boxed() - .await - .map_err(|e| 
err!(Database(warn!(?e, "State resolution on prev events failed.")))); + let result = state_res::resolve( + room_version_id, + &fork_states, + &auth_chain_sets, + &event_fetch, + &event_exists, + ) + .boxed() + .await + .map_err(|e| err!(Database(warn!(?e, "State resolution on prev events failed.")))); drop(lock); diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 83267563..aa484cd6 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -19,8 +19,12 @@ use crate::rooms::{state_compressor::HashSetCompressStateEvent, timeline::RawPdu #[implement(super::Service)] pub(super) async fn upgrade_outlier_to_timeline_pdu( - &self, incoming_pdu: Arc, val: BTreeMap, create_event: &PduEvent, - origin: &ServerName, room_id: &RoomId, + &self, + incoming_pdu: Arc, + val: BTreeMap, + create_event: &PduEvent, + origin: &ServerName, + room_id: &RoomId, ) -> Result> { // Skip the PDU if we already have it as a timeline event if let Ok(pduid) = self @@ -63,7 +67,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .await?; } - let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); + let state_at_incoming_event = + state_at_incoming_event.expect("we always set this to some above"); let room_version = to_room_version(&room_version_id); debug!("Performing auth check"); @@ -124,24 +129,34 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( !auth_check || incoming_pdu.kind == TimelineEventType::RoomRedaction && match room_version_id { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { if let Some(redact_id) = &incoming_pdu.redacts { !self .services .state_accessor - .user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .user_can_redact( + redact_id, + &incoming_pdu.sender, + &incoming_pdu.room_id, 
+ true, + ) .await? } else { false } }, - _ => { + | _ => { let content: RoomRedactionEventContent = incoming_pdu.get_content()?; if let Some(redact_id) = &content.redacts { !self .services .state_accessor - .user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .user_can_redact( + redact_id, + &incoming_pdu.sender, + &incoming_pdu.room_id, + true, + ) .await? } else { false @@ -229,11 +244,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Set the new room state to the resolved state debug!("Forcing new room state"); - let HashSetCompressStateEvent { - shortstatehash, - added, - removed, - } = self + let HashSetCompressStateEvent { shortstatehash, added, removed } = self .services .state_compressor .save_state(room_id, new_room_state) diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a4bd4e8f..c3c27b9e 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -51,7 +51,11 @@ impl crate::Service for Service { #[tracing::instrument(skip(self), level = "debug")] #[inline] pub async fn lazy_load_was_sent_before( - &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, ll_user: &UserId, + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, ) -> bool { let key = (user_id, device_id, room_id, ll_user); self.db.lazyloadedids.qry(&key).await.is_ok() @@ -60,7 +64,12 @@ pub async fn lazy_load_was_sent_before( #[implement(Service)] #[tracing::instrument(skip(self), level = "debug")] pub fn lazy_load_mark_sent( - &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, lazy_load: HashSet, count: PduCount, + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + lazy_load: HashSet, + count: PduCount, ) { let key = (user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), count); @@ -72,7 +81,13 @@ pub fn lazy_load_mark_sent( #[implement(Service)] 
#[tracing::instrument(skip(self), level = "debug")] -pub fn lazy_load_confirm_delivery(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, since: PduCount) { +pub fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + since: PduCount, +) { let key = (user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), since); let Some(user_ids) = self.lazy_load_waiting.lock().expect("locked").remove(&key) else { diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 8f65eec3..6d5a85a0 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -58,7 +58,9 @@ pub async fn exists(&self, room_id: &RoomId) -> bool { } #[implement(Service)] -pub fn iter_ids(&self) -> impl Stream + Send + '_ { self.db.roomid_shortroomid.keys().ignore_err() } +pub fn iter_ids(&self) -> impl Stream + Send + '_ { + self.db.roomid_shortroomid.keys().ignore_err() +} #[implement(Service)] #[inline] @@ -81,12 +83,18 @@ pub fn ban_room(&self, room_id: &RoomId, banned: bool) { } #[implement(Service)] -pub fn list_banned_rooms(&self) -> impl Stream + Send + '_ { self.db.bannedroomids.keys().ignore_err() } +pub fn list_banned_rooms(&self) -> impl Stream + Send + '_ { + self.db.bannedroomids.keys().ignore_err() +} #[implement(Service)] #[inline] -pub async fn is_disabled(&self, room_id: &RoomId) -> bool { self.db.disabledroomids.get(room_id).await.is_ok() } +pub async fn is_disabled(&self, room_id: &RoomId) -> bool { + self.db.disabledroomids.get(room_id).await.is_ok() +} #[implement(Service)] #[inline] -pub async fn is_banned(&self, room_id: &RoomId) -> bool { self.db.bannedroomids.get(room_id).await.is_ok() } +pub async fn is_banned(&self, room_id: &RoomId) -> bool { + self.db.bannedroomids.get(room_id).await.is_ok() +} diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index be9e4ba9..01950975 100644 --- 
a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -56,26 +56,27 @@ impl Data { } pub(super) fn get_relations<'a>( - &'a self, user_id: &'a UserId, shortroomid: ShortRoomId, target: ShortEventId, from: PduCount, dir: Direction, + &'a self, + user_id: &'a UserId, + shortroomid: ShortRoomId, + target: ShortEventId, + from: PduCount, + dir: Direction, ) -> impl Stream + Send + '_ { let mut current = ArrayVec::::new(); current.extend(target.to_be_bytes()); current.extend(from.saturating_inc(dir).into_unsigned().to_be_bytes()); let current = current.as_slice(); match dir { - Direction::Forward => self.tofrom_relation.raw_keys_from(current).boxed(), - Direction::Backward => self.tofrom_relation.rev_raw_keys_from(current).boxed(), + | Direction::Forward => self.tofrom_relation.raw_keys_from(current).boxed(), + | Direction::Backward => self.tofrom_relation.rev_raw_keys_from(current).boxed(), } .ignore_err() .ready_take_while(move |key| key.starts_with(&target.to_be_bytes())) .map(|to_from| u64_from_u8(&to_from[8..16])) .map(PduCount::from_unsigned) .wide_filter_map(move |shorteventid| async move { - let pdu_id: RawPduId = PduId { - shortroomid, - shorteventid, - } - .into(); + let pdu_id: RawPduId = PduId { shortroomid, shorteventid }.into(); let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; @@ -99,7 +100,9 @@ impl Data { self.referencedevents.qry(&key).await.is_ok() } - pub(super) fn mark_event_soft_failed(&self, event_id: &EventId) { self.softfailedeventids.insert(event_id, []); } + pub(super) fn mark_event_soft_failed(&self, event_id: &EventId) { + self.softfailedeventids.insert(event_id, []); + } pub(super) async fn is_event_soft_failed(&self, event_id: &EventId) -> bool { self.softfailedeventids.get(event_id).await.is_ok() diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 23c6d90b..ac70dbc3 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ 
b/src/service/rooms/pdu_metadata/mod.rs @@ -36,8 +36,8 @@ impl Service { #[tracing::instrument(skip(self, from, to), level = "debug")] pub fn add_relation(&self, from: PduCount, to: PduCount) { match (from, to) { - (PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t), - _ => { + | (PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t), + | _ => { // TODO: Relations with backfilled pdus }, } @@ -45,15 +45,21 @@ impl Service { #[allow(clippy::too_many_arguments)] pub async fn get_relations( - &self, user_id: &UserId, room_id: &RoomId, target: &EventId, from: PduCount, limit: usize, max_depth: u8, + &self, + user_id: &UserId, + room_id: &RoomId, + target: &EventId, + from: PduCount, + limit: usize, + max_depth: u8, dir: Direction, ) -> Vec { let room_id = self.services.short.get_or_create_shortroomid(room_id).await; let target = match self.services.timeline.get_pdu_count(target).await { - Ok(PduCount::Normal(c)) => c, + | Ok(PduCount::Normal(c)) => c, // TODO: Support backfilled relations - _ => 0, // This will result in an empty iterator + | _ => 0, // This will result in an empty iterator }; let mut pdus: Vec<_> = self @@ -66,9 +72,9 @@ impl Service { 'limit: while let Some(stack_pdu) = stack.pop() { let target = match stack_pdu.0 .0 { - PduCount::Normal(c) => c, + | PduCount::Normal(c) => c, // TODO: Support backfilled relations - PduCount::Backfilled(_) => 0, // This will result in an empty iterator + | PduCount::Backfilled(_) => 0, // This will result in an empty iterator }; let relations: Vec<_> = self @@ -106,7 +112,9 @@ impl Service { #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_event_soft_failed(&self, event_id: &EventId) { self.db.mark_event_soft_failed(event_id) } + pub fn mark_event_soft_failed(&self, event_id: &EventId) { + self.db.mark_event_soft_failed(event_id); + } #[inline] #[tracing::instrument(skip(self), level = "debug")] diff --git a/src/service/rooms/read_receipt/data.rs 
b/src/service/rooms/read_receipt/data.rs index 03d04b65..c21ad36c 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -40,7 +40,12 @@ impl Data { } } - pub(super) async fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) { + pub(super) async fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: &ReceiptEvent, + ) { // Remove old entry let last_possible_key = (room_id, u64::MAX); self.readreceiptid_readreceipt @@ -57,7 +62,9 @@ impl Data { } pub(super) fn readreceipts_since<'a>( - &'a self, room_id: &'a RoomId, since: u64, + &'a self, + room_id: &'a RoomId, + since: u64, ) -> impl Stream> + Send + 'a { type Key<'a> = (&'a RoomId, u64, &'a UserId); type KeyVal<'a> = (Key<'a>, CanonicalJsonObject); @@ -87,12 +94,20 @@ impl Data { self.roomuserid_lastprivatereadupdate.put(key, next_count); } - pub(super) async fn private_read_get_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + pub(super) async fn private_read_get_count( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result { let key = (room_id, user_id); self.roomuserid_privateread.qry(&key).await.deserialized() } - pub(super) async fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> u64 { + pub(super) async fn last_privateread_update( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> u64 { let key = (room_id, user_id); self.roomuserid_lastprivatereadupdate .qry(&key) diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index d92b9759..53e64957 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -44,7 +44,12 @@ impl crate::Service for Service { impl Service { /// Replaces the previous read receipt. 
- pub async fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) { + pub async fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: &ReceiptEvent, + ) { self.db.readreceipt_update(user_id, room_id, event).await; self.services .sending @@ -54,23 +59,21 @@ impl Service { } /// Gets the latest private read receipt from the user in the room - pub async fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let pdu_count = self - .private_read_get_count(room_id, user_id) - .map_err(|e| err!(Database(warn!("No private read receipt was set in {room_id}: {e}")))); - let shortroomid = self - .services - .short - .get_shortroomid(room_id) - .map_err(|e| err!(Database(warn!("Short room ID does not exist in database for {room_id}: {e}")))); + pub async fn private_read_get( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { + let pdu_count = self.private_read_get_count(room_id, user_id).map_err(|e| { + err!(Database(warn!("No private read receipt was set in {room_id}: {e}"))) + }); + let shortroomid = self.services.short.get_shortroomid(room_id).map_err(|e| { + err!(Database(warn!("Short room ID does not exist in database for {room_id}: {e}"))) + }); let (pdu_count, shortroomid) = try_join!(pdu_count, shortroomid)?; let shorteventid = PduCount::Normal(pdu_count); - let pdu_id: RawPduId = PduId { - shortroomid, - shorteventid, - } - .into(); + let pdu_id: RawPduId = PduId { shortroomid, shorteventid }.into(); let pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await?; @@ -80,21 +83,17 @@ impl Service { event_id, BTreeMap::from_iter([( ruma::events::receipt::ReceiptType::ReadPrivate, - BTreeMap::from_iter([( - user_id, - ruma::events::receipt::Receipt { - ts: None, // TODO: start storing the timestamp so we can return one - thread: ruma::events::receipt::ReceiptThread::Unthreaded, - }, - )]), + BTreeMap::from_iter([(user_id, ruma::events::receipt::Receipt { + ts: None, // 
TODO: start storing the timestamp so we can return one + thread: ruma::events::receipt::ReceiptThread::Unthreaded, + })]), )]), )]); let receipt_event_content = ReceiptEventContent(content); - let receipt_sync_event = SyncEphemeralRoomEvent { - content: receipt_event_content, - }; + let receipt_sync_event = SyncEphemeralRoomEvent { content: receipt_event_content }; - let event = serde_json::value::to_raw_value(&receipt_sync_event).expect("receipt created manually"); + let event = serde_json::value::to_raw_value(&receipt_sync_event) + .expect("receipt created manually"); Ok(Raw::from_json(event)) } @@ -104,7 +103,9 @@ impl Service { #[inline] #[tracing::instrument(skip(self), level = "debug")] pub fn readreceipts_since<'a>( - &'a self, room_id: &'a RoomId, since: u64, + &'a self, + room_id: &'a RoomId, + since: u64, ) -> impl Stream> + Send + 'a { self.db.readreceipts_since(room_id, since) } @@ -119,7 +120,11 @@ impl Service { /// Returns the private read marker PDU count. #[inline] #[tracing::instrument(skip(self), level = "debug")] - pub async fn private_read_get_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + pub async fn private_read_get_count( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result { self.db.private_read_get_count(room_id, user_id).await } @@ -137,7 +142,9 @@ where { let mut json = BTreeMap::new(); for value in receipts { - let receipt = serde_json::from_str::>(value.json().get()); + let receipt = serde_json::from_str::>( + value.json().get(), + ); if let Ok(value) = receipt { for (event, receipt) in value.content { json.insert(event, receipt); @@ -149,9 +156,7 @@ where let content = ReceiptEventContent::from_iter(json); Raw::from_json( - serde_json::value::to_raw_value(&SyncEphemeralRoomEvent { - content, - }) - .expect("received valid json"), + serde_json::value::to_raw_value(&SyncEphemeralRoomEvent { content }) + .expect("received valid json"), ) } diff --git a/src/service/rooms/search/mod.rs 
b/src/service/rooms/search/mod.rs index 025fc42a..35cfd444 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -49,18 +49,18 @@ pub struct RoomQuery<'a> { type TokenId = ArrayVec; -const TOKEN_ID_MAX_LEN: usize = size_of::() + WORD_MAX_LEN + 1 + size_of::(); +const TOKEN_ID_MAX_LEN: usize = + size_of::() + WORD_MAX_LEN + 1 + size_of::(); const WORD_MAX_LEN: usize = 50; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - db: Data { - tokenids: args.db["tokenids"].clone(), - }, + db: Data { tokenids: args.db["tokenids"].clone() }, services: Services { short: args.depend::("rooms::short"), - state_accessor: args.depend::("rooms::state_accessor"), + state_accessor: args + .depend::("rooms::state_accessor"), timeline: args.depend::("rooms::timeline"), }, })) @@ -103,7 +103,8 @@ pub fn deindex_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_b #[implement(Service)] pub async fn search_pdus<'a>( - &'a self, query: &'a RoomQuery<'a>, + &'a self, + query: &'a RoomQuery<'a>, ) -> Result<(usize, impl Stream + Send + 'a)> { let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await; @@ -136,7 +137,10 @@ pub async fn search_pdus<'a>( // result is modeled as a stream such that callers don't have to be refactored // though an additional async/wrap still exists for now #[implement(Service)] -pub async fn search_pdu_ids(&self, query: &RoomQuery<'_>) -> Result + Send + '_> { +pub async fn search_pdu_ids( + &self, + query: &RoomQuery<'_>, +) -> Result + Send + '_> { let shortroomid = self.services.short.get_shortroomid(query.room_id).await?; let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await; @@ -147,7 +151,11 @@ pub async fn search_pdu_ids(&self, query: &RoomQuery<'_>) -> Result, shortroomid: ShortRoomId) -> Vec> { +async fn search_pdu_ids_query_room( + &self, + query: &RoomQuery<'_>, + shortroomid: ShortRoomId, +) -> Vec> { 
tokenize(&query.criteria.search_term) .stream() .wide_then(|word| async move { @@ -162,7 +170,9 @@ async fn search_pdu_ids_query_room(&self, query: &RoomQuery<'_>, shortroomid: Sh /// Iterate over PduId's containing a word #[implement(Service)] fn search_pdu_ids_query_words<'a>( - &'a self, shortroomid: ShortRoomId, word: &'a str, + &'a self, + shortroomid: ShortRoomId, + word: &'a str, ) -> impl Stream + Send + '_ { self.search_pdu_ids_query_word(shortroomid, word) .map(move |key| -> RawPduId { @@ -173,7 +183,11 @@ fn search_pdu_ids_query_words<'a>( /// Iterate over raw database results for a word #[implement(Service)] -fn search_pdu_ids_query_word(&self, shortroomid: ShortRoomId, word: &str) -> impl Stream> + Send + '_ { +fn search_pdu_ids_query_word( + &self, + shortroomid: ShortRoomId, + word: &str, +) -> impl Stream> + Send + '_ { // rustc says const'ing this not yet stable let end_id: RawPduId = PduId { shortroomid, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 465ce1a9..f814411b 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -60,7 +60,10 @@ pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEvent } #[implement(Service)] -pub fn multi_get_or_create_shorteventid<'a, I>(&'a self, event_ids: I) -> impl Stream + Send + '_ +pub fn multi_get_or_create_shorteventid<'a, I>( + &'a self, + event_ids: I, +) -> impl Stream + Send + '_ where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, ::Item: AsRef<[u8]> + Send + Sync + 'a, @@ -72,8 +75,8 @@ where event_ids.next().map(|event_id| (event_id, result)) }) .map(|(event_id, result)| match result { - Ok(ref short) => utils::u64_from_u8(short), - Err(_) => self.create_shorteventid(event_id), + | Ok(ref short) => utils::u64_from_u8(short), + | Err(_) => self.create_shorteventid(event_id), }) } @@ -104,7 +107,11 @@ pub async fn get_shorteventid(&self, event_id: &EventId) -> Result } #[implement(Service)] -pub async fn 
get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> ShortStateKey { +pub async fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, +) -> ShortStateKey { const BUFSIZE: usize = size_of::(); if let Ok(shortstatekey) = self.get_shortstatekey(event_type, state_key).await { @@ -127,7 +134,11 @@ pub async fn get_or_create_shortstatekey(&self, event_type: &StateEventType, sta } #[implement(Service)] -pub async fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { +pub async fn get_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, +) -> Result { let key = (event_type, state_key); self.db .statekey_shortstatekey @@ -153,7 +164,10 @@ where } #[implement(Service)] -pub fn multi_get_eventid_from_short<'a, Id, I>(&'a self, shorteventid: I) -> impl Stream> + Send + 'a +pub fn multi_get_eventid_from_short<'a, Id, I>( + &'a self, + shorteventid: I, +) -> impl Stream> + Send + 'a where I: Iterator + Send + 'a, Id: for<'de> Deserialize<'de> + Sized + ToOwned + 'a, @@ -168,7 +182,10 @@ where } #[implement(Service)] -pub async fn get_statekey_from_short(&self, shortstatekey: ShortStateKey) -> Result<(StateEventType, String)> { +pub async fn get_statekey_from_short( + &self, + shortstatekey: ShortStateKey, +) -> Result<(StateEventType, String)> { const BUFSIZE: usize = size_of::(); self.db diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index aa523871..d60c4c9e 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -125,7 +125,8 @@ enum Identifier<'a> { pub struct Service { services: Services, - pub roomid_spacehierarchy_cache: Mutex>>, + pub roomid_spacehierarchy_cache: + Mutex>>, } struct Services { @@ -145,11 +146,13 @@ impl crate::Service for Service { let cache_size = cache_size * config.cache_capacity_modifier; Ok(Arc::new(Self { services: Services { - state_accessor: 
args.depend::("rooms::state_accessor"), + state_accessor: args + .depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), state: args.depend::("rooms::state"), short: args.depend::("rooms::short"), - event_handler: args.depend::("rooms::event_handler"), + event_handler: args + .depend::("rooms::event_handler"), timeline: args.depend::("rooms::timeline"), sending: args.depend::("sending"), }, @@ -166,28 +169,37 @@ impl Service { /// Errors if the room does not exist, so a check if the room exists should /// be done pub async fn get_federation_hierarchy( - &self, room_id: &RoomId, server_name: &ServerName, suggested_only: bool, + &self, + room_id: &RoomId, + server_name: &ServerName, + suggested_only: bool, ) -> Result { match self - .get_summary_and_children_local(&room_id.to_owned(), Identifier::ServerName(server_name)) + .get_summary_and_children_local( + &room_id.to_owned(), + Identifier::ServerName(server_name), + ) .await? { - Some(SummaryAccessibility::Accessible(room)) => { + | Some(SummaryAccessibility::Accessible(room)) => { let mut children = Vec::new(); let mut inaccessible_children = Vec::new(); for (child, _via) in get_parent_children_via(&room, suggested_only) { match self - .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) + .get_summary_and_children_local( + &child, + Identifier::ServerName(server_name), + ) .await? 
{ - Some(SummaryAccessibility::Accessible(summary)) => { + | Some(SummaryAccessibility::Accessible(summary)) => { children.push((*summary).into()); }, - Some(SummaryAccessibility::Inaccessible) => { + | Some(SummaryAccessibility::Inaccessible) => { inaccessible_children.push(child); }, - None => (), + | None => (), } } @@ -197,16 +209,18 @@ impl Service { inaccessible_children, }) }, - Some(SummaryAccessibility::Inaccessible) => { - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room is inaccessible")) - }, - None => Err(Error::BadRequest(ErrorKind::NotFound, "The requested room was not found")), + | Some(SummaryAccessibility::Inaccessible) => + Err(Error::BadRequest(ErrorKind::NotFound, "The requested room is inaccessible")), + | None => + Err(Error::BadRequest(ErrorKind::NotFound, "The requested room was not found")), } } /// Gets the summary of a space using solely local information async fn get_summary_and_children_local( - &self, current_room: &OwnedRoomId, identifier: Identifier<'_>, + &self, + current_room: &OwnedRoomId, + identifier: Identifier<'_>, ) -> Result> { if let Some(cached) = self .roomid_spacehierarchy_cache @@ -241,9 +255,7 @@ impl Service { if let Ok(summary) = summary { self.roomid_spacehierarchy_cache.lock().await.insert( current_room.clone(), - Some(CachedSpaceHierarchySummary { - summary: summary.clone(), - }), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), ); Ok(Some(SummaryAccessibility::Accessible(Box::new(summary)))) @@ -258,20 +270,21 @@ impl Service { /// Gets the summary of a space using solely federation #[tracing::instrument(skip(self))] async fn get_summary_and_children_federation( - &self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &[OwnedServerName], + &self, + current_room: &OwnedRoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], ) -> Result> { for server in via { debug_info!("Asking {server} for /hierarchy"); let Ok(response) = self 
.services .sending - .send_federation_request( - server, - federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), - suggested_only, - }, - ) + .send_federation_request(server, federation::space::get_hierarchy::v1::Request { + room_id: current_room.to_owned(), + suggested_only, + }) .await else { continue; @@ -282,9 +295,7 @@ impl Service { self.roomid_spacehierarchy_cache.lock().await.insert( current_room.clone(), - Some(CachedSpaceHierarchySummary { - summary: summary.clone(), - }), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), ); for child in response.children { @@ -356,7 +367,11 @@ impl Service { /// Gets the summary of a space using either local or remote (federation) /// sources async fn get_summary_and_children_client( - &self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &[OwnedServerName], + &self, + current_room: &OwnedRoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], ) -> Result> { if let Ok(Some(response)) = self .get_summary_and_children_local(current_room, Identifier::UserId(user_id)) @@ -370,7 +385,9 @@ impl Service { } async fn get_room_summary( - &self, current_room: &OwnedRoomId, children_state: Vec>, + &self, + current_room: &OwnedRoomId, + children_state: Vec>, identifier: &Identifier<'_>, ) -> Result { let room_id: &RoomId = current_room; @@ -388,12 +405,20 @@ impl Service { .allowed_room_ids(join_rule.clone()); if !self - .is_accessible_child(current_room, &join_rule.clone().into(), identifier, &allowed_room_ids) + .is_accessible_child( + current_room, + &join_rule.clone().into(), + identifier, + &allowed_room_ids, + ) .await { debug_info!("User is not allowed to see room {room_id}"); // This error will be caught later - return Err(Error::BadRequest(ErrorKind::forbidden(), "User is not allowed to see the room")); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "User is not allowed to see the room", + )); } 
Ok(SpaceHierarchyParentSummary { @@ -446,7 +471,12 @@ impl Service { } pub async fn get_client_hierarchy( - &self, sender_user: &UserId, room_id: &RoomId, limit: usize, short_room_ids: Vec, max_depth: u64, + &self, + sender_user: &UserId, + room_id: &RoomId, + limit: usize, + short_room_ids: Vec, + max_depth: u64, suggested_only: bool, ) -> Result { let mut parents = VecDeque::new(); @@ -454,27 +484,30 @@ impl Service { // Don't start populating the results if we have to start at a specific room. let mut populate_results = short_room_ids.is_empty(); - let mut stack = vec![vec![( - room_id.to_owned(), - match room_id.server_name() { - Some(server_name) => vec![server_name.into()], - None => vec![], - }, - )]]; + let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { + | Some(server_name) => vec![server_name.into()], + | None => vec![], + })]]; let mut results = Vec::with_capacity(limit); - while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { + while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } + { if results.len() >= limit { break; } match ( - self.get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) - .await?, + self.get_summary_and_children_client( + ¤t_room, + suggested_only, + sender_user, + &via, + ) + .await?, current_room == room_id, ) { - (Some(SummaryAccessibility::Accessible(summary)), _) => { + | (Some(SummaryAccessibility::Accessible(summary)), _) => { let mut children: Vec<(OwnedRoomId, Vec)> = get_parent_children_via(&summary, suggested_only) .into_iter() @@ -493,7 +526,9 @@ impl Service { self.services .short .get_shortroomid(room) - .map_ok(|short| Some(&short) != short_room_ids.get(parents.len())) + .map_ok(|short| { + Some(&short) != short_room_ids.get(parents.len()) + }) .unwrap_or_else(|_| false) }) .map(Clone::clone) @@ -525,14 +560,20 @@ impl Service { // Root room in the space hierarchy, we return an error // if this one 
fails. }, - (Some(SummaryAccessibility::Inaccessible), true) => { - return Err(Error::BadRequest(ErrorKind::forbidden(), "The requested room is inaccessible")); + | (Some(SummaryAccessibility::Inaccessible), true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room is inaccessible", + )); }, - (None, true) => { - return Err(Error::BadRequest(ErrorKind::forbidden(), "The requested room was not found")); + | (None, true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room was not found", + )); }, // Just ignore other unavailable rooms - (None | Some(SummaryAccessibility::Inaccessible), false) => (), + | (None | Some(SummaryAccessibility::Inaccessible), false) => (), } } @@ -544,15 +585,19 @@ impl Service { let short_room_ids: Vec<_> = parents .iter() .stream() - .filter_map(|room_id| async move { self.services.short.get_shortroomid(room_id).await.ok() }) + .filter_map(|room_id| async move { + self.services.short.get_shortroomid(room_id).await.ok() + }) .collect() .await; Some( PaginationToken { short_room_ids, - limit: UInt::new(max_depth).expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth).expect("When sent in request it must have been valid UInt"), + limit: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), suggested_only, } .to_string(), @@ -566,9 +611,12 @@ impl Service { /// Simply returns the stripped m.space.child events of a room async fn get_stripped_space_child_events( - &self, room_id: &RoomId, + &self, + room_id: &RoomId, ) -> Result>>, Error> { - let Ok(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id).await else { + let Ok(current_shortstatehash) = + self.services.state.get_room_shortstatehash(room_id).await + else { return Ok(None); }; @@ -581,18 +629,17 @@ impl Service { let mut 
children_pdus = Vec::with_capacity(state.len()); for (key, id) in state { - let (event_type, state_key) = self.services.short.get_statekey_from_short(key).await?; + let (event_type, state_key) = + self.services.short.get_statekey_from_short(key).await?; if event_type != StateEventType::SpaceChild { continue; } - let pdu = self - .services - .timeline - .get_pdu(&id) - .await - .map_err(|e| err!(Database("Event {id:?} in space state not found: {e:?}")))?; + let pdu = + self.services.timeline.get_pdu(&id).await.map_err(|e| { + err!(Database("Event {id:?} in space state not found: {e:?}")) + })?; if let Ok(content) = pdu.get_content::() { if content.via.is_empty() { @@ -610,11 +657,14 @@ impl Service { /// With the given identifier, checks if a room is accessable async fn is_accessible_child( - &self, current_room: &OwnedRoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>, + &self, + current_room: &OwnedRoomId, + join_rule: &SpaceRoomJoinRule, + identifier: &Identifier<'_>, allowed_room_ids: &Vec, ) -> bool { match identifier { - Identifier::ServerName(server_name) => { + | Identifier::ServerName(server_name) => { // Checks if ACLs allow for the server to participate if self .services @@ -626,7 +676,7 @@ impl Service { return false; } }, - Identifier::UserId(user_id) => { + | Identifier::UserId(user_id) => { if self .services .state_cache @@ -642,16 +692,18 @@ impl Service { }, } match &join_rule { - SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true, - SpaceRoomJoinRule::Restricted => { + | SpaceRoomJoinRule::Public + | SpaceRoomJoinRule::Knock + | SpaceRoomJoinRule::KnockRestricted => true, + | SpaceRoomJoinRule::Restricted => { for room in allowed_room_ids { match identifier { - Identifier::UserId(user) => { + | Identifier::UserId(user) => { if self.services.state_cache.is_joined(user, room).await { return true; } }, - Identifier::ServerName(server) => { + | Identifier::ServerName(server) => { if 
self.services.state_cache.server_in_room(server, room).await { return true; } @@ -661,7 +713,7 @@ impl Service { false }, // Invite only, Private, or Custom join rule - _ => false, + | _ => false, } } } @@ -737,7 +789,8 @@ fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRooms /// Returns the children of a SpaceHierarchyParentSummary, making use of the /// children_state field fn get_parent_children_via( - parent: &SpaceHierarchyParentSummary, suggested_only: bool, + parent: &SpaceHierarchyParentSummary, + suggested_only: bool, ) -> Vec<(OwnedRoomId, Vec)> { parent .children_state @@ -755,7 +808,8 @@ fn get_parent_children_via( } fn next_room_to_traverse( - stack: &mut Vec)>>, parents: &mut VecDeque, + stack: &mut Vec)>>, + parents: &mut VecDeque, ) -> Option<(OwnedRoomId, Vec)> { while stack.last().is_some_and(Vec::is_empty) { stack.pop(); diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs index 71640035..b4c387d7 100644 --- a/src/service/rooms/spaces/tests.rs +++ b/src/service/rooms/spaces/tests.rs @@ -69,18 +69,15 @@ fn get_summary_children() { } .into(); - assert_eq!( - get_parent_children_via(&summary, false), - vec![ - (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) - ] - ); - assert_eq!( - get_parent_children_via(&summary, true), - vec![(owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")])] - ); + assert_eq!(get_parent_children_via(&summary, false), vec![ + (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) + ]); + assert_eq!(get_parent_children_via(&summary, true), vec![( + 
owned_room_id!("!bar:example.org"), + vec![owned_server_name!("example.org")] + )]); } #[test] diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index e083ed34..a7f79e94 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -16,7 +16,9 @@ use conduwuit::{ warn, PduEvent, Result, }; use database::{Deserialized, Ignore, Interfix, Map}; -use futures::{future::join_all, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{ + future::join_all, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, +}; use ruma::{ events::{ room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, @@ -70,8 +72,10 @@ impl crate::Service for Service { short: args.depend::("rooms::short"), spaces: args.depend::("rooms::spaces"), state_cache: args.depend::("rooms::state_cache"), - state_accessor: args.depend::("rooms::state_accessor"), - state_compressor: args.depend::("rooms::state_compressor"), + state_accessor: args + .depend::("rooms::state_accessor"), + state_compressor: args + .depend::("rooms::state_compressor"), timeline: args.depend::("rooms::timeline"), }, db: Data { @@ -100,7 +104,8 @@ impl Service { shortstatehash: u64, statediffnew: Arc>, _statediffremoved: Arc>, - state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state + * mutex */ ) -> Result { let event_ids = statediffnew .iter() @@ -120,8 +125,9 @@ impl Service { }; match pdu.kind { - TimelineEventType::RoomMember => { - let Some(user_id) = pdu.state_key.as_ref().map(UserId::parse).flat_ok() else { + | TimelineEventType::RoomMember => { + let Some(user_id) = pdu.state_key.as_ref().map(UserId::parse).flat_ok() + else { continue; }; @@ -131,10 +137,18 @@ impl Service { self.services .state_cache - .update_membership(room_id, &user_id, membership_event, &pdu.sender, None, None, false) + 
.update_membership( + room_id, + &user_id, + membership_event, + &pdu.sender, + None, + None, + false, + ) .await?; }, - TimelineEventType::SpaceChild => { + | TimelineEventType::SpaceChild => { self.services .spaces .roomid_spacehierarchy_cache @@ -142,7 +156,7 @@ impl Service { .await .remove(&pdu.room_id); }, - _ => continue, + | _ => continue, } } @@ -159,7 +173,10 @@ impl Service { /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. #[tracing::instrument(skip(self, state_ids_compressed), level = "debug")] pub async fn set_event_state( - &self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: Arc>, + &self, + event_id: &EventId, + room_id: &RoomId, + state_ids_compressed: Arc>, ) -> Result { const KEY_LEN: usize = size_of::(); const VAL_LEN: usize = size_of::(); @@ -190,22 +207,23 @@ impl Service { Vec::new() }; - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.full_state) - .copied() - .collect(); + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: HashSet<_> = state_ids_compressed + .difference(&parent_stateinfo.full_state) + .copied() + .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo - .full_state - .difference(&state_ids_compressed) - .copied() - .collect(); + let statediffremoved: HashSet<_> = parent_stateinfo + .full_state + .difference(&state_ids_compressed) + .copied() + .collect(); - (Arc::new(statediffnew), Arc::new(statediffremoved)) - } else { - (state_ids_compressed, Arc::new(HashSet::new())) - }; + (Arc::new(statediffnew), Arc::new(statediffremoved)) + } else { + (state_ids_compressed, Arc::new(HashSet::new())) + }; self.services.state_compressor.save_state_from_diff( shortstatehash, statediffnew, @@ -338,7 +356,8 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - _mutex_lock: 
&RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room + * state mutex */ ) { const BUFSIZE: usize = size_of::(); @@ -366,7 +385,10 @@ impl Service { .deserialized() } - pub fn get_forward_extremities<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + '_ { + pub fn get_forward_extremities<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + '_ { let prefix = (room_id, Interfix); self.db @@ -380,7 +402,8 @@ impl Service { &self, room_id: &RoomId, event_ids: Vec, - _state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + _state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room + * state mutex */ ) { let prefix = (room_id, Interfix); self.db @@ -399,26 +422,33 @@ impl Service { /// This fetches auth events from the current state. #[tracing::instrument(skip(self, content), level = "debug")] pub async fn get_auth_events( - &self, room_id: &RoomId, kind: &TimelineEventType, sender: &UserId, state_key: Option<&str>, + &self, + room_id: &RoomId, + kind: &TimelineEventType, + sender: &UserId, + state_key: Option<&str>, content: &serde_json::value::RawValue, ) -> Result>> { let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else { return Ok(HashMap::new()); }; - let mut sauthevents: HashMap<_, _> = state_res::auth_types_for_event(kind, sender, state_key, content)? - .iter() - .stream() - .broad_filter_map(|(event_type, state_key)| { - self.services - .short - .get_shortstatekey(event_type, state_key) - .map_ok(move |ssk| (ssk, (event_type, state_key))) - .map(Result::ok) - }) - .map(|(ssk, (event_type, state_key))| (ssk, (event_type.to_owned(), state_key.to_owned()))) - .collect() - .await; + let mut sauthevents: HashMap<_, _> = + state_res::auth_types_for_event(kind, sender, state_key, content)? 
+ .iter() + .stream() + .broad_filter_map(|(event_type, state_key)| { + self.services + .short + .get_shortstatekey(event_type, state_key) + .map_ok(move |ssk| (ssk, (event_type, state_key))) + .map(Result::ok) + }) + .map(|(ssk, (event_type, state_key))| { + (ssk, (event_type.to_owned(), state_key.to_owned())) + }) + .collect() + .await; let (state_keys, event_ids): (Vec<_>, Vec<_>) = self .services diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 9947b036..d60e505e 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -39,14 +39,16 @@ impl Data { services: Services { short: args.depend::("rooms::short"), state: args.depend::("rooms::state"), - state_compressor: args.depend::("rooms::state_compressor"), + state_compressor: args + .depend::("rooms::state_compressor"), timeline: args.depend::("rooms::timeline"), }, } } pub(super) async fn state_full( - &self, shortstatehash: ShortStateHash, + &self, + shortstatehash: ShortStateHash, ) -> Result> { let state = self .state_full_pdus(shortstatehash) @@ -58,7 +60,10 @@ impl Data { Ok(state) } - pub(super) async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result> { + pub(super) async fn state_full_pdus( + &self, + shortstatehash: ShortStateHash, + ) -> Result> { let short_ids = self.state_full_shortids(shortstatehash).await?; let full_pdus = self @@ -66,16 +71,19 @@ impl Data { .short .multi_get_eventid_from_short(short_ids.iter().map(ref_at!(1))) .ready_filter_map(Result::ok) - .broad_filter_map( - |event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await.ok() }, - ) + .broad_filter_map(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await.ok() + }) .collect() .await; Ok(full_pdus) } - pub(super) async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result> + pub(super) async fn state_full_ids( + &self, + shortstatehash: 
ShortStateHash, + ) -> Result> where Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, ::Owned: Borrow, @@ -96,7 +104,8 @@ impl Data { } pub(super) async fn state_full_shortids( - &self, shortstatehash: ShortStateHash, + &self, + shortstatehash: ShortStateHash, ) -> Result> { let shortids = self .services @@ -118,7 +127,10 @@ impl Data { /// Returns a single EventId from `room_id` with key /// (`event_type`,`state_key`). pub(super) async fn state_get_id( - &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, ) -> Result where Id: for<'de> Deserialize<'de> + Sized + ToOwned, @@ -155,10 +167,15 @@ impl Data { /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). pub(super) async fn state_get( - &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, ) -> Result { self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await }) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) .await } @@ -179,7 +196,8 @@ impl Data { /// Returns the full room state. pub(super) async fn room_state_full( - &self, room_id: &RoomId, + &self, + room_id: &RoomId, ) -> Result> { self.services .state @@ -203,7 +221,10 @@ impl Data { /// Returns a single EventId from `room_id` with key /// (`event_type`,`state_key`). pub(super) async fn room_state_get_id( - &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, ) -> Result where Id: for<'de> Deserialize<'de> + Sized + ToOwned, @@ -218,7 +239,10 @@ impl Data { /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
pub(super) async fn room_state_get( - &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, ) -> Result { self.services .state diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 798bcbed..d8093dd7 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -34,8 +34,8 @@ use ruma::{ }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, EventId, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, - ServerName, UserId, + EventEncryptionAlgorithm, EventId, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, ServerName, UserId, }; use serde::Deserialize; @@ -75,8 +75,12 @@ impl crate::Service for Service { timeline: args.depend::("rooms::timeline"), }, db: Data::new(&args), - server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(server_visibility_cache_capacity)?)), - user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(user_visibility_cache_capacity)?)), + server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( + server_visibility_cache_capacity, + )?)), + user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( + user_visibility_cache_capacity, + )?)), })) } @@ -102,7 +106,10 @@ impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
#[tracing::instrument(skip(self), level = "debug")] - pub async fn state_full_ids(&self, shortstatehash: ShortStateHash) -> Result> + pub async fn state_full_ids( + &self, + shortstatehash: ShortStateHash, + ) -> Result> where Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, ::Owned: Borrow, @@ -112,13 +119,15 @@ impl Service { #[inline] pub async fn state_full_shortids( - &self, shortstatehash: ShortStateHash, + &self, + shortstatehash: ShortStateHash, ) -> Result> { self.db.state_full_shortids(shortstatehash).await } pub async fn state_full( - &self, shortstatehash: ShortStateHash, + &self, + shortstatehash: ShortStateHash, ) -> Result> { self.db.state_full(shortstatehash).await } @@ -127,7 +136,10 @@ impl Service { /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] pub async fn state_get_id( - &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, ) -> Result where Id: for<'de> Deserialize<'de> + Sized + ToOwned, @@ -142,7 +154,10 @@ impl Service { /// `state_key`). #[inline] pub async fn state_get( - &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, ) -> Result { self.db .state_get(shortstatehash, event_type, state_key) @@ -151,7 +166,10 @@ impl Service { /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
pub async fn state_get_content( - &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, ) -> Result where T: for<'de> Deserialize<'de>, @@ -162,7 +180,11 @@ impl Service { } /// Get membership for given user in state - async fn user_membership(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> MembershipState { + async fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, + ) -> MembershipState { self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) .await .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) @@ -185,7 +207,12 @@ impl Service { /// Whether a server is allowed to see an event through federation, based on /// the room's history_visibility at that event's state. #[tracing::instrument(skip_all, level = "trace")] - pub async fn server_can_see_event(&self, origin: &ServerName, room_id: &RoomId, event_id: &EventId) -> bool { + pub async fn server_can_see_event( + &self, + origin: &ServerName, + room_id: &RoomId, + event_id: &EventId, + ) -> bool { let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { return true; }; @@ -213,20 +240,20 @@ impl Service { .ready_filter(|member| member.server_name() == origin); let visibility = match history_visibility { - HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, - HistoryVisibility::Invited => { + | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, + | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny current_server_members .any(|member| self.user_was_invited(shortstatehash, member)) .await }, - HistoryVisibility::Joined => { + | HistoryVisibility::Joined => { // Allow if any member on requested server was joined, else deny current_server_members .any(|member| 
self.user_was_joined(shortstatehash, member)) .await }, - _ => { + | _ => { error!("Unknown history visibility {history_visibility}"); false }, @@ -243,7 +270,12 @@ impl Service { /// Whether a user is allowed to see an event, based on /// the room's history_visibility at that event's state. #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_event(&self, user_id: &UserId, room_id: &RoomId, event_id: &EventId) -> bool { + pub async fn user_can_see_event( + &self, + user_id: &UserId, + room_id: &RoomId, + event_id: &EventId, + ) -> bool { let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { return true; }; @@ -267,17 +299,17 @@ impl Service { }); let visibility = match history_visibility { - HistoryVisibility::WorldReadable => true, - HistoryVisibility::Shared => currently_member, - HistoryVisibility::Invited => { + | HistoryVisibility::WorldReadable => true, + | HistoryVisibility::Shared => currently_member, + | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny self.user_was_invited(shortstatehash, user_id).await }, - HistoryVisibility::Joined => { + | HistoryVisibility::Joined => { // Allow if any member on requested server was joined, else deny self.user_was_joined(shortstatehash, user_id).await }, - _ => { + | _ => { error!("Unknown history visibility {history_visibility}"); false }, @@ -307,9 +339,10 @@ impl Service { }); match history_visibility { - HistoryVisibility::Invited => self.services.state_cache.is_invited(user_id, room_id).await, - HistoryVisibility::WorldReadable => true, - _ => false, + | HistoryVisibility::Invited => + self.services.state_cache.is_invited(user_id, room_id).await, + | HistoryVisibility::WorldReadable => true, + | _ => false, } } @@ -320,7 +353,10 @@ impl Service { /// Returns the full room state. 
#[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_full(&self, room_id: &RoomId) -> Result> { + pub async fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result> { self.db.room_state_full(room_id).await } @@ -334,7 +370,10 @@ impl Service { /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] pub async fn room_state_get_id( - &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, ) -> Result where Id: for<'de> Deserialize<'de> + Sized + ToOwned, @@ -349,14 +388,20 @@ impl Service { /// `state_key`). #[tracing::instrument(skip(self), level = "debug")] pub async fn room_state_get( - &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, ) -> Result { self.db.room_state_get(room_id, event_type, state_key).await } /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
pub async fn room_state_get_content( - &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, ) -> Result where T: for<'de> Deserialize<'de>, @@ -381,18 +426,29 @@ impl Service { JsOption::from_option(content) } - pub async fn get_member(&self, room_id: &RoomId, user_id: &UserId) -> Result { + pub async fn get_member( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str()) .await } pub async fn user_can_invite( - &self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &RoomMutexGuard, + &self, + room_id: &RoomId, + sender: &UserId, + target_user: &UserId, + state_lock: &RoomMutexGuard, ) -> bool { self.services .timeline .create_hash_and_sign_event( - PduBuilder::state(target_user.into(), &RoomMemberEventContent::new(MembershipState::Invite)), + PduBuilder::state( + target_user.into(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), sender, room_id, state_lock, @@ -405,7 +461,9 @@ impl Service { pub async fn is_world_readable(&self, room_id: &RoomId) -> bool { self.room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") .await - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility == HistoryVisibility::WorldReadable) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility == HistoryVisibility::WorldReadable + }) .unwrap_or(false) } @@ -439,7 +497,11 @@ impl Service { /// If federation is true, it allows redaction events from any user of the /// same server as the original event sender pub async fn user_can_redact( - &self, redacts: &EventId, sender: &UserId, room_id: &RoomId, federation: bool, + &self, + redacts: &EventId, + sender: &UserId, + room_id: &RoomId, + federation: bool, ) -> Result { let redacting_event = self.services.timeline.get_pdu(redacts).await; @@ -451,7 +513,11 @@ impl Service { } 
if let Ok(pl_event_content) = self - .room_state_get_content::(room_id, &StateEventType::RoomPowerLevels, "") + .room_state_get_content::( + room_id, + &StateEventType::RoomPowerLevels, + "", + ) .await { let pl_event: RoomPowerLevels = pl_event_content.into(); @@ -485,10 +551,15 @@ impl Service { } /// Returns the join rule (`SpaceRoomJoinRule`) for a given room - pub async fn get_join_rule(&self, room_id: &RoomId) -> Result<(SpaceRoomJoinRule, Vec)> { + pub async fn get_join_rule( + &self, + room_id: &RoomId, + ) -> Result<(SpaceRoomJoinRule, Vec)> { self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map(|c: RoomJoinRulesEventContent| (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule))) + .map(|c: RoomJoinRulesEventContent| { + (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) + }) .or_else(|_| Ok((SpaceRoomJoinRule::Invite, vec![]))) } @@ -497,10 +568,7 @@ impl Service { let mut room_ids = Vec::with_capacity(1); if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { for rule in r.allow { - if let AllowRule::RoomMembership(RoomMembership { - room_id: membership, - }) = rule - { + if let AllowRule::RoomMembership(RoomMembership { room_id: membership }) = rule { room_ids.push(membership.clone()); } } @@ -520,7 +588,10 @@ impl Service { /// Gets the room's encryption algorithm if `m.room.encryption` state event /// is found - pub async fn get_room_encryption(&self, room_id: &RoomId) -> Result { + pub async fn get_room_encryption( + &self, + room_id: &RoomId, + ) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomEncryption, "") .await .map(|content: RoomEncryptionEventContent| content.algorithm) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 6b7d35d2..c2de8b62 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -20,7 +20,8 @@ use ruma::{ member::{MembershipState, 
RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, }, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, RoomAccountDataEventType, StateEventType, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, }, int, serde::Raw, @@ -68,7 +69,8 @@ impl crate::Service for Service { services: Services { account_data: args.depend::("account_data"), globals: args.depend::("globals"), - state_accessor: args.depend::("rooms::state_accessor"), + state_accessor: args + .depend::("rooms::state_accessor"), users: args.depend::("users"), }, db: Data { @@ -96,8 +98,13 @@ impl Service { #[tracing::instrument(skip(self, last_state))] #[allow(clippy::too_many_arguments)] pub async fn update_membership( - &self, room_id: &RoomId, user_id: &UserId, membership_event: RoomMemberEventContent, sender: &UserId, - last_state: Option>>, invite_via: Option>, + &self, + room_id: &RoomId, + user_id: &UserId, + membership_event: RoomMemberEventContent, + sender: &UserId, + last_state: Option>>, + invite_via: Option>, update_joined_count: bool, ) -> Result<()> { let membership = membership_event.membership; @@ -138,7 +145,7 @@ impl Service { } match &membership { - MembershipState::Join => { + | MembershipState::Join => { // Check if the user never joined this room if !self.once_joined(user_id, room_id).await { // Add the user ID to the join list then @@ -181,12 +188,21 @@ impl Service { if let Ok(tag_event) = self .services .account_data - .get_room(&predecessor.room_id, user_id, RoomAccountDataEventType::Tag) + .get_room( + &predecessor.room_id, + user_id, + RoomAccountDataEventType::Tag, + ) .await { self.services .account_data - .update(Some(room_id), user_id, RoomAccountDataEventType::Tag, &tag_event) + .update( + Some(room_id), + user_id, + RoomAccountDataEventType::Tag, + &tag_event, + ) .await .ok(); }; @@ -195,7 +211,10 @@ impl Service { if let Ok(mut direct_event) = self .services .account_data 
- .get_global::(user_id, GlobalAccountDataEventType::Direct) + .get_global::( + user_id, + GlobalAccountDataEventType::Direct, + ) .await { let mut room_ids_updated = false; @@ -213,7 +232,8 @@ impl Service { None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), - &serde_json::to_value(&direct_event).expect("to json always works"), + &serde_json::to_value(&direct_event) + .expect("to json always works"), ) .await?; } @@ -223,7 +243,7 @@ impl Service { self.mark_as_joined(user_id, room_id); }, - MembershipState::Invite => { + | MembershipState::Invite => { // We want to know if the sender is ignored by the receiver if self.services.users.user_is_ignored(sender, user_id).await { return Ok(()); @@ -232,10 +252,10 @@ impl Service { self.mark_as_invited(user_id, room_id, last_state, invite_via) .await; }, - MembershipState::Leave | MembershipState::Ban => { + | MembershipState::Leave | MembershipState::Ban => { self.mark_as_left(user_id, room_id); }, - _ => {}, + | _ => {}, } if update_joined_count { @@ -246,7 +266,11 @@ impl Service { } #[tracing::instrument(skip(self, room_id, appservice), level = "debug")] - pub async fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> bool { + pub async fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &RegistrationInfo, + ) -> bool { if let Some(cached) = self .appservice_in_room_cache .read() @@ -347,7 +371,10 @@ impl Service { /// Returns an iterator of all servers participating in this room. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn room_servers<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + pub fn room_servers<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { let prefix = (room_id, Interfix); self.db .roomserverids @@ -357,7 +384,11 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub async fn server_in_room<'a>(&'a self, server: &'a ServerName, room_id: &'a RoomId) -> bool { + pub async fn server_in_room<'a>( + &'a self, + server: &'a ServerName, + room_id: &'a RoomId, + ) -> bool { let key = (server, room_id); self.db.serverroomids.qry(&key).await.is_ok() } @@ -365,7 +396,10 @@ impl Service { /// Returns an iterator of all rooms a server participates in (as far as we /// know). #[tracing::instrument(skip(self), level = "debug")] - pub fn server_rooms<'a>(&'a self, server: &'a ServerName) -> impl Stream + Send + 'a { + pub fn server_rooms<'a>( + &'a self, + server: &'a ServerName, + ) -> impl Stream + Send + 'a { let prefix = (server, Interfix); self.db .serverroomids @@ -393,7 +427,9 @@ impl Service { /// List the rooms common between two users pub fn get_shared_rooms<'a>( - &'a self, user_a: &'a UserId, user_b: &'a UserId, + &'a self, + user_a: &'a UserId, + user_b: &'a UserId, ) -> impl Stream + Send + 'a { use conduwuit::utils::set; @@ -404,7 +440,10 @@ impl Service { /// Returns an iterator of all joined members of a room. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn room_members<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + pub fn room_members<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { let prefix = (room_id, Interfix); self.db .roomuserid_joined @@ -422,7 +461,10 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] /// Returns an iterator of all our local users in the room, even if they're /// deactivated/guests - pub fn local_users_in_room<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + pub fn local_users_in_room<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { self.room_members(room_id) .ready_filter(|user| self.services.globals.user_is_local(user)) } @@ -430,7 +472,10 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] /// Returns an iterator of all our local joined users in a room who are /// active (not deactivated, not guest) - pub fn active_local_users_in_room<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + pub fn active_local_users_in_room<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { self.local_users_in_room(room_id) .filter(|user| self.services.users.is_active(user)) } @@ -447,7 +492,10 @@ impl Service { /// Returns an iterator over all User IDs who ever joined a room. #[tracing::instrument(skip(self), level = "debug")] - pub fn room_useroncejoined<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + pub fn room_useroncejoined<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { let prefix = (room_id, Interfix); self.db .roomuseroncejoinedids @@ -458,7 +506,10 @@ impl Service { /// Returns an iterator over all invited members of a room. 
#[tracing::instrument(skip(self), level = "debug")] - pub fn room_members_invited<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + pub fn room_members_invited<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { let prefix = (room_id, Interfix); self.db .roomuserid_invitecount @@ -485,7 +536,10 @@ impl Service { /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_joined<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + pub fn rooms_joined<'a>( + &'a self, + user_id: &'a UserId, + ) -> impl Stream + Send + 'a { self.db .userroomid_joined .keys_raw_prefix(user_id) @@ -495,7 +549,10 @@ impl Service { /// Returns an iterator over all rooms a user was invited to. #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_invited<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + pub fn rooms_invited<'a>( + &'a self, + user_id: &'a UserId, + ) -> impl Stream + Send + 'a { type KeyVal<'a> = (Key<'a>, Raw>); type Key<'a> = (&'a UserId, &'a RoomId); @@ -510,30 +567,45 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub async fn invite_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>> { + pub async fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>> { let key = (user_id, room_id); self.db .userroomid_invitestate .qry(&key) .await .deserialized() - .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) + .and_then(|val: Raw>| { + val.deserialize_as().map_err(Into::into) + }) } #[tracing::instrument(skip(self), level = "debug")] - pub async fn left_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>> { + pub async fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>> { let key = (user_id, room_id); self.db .userroomid_leftstate .qry(&key) .await .deserialized() - .and_then(|val: Raw>| 
val.deserialize_as().map_err(Into::into)) + .and_then(|val: Raw>| { + val.deserialize_as().map_err(Into::into) + }) } /// Returns an iterator over all rooms a user left. #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_left<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + pub fn rooms_left<'a>( + &'a self, + user_id: &'a UserId, + ) -> impl Stream + Send + 'a { type KeyVal<'a> = (Key<'a>, Raw>>); type Key<'a> = (&'a UserId, &'a RoomId); @@ -571,7 +643,11 @@ impl Service { self.db.userroomid_leftstate.qry(&key).await.is_ok() } - pub async fn user_membership(&self, user_id: &UserId, room_id: &RoomId) -> Option { + pub async fn user_membership( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Option { let states = join4( self.is_joined(user_id, room_id), self.is_left(user_id, room_id), @@ -581,16 +657,19 @@ impl Service { .await; match states { - (true, ..) => Some(MembershipState::Join), - (_, true, ..) => Some(MembershipState::Leave), - (_, _, true, ..) => Some(MembershipState::Invite), - (false, false, false, true) => Some(MembershipState::Ban), - _ => None, + | (true, ..) => Some(MembershipState::Join), + | (_, true, ..) => Some(MembershipState::Leave), + | (_, _, true, ..) 
=> Some(MembershipState::Invite), + | (false, false, false, true) => Some(MembershipState::Ban), + | _ => None, } } #[tracing::instrument(skip(self), level = "debug")] - pub fn servers_invite_via<'a>(&'a self, room_id: &'a RoomId) -> impl Stream + Send + 'a { + pub fn servers_invite_via<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { type KeyVal<'a> = (Ignore, Vec<&'a ServerName>); self.db @@ -711,7 +790,10 @@ impl Service { } pub async fn mark_as_invited( - &self, user_id: &UserId, room_id: &RoomId, last_state: Option>>, + &self, + user_id: &UserId, + room_id: &RoomId, + last_state: Option>>, invite_via: Option>, ) { let roomuser_id = (room_id, user_id); diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 36bc92c0..dbe0a386 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -69,7 +69,8 @@ pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; - let cache_capacity = f64::from(config.stateinfo_cache_capacity) * config.cache_capacity_modifier; + let cache_capacity = + f64::from(config.stateinfo_cache_capacity) * config.cache_capacity_modifier; Ok(Arc::new(Self { stateinfo_cache: LruCache::new(usize_from_f64(cache_capacity)?).into(), db: Data { @@ -85,17 +86,16 @@ impl crate::Service for Service { fn memory_usage(&self, out: &mut dyn Write) -> Result { let (cache_len, ents) = { let cache = self.stateinfo_cache.lock().expect("locked"); - let ents = cache - .iter() - .map(at!(1)) - .flat_map(|vec| vec.iter()) - .fold(HashMap::new(), |mut ents, ssi| { + let ents = cache.iter().map(at!(1)).flat_map(|vec| vec.iter()).fold( + HashMap::new(), + |mut ents, ssi| { for cs in &[&ssi.added, &ssi.removed, &ssi.full_state] { ents.insert(Arc::as_ptr(cs), compressed_state_size(cs)); } ents - }); + }, + ); (cache.len(), ents) }; @@ 
-117,7 +117,10 @@ impl crate::Service for Service { impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and /// removed diff for the selected shortstatehash and each parent layer. - pub async fn load_shortstatehash_info(&self, shortstatehash: ShortStateHash) -> Result { + pub async fn load_shortstatehash_info( + &self, + shortstatehash: ShortStateHash, + ) -> Result { if let Some(r) = self .stateinfo_cache .lock() @@ -143,12 +146,11 @@ impl Service { Ok(stack) } - async fn new_shortstatehash_info(&self, shortstatehash: ShortStateHash) -> Result { - let StateDiff { - parent, - added, - removed, - } = self.get_statediff(shortstatehash).await?; + async fn new_shortstatehash_info( + &self, + shortstatehash: ShortStateHash, + ) -> Result { + let StateDiff { parent, added, removed } = self.get_statediff(shortstatehash).await?; let Some(parent) = parent else { return Ok(vec![ShortStateInfo { @@ -180,9 +182,17 @@ impl Service { Ok(stack) } - pub fn compress_state_events<'a, I>(&'a self, state: I) -> impl Stream + Send + 'a + pub fn compress_state_events<'a, I>( + &'a self, + state: I, + ) -> impl Stream + Send + 'a where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, + I: Iterator + + Clone + + Debug + + ExactSizeIterator + + Send + + 'a, { let event_ids = state.clone().map(at!(1)); @@ -195,10 +205,16 @@ impl Service { .stream() .map(at!(0)) .zip(short_event_ids) - .map(|(shortstatekey, shorteventid)| compress_state_event(*shortstatekey, shorteventid)) + .map(|(shortstatekey, shorteventid)| { + compress_state_event(*shortstatekey, shorteventid) + }) } - pub async fn compress_state_event(&self, shortstatekey: ShortStateKey, event_id: &EventId) -> CompressedStateEvent { + pub async fn compress_state_event( + &self, + shortstatekey: ShortStateKey, + event_id: &EventId, + ) -> CompressedStateEvent { let shorteventid = self .services .short @@ -227,8 +243,11 @@ impl Service { /// * `parent_states` - A stack with info on 
shortstatehash, full state, /// added diff and removed diff for each parent layer pub fn save_state_from_diff( - &self, shortstatehash: ShortStateHash, statediffnew: Arc>, - statediffremoved: Arc>, diff_to_sibling: usize, + &self, + shortstatehash: ShortStateHash, + statediffnew: Arc>, + statediffremoved: Arc>, + diff_to_sibling: usize, mut parent_states: ParentStatesVec, ) -> Result { let statediffnew_len = statediffnew.len(); @@ -274,14 +293,11 @@ impl Service { if parent_states.is_empty() { // There is no parent layer, create a new state - self.save_statediff( - shortstatehash, - &StateDiff { - parent: None, - added: statediffnew, - removed: statediffremoved, - }, - ); + self.save_statediff(shortstatehash, &StateDiff { + parent: None, + added: statediffnew, + removed: statediffremoved, + }); return Ok(()); }; @@ -327,14 +343,11 @@ impl Service { )?; } else { // Diff small enough, we add diff as layer on top of parent - self.save_statediff( - shortstatehash, - &StateDiff { - parent: Some(parent.shortstatehash), - added: statediffnew, - removed: statediffremoved, - }, - ); + self.save_statediff(shortstatehash, &StateDiff { + parent: Some(parent.shortstatehash), + added: statediffnew, + removed: statediffremoved, + }); } Ok(()) @@ -344,7 +357,9 @@ impl Service { /// room state #[tracing::instrument(skip(self, new_state_ids_compressed), level = "debug")] pub async fn save_state( - &self, room_id: &RoomId, new_state_ids_compressed: Arc>, + &self, + room_id: &RoomId, + new_state_ids_compressed: Arc>, ) -> Result { let previous_shortstatehash = self .services @@ -353,7 +368,8 @@ impl Service { .await .ok(); - let state_hash = utils::calculate_hash(new_state_ids_compressed.iter().map(|bytes| &bytes[..])); + let state_hash = + utils::calculate_hash(new_state_ids_compressed.iter().map(|bytes| &bytes[..])); let (new_shortstatehash, already_existed) = self .services @@ -374,22 +390,23 @@ impl Service { ShortStateInfoVec::new() }; - let (statediffnew, statediffremoved) = if 
let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.full_state) - .copied() - .collect(); + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: HashSet<_> = new_state_ids_compressed + .difference(&parent_stateinfo.full_state) + .copied() + .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo - .full_state - .difference(&new_state_ids_compressed) - .copied() - .collect(); + let statediffremoved: HashSet<_> = parent_stateinfo + .full_state + .difference(&new_state_ids_compressed) + .copied() + .collect(); - (Arc::new(statediffnew), Arc::new(statediffremoved)) - } else { - (new_state_ids_compressed, Arc::new(HashSet::new())) - }; + (Arc::new(statediffnew), Arc::new(statediffremoved)) + } else { + (new_state_ids_compressed, Arc::new(HashSet::new())) + }; if !already_existed { self.save_state_from_diff( @@ -418,7 +435,9 @@ impl Service { .shortstatehash_statediff .aqry::(&shortstatehash) .await - .map_err(|e| err!(Database("Failed to find StateDiff from short {shortstatehash:?}: {e}")))?; + .map_err(|e| { + err!(Database("Failed to find StateDiff from short {shortstatehash:?}: {e}")) + })?; let parent = utils::u64_from_bytes(&value[0..size_of::()]) .ok() @@ -484,7 +503,10 @@ impl Service { #[inline] #[must_use] -fn compress_state_event(shortstatekey: ShortStateKey, shorteventid: ShortEventId) -> CompressedStateEvent { +fn compress_state_event( + shortstatekey: ShortStateKey, + shorteventid: ShortEventId, +) -> CompressedStateEvent { const SIZE: usize = size_of::(); let mut v = ArrayVec::::new(); @@ -497,7 +519,9 @@ fn compress_state_event(shortstatekey: ShortStateKey, shorteventid: ShortEventId #[inline] #[must_use] -pub fn parse_compressed_state_event(compressed_event: CompressedStateEvent) -> (ShortStateKey, ShortEventId) { +pub fn parse_compressed_state_event( + compressed_event: CompressedStateEvent, 
+) -> (ShortStateKey, ShortEventId) { use utils::u64_from_u8; let shortstatekey = u64_from_u8(&compressed_event[0..size_of::()]); diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index f65be902..bc995e27 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -11,8 +11,8 @@ use conduwuit::{ use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{ - api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, CanonicalJsonValue, - EventId, OwnedUserId, RoomId, UserId, + api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, + CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, }; use serde_json::json; @@ -55,7 +55,9 @@ impl Service { .timeline .get_pdu_id(root_event_id) .await - .map_err(|e| err!(Request(InvalidParam("Invalid event_id in thread message: {e:?}"))))?; + .map_err(|e| { + err!(Request(InvalidParam("Invalid event_id in thread message: {e:?}"))) + })?; let root_pdu = self .services @@ -79,8 +81,9 @@ impl Service { .get("m.relations") .and_then(|r| r.as_object()) .and_then(|r| r.get("m.thread")) - .and_then(|relations| serde_json::from_value::(relations.clone().into()).ok()) - { + .and_then(|relations| { + serde_json::from_value::(relations.clone().into()).ok() + }) { // Thread already existed relations.count = relations.count.saturating_add(uint!(1)); relations.latest_event = pdu.to_message_like_event(); @@ -129,7 +132,11 @@ impl Service { } pub async fn threads_until<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, shorteventid: PduCount, _inc: &'a IncludeThreads, + &'a self, + user_id: &'a UserId, + room_id: &'a RoomId, + shorteventid: PduCount, + _inc: &'a IncludeThreads, ) -> Result + Send + 'a> { let shortroomid: ShortRoomId = self.services.short.get_shortroomid(room_id).await?; @@ -160,7 +167,11 @@ impl Service { Ok(stream) } - pub(super) fn update_participants(&self, root_id: 
&RawPduId, participants: &[OwnedUserId]) -> Result { + pub(super) fn update_participants( + &self, + root_id: &RawPduId, + participants: &[OwnedUserId], + ) -> Result { let users = participants .iter() .map(|user| user.as_bytes()) diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index a32ff54f..0be8aa52 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -13,7 +13,9 @@ use conduwuit::{ }; use database::{Database, Deserialized, Json, KeyVal, Map}; use futures::{future::select_ok, FutureExt, Stream, StreamExt}; -use ruma::{api::Direction, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId}; +use ruma::{ + api::Direction, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId, +}; use tokio::sync::Mutex; use super::{PduId, RawPduId}; @@ -54,15 +56,19 @@ impl Data { } } - pub(super) async fn last_timeline_count(&self, sender_user: Option<&UserId>, room_id: &RoomId) -> Result { + pub(super) async fn last_timeline_count( + &self, + sender_user: Option<&UserId>, + room_id: &RoomId, + ) -> Result { match self .lasttimelinecount_cache .lock() .await .entry(room_id.into()) { - hash_map::Entry::Occupied(o) => Ok(*o.get()), - hash_map::Entry::Vacant(v) => Ok(self + | hash_map::Entry::Occupied(o) => Ok(*o.get()), + | hash_map::Entry::Vacant(v) => Ok(self .pdus_rev(sender_user, room_id, PduCount::max()) .await? .next() @@ -93,7 +99,10 @@ impl Data { } /// Returns the json of a pdu. - pub(super) async fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result { + pub(super) async fn get_non_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result { let pduid = self.get_pdu_id(event_id).await?; self.pduid_pdu.get(&pduid).await.deserialized() @@ -160,12 +169,19 @@ impl Data { } /// Returns the pdu as a `BTreeMap`. 
- pub(super) async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result { + pub(super) async fn get_pdu_json_from_id( + &self, + pdu_id: &RawPduId, + ) -> Result { self.pduid_pdu.get(pdu_id).await.deserialized() } pub(super) async fn append_pdu( - &self, pdu_id: &RawPduId, pdu: &PduEvent, json: &CanonicalJsonObject, count: PduCount, + &self, + pdu_id: &RawPduId, + pdu: &PduEvent, + json: &CanonicalJsonObject, + count: PduCount, ) { debug_assert!(matches!(count, PduCount::Normal(_)), "PduCount not Normal"); @@ -179,7 +195,12 @@ impl Data { self.eventid_outlierpdu.remove(pdu.event_id.as_bytes()); } - pub(super) fn prepend_backfill_pdu(&self, pdu_id: &RawPduId, event_id: &EventId, json: &CanonicalJsonObject) { + pub(super) fn prepend_backfill_pdu( + &self, + pdu_id: &RawPduId, + event_id: &EventId, + json: &CanonicalJsonObject, + ) { self.pduid_pdu.raw_put(pdu_id, Json(json)); self.eventid_pduid.insert(event_id, pdu_id); self.eventid_outlierpdu.remove(event_id); @@ -187,7 +208,10 @@ impl Data { /// Removes a pdu and creates a new one with the same id. pub(super) async fn replace_pdu( - &self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject, _pdu: &PduEvent, + &self, + pdu_id: &RawPduId, + pdu_json: &CanonicalJsonObject, + _pdu: &PduEvent, ) -> Result { if self.pduid_pdu.get(pdu_id).await.is_not_found() { return Err!(Request(NotFound("PDU does not exist."))); @@ -202,7 +226,10 @@ impl Data { /// happened before the event with id `until` in reverse-chronological /// order. 
pub(super) async fn pdus_rev<'a>( - &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, until: PduCount, + &'a self, + user_id: Option<&'a UserId>, + room_id: &'a RoomId, + until: PduCount, ) -> Result + Send + 'a> { let current = self .count_to_id(room_id, until, Direction::Backward) @@ -219,7 +246,10 @@ impl Data { } pub(super) async fn pdus<'a>( - &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, from: PduCount, + &'a self, + user_id: Option<&'a UserId>, + room_id: &'a RoomId, + from: PduCount, ) -> Result + Send + Unpin + 'a> { let current = self.count_to_id(room_id, from, Direction::Forward).await?; let prefix = current.shortroomid(); @@ -236,8 +266,8 @@ impl Data { fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: Option<&UserId>) -> PdusIterItem { let pdu_id: RawPduId = pdu_id.into(); - let mut pdu = - serde_json::from_slice::(pdu).expect("PduEvent in pduid_pdu database column is invalid JSON"); + let mut pdu = serde_json::from_slice::(pdu) + .expect("PduEvent in pduid_pdu database column is invalid JSON"); if Some(pdu.sender.borrow()) != user_id { pdu.remove_transaction_id().log_err().ok(); @@ -249,7 +279,10 @@ impl Data { } pub(super) fn increment_notification_counts( - &self, room_id: &RoomId, notifies: Vec, highlights: Vec, + &self, + room_id: &RoomId, + notifies: Vec, + highlights: Vec, ) { let _cork = self.db.cork(); @@ -268,7 +301,12 @@ impl Data { } } - async fn count_to_id(&self, room_id: &RoomId, shorteventid: PduCount, dir: Direction) -> Result { + async fn count_to_id( + &self, + room_id: &RoomId, + shorteventid: PduCount, + dir: Direction, + ) -> Result { let shortroomid: ShortRoomId = self .services .short diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2bc5cf73..2ae66546 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -15,7 +15,9 @@ use conduwuit::{ validated, warn, Err, Error, Result, Server, }; pub use conduwuit::{PduId, RawPduId}; -use 
futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{ + future, future::ready, Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, +}; use ruma::{ api::federation, canonical_json::to_canonical_value, @@ -32,8 +34,8 @@ use ruma::{ }, push::{Action, Ruleset, Tweak}, state_res::{self, Event, RoomVersion}, - uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - RoomId, RoomVersionId, ServerName, UserId, + uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -116,7 +118,8 @@ impl crate::Service for Service { short: args.depend::("rooms::short"), state: args.depend::("rooms::state"), state_cache: args.depend::("rooms::state_cache"), - state_accessor: args.depend::("rooms::state_accessor"), + state_accessor: args + .depend::("rooms::state_accessor"), pdu_metadata: args.depend::("rooms::pdu_metadata"), read_receipt: args.depend::("rooms::read_receipt"), sending: args.depend::("sending"), @@ -127,7 +130,8 @@ impl crate::Service for Service { threads: args.depend::("rooms::threads"), search: args.depend::("rooms::search"), spaces: args.depend::("rooms::spaces"), - event_handler: args.depend::("rooms::event_handler"), + event_handler: args + .depend::("rooms::event_handler"), }, db: Data::new(&args), mutex_insert: RoomMutexMap::new(), @@ -185,12 +189,18 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub async fn last_timeline_count(&self, sender_user: Option<&UserId>, room_id: &RoomId) -> Result { + pub async fn last_timeline_count( + &self, + sender_user: Option<&UserId>, + room_id: &RoomId, + ) -> Result { self.db.last_timeline_count(sender_user, room_id).await } /// Returns the `count` of this pdu's id. 
- pub async fn get_pdu_count(&self, event_id: &EventId) -> Result { self.db.get_pdu_count(event_id).await } + pub async fn get_pdu_count(&self, event_id: &EventId) -> Result { + self.db.get_pdu_count(event_id).await + } // TODO Is this the same as the function above? /* @@ -222,13 +232,18 @@ impl Service { /// Returns the json of a pdu. #[inline] - pub async fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result { + pub async fn get_non_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result { self.db.get_non_outlier_pdu_json(event_id).await } /// Returns the pdu's id. #[inline] - pub async fn get_pdu_id(&self, event_id: &EventId) -> Result { self.db.get_pdu_id(event_id).await } + pub async fn get_pdu_id(&self, event_id: &EventId) -> Result { + self.db.get_pdu_id(event_id).await + } /// Returns the pdu. /// @@ -241,19 +256,26 @@ impl Service { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub async fn get_pdu(&self, event_id: &EventId) -> Result { self.db.get_pdu(event_id).await } + pub async fn get_pdu(&self, event_id: &EventId) -> Result { + self.db.get_pdu(event_id).await + } /// Checks if pdu exists /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn pdu_exists<'a>(&'a self, event_id: &'a EventId) -> impl Future + Send + 'a { + pub fn pdu_exists<'a>( + &'a self, + event_id: &'a EventId, + ) -> impl Future + Send + 'a { self.db.pdu_exists(event_id) } /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result { self.db.get_pdu_from_id(pdu_id).await } + pub async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result { + self.db.get_pdu_from_id(pdu_id).await + } /// Returns the pdu as a `BTreeMap`. pub async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result { @@ -262,7 +284,12 @@ impl Service { /// Removes a pdu and creates a new one with the same id. 
#[tracing::instrument(skip(self), level = "debug")] - pub async fn replace_pdu(&self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()> { + pub async fn replace_pdu( + &self, + pdu_id: &RawPduId, + pdu_json: &CanonicalJsonObject, + pdu: &PduEvent, + ) -> Result<()> { self.db.replace_pdu(pdu_id, pdu_json, pdu).await } @@ -278,7 +305,8 @@ impl Service { pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, leaves: Vec, - state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state + * mutex */ ) -> Result { // Coalesce database writes for the remainder of this scope. let _cork = self.db.db.cork_and_flush(); @@ -313,10 +341,16 @@ impl Service { unsigned.insert( "prev_content".to_owned(), CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()).map_err(|e| { - error!("Failed to convert prev_state to canonical JSON: {e}"); - Error::bad_database("Failed to convert prev_state to canonical JSON.") - })?, + utils::to_canonical_object(prev_state.content.clone()).map_err( + |e| { + error!( + "Failed to convert prev_state to canonical JSON: {e}" + ); + Error::bad_database( + "Failed to convert prev_state to canonical JSON.", + ) + }, + )?, ), ); unsigned.insert( @@ -357,11 +391,7 @@ impl Service { .reset_notification_counts(&pdu.sender, &pdu.room_id); let count2 = PduCount::Normal(self.services.globals.next_count().unwrap()); - let pdu_id: RawPduId = PduId { - shortroomid, - shorteventid: count2, - } - .into(); + let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into(); // Insert pdu self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2).await; @@ -408,7 +438,10 @@ impl Service { .account_data .get_global(user, GlobalAccountDataEventType::PushRules) .await - .map_or_else(|_| Ruleset::server_default(user), |ev: PushRulesEvent| ev.content.global); + .map_or_else( + |_| 
Ruleset::server_default(user), + |ev: PushRulesEvent| ev.content.global, + ); let mut highlight = false; let mut notify = false; @@ -420,11 +453,11 @@ impl Service { .await { match action { - Action::Notify => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { + | Action::Notify => notify = true, + | Action::SetTweak(Tweak::Highlight(true)) => { highlight = true; }, - _ => {}, + | _ => {}, }; // Break early if both conditions are true @@ -457,12 +490,12 @@ impl Service { .increment_notification_counts(&pdu.room_id, notifies, highlights); match pdu.kind { - TimelineEventType::RoomRedaction => { + | TimelineEventType::RoomRedaction => { use RoomVersionId::*; let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; match room_version_id { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { if let Some(redact_id) = &pdu.redacts { if self .services @@ -474,7 +507,7 @@ impl Service { } } }, - _ => { + | _ => { let content: RoomRedactionEventContent = pdu.get_content()?; if let Some(redact_id) = &content.redacts { if self @@ -489,7 +522,7 @@ impl Service { }, }; }, - TimelineEventType::SpaceChild => { + | TimelineEventType::SpaceChild => if let Some(_state_key) = &pdu.state_key { self.services .spaces @@ -497,18 +530,18 @@ impl Service { .lock() .await .remove(&pdu.room_id); - } - }, - TimelineEventType::RoomMember => { + }, + | TimelineEventType::RoomMember => { if let Some(state_key) = &pdu.state_key { // if the state_key fails - let target_user_id = - UserId::parse(state_key.clone()).expect("This state_key was previously validated"); + let target_user_id = UserId::parse(state_key.clone()) + .expect("This state_key was previously validated"); let content: RoomMemberEventContent = pdu.get_content()?; let invite_state = match content.membership { - MembershipState::Invite => self.services.state.summary_stripped(pdu).await.into(), - _ => None, + | MembershipState::Invite => + 
self.services.state.summary_stripped(pdu).await.into(), + | _ => None, }; // Update our membership info, we do this here incase a user is invited @@ -527,7 +560,7 @@ impl Service { .await?; } }, - TimelineEventType::RoomMessage => { + | TimelineEventType::RoomMessage => { let content: ExtractBody = pdu.get_content()?; if let Some(body) = content.body { self.services.search.index_pdu(shortroomid, &pdu_id, &body); @@ -539,7 +572,7 @@ impl Service { } } }, - _ => {}, + | _ => {}, } if let Ok(content) = pdu.get_content::() { @@ -552,24 +585,23 @@ impl Service { if let Ok(content) = pdu.get_content::() { match content.relates_to { - Relation::Reply { - in_reply_to, - } => { + | Relation::Reply { in_reply_to } => { // We need to do it again here, because replies don't have // event_id as a top level field - if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await { + if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await + { self.services .pdu_metadata .add_relation(count2, related_pducount); } }, - Relation::Thread(thread) => { + | Relation::Thread(thread) => { self.services .threads .add_to_thread(&thread.event_id, pdu) .await?; }, - _ => {}, // TODO: Aggregate other types + | _ => {}, // TODO: Aggregate other types } } @@ -637,7 +669,8 @@ impl Service { pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - _mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room + * state mutex */ ) -> Result<(PduEvent, CanonicalJsonObject)> { let PduBuilder { event_type, @@ -707,7 +740,8 @@ impl Service { unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value()); unsigned.insert( "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), + serde_json::to_value(&prev_pdu.sender) + .expect("UserId::to_value always works"), ); unsigned.insert( 
"replaces_state".to_owned(), @@ -744,9 +778,7 @@ impl Service { } else { Some(to_raw_value(&unsigned).expect("to_raw_value always works")) }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, + hashes: EventHash { sha256: "aaa".to_owned() }, signatures: None, }; @@ -769,13 +801,14 @@ impl Service { } // Hash and sign - let mut pdu_json = utils::to_canonical_object(&pdu) - .map_err(|e| err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}")))))?; + let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| { + err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}")))) + })?; // room v3 and above removed the "event_id" field from remote PDU format match room_version_id { - RoomVersionId::V1 | RoomVersionId::V2 => {}, - _ => { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { pdu_json.remove("event_id"); }, }; @@ -783,7 +816,8 @@ impl Service { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(self.services.globals.server_name()).expect("server name is a valid CanonicalJsonValue"), + to_canonical_value(self.services.globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), ); if let Err(e) = self @@ -792,17 +826,18 @@ impl Service { .hash_and_sign_event(&mut pdu_json, &room_version_id) { return match e { - Error::Signatures(ruma::signatures::Error::PduSize) => { + | Error::Signatures(ruma::signatures::Error::PduSize) => { Err!(Request(TooLarge("Message/PDU is too long (exceeds 65535 bytes)"))) }, - _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))), + | _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))), }; } // Generate event id pdu.event_id = EventId::parse_arc(format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id).expect("ruma can calculate reference hashes") + ruma::signatures::reference_hash(&pdu_json, &room_version_id) + .expect("ruma can calculate 
reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -830,7 +865,8 @@ impl Service { pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state + * mutex */ ) -> Result> { let (pdu, pdu_json) = self .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) @@ -844,7 +880,7 @@ impl Service { if pdu.kind == TimelineEventType::RoomRedaction { use RoomVersionId::*; match self.services.state.get_room_version(&pdu.room_id).await? { - V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { if let Some(redact_id) = &pdu.redacts { if !self .services @@ -856,7 +892,7 @@ impl Service { } }; }, - _ => { + | _ => { let content: RoomRedactionEventContent = pdu.get_content()?; if let Some(redact_id) = &content.redacts { if !self @@ -937,7 +973,8 @@ impl Service { new_room_leaves: Vec, state_ids_compressed: Arc>, soft_fail: bool, - state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex + state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state + * mutex */ ) -> Result> { // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't @@ -971,7 +1008,9 @@ impl Service { /// items. #[inline] pub fn all_pdus<'a>( - &'a self, user_id: &'a UserId, room_id: &'a RoomId, + &'a self, + user_id: &'a UserId, + room_id: &'a RoomId, ) -> impl Stream + Send + Unpin + 'a { self.pdus(Some(user_id), room_id, None) .map_ok(|stream| stream.map(Ok)) @@ -983,7 +1022,10 @@ impl Service { /// Reverse iteration starting at from. 
#[tracing::instrument(skip(self), level = "debug")] pub async fn pdus_rev<'a>( - &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, until: Option, + &'a self, + user_id: Option<&'a UserId>, + room_id: &'a RoomId, + until: Option, ) -> Result + Send + 'a> { self.db .pdus_rev(user_id, room_id, until.unwrap_or_else(PduCount::max)) @@ -993,7 +1035,10 @@ impl Service { /// Forward iteration starting at from. #[tracing::instrument(skip(self), level = "debug")] pub async fn pdus<'a>( - &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, from: Option, + &'a self, + user_id: Option<&'a UserId>, + room_id: &'a RoomId, + from: Option, ) -> Result + Send + 'a> { self.db .pdus(user_id, room_id, from.unwrap_or_else(PduCount::min)) @@ -1002,17 +1047,21 @@ impl Service { /// Replace a PDU with the redacted form. #[tracing::instrument(skip(self, reason))] - pub async fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent, shortroomid: ShortRoomId) -> Result { + pub async fn redact_pdu( + &self, + event_id: &EventId, + reason: &PduEvent, + shortroomid: ShortRoomId, + ) -> Result { // TODO: Don't reserialize, keep original json let Ok(pdu_id) = self.get_pdu_id(event_id).await else { // If event does not exist, just noop return Ok(()); }; - let mut pdu = self - .get_pdu_from_id(&pdu_id) - .await - .map_err(|e| err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))))?; + let mut pdu = self.get_pdu_from_id(&pdu_id).await.map_err(|e| { + err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))) + })?; if let Ok(content) = pdu.get_content::() { if let Some(body) = content.body { @@ -1026,8 +1075,9 @@ impl Service { pdu.redact(&room_version_id, reason)?; - let obj = utils::to_canonical_object(&pdu) - .map_err(|e| err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON"))))?; + let obj = utils::to_canonical_object(&pdu).map_err(|e| { + err!(Database(error!(?event_id, ?e, "Failed to convert PDU to 
canonical JSON"))) + })?; self.replace_pdu(&pdu_id, &obj, &pdu).await } @@ -1069,7 +1119,9 @@ impl Service { .unwrap_or_default(); let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| { - if level > &power_levels.users_default && !self.services.globals.user_is_local(user_id) { + if level > &power_levels.users_default + && !self.services.globals.user_is_local(user_id) + { Some(user_id.server_name()) } else { None @@ -1124,7 +1176,7 @@ impl Service { ) .await; match response { - Ok(response) => { + | Ok(response) => { for pdu in response.pdus { if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await { debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}"); @@ -1132,7 +1184,7 @@ impl Service { } return Ok(()); }, - Err(e) => { + | Err(e) => { warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}"); }, } @@ -1144,7 +1196,8 @@ impl Service { #[tracing::instrument(skip(self, pdu))] pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { - let (event_id, value, room_id) = self.services.event_handler.parse_incoming_pdu(&pdu).await?; + let (event_id, value, room_id) = + self.services.event_handler.parse_incoming_pdu(&pdu).await?; // Lock so we cannot backfill the same pdu twice at the same time let mutex_lock = self @@ -1210,10 +1263,10 @@ impl Service { #[tracing::instrument(skip_all, level = "debug")] async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Result<()> { match pdu.event_type() { - TimelineEventType::RoomEncryption => { + | TimelineEventType::RoomEncryption => { return Err!(Request(Forbidden(error!("Encryption not supported in admins room.")))); }, - TimelineEventType::RoomMember => { + | TimelineEventType::RoomMember => { let target = pdu .state_key() .filter(|v| v.starts_with('@')) @@ -1223,9 +1276,11 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res let content: RoomMemberEventContent = pdu.get_content()?; match 
content.membership { - MembershipState::Leave => { + | MembershipState::Leave => { if target == server_user { - return Err!(Request(Forbidden(error!("Server user cannot leave the admins room.")))); + return Err!(Request(Forbidden(error!( + "Server user cannot leave the admins room." + )))); } let count = self @@ -1239,13 +1294,17 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res .await; if count < 2 { - return Err!(Request(Forbidden(error!("Last admin cannot leave the admins room.")))); + return Err!(Request(Forbidden(error!( + "Last admin cannot leave the admins room." + )))); } }, - MembershipState::Ban if pdu.state_key().is_some() => { + | MembershipState::Ban if pdu.state_key().is_some() => { if target == server_user { - return Err!(Request(Forbidden(error!("Server cannot be banned from admins room.")))); + return Err!(Request(Forbidden(error!( + "Server cannot be banned from admins room." + )))); } let count = self @@ -1259,13 +1318,15 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res .await; if count < 2 { - return Err!(Request(Forbidden(error!("Last admin cannot be banned from admins room.")))); + return Err!(Request(Forbidden(error!( + "Last admin cannot be banned from admins room." + )))); } }, - _ => {}, + | _ => {}, }; }, - _ => {}, + | _ => {}, }; Ok(()) diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index 59862a85..a6123322 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -52,7 +52,12 @@ impl crate::Service for Service { impl Service { /// Sets a user as typing until the timeout timestamp is reached or /// roomtyping_remove is called. 
- pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { + pub async fn typing_add( + &self, + user_id: &UserId, + room_id: &RoomId, + timeout: u64, + ) -> Result<()> { debug_info!("typing started {user_id:?} in {room_id:?} timeout:{timeout:?}"); // update clients self.typing @@ -177,15 +182,15 @@ impl Service { /// Returns a new typing EDU. pub async fn typings_all( - &self, room_id: &RoomId, sender_user: &UserId, + &self, + room_id: &RoomId, + sender_user: &UserId, ) -> Result> { let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); let Some(typing_indicators) = room_typing_indicators else { return Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: Vec::new(), - }, + content: ruma::events::typing::TypingEventContent { user_ids: Vec::new() }, }); }; @@ -204,13 +209,16 @@ impl Service { .await; Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids, - }, + content: ruma::events::typing::TypingEventContent { user_ids }, }) } - async fn federation_send(&self, room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> { + async fn federation_send( + &self, + room_id: &RoomId, + user_id: &UserId, + typing: bool, + ) -> Result<()> { debug_assert!( self.services.globals.user_is_local(user_id), "tried to broadcast typing status of remote user", diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 80d33de4..6a0c6aa1 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -92,7 +92,12 @@ pub async fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) - } #[implement(Service)] -pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: ShortStateHash) { +pub async fn associate_token_shortstatehash( + &self, + room_id: &RoomId, + token: u64, + shortstatehash: ShortStateHash, +) { let shortroomid = self .services .short @@ 
-108,7 +113,11 @@ pub async fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, } #[implement(Service)] -pub async fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result { +pub async fn get_token_shortstatehash( + &self, + room_id: &RoomId, + token: u64, +) -> Result { let shortroomid = self.services.short.get_shortroomid(room_id).await?; let key: &[u64] = &[shortroomid, token]; diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 9008a21f..6b58d964 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -3,14 +3,18 @@ use std::{fmt::Debug, mem}; use bytes::BytesMut; use conduwuit::{debug_error, err, trace, utils, warn, Err, Result}; use reqwest::Client; -use ruma::api::{appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; +use ruma::api::{ + appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, +}; /// Sends a request to an appservice /// /// Only returns Ok(None) if there is no url specified in the appservice /// registration file pub(crate) async fn send_request( - client: &Client, registration: Registration, request: T, + client: &Client, + registration: Registration, + request: T, ) -> Result> where T: OutgoingRequest + Debug + Send, @@ -25,17 +29,17 @@ where let hs_token = registration.hs_token.as_str(); let mut http_request = request - .try_into_http_request::(&dest, SendAccessToken::IfRequired(hs_token), &VERSIONS) + .try_into_http_request::( + &dest, + SendAccessToken::IfRequired(hs_token), + &VERSIONS, + ) .map_err(|e| err!(BadServerResponse(warn!("Failed to find destination {dest}: {e}"))))? .map(BytesMut::freeze); let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains('?') { - "&" - } else { - "?" 
- }; + let symbol = if old_path_and_query.contains('?') { "&" } else { "?" }; parts.path_and_query = Some( (old_path_and_query + symbol + "access_token=" + hs_token) diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 372d8e14..ac06424f 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -43,7 +43,9 @@ impl Data { } } - pub(super) fn delete_active_request(&self, key: &[u8]) { self.servercurrentevent_data.remove(key); } + pub(super) fn delete_active_request(&self, key: &[u8]) { + self.servercurrentevent_data.remove(key); + } pub(super) async fn delete_all_active_requests_for(&self, destination: &Destination) { let prefix = destination.get_prefix(); @@ -76,11 +78,7 @@ impl Data { events .filter(|(key, _)| !key.is_empty()) .for_each(|(key, val)| { - let val = if let SendingEvent::Edu(val) = &val { - &**val - } else { - &[] - }; + let val = if let SendingEvent::Edu(val) = &val { &**val } else { &[] }; self.servercurrentevent_data.insert(key, val); self.servernameevent_data.remove(key); @@ -93,21 +91,26 @@ impl Data { .raw_stream() .ignore_err() .map(|(key, val)| { - let (dest, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); + let (dest, event) = + parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); (key.to_vec(), event, dest) }) } #[inline] - pub fn active_requests_for(&self, destination: &Destination) -> impl Stream + Send + '_ { + pub fn active_requests_for( + &self, + destination: &Destination, + ) -> impl Stream + Send + '_ { let prefix = destination.get_prefix(); self.servercurrentevent_data .raw_stream_from(&prefix) .ignore_err() .ready_take_while(move |(key, _)| key.starts_with(&prefix)) .map(|(key, val)| { - let (_, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); + let (_, event) = + parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); (key.to_vec(), event) }) @@ -150,14 +153,18 @@ impl Data { keys } - 
pub fn queued_requests(&self, destination: &Destination) -> impl Stream + Send + '_ { + pub fn queued_requests( + &self, + destination: &Destination, + ) -> impl Stream + Send + '_ { let prefix = destination.get_prefix(); self.servernameevent_data .raw_stream_from(&prefix) .ignore_err() .ready_take_while(move |(key, _)| key.starts_with(&prefix)) .map(|(key, val)| { - let (_, event) = parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); + let (_, event) = + parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); (key.to_vec(), event) }) @@ -186,8 +193,9 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(server) - .map_err(|_| Error::bad_database("Invalid server bytes in server_currenttransaction"))?; + let server = utils::string_from_bytes(server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; ( Destination::Appservice(server), @@ -203,8 +211,8 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se let user = parts.next().expect("splitn always returns one element"); let user_string = utils::string_from_bytes(user) .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; - let user_id = - UserId::parse(user_string).map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; + let user_id = UserId::parse(user_string) + .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; let pushkey = parts .next() @@ -233,14 +241,14 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(server) - .map_err(|_| Error::bad_database("Invalid server bytes in server_currenttransaction"))?; + let server = 
utils::string_from_bytes(server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; ( - Destination::Normal( - ServerName::parse(server) - .map_err(|_| Error::bad_database("Invalid server string in server_currenttransaction"))?, - ), + Destination::Normal(ServerName::parse(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?), if value.is_empty() { SendingEvent::Pdu(event.into()) } else { diff --git a/src/service/sending/dest.rs b/src/service/sending/dest.rs index 0a5893b7..2c6063cc 100644 --- a/src/service/sending/dest.rs +++ b/src/service/sending/dest.rs @@ -14,7 +14,7 @@ pub enum Destination { #[must_use] pub(super) fn get_prefix(&self) -> Vec { match self { - Self::Normal(server) => { + | Self::Normal(server) => { let len = server.as_bytes().len().saturating_add(1); let mut p = Vec::with_capacity(len); @@ -22,7 +22,7 @@ pub(super) fn get_prefix(&self) -> Vec { p.push(0xFF); p }, - Self::Appservice(server) => { + | Self::Appservice(server) => { let sigil = b"+"; let len = sigil.len().saturating_add(server.len()).saturating_add(1); @@ -32,7 +32,7 @@ pub(super) fn get_prefix(&self) -> Vec { p.push(0xFF); p }, - Self::Push(user, pushkey) => { + | Self::Push(user, pushkey) => { let sigil = b"$"; let len = sigil .len() diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index f9828178..2038f4eb 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -25,8 +25,8 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, client, globals, presence, pusher, resolver, rooms, rooms::timeline::RawPduId, server_keys, users, - Dep, + account_data, client, globals, presence, pusher, resolver, rooms, rooms::timeline::RawPduId, + server_keys, users, Dep, }; pub struct Service { @@ -156,18 +156,16 @@ impl Service { { let _cork = self.db.db.cork(); let requests = servers - .map(|server| (Destination::Normal(server.into()), 
SendingEvent::Pdu(pdu_id.to_owned()))) + .map(|server| { + (Destination::Normal(server.into()), SendingEvent::Pdu(pdu_id.to_owned())) + }) .collect::>() .await; let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { - self.dispatch(Msg { - dest, - event, - queue_id, - })?; + self.dispatch(Msg { dest, event, queue_id })?; } Ok(()) @@ -204,18 +202,16 @@ impl Service { { let _cork = self.db.db.cork(); let requests = servers - .map(|server| (Destination::Normal(server.to_owned()), SendingEvent::Edu(serialized.clone()))) + .map(|server| { + (Destination::Normal(server.to_owned()), SendingEvent::Edu(serialized.clone())) + }) .collect::>() .await; let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { - self.dispatch(Msg { - dest, - event, - queue_id, - })?; + self.dispatch(Msg { dest, event, queue_id })?; } Ok(()) @@ -253,7 +249,11 @@ impl Service { /// Sends a request to a federation server #[tracing::instrument(skip_all, name = "request")] - pub async fn send_federation_request(&self, dest: &ServerName, request: T) -> Result + pub async fn send_federation_request( + &self, + dest: &ServerName, + request: T, + ) -> Result where T: OutgoingRequest + Debug + Send, { @@ -263,7 +263,11 @@ impl Service { /// Like send_federation_request() but with a very large timeout #[tracing::instrument(skip_all, name = "synapse")] - pub async fn send_synapse_request(&self, dest: &ServerName, request: T) -> Result + pub async fn send_synapse_request( + &self, + dest: &ServerName, + request: T, + ) -> Result where T: OutgoingRequest + Debug + Send, { @@ -276,7 +280,9 @@ impl Service { /// Only returns None if there is no url specified in the appservice /// registration file pub async fn send_appservice_request( - &self, registration: Registration, request: T, + &self, + registration: Registration, + request: T, ) -> Result> 
where T: OutgoingRequest + Debug + Send, @@ -291,24 +297,30 @@ impl Service { /// key #[tracing::instrument(skip(self), level = "debug")] pub async fn cleanup_events( - &self, appservice_id: Option<&str>, user_id: Option<&UserId>, push_key: Option<&str>, + &self, + appservice_id: Option<&str>, + user_id: Option<&UserId>, + push_key: Option<&str>, ) -> Result { match (appservice_id, user_id, push_key) { - (None, Some(user_id), Some(push_key)) => { + | (None, Some(user_id), Some(push_key)) => { self.db - .delete_all_requests_for(&Destination::Push(user_id.to_owned(), push_key.to_owned())) + .delete_all_requests_for(&Destination::Push( + user_id.to_owned(), + push_key.to_owned(), + )) .await; Ok(()) }, - (Some(appservice_id), None, None) => { + | (Some(appservice_id), None, None) => { self.db .delete_all_requests_for(&Destination::Appservice(appservice_id.to_owned())) .await; Ok(()) }, - _ => { + | _ => { debug_warn!("cleanup_events called with too many or too few arguments"); Ok(()) }, diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 862d2a42..81467c16 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -2,16 +2,16 @@ use std::mem; use bytes::Bytes; use conduwuit::{ - debug, debug_error, debug_warn, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, Err, Error, - Result, + debug, debug_error, debug_warn, err, error::inspect_debug_log, implement, trace, + utils::string::EMPTY, Err, Error, Result, }; use http::{header::AUTHORIZATION, HeaderValue}; use ipaddress::IPAddress; use reqwest::{Client, Method, Request, Response, Url}; use ruma::{ api::{ - client::error::Error as RumaError, EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, - SendAccessToken, + client::error::Error as RumaError, EndpointError, IncomingResponse, MatrixVersion, + OutgoingRequest, SendAccessToken, }, serde::Base64, server_util::authorization::XMatrix, @@ -25,7 +25,12 @@ use crate::{ impl super::Service { 
#[tracing::instrument(skip_all, level = "debug")] - pub async fn send(&self, client: &Client, dest: &ServerName, request: T) -> Result + pub async fn send( + &self, + client: &Client, + dest: &ServerName, + request: T, + ) -> Result where T: OutgoingRequest + Send, { @@ -39,7 +44,9 @@ impl super::Service { .forbidden_remote_server_names .contains(dest) { - return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); + return Err!(Request(Forbidden(debug_warn!( + "Federation with {dest} is not allowed." + )))); } let actual = self.services.resolver.get_actual_dest(dest).await?; @@ -49,7 +56,11 @@ impl super::Service { } async fn execute( - &self, dest: &ServerName, actual: &ActualDest, request: Request, client: &Client, + &self, + dest: &ServerName, + actual: &ActualDest, + request: Request, + client: &Client, ) -> Result where T: OutgoingRequest + Send, @@ -59,8 +70,18 @@ impl super::Service { debug!(?method, ?url, "Sending request"); match client.execute(request).await { - Ok(response) => handle_response::(&self.services.resolver, dest, actual, &method, &url, response).await, - Err(error) => Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), + | Ok(response) => + handle_response::( + &self.services.resolver, + dest, + actual, + &method, + &url, + response, + ) + .await, + | Err(error) => + Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), } } @@ -86,7 +107,11 @@ impl super::Service { } async fn handle_response( - resolver: &resolver::Service, dest: &ServerName, actual: &ActualDest, method: &Method, url: &Url, + resolver: &resolver::Service, + dest: &ServerName, + actual: &ActualDest, + method: &Method, + url: &Url, response: Response, ) -> Result where @@ -96,21 +121,22 @@ where let result = T::IncomingResponse::try_from_http_response(response); if result.is_ok() && !actual.cached { - resolver.set_cached_destination( - dest.to_owned(), - CachedDest { - dest: 
actual.dest.clone(), - host: actual.host.clone(), - expire: CachedDest::default_expire(), - }, - ); + resolver.set_cached_destination(dest.to_owned(), CachedDest { + dest: actual.dest.clone(), + host: actual.host.clone(), + expire: CachedDest::default_expire(), + }); } result.map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}"))) } async fn into_http_response( - dest: &ServerName, actual: &ActualDest, method: &Method, url: &Url, mut response: Response, + dest: &ServerName, + actual: &ActualDest, + method: &Method, + url: &Url, + mut response: Response, ) -> Result> { let status = response.status(); trace!( @@ -146,13 +172,21 @@ async fn into_http_response( debug!("Got {status:?} for {method} {url}"); if !status.is_success() { - return Err(Error::Federation(dest.to_owned(), RumaError::from_http_response(http_response))); + return Err(Error::Federation( + dest.to_owned(), + RumaError::from_http_response(http_response), + )); } Ok(http_response) } -fn handle_error(actual: &ActualDest, method: &Method, url: &Url, mut e: reqwest::Error) -> Result { +fn handle_error( + actual: &ActualDest, + method: &Method, + url: &Url, + mut e: reqwest::Error, +) -> Result { if e.is_timeout() || e.is_connect() { e = e.without_url(); debug_warn!("{e:?}"); @@ -186,7 +220,8 @@ fn sign_request(&self, http_request: &mut http::Request>, dest: &ServerN .expect("http::Request missing path_and_query"); let mut req: Object = if !body.is_empty() { - let content: CanonicalJsonValue = serde_json::from_slice(body).expect("failed to serialize body"); + let content: CanonicalJsonValue = + serde_json::from_slice(body).expect("failed to serialize body"); let authorization: [Member; 5] = [ ("content".into(), content), diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 77fd7d5c..1f462f39 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -24,15 +24,19 @@ use ruma::{ appservice::event::push_events::v1::Edu as RumaEdu, 
federation::transactions::{ edu::{ - DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap, + DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, + ReceiptData, ReceiptMap, }, send_transaction_message, }, }, device_id, - events::{push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, - push, uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, - RoomVersionId, ServerName, UInt, + events::{ + push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, + GlobalAccountDataEventType, + }, + push, uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UInt, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -86,11 +90,14 @@ impl Service { } async fn handle_response<'a>( - &'a self, response: SendingResult, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus, + &'a self, + response: SendingResult, + futures: &mut SendingFutures<'a>, + statuses: &mut CurTransactionStatus, ) { match response { - Ok(dest) => self.handle_response_ok(&dest, futures, statuses).await, - Err((dest, e)) => Self::handle_response_err(dest, statuses, &e), + | Ok(dest) => self.handle_response_ok(&dest, futures, statuses).await, + | Err((dest, e)) => Self::handle_response_err(dest, statuses, &e), }; } @@ -98,16 +105,22 @@ impl Service { debug!(dest = ?dest, "{e:?}"); statuses.entry(dest).and_modify(|e| { *e = match e { - TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - TransactionStatus::Retrying(ref n) => TransactionStatus::Failed(n.saturating_add(1), Instant::now()), - TransactionStatus::Failed(..) 
=> panic!("Request that was not even running failed?!"), + | TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), + | TransactionStatus::Retrying(ref n) => + TransactionStatus::Failed(n.saturating_add(1), Instant::now()), + | TransactionStatus::Failed(..) => { + panic!("Request that was not even running failed?!") + }, } }); } #[allow(clippy::needless_pass_by_ref_mut)] async fn handle_response_ok<'a>( - &'a self, dest: &Destination, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus, + &'a self, + dest: &Destination, + futures: &mut SendingFutures<'a>, + statuses: &mut CurTransactionStatus, ) { let _cork = self.db.db.cork(); self.db.delete_all_active_requests_for(dest).await; @@ -133,7 +146,10 @@ impl Service { #[allow(clippy::needless_pass_by_ref_mut)] async fn handle_request<'a>( - &'a self, msg: Msg, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus, + &'a self, + msg: Msg, + futures: &mut SendingFutures<'a>, + statuses: &mut CurTransactionStatus, ) { let iv = vec![(msg.queue_id, msg.event)]; if let Ok(Some(events)) = self.select_events(&msg.dest, iv, statuses).await { @@ -168,8 +184,13 @@ impl Service { } #[allow(clippy::needless_pass_by_ref_mut)] - async fn initial_requests<'a>(&'a self, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus) { - let keep = usize::try_from(self.server.config.startup_netburst_keep).unwrap_or(usize::MAX); + async fn initial_requests<'a>( + &'a self, + futures: &mut SendingFutures<'a>, + statuses: &mut CurTransactionStatus, + ) { + let keep = + usize::try_from(self.server.config.startup_netburst_keep).unwrap_or(usize::MAX); let mut txns = HashMap::>::new(); let mut active = self.db.active_requests().boxed(); @@ -240,7 +261,11 @@ impl Service { } #[tracing::instrument(skip_all, level = "debug")] - fn select_events_current(&self, dest: Destination, statuses: &mut CurTransactionStatus) -> Result<(bool, bool)> { + fn select_events_current( + &self, + dest: 
Destination, + statuses: &mut CurTransactionStatus, + ) -> Result<(bool, bool)> { let (mut allow, mut retry) = (true, false); statuses .entry(dest.clone()) // TODO: can we avoid cloning? @@ -278,7 +303,8 @@ impl Service { let events_len = AtomicUsize::default(); let max_edu_count = AtomicU64::new(since); - let device_changes = self.select_edus_device_changes(server_name, batch, &max_edu_count, &events_len); + let device_changes = + self.select_edus_device_changes(server_name, batch, &max_edu_count, &events_len); let receipts: OptionFuture<_> = self .server @@ -305,7 +331,11 @@ impl Service { /// Look for presence async fn select_edus_device_changes( - &self, server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, events_len: &AtomicUsize, + &self, + server_name: &ServerName, + since: (u64, u64), + max_edu_count: &AtomicU64, + events_len: &AtomicUsize, ) -> Vec> { let mut events = Vec::new(); let server_rooms = self.services.state_cache.server_rooms(server_name); @@ -342,7 +372,8 @@ impl Service { keys: None, }); - let edu = serde_json::to_vec(&edu).expect("failed to serialize device list update to JSON"); + let edu = serde_json::to_vec(&edu) + .expect("failed to serialize device list update to JSON"); events.push(edu); if events_len.fetch_add(1, Ordering::Relaxed) >= SELECT_EDU_LIMIT - 1 { @@ -356,7 +387,10 @@ impl Service { /// Look for read receipts in this room async fn select_edus_receipts( - &self, server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, + &self, + server_name: &ServerName, + since: (u64, u64), + max_edu_count: &AtomicU64, ) -> Option> { let server_rooms = self.services.state_cache.server_rooms(server_name); @@ -377,19 +411,21 @@ impl Service { return None; } - let receipt_content = Edu::Receipt(ReceiptContent { - receipts, - }); + let receipt_content = Edu::Receipt(ReceiptContent { receipts }); - let receipt_content = - serde_json::to_vec(&receipt_content).expect("Failed to serialize Receipt EDU to JSON vec"); + 
let receipt_content = serde_json::to_vec(&receipt_content) + .expect("Failed to serialize Receipt EDU to JSON vec"); Some(receipt_content) } /// Look for read receipts in this room async fn select_edus_receipts_room( - &self, room_id: &RoomId, since: (u64, u64), max_edu_count: &AtomicU64, num: &mut usize, + &self, + room_id: &RoomId, + since: (u64, u64), + max_edu_count: &AtomicU64, + num: &mut usize, ) -> ReceiptMap { let receipts = self .services @@ -444,14 +480,15 @@ impl Service { } } - ReceiptMap { - read, - } + ReceiptMap { read } } /// Look for presence async fn select_edus_presence( - &self, server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, + &self, + server_name: &ServerName, + since: (u64, u64), + max_edu_count: &AtomicU64, ) -> Option> { let presence_since = self.services.presence.presence_since(since.0); @@ -511,7 +548,8 @@ impl Service { push: presence_updates.into_values().collect(), }); - let presence_content = serde_json::to_vec(&presence_content).expect("failed to serialize Presence EDU to JSON"); + let presence_content = serde_json::to_vec(&presence_content) + .expect("failed to serialize Presence EDU to JSON"); Some(presence_content) } @@ -519,21 +557,28 @@ impl Service { async fn send_events(&self, dest: Destination, events: Vec) -> SendingResult { //debug_assert!(!events.is_empty(), "sending empty transaction"); match dest { - Destination::Normal(ref server) => self.send_events_dest_normal(&dest, server, events).await, - Destination::Appservice(ref id) => self.send_events_dest_appservice(&dest, id, events).await, - Destination::Push(ref userid, ref pushkey) => { + | Destination::Normal(ref server) => + self.send_events_dest_normal(&dest, server, events).await, + | Destination::Appservice(ref id) => + self.send_events_dest_appservice(&dest, id, events).await, + | Destination::Push(ref userid, ref pushkey) => self.send_events_dest_push(&dest, userid, pushkey, events) - .await - }, + .await, } } 
#[tracing::instrument(skip(self, dest, events), name = "appservice")] async fn send_events_dest_appservice( - &self, dest: &Destination, id: &str, events: Vec, + &self, + dest: &Destination, + id: &str, + events: Vec, ) -> SendingResult { let Some(appservice) = self.services.appservice.get_registration(id).await else { - return Err((dest.clone(), err!(Database(warn!(?id, "Missing appservice registration"))))); + return Err(( + dest.clone(), + err!(Database(warn!(?id, "Missing appservice registration"))), + )); }; let mut pdu_jsons = Vec::with_capacity( @@ -550,12 +595,12 @@ impl Service { ); for event in &events { match event { - SendingEvent::Pdu(pdu_id) => { + | SendingEvent::Pdu(pdu_id) => { if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { pdu_jsons.push(pdu.to_room_event()); } }, - SendingEvent::Edu(edu) => { + | SendingEvent::Edu(edu) => { if appservice .receive_ephemeral .is_some_and(|receive_edus| receive_edus) @@ -565,14 +610,14 @@ impl Service { } } }, - SendingEvent::Flush => {}, // flush only; no new content + | SendingEvent::Flush => {}, // flush only; no new content } } let txn_hash = calculate_hash(events.iter().filter_map(|e| match e { - SendingEvent::Edu(b) => Some(&**b), - SendingEvent::Pdu(b) => Some(b.as_ref()), - SendingEvent::Flush => None, + | SendingEvent::Edu(b) => Some(&**b), + | SendingEvent::Pdu(b) => Some(b.as_ref()), + | SendingEvent::Flush => None, })); let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); @@ -592,28 +637,35 @@ impl Service { ) .await { - Ok(_) => Ok(dest.clone()), - Err(e) => Err((dest.clone(), e)), + | Ok(_) => Ok(dest.clone()), + | Err(e) => Err((dest.clone(), e)), } } #[tracing::instrument(skip(self, dest, events), name = "push")] async fn send_events_dest_push( - &self, dest: &Destination, userid: &OwnedUserId, pushkey: &str, events: Vec, + &self, + dest: &Destination, + userid: &OwnedUserId, + pushkey: &str, + events: Vec, ) -> SendingResult { let Ok(pusher) = 
self.services.pusher.get_pusher(userid, pushkey).await else { - return Err((dest.clone(), err!(Database(error!(?userid, ?pushkey, "Missing pusher"))))); + return Err(( + dest.clone(), + err!(Database(error!(?userid, ?pushkey, "Missing pusher"))), + )); }; let mut pdus = Vec::new(); for event in &events { match event { - SendingEvent::Pdu(pdu_id) => { + | SendingEvent::Pdu(pdu_id) => { if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { pdus.push(pdu); } }, - SendingEvent::Edu(_) | SendingEvent::Flush => { + | SendingEvent::Edu(_) | SendingEvent::Flush => { // Push gateways don't need EDUs (?) and flush only; // no new content }, @@ -657,7 +709,10 @@ impl Service { #[tracing::instrument(skip(self, dest, events), name = "", level = "debug")] async fn send_events_dest_normal( - &self, dest: &Destination, server: &OwnedServerName, events: Vec, + &self, + dest: &Destination, + server: &OwnedServerName, + events: Vec, ) -> SendingResult { let mut pdu_jsons = Vec::with_capacity( events @@ -675,17 +730,16 @@ impl Service { for event in &events { match event { // TODO: check room version and remove event_id if needed - SendingEvent::Pdu(pdu_id) => { + | SendingEvent::Pdu(pdu_id) => { if let Ok(pdu) = self.services.timeline.get_pdu_json_from_id(pdu_id).await { pdu_jsons.push(self.convert_to_outgoing_federation_event(pdu).await); } }, - SendingEvent::Edu(edu) => { + | SendingEvent::Edu(edu) => if let Ok(raw) = serde_json::from_slice(edu) { edu_jsons.push(raw); - } - }, - SendingEvent::Flush => {}, // flush only; no new content + }, + | SendingEvent::Flush => {}, // flush only; no new content } } @@ -693,9 +747,9 @@ impl Service { // transaction"); let txn_hash = calculate_hash(events.iter().filter_map(|e| match e { - SendingEvent::Edu(b) => Some(&**b), - SendingEvent::Pdu(b) => Some(b.as_ref()), - SendingEvent::Flush => None, + | SendingEvent::Edu(b) => Some(&**b), + | SendingEvent::Pdu(b) => Some(b.as_ref()), + | SendingEvent::Flush => None, })); let 
txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); @@ -725,7 +779,10 @@ impl Service { } /// This does not return a full `Pdu` it is only to satisfy ruma's types. - pub async fn convert_to_outgoing_federation_event(&self, mut pdu_json: CanonicalJsonObject) -> Box { + pub async fn convert_to_outgoing_federation_event( + &self, + mut pdu_json: CanonicalJsonObject, + ) -> Box { if let Some(unsigned) = pdu_json .get_mut("unsigned") .and_then(|val| val.as_object_mut()) @@ -739,11 +796,11 @@ impl Service { .and_then(|val| RoomId::parse(val.as_str()?).ok()) { match self.services.state.get_room_version(&room_id).await { - Ok(room_version_id) => match room_version_id { - RoomVersionId::V1 | RoomVersionId::V2 => {}, - _ => _ = pdu_json.remove("event_id"), + | Ok(room_version_id) => match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => _ = pdu_json.remove("event_id"), }, - Err(_) => _ = pdu_json.remove("event_id"), + | Err(_) => _ = pdu_json.remove("event_id"), } } else { pdu_json.remove("event_id"); diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 886c4750..305cbfef 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -4,11 +4,13 @@ use std::{ time::Duration, }; -use conduwuit::{debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn}; +use conduwuit::{ + debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn, +}; use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ - api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, OwnedServerName, - OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, + OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; use tokio::time::{timeout_at, Instant}; @@ -79,7 +81,9 @@ where 
return; } - warn!("missing {missing_keys} keys for {missing_servers} servers from all notaries first"); + warn!( + "missing {missing_keys} keys for {missing_servers} servers from all notaries first" + ); } if !notary_only { @@ -101,13 +105,15 @@ where return; } - debug_warn!("still missing {missing_keys} keys for {missing_servers} servers from all notaries."); + debug_warn!( + "still missing {missing_keys} keys for {missing_servers} servers from all notaries." + ); } if missing_keys > 0 { warn!( - "did not obtain {missing_keys} keys for {missing_servers} servers out of {requested_keys} total keys for \ - {requested_servers} total servers." + "did not obtain {missing_keys} keys for {missing_servers} servers out of \ + {requested_keys} total keys for {requested_servers} total servers." ); } @@ -162,12 +168,15 @@ where #[implement(super::Service)] async fn acquire_origin( - &self, origin: OwnedServerName, mut key_ids: Vec, timeout: Instant, + &self, + origin: OwnedServerName, + mut key_ids: Vec, + timeout: Instant, ) -> (OwnedServerName, Vec) { match timeout_at(timeout, self.server_request(&origin)).await { - Err(e) => debug_warn!(?origin, "timed out: {e}"), - Ok(Err(e)) => debug_error!(?origin, "{e}"), - Ok(Ok(server_keys)) => { + | Err(e) => debug_warn!(?origin, "timed out: {e}"), + | Ok(Err(e)) => debug_error!(?origin, "{e}"), + | Ok(Ok(server_keys)) => { trace!( %origin, ?key_ids, @@ -192,19 +201,21 @@ where for notary in self.services.globals.trusted_servers() { let missing_keys = keys_count(&missing); let missing_servers = missing.len(); - debug!("Asking notary {notary} for {missing_keys} missing keys from {missing_servers} servers"); + debug!( + "Asking notary {notary} for {missing_keys} missing keys from {missing_servers} \ + servers" + ); let batch = missing .iter() .map(|(server, keys)| (server.borrow(), keys.iter().map(Borrow::borrow))); match self.batch_notary_request(notary, batch).await { - Err(e) => error!("Failed to contact notary {notary:?}: {e}"), - 
Ok(results) => { + | Err(e) => error!("Failed to contact notary {notary:?}: {e}"), + | Ok(results) => for server_keys in results { self.acquire_notary_result(&mut missing, server_keys).await; - } - }, + }, } } @@ -224,4 +235,6 @@ async fn acquire_notary_result(&self, missing: &mut Batch, server_keys: ServerSi } } -fn keys_count(batch: &Batch) -> usize { batch.iter().flat_map(|(_, key_ids)| key_ids.iter()).count() } +fn keys_count(batch: &Batch) -> usize { + batch.iter().flat_map(|(_, key_ids)| key_ids.iter()).count() +} diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 94d2575a..5a027d64 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -1,17 +1,25 @@ use std::borrow::Borrow; use conduwuit::{implement, Err, Result}; -use ruma::{api::federation::discovery::VerifyKey, CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId}; +use ruma::{ + api::federation::discovery::VerifyKey, CanonicalJsonObject, RoomVersionId, ServerName, + ServerSigningKeyId, +}; use super::{extract_key, PubKeyMap, PubKeys}; #[implement(super::Service)] -pub async fn get_event_keys(&self, object: &CanonicalJsonObject, version: &RoomVersionId) -> Result { +pub async fn get_event_keys( + &self, + object: &CanonicalJsonObject, + version: &RoomVersionId, +) -> Result { use ruma::signatures::required_keys; let required = match required_keys(object, version) { - Ok(required) => required, - Err(e) => return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")), + | Ok(required) => required, + | Err(e) => + return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")), }; let batch = required @@ -52,7 +60,11 @@ where } #[implement(super::Service)] -pub async fn get_verify_key(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> Result { +pub async fn get_verify_key( + &self, + origin: &ServerName, + key_id: &ServerSigningKeyId, +) -> Result { let notary_first = 
self.services.server.config.query_trusted_key_servers_first; let notary_only = self.services.server.config.only_query_trusted_key_servers; @@ -86,7 +98,11 @@ pub async fn get_verify_key(&self, origin: &ServerName, key_id: &ServerSigningKe } #[implement(super::Service)] -async fn get_verify_key_from_notaries(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> Result { +async fn get_verify_key_from_notaries( + &self, + origin: &ServerName, + key_id: &ServerSigningKeyId, +) -> Result { for notary in self.services.globals.trusted_servers() { if let Ok(server_keys) = self.notary_request(notary, origin).await { for server_key in server_keys.clone() { @@ -105,7 +121,11 @@ async fn get_verify_key_from_notaries(&self, origin: &ServerName, key_id: &Serve } #[implement(super::Service)] -async fn get_verify_key_from_origin(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> Result { +async fn get_verify_key_from_origin( + &self, + origin: &ServerName, + key_id: &ServerSigningKeyId, +) -> Result { if let Ok(server_key) = self.server_request(origin).await { self.add_signing_keys(server_key.clone()).await; if let Some(result) = extract_key(server_key, key_id) { diff --git a/src/service/server_keys/keypair.rs b/src/service/server_keys/keypair.rs index 927171f3..6f983c26 100644 --- a/src/service/server_keys/keypair.rs +++ b/src/service/server_keys/keypair.rs @@ -39,14 +39,15 @@ fn load(db: &Arc) -> Result> { create(db) })?; - let key = - Ed25519KeyPair::from_der(&key, version).map_err(|e| err!("Failed to load ed25519 keypair from der: {e:?}"))?; + let key = Ed25519KeyPair::from_der(&key, version) + .map_err(|e| err!("Failed to load ed25519 keypair from der: {e:?}"))?; Ok(Box::new(key)) } fn create(db: &Arc) -> Result<(String, Vec)> { - let keypair = Ed25519KeyPair::generate().map_err(|e| err!("Failed to generate new ed25519 keypair: {e:?}"))?; + let keypair = Ed25519KeyPair::generate() + .map_err(|e| err!("Failed to generate new ed25519 keypair: {e:?}"))?; let id = 
utils::rand::string(8); debug_info!("Generated new Ed25519 keypair: {id:?}"); diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index 45c01c0b..3f6a3039 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -18,8 +18,8 @@ use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, serde::Raw, signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet}, - CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, ServerName, - ServerSigningKeyId, + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, + ServerName, ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; @@ -113,7 +113,11 @@ async fn add_signing_keys(&self, new_keys: ServerSigningKeys) { } #[implement(Service)] -pub async fn required_keys_exist(&self, object: &CanonicalJsonObject, version: &RoomVersionId) -> bool { +pub async fn required_keys_exist( + &self, + object: &CanonicalJsonObject, + version: &RoomVersionId, +) -> bool { use ruma::signatures::required_keys; let Ok(required_keys) = required_keys(object, version) else { @@ -179,7 +183,8 @@ pub async fn signing_keys_for(&self, origin: &ServerName) -> Result MilliSecondsSinceUnixEpoch { - let timepoint = timepoint_from_now(self.minimum_valid).expect("SystemTime should not overflow"); + let timepoint = + timepoint_from_now(self.minimum_valid).expect("SystemTime should not overflow"); MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow") } diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index da7fa08a..afe8958b 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -12,7 +12,9 @@ use ruma::{ #[implement(super::Service)] pub(super) async fn batch_notary_request<'a, S, K>( - &self, notary: &ServerName, batch: S, + &self, + notary: &ServerName, + batch: S, ) -> Result> where S: Iterator + Send, @@ 
-74,7 +76,9 @@ where #[implement(super::Service)] pub async fn notary_request( - &self, notary: &ServerName, target: &ServerName, + &self, + notary: &ServerName, + target: &ServerName, ) -> Result + Clone + Debug + Send> { use get_remote_server_keys::v2::Request; diff --git a/src/service/server_keys/sign.rs b/src/service/server_keys/sign.rs index 71565380..8d6f108c 100644 --- a/src/service/server_keys/sign.rs +++ b/src/service/server_keys/sign.rs @@ -10,7 +10,11 @@ pub fn sign_json(&self, object: &mut CanonicalJsonObject) -> Result { } #[implement(super::Service)] -pub fn hash_and_sign_event(&self, object: &mut CanonicalJsonObject, room_version: &RoomVersionId) -> Result { +pub fn hash_and_sign_event( + &self, + object: &mut CanonicalJsonObject, + room_version: &RoomVersionId, +) -> Result { use ruma::signatures::hash_and_sign_event; let server_name = self.services.globals.server_name().as_str(); diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs index d06b55ab..0f03e59e 100644 --- a/src/service/server_keys/verify.rs +++ b/src/service/server_keys/verify.rs @@ -1,14 +1,20 @@ use conduwuit::{implement, pdu::gen_event_id_canonical_json, Err, Result}; -use ruma::{signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId}; +use ruma::{ + signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, +}; use serde_json::value::RawValue as RawJsonValue; #[implement(super::Service)] pub async fn validate_and_add_event_id( - &self, pdu: &RawJsonValue, room_version: &RoomVersionId, + &self, + pdu: &RawJsonValue, + room_version: &RoomVersionId, ) -> Result<(OwnedEventId, CanonicalJsonObject)> { let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?; if let Err(e) = self.verify_event(&value, Some(room_version)).await { - return Err!(BadServerResponse(debug_error!("Event {event_id} failed verification: {e:?}"))); + return Err!(BadServerResponse(debug_error!( + 
"Event {event_id} failed verification: {e:?}" + ))); } value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into())); @@ -18,7 +24,9 @@ pub async fn validate_and_add_event_id( #[implement(super::Service)] pub async fn validate_and_add_event_id_no_fetch( - &self, pdu: &RawJsonValue, room_version: &RoomVersionId, + &self, + pdu: &RawJsonValue, + room_version: &RoomVersionId, ) -> Result<(OwnedEventId, CanonicalJsonObject)> { let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?; if !self.required_keys_exist(&value, room_version).await { @@ -28,7 +36,9 @@ pub async fn validate_and_add_event_id_no_fetch( } if let Err(e) = self.verify_event(&value, Some(room_version)).await { - return Err!(BadServerResponse(debug_error!("Event {event_id} failed verification: {e:?}"))); + return Err!(BadServerResponse(debug_error!( + "Event {event_id} failed verification: {e:?}" + ))); } value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into())); @@ -38,7 +48,9 @@ pub async fn validate_and_add_event_id_no_fetch( #[implement(super::Service)] pub async fn verify_event( - &self, event: &CanonicalJsonObject, room_version: Option<&RoomVersionId>, + &self, + event: &CanonicalJsonObject, + room_version: Option<&RoomVersionId>, ) -> Result { let room_version = room_version.unwrap_or(&RoomVersionId::V11); let keys = self.get_event_keys(event, room_version).await?; @@ -46,7 +58,11 @@ pub async fn verify_event( } #[implement(super::Service)] -pub async fn verify_json(&self, event: &CanonicalJsonObject, room_version: Option<&RoomVersionId>) -> Result { +pub async fn verify_json( + &self, + event: &CanonicalJsonObject, + room_version: Option<&RoomVersionId>, +) -> Result { let room_version = room_version.unwrap_or(&RoomVersionId::V11); let keys = self.get_event_keys(event, room_version).await?; ruma::signatures::verify_json(&keys, event.clone()).map_err(Into::into) diff --git a/src/service/service.rs b/src/service/service.rs 
index 4b1774cc..7adb189e 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -114,7 +114,9 @@ impl<'a> Args<'a> { /// Create a reference immediately to a service when constructing another /// Service. The other service must be constructed. #[inline] - pub(crate) fn require(&'a self, name: &str) -> Arc { require::(self.service, name) } + pub(crate) fn require(&'a self, name: &str) -> Arc { + require::(self.service, name) + } } /// Reference a Service by name. Panics if the Service does not exist or was diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index a4523246..97f4ce9c 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -47,7 +47,8 @@ struct Services { struct SlidingSyncCache { lists: BTreeMap, subscriptions: BTreeMap, - known_rooms: BTreeMap>, // For every room, the roomsince number + known_rooms: BTreeMap>, /* For every room, the + * roomsince number */ extensions: ExtensionsConfig, } @@ -85,14 +86,24 @@ impl crate::Service for Service { } impl Service { - pub fn remembered(&self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String) -> bool { + pub fn remembered( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + ) -> bool { self.connections .lock() .unwrap() .contains_key(&(user_id, device_id, conn_id)) } - pub fn forget_sync_request_connection(&self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String) { + pub fn forget_sync_request_connection( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + ) { self.connections .lock() .expect("locked") @@ -100,25 +111,26 @@ impl Service { } pub fn update_sync_request_with_cache( - &self, user_id: OwnedUserId, device_id: OwnedDeviceId, request: &mut sync_events::v4::Request, + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + request: &mut sync_events::v4::Request, ) -> BTreeMap> { let Some(conn_id) = request.conn_id.clone() else { return BTreeMap::new(); }; let mut cache = 
self.connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); + let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( + || { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }, + )); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -141,13 +153,15 @@ impl Service { .clone() .or_else(|| cached_list.include_old_rooms.clone()); match (&mut list.filters, cached_list.filters.clone()) { - (Some(list_filters), Some(cached_filters)) => { + | (Some(list_filters), Some(cached_filters)) => { list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); if list_filters.spaces.is_empty() { list_filters.spaces = cached_filters.spaces; } - list_filters.is_encrypted = list_filters.is_encrypted.or(cached_filters.is_encrypted); - list_filters.is_invite = list_filters.is_invite.or(cached_filters.is_invite); + list_filters.is_encrypted = + list_filters.is_encrypted.or(cached_filters.is_encrypted); + list_filters.is_invite = + list_filters.is_invite.or(cached_filters.is_invite); if list_filters.room_types.is_empty() { list_filters.room_types = cached_filters.room_types; } @@ -165,9 +179,9 @@ impl Service { list_filters.not_tags = cached_filters.not_tags; } }, - (_, Some(cached_filters)) => list.filters = Some(cached_filters), - (Some(list_filters), _) => list.filters = Some(list_filters.clone()), - (..) => {}, + | (_, Some(cached_filters)) => list.filters = Some(cached_filters), + | (Some(list_filters), _) => list.filters = Some(list_filters.clone()), + | (..) 
=> {}, } if list.bump_event_types.is_empty() { list.bump_event_types @@ -220,22 +234,23 @@ impl Service { } pub fn update_sync_subscriptions( - &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, subscriptions: BTreeMap, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); + let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( + || { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }, + )); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -243,22 +258,25 @@ impl Service { } pub fn update_sync_known_rooms( - &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, list_id: String, - new_cached_rooms: BTreeSet, globalsince: u64, + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + list_id: String, + new_cached_rooms: BTreeSet, + globalsince: u64, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); + let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( + || { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: 
ExtensionsConfig::default(), + })) + }, + )); let cached = &mut cached.lock().expect("locked"); drop(cache); diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 2b979f99..912c0b49 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -25,7 +25,13 @@ impl crate::Service for Service { } #[implement(Service)] -pub fn add_txnid(&self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, data: &[u8]) { +pub fn add_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + data: &[u8], +) { let mut key = user_id.as_bytes().to_vec(); key.push(0xFF); key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default()); @@ -38,7 +44,10 @@ pub fn add_txnid(&self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: // If there's no entry, this is a new transaction #[implement(Service)] pub async fn existing_txnid( - &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, ) -> Result> { let key = (user_id, device_id, txn_id); self.db.userdevicetxnid_response.qry(&key).await diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 4d30a612..f7e55251 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -58,7 +58,13 @@ impl crate::Service for Service { /// Creates a new Uiaa session. Make sure the session token is unique. #[implement(Service)] -pub fn create(&self, user_id: &UserId, device_id: &DeviceId, uiaainfo: &UiaaInfo, json_body: &CanonicalJsonValue) { +pub fn create( + &self, + user_id: &UserId, + device_id: &DeviceId, + uiaainfo: &UiaaInfo, + json_body: &CanonicalJsonValue, +) { // TODO: better session error handling (why is uiaainfo.session optional in // ruma?) 
self.set_uiaa_request( @@ -78,7 +84,11 @@ pub fn create(&self, user_id: &UserId, device_id: &DeviceId, uiaainfo: &UiaaInfo #[implement(Service)] pub async fn try_auth( - &self, user_id: &UserId, device_id: &DeviceId, auth: &AuthData, uiaainfo: &UiaaInfo, + &self, + user_id: &UserId, + device_id: &DeviceId, + auth: &AuthData, + uiaainfo: &UiaaInfo, ) -> Result<(bool, UiaaInfo)> { let mut uiaainfo = if let Some(session) = auth.session() { self.get_uiaa_session(user_id, device_id, session).await? @@ -92,7 +102,7 @@ pub async fn try_auth( match auth { // Find out what the user completed - AuthData::Password(Password { + | AuthData::Password(Password { identifier, password, #[cfg(feature = "element_hacks")] @@ -105,17 +115,26 @@ pub async fn try_auth( } else if let Some(username) = user { username } else { - return Err(Error::BadRequest(ErrorKind::Unrecognized, "Identifier type not recognized.")); + return Err(Error::BadRequest( + ErrorKind::Unrecognized, + "Identifier type not recognized.", + )); }; #[cfg(not(feature = "element_hacks"))] let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier else { - return Err(Error::BadRequest(ErrorKind::Unrecognized, "Identifier type not recognized.")); + return Err(Error::BadRequest( + ErrorKind::Unrecognized, + "Identifier type not recognized.", + )); }; - let user_id = UserId::parse_with_server_name(username.clone(), self.services.globals.server_name()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; + let user_id = UserId::parse_with_server_name( + username.clone(), + self.services.globals.server_name(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; // Check if password is correct if let Ok(hash) = self.services.users.password_hash(&user_id).await { @@ -132,7 +151,7 @@ pub async fn try_auth( // Password was correct! 
Let's add it to `completed` uiaainfo.completed.push(AuthType::Password); }, - AuthData::RegistrationToken(t) => { + | AuthData::RegistrationToken(t) => { if self .services .globals @@ -149,10 +168,10 @@ pub async fn try_auth( return Ok((false, uiaainfo)); } }, - AuthData::Dummy(_) => { + | AuthData::Dummy(_) => { uiaainfo.completed.push(AuthType::Dummy); }, - k => error!("type not supported: {:?}", k), + | k => error!("type not supported: {:?}", k), } // Check if a flow now succeeds @@ -190,7 +209,13 @@ pub async fn try_auth( } #[implement(Service)] -fn set_uiaa_request(&self, user_id: &UserId, device_id: &DeviceId, session: &str, request: &CanonicalJsonValue) { +fn set_uiaa_request( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + request: &CanonicalJsonValue, +) { let key = (user_id.to_owned(), device_id.to_owned(), session.to_owned()); self.userdevicesessionid_uiaarequest .write() @@ -200,7 +225,10 @@ fn set_uiaa_request(&self, user_id: &UserId, device_id: &DeviceId, session: &str #[implement(Service)] pub fn get_uiaa_request( - &self, user_id: &UserId, device_id: Option<&DeviceId>, session: &str, + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + session: &str, ) -> Option { let key = ( user_id.to_owned(), @@ -216,7 +244,13 @@ pub fn get_uiaa_request( } #[implement(Service)] -fn update_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: &str, uiaainfo: Option<&UiaaInfo>) { +fn update_uiaa_session( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + uiaainfo: Option<&UiaaInfo>, +) { let key = (user_id, device_id, session); if let Some(uiaainfo) = uiaainfo { @@ -229,7 +263,12 @@ fn update_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: & } #[implement(Service)] -async fn get_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: &str) -> Result { +async fn get_uiaa_session( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, +) -> Result { 
let key = (user_id, device_id, session); self.db .userdevicesessionid_uiaainfo diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index 1d13337d..1f499692 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -119,7 +119,8 @@ impl Service { self.services .admin .send_message(RoomMessageEventContent::text_markdown(format!( - "### the following is a message from the conduwuit puppy\n\nit was sent on `{}`:\n\n@room: {}", + "### the following is a message from the conduwuit puppy\n\nit was sent on \ + `{}`:\n\n@room: {}", update.date, update.message ))) .await @@ -127,7 +128,9 @@ impl Service { } #[inline] - pub fn update_check_for_updates_id(&self, id: u64) { self.db.raw_put(LAST_CHECK_FOR_UPDATES_COUNT, id); } + pub fn update_check_for_updates_id(&self, id: u64) { + self.db.raw_put(LAST_CHECK_FOR_UPDATES_COUNT, id); + } pub async fn last_check_for_updates_id(&self) -> u64 { self.db diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 8ceec2a0..1d7f4248 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -10,10 +10,12 @@ use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType}, + events::{ + ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType, + }, serde::Raw, - DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, OneTimeKeyName, OwnedDeviceId, - OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, + DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, + OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, }; use serde_json::json; @@ -65,7 +67,8 @@ impl crate::Service for Service { account_data: 
args.depend::("account_data"), admin: args.depend::("admin"), globals: args.depend::("globals"), - state_accessor: args.depend::("rooms::state_accessor"), + state_accessor: args + .depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), }, db: Data { @@ -114,7 +117,9 @@ impl Service { /// Check if a user is an admin #[inline] - pub async fn is_admin(&self, user_id: &UserId) -> bool { self.services.admin.user_is_admin(user_id).await } + pub async fn is_admin(&self, user_id: &UserId) -> bool { + self.services.admin.user_is_admin(user_id).await + } /// Create a new user account on this homeserver. #[inline] @@ -141,7 +146,9 @@ impl Service { /// Check if a user has an account on this homeserver. #[inline] - pub async fn exists(&self, user_id: &UserId) -> bool { self.db.userid_password.get(user_id).await.is_ok() } + pub async fn exists(&self, user_id: &UserId) -> bool { + self.db.userid_password.get(user_id).await.is_ok() + } /// Check if account is deactivated pub async fn is_deactivated(&self, user_id: &UserId) -> Result { @@ -154,7 +161,9 @@ impl Service { } /// Check if account is active, infallible - pub async fn is_active(&self, user_id: &UserId) -> bool { !self.is_deactivated(user_id).await.unwrap_or(true) } + pub async fn is_active(&self, user_id: &UserId) -> bool { + !self.is_deactivated(user_id).await.unwrap_or(true) + } /// Check if account is active, infallible pub async fn is_active_local(&self, user_id: &UserId) -> bool { @@ -173,10 +182,14 @@ impl Service { /// Returns an iterator over all users on this homeserver (offered for /// compatibility) #[allow(clippy::iter_without_into_iter, clippy::iter_not_returning_iterator)] - pub fn iter(&self) -> impl Stream + Send + '_ { self.stream().map(ToOwned::to_owned) } + pub fn iter(&self) -> impl Stream + Send + '_ { + self.stream().map(ToOwned::to_owned) + } /// Returns an iterator over all users on this homeserver. 
- pub fn stream(&self) -> impl Stream + Send { self.db.userid_password.keys().ignore_err() } + pub fn stream(&self) -> impl Stream + Send { + self.db.userid_password.keys().ignore_err() + } /// Returns a list of local users as list of usernames. /// @@ -200,7 +213,9 @@ impl Service { password .map(utils::hash::password) .transpose() - .map_err(|e| err!(Request(InvalidParam("Password does not meet the requirements: {e}"))))? + .map_err(|e| { + err!(Request(InvalidParam("Password does not meet the requirements: {e}"))) + })? .map_or_else( || self.db.userid_password.insert(user_id, b""), |hash| self.db.userid_password.insert(user_id, hash), @@ -254,13 +269,19 @@ impl Service { /// Adds a new device to a user. pub async fn create_device( - &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + initial_device_display_name: Option, client_ip: Option, ) -> Result<()> { // This method should never be called for nonexistent users. We shouldn't assert // though... if !self.exists(user_id).await { - return Err!(Request(InvalidParam(error!("Called create_device for non-existent {user_id}")))); + return Err!(Request(InvalidParam(error!( + "Called create_device for non-existent {user_id}" + )))); } let key = (user_id, device_id); @@ -304,7 +325,10 @@ impl Service { } /// Returns an iterator over all device ids of this user. - pub fn all_device_ids<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + pub fn all_device_ids<'a>( + &'a self, + user_id: &'a UserId, + ) -> impl Stream + Send + 'a { let prefix = (user_id, Interfix); self.db .userdeviceid_metadata @@ -319,7 +343,12 @@ impl Service { } /// Replaces the access token of one device. 
- pub async fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + pub async fn set_token( + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + ) -> Result<()> { let key = (user_id, device_id); // should not be None, but we shouldn't assert either lol... if self.db.userdeviceid_metadata.qry(&key).await.is_err() { @@ -344,7 +373,10 @@ impl Service { } pub async fn add_one_time_key( - &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &KeyId, + &self, + user_id: &UserId, + device_id: &DeviceId, + one_time_key_key: &KeyId, one_time_key_value: &Raw, ) -> Result { // All devices have metadata @@ -391,7 +423,10 @@ impl Service { } pub async fn take_one_time_key( - &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &OneTimeKeyAlgorithm, + &self, + user_id: &UserId, + device_id: &DeviceId, + key_algorithm: &OneTimeKeyAlgorithm, ) -> Result<(OwnedKeyId, Raw)> { let count = self.services.globals.next_count()?.to_be_bytes(); self.db.userid_lastonetimekeyupdate.insert(user_id, count); @@ -435,7 +470,9 @@ impl Service { } pub async fn count_one_time_keys( - &self, user_id: &UserId, device_id: &DeviceId, + &self, + user_id: &UserId, + device_id: &DeviceId, ) -> BTreeMap { type KeyVal<'a> = ((Ignore, Ignore, &'a Unquoted), Ignore); @@ -462,7 +499,12 @@ impl Service { algorithm_counts } - pub async fn add_device_keys(&self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw) { + pub async fn add_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + device_keys: &Raw, + ) { let key = (user_id, device_id); self.db.keyid_key.put(key, Json(device_keys)); @@ -470,8 +512,12 @@ impl Service { } pub async fn add_cross_signing_keys( - &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, - user_signing_key: &Option>, notify: bool, + &self, + user_id: &UserId, + master_key: &Raw, + self_signing_key: &Option>, + user_signing_key: &Option>, + notify: bool, ) -> Result<()> { // 
TODO: Check signatures let mut prefix = user_id.as_bytes().to_vec(); @@ -495,9 +541,10 @@ impl Service { .keys .into_values(); - let self_signing_key_id = self_signing_key_ids - .next() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Self signing key contained no key."))?; + let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Self signing key contained no key.", + ))?; if self_signing_key_ids.next().is_some() { return Err(Error::BadRequest( @@ -531,7 +578,9 @@ impl Service { .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; if user_signing_key_ids.next().is_some() { - return Err!(Request(InvalidParam("User signing key contained more than one key."))); + return Err!(Request(InvalidParam( + "User signing key contained more than one key." + ))); } let mut user_signing_key_key = prefix; @@ -554,7 +603,11 @@ impl Service { } pub async fn sign_key( - &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, + &self, + target_id: &UserId, + key_id: &str, + signature: (String, String), + sender_id: &UserId, ) -> Result<()> { let key = (target_id, key_id); @@ -590,7 +643,10 @@ impl Service { #[inline] pub fn keys_changed<'a>( - &'a self, user_id: &'a UserId, from: u64, to: Option, + &'a self, + user_id: &'a UserId, + from: u64, + to: Option, ) -> impl Stream + Send + 'a { self.keys_changed_user_or_room(user_id.as_str(), from, to) .map(|(user_id, ..)| user_id) @@ -598,13 +654,19 @@ impl Service { #[inline] pub fn room_keys_changed<'a>( - &'a self, room_id: &'a RoomId, from: u64, to: Option, + &'a self, + room_id: &'a RoomId, + from: u64, + to: Option, ) -> impl Stream + Send + 'a { self.keys_changed_user_or_room(room_id.as_str(), from, to) } fn keys_changed_user_or_room<'a>( - &'a self, user_or_room_id: &'a str, from: u64, to: Option, + &'a self, + user_or_room_id: &'a str, + from: u64, + to: Option, ) -> impl Stream + Send + 'a { type KeyVal<'a> = ((&'a 
str, u64), &'a UserId); @@ -614,7 +676,9 @@ impl Service { .keychangeid_userid .stream_from(&start) .ignore_err() - .ready_take_while(move |((prefix, count), _): &KeyVal<'_>| *prefix == user_or_room_id && *count <= to) + .ready_take_while(move |((prefix, count), _): &KeyVal<'_>| { + *prefix == user_or_room_id && *count <= to + }) .map(|((_, count), user_id): KeyVal<'_>| (user_id, count)) } @@ -636,13 +700,21 @@ impl Service { self.db.keychangeid_userid.put_raw(key, user_id); } - pub async fn get_device_keys<'a>(&'a self, user_id: &'a UserId, device_id: &DeviceId) -> Result> { + pub async fn get_device_keys<'a>( + &'a self, + user_id: &'a UserId, + device_id: &DeviceId, + ) -> Result> { let key_id = (user_id, device_id); self.db.keyid_key.qry(&key_id).await.deserialized() } pub async fn get_key( - &self, key_id: &[u8], sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &F, + &self, + key_id: &[u8], + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &F, ) -> Result> where F: Fn(&UserId) -> bool + Send + Sync, @@ -655,7 +727,10 @@ impl Service { } pub async fn get_master_key( - &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &F, + &self, + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &F, ) -> Result> where F: Fn(&UserId) -> bool + Send + Sync, @@ -667,7 +742,10 @@ impl Service { } pub async fn get_self_signing_key( - &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &F, + &self, + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &F, ) -> Result> where F: Fn(&UserId) -> bool + Send + Sync, @@ -688,7 +766,11 @@ impl Service { } pub async fn add_to_device_event( - &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, + &self, + sender: &UserId, + target_user_id: &UserId, + target_device_id: &DeviceId, + event_type: &str, content: serde_json::Value, ) { let count = 
self.services.globals.next_count().unwrap(); @@ -705,7 +787,9 @@ impl Service { } pub fn get_to_device_events<'a>( - &'a self, user_id: &'a UserId, device_id: &'a DeviceId, + &'a self, + user_id: &'a UserId, + device_id: &'a DeviceId, ) -> impl Stream> + Send + 'a { let prefix = (user_id, device_id, Interfix); self.db @@ -715,7 +799,12 @@ impl Service { .map(|(_, val): (Ignore, Raw)| val) } - pub async fn remove_to_device_events(&self, user_id: &UserId, device_id: &DeviceId, until: u64) { + pub async fn remove_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + until: u64, + ) { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xFF); prefix.extend_from_slice(device_id.as_bytes()); @@ -742,7 +831,12 @@ impl Service { .await; } - pub async fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: &Device) -> Result<()> { + pub async fn update_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + device: &Device, + ) -> Result<()> { increment(&self.db.userid_devicelistversion, user_id.as_bytes()); let key = (user_id, device_id); @@ -752,7 +846,11 @@ impl Service { } /// Get device metadata. 
- pub async fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) -> Result { + pub async fn get_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result { self.db .userdeviceid_metadata .qry(&(user_id, device_id)) @@ -768,7 +866,10 @@ impl Service { .deserialized() } - pub fn all_devices_metadata<'a>(&'a self, user_id: &'a UserId) -> impl Stream + Send + 'a { + pub fn all_devices_metadata<'a>( + &'a self, + user_id: &'a UserId, + ) -> impl Stream + Send + 'a { let key = (user_id, Interfix); self.db .userdeviceid_metadata @@ -787,7 +888,11 @@ impl Service { filter_id } - pub async fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result { + pub async fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result { let key = (user_id, filter_id); self.db.userfilterid_filter.qry(&key).await.deserialized() } @@ -817,11 +922,10 @@ impl Service { }; let (expires_at_bytes, user_bytes) = value.split_at(0_u64.to_be_bytes().len()); - let expires_at = u64::from_be_bytes( - expires_at_bytes - .try_into() - .map_err(|e| err!(Database("expires_at in openid_userid is invalid u64. {e}")))?, - ); + let expires_at = + u64::from_be_bytes(expires_at_bytes.try_into().map_err(|e| { + err!(Database("expires_at in openid_userid is invalid u64. {e}")) + })?); if expires_at < utils::millis_since_unix_epoch() { debug_warn!("OpenID token is expired, removing"); @@ -833,11 +937,16 @@ impl Service { let user_string = utils::string_from_bytes(user_bytes) .map_err(|e| err!(Database("User ID in openid_userid is invalid unicode. {e}")))?; - UserId::parse(user_string).map_err(|e| err!(Database("User ID in openid_userid is invalid. {e}"))) + UserId::parse(user_string) + .map_err(|e| err!(Database("User ID in openid_userid is invalid. 
{e}"))) } /// Gets a specific user profile key - pub async fn profile_key(&self, user_id: &UserId, profile_key: &str) -> Result { + pub async fn profile_key( + &self, + user_id: &UserId, + profile_key: &str, + ) -> Result { let key = (user_id, profile_key); self.db .useridprofilekey_value @@ -848,7 +957,8 @@ impl Service { /// Gets all the user's profile keys and values in an iterator pub fn all_profile_keys<'a>( - &'a self, user_id: &'a UserId, + &'a self, + user_id: &'a UserId, ) -> impl Stream + 'a + Send { type KeyVal = ((Ignore, String), serde_json::Value); @@ -861,7 +971,12 @@ impl Service { } /// Sets a new profile key value, removes the key if value is None - pub fn set_profile_key(&self, user_id: &UserId, profile_key: &str, profile_key_value: Option) { + pub fn set_profile_key( + &self, + user_id: &UserId, + profile_key: &str, + profile_key_value: Option, + ) { // TODO: insert to the stable MSC4175 key when it's stable let key = (user_id, profile_key); @@ -901,7 +1016,10 @@ impl Service { } } -pub fn parse_master_key(user_id: &UserId, master_key: &Raw) -> Result<(Vec, CrossSigningKey)> { +pub fn parse_master_key( + user_id: &UserId, + master_key: &Raw, +) -> Result<(Vec, CrossSigningKey)> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xFF); @@ -925,7 +1043,10 @@ pub fn parse_master_key(user_id: &UserId, master_key: &Raw) -> /// Ensure that a user only sees signatures from themselves and the target user fn clean_signatures( - mut cross_signing_key: serde_json::Value, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &F, + mut cross_signing_key: serde_json::Value, + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &F, ) -> Result where F: Fn(&UserId) -> bool + Send + Sync, @@ -937,9 +1058,11 @@ where // Don't allocate for the full size of the current signatures, but require // at most one resize if nothing is dropped let new_capacity = signatures.len() / 2; - for (user, signature) in 
mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) { - let sid = - <&UserId>::try_from(user.as_str()).map_err(|_| Error::bad_database("Invalid user ID in database."))?; + for (user, signature) in + mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) + { + let sid = <&UserId>::try_from(user.as_str()) + .map_err(|_| Error::bad_database("Invalid user ID in database."))?; if sender_user == Some(user_id) || sid == user_id || allowed_signatures(sid) { signatures.insert(user, signature); } From 52693db4779d8c19c446b4bd967815a8d6cd1d34 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 01:02:52 -0500 Subject: [PATCH 0398/1248] hide messages from users servers on the forbidden_remote_server_names list Signed-off-by: strawberry --- src/api/client/message.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 93582de0..800dce9d 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -237,6 +237,17 @@ pub(crate) async fn ignored_filter( return None; } + if IGNORED_MESSAGE_TYPES.iter().any(is_equal_to!(&pdu.kind)) + && services + .server + .config + .forbidden_remote_server_names + .iter() + .any(is_equal_to!(pdu.sender().server_name())) + { + return None; + } + Some(item) } From 6c96acc482baaacb784b8dc1b1c1e4c2a34f3c3b Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 01:41:50 -0500 Subject: [PATCH 0399/1248] use fast binary_search for some const slices Signed-off-by: strawberry --- src/api/client/message.rs | 53 ++++++++++++------------- src/api/client/room/upgrade.rs | 12 ++++-- src/api/client/sync/v4.rs | 7 ++-- src/service/rooms/state_accessor/mod.rs | 4 +- src/service/rooms/timeline/mod.rs | 2 +- src/service/updates/mod.rs | 2 +- 6 files changed, 41 insertions(+), 39 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 800dce9d..58f4f916 100644 --- a/src/api/client/message.rs +++ 
b/src/api/client/message.rs @@ -27,23 +27,24 @@ use crate::Ruma; pub(crate) type LazySet = HashSet; /// list of safe and common non-state events to ignore if the user is ignored -const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 16] = &[ +const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 17] = &[ + Audio, + CallInvite, + Emote, + File, + Image, + KeyVerificationStart, + Location, + PollStart, + UnstablePollStart, + Beacon, + Reaction, + RoomEncrypted, RoomMessage, Sticker, - CallInvite, - CallNotify, - RoomEncrypted, - Image, - File, - Audio, - Voice, Video, - UnstablePollStart, - PollStart, - KeyVerificationStart, - Reaction, - Emote, - Location, + Voice, + CallNotify, ]; const LIMIT_MAX: usize = 100; @@ -59,6 +60,7 @@ pub(crate) async fn get_message_events_route( State(services): State, body: Ruma, ) -> Result { + debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted"); let sender = body.sender(); let (sender_user, sender_device) = sender; let room_id = &body.room_id; @@ -193,7 +195,7 @@ pub(crate) async fn update_lazy( let (_, event) = &item; let (sender_user, sender_device) = sender; - /* TODO: Remove the not "element_hacks" check when these are resolved: + /* TODO: Remove the "element_hacks" check when these are resolved: * https://github.com/vector-im/element-android/issues/3417 * https://github.com/vector-im/element-web/issues/21034 */ @@ -231,19 +233,14 @@ pub(crate) async fn ignored_filter( return None; } - if IGNORED_MESSAGE_TYPES.iter().any(is_equal_to!(&pdu.kind)) - && services.users.user_is_ignored(&pdu.sender, user_id).await - { - return None; - } - - if IGNORED_MESSAGE_TYPES.iter().any(is_equal_to!(&pdu.kind)) - && services - .server - .config - .forbidden_remote_server_names - .iter() - .any(is_equal_to!(pdu.sender().server_name())) + if IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok() + && (services.users.user_is_ignored(&pdu.sender, user_id).await + || services + .server + .config + 
.forbidden_remote_server_names + .iter() + .any(is_equal_to!(pdu.sender().server_name()))) { return None; } diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index cc6cca5e..2f9706f4 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -21,15 +21,15 @@ use crate::Ruma; /// Recommended transferable state events list from the spec const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 9] = &[ - StateEventType::RoomServerAcl, - StateEventType::RoomEncryption, - StateEventType::RoomName, StateEventType::RoomAvatar, - StateEventType::RoomTopic, + StateEventType::RoomEncryption, StateEventType::RoomGuestAccess, StateEventType::RoomHistoryVisibility, StateEventType::RoomJoinRules, + StateEventType::RoomName, StateEventType::RoomPowerLevels, + StateEventType::RoomServerAcl, + StateEventType::RoomTopic, ]; /// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` @@ -46,6 +46,10 @@ pub(crate) async fn upgrade_room_route( State(services): State, body: Ruma, ) -> Result { + debug_assert!( + TRANSFERABLE_STATE_EVENTS.is_sorted(), + "TRANSFERABLE_STATE_EVENTS is not sorted" + ); let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !services.server.supported_room_version(&body.new_version) { diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 7e24adff..0c6ea650 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -30,7 +30,6 @@ use ruma::{ TimelineEventType::{self, *}, }, serde::Raw, - state_res::Event, uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, UInt, UserId, }; use service::{rooms::read_receipt::pack_receipts, Services}; @@ -39,8 +38,9 @@ use super::{load_timeline, share_encrypted_room}; use crate::{client::ignored_filter, Ruma}; const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; + const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = - &[RoomMessage, RoomEncrypted, Sticker, CallInvite, PollStart, Beacon]; + &[CallInvite, PollStart, 
Beacon, RoomEncrypted, RoomMessage, Sticker]; /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` /// @@ -49,6 +49,7 @@ pub(crate) async fn sync_events_v4_route( State(services): State, body: Ruma, ) -> Result { + debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let mut body = body.body; @@ -595,7 +596,7 @@ pub(crate) async fn sync_events_v4_route( for (_, pdu) in timeline_pdus { let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts); - if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) + if DEFAULT_BUMP_TYPES.binary_search(&pdu.kind).is_ok() && timestamp.is_none_or(|time| time <= ts) { timestamp = Some(ts); diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index d8093dd7..6ddf198d 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -11,7 +11,7 @@ use conduwuit::{ err, error, pdu::PduBuilder, utils::{math::usize_from_f64, ReadyExt}, - Err, Error, Event, PduEvent, Result, + Err, Error, PduEvent, Result, }; use futures::StreamExt; use lru_cache::LruCache; @@ -507,7 +507,7 @@ impl Service { if redacting_event .as_ref() - .is_ok_and(|event| event.event_type() == &TimelineEventType::RoomCreate) + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomCreate) { return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2ae66546..028b270f 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1262,7 +1262,7 @@ impl Service { #[implement(Service)] #[tracing::instrument(skip_all, level = "debug")] async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Result<()> { - match pdu.event_type() { + match &pdu.kind { | 
TimelineEventType::RoomEncryption => { return Err!(Request(Forbidden(error!("Encryption not supported in admins room.")))); }, diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index 1f499692..7fd93b6c 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -40,7 +40,7 @@ struct CheckForUpdatesResponseEntry { const CHECK_FOR_UPDATES_URL: &str = "https://pupbrain.dev/check-for-updates/stable"; const CHECK_FOR_UPDATES_INTERVAL: u64 = 7200; // 2 hours -const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u"; +const LAST_CHECK_FOR_UPDATES_COUNT: &[u8; 1] = b"u"; #[async_trait] impl crate::Service for Service { From c8349988aac40f24368085a3d7404536c9a26410 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 02:53:32 -0500 Subject: [PATCH 0400/1248] ping online presence on typing updates like synapse Signed-off-by: strawberry --- src/api/client/typing.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index 4ae3ac5e..6eabe96a 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,7 +1,8 @@ use axum::extract::State; -use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; +use conduwuit::Err; +use ruma::api::client::typing::create_typing_event; -use crate::{utils, Error, Result, Ruma}; +use crate::{utils, Result, Ruma}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// @@ -11,8 +12,11 @@ pub(crate) async fn create_typing_event_route( body: Ruma, ) -> Result { use create_typing_event::v3::Typing; + let sender_user = body.sender_user(); - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if sender_user != body.user_id && body.appservice_info.is_none() { + return Err!(Request(Forbidden("You cannot update typing status of other users."))); + } if !services .rooms @@ -20,7 +24,7 @@ pub(crate) async fn create_typing_event_route( .is_joined(sender_user, 
&body.room_id) .await { - return Err(Error::BadRequest(ErrorKind::forbidden(), "You are not in this room.")); + return Err!(Request(Forbidden("You are not in this room."))); } if let Typing::Yes(duration) = body.state { @@ -58,5 +62,13 @@ pub(crate) async fn create_typing_event_route( .await?; } + // ping presence + if services.globals.allow_local_presence() { + services + .presence + .ping_presence(&body.user_id, &ruma::presence::PresenceState::Online) + .await?; + } + Ok(create_typing_event::v3::Response {}) } From 9c6908b8c166fe451f3ca5bfb86ea2a132cc6c7a Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 14:05:28 -0500 Subject: [PATCH 0401/1248] bump ruwuma, implement MSC4076, add more pusher HTTP URL validation Signed-off-by: strawberry --- Cargo.lock | 26 +++++++------- Cargo.toml | 2 +- src/service/pusher/mod.rs | 73 ++++++++++++++++++++++++++++++++++++--- 3 files changed, 83 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 95ab8d04..f7c7079f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3162,7 +3162,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "assign", "js_int", @@ -3184,7 +3184,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "js_int", "ruma-common", @@ -3196,7 +3196,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "as_variant", "assign", @@ -3219,7 +3219,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "as_variant", "base64 0.22.1", @@ -3249,7 +3249,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3273,7 +3273,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "bytes", "http", @@ -3291,7 +3291,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "js_int", "thiserror 2.0.7", @@ 
-3300,7 +3300,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "js_int", "ruma-common", @@ -3310,7 +3310,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3325,7 +3325,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "js_int", "ruma-common", @@ -3337,7 +3337,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "headers", "http", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3366,7 +3366,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=08f58cd3236fdf175913b2bcaf8865359696d94d#08f58cd3236fdf175913b2bcaf8865359696d94d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 2d99db02..ea9cfa3c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -334,7 +334,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "08f58cd3236fdf175913b2bcaf8865359696d94d" +rev = "5a826d31a32b6473671a5b9f813ad2e4b47676b4" features = [ "compat", "rand", diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index cea3ba35..43d60c08 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -92,6 +92,36 @@ impl Service { ))); } + // add some validation to the pusher URL + let pusher_kind = &data.pusher.kind; + if let PusherKind::Http(http) = pusher_kind { + let url = &http.url; + let url = url::Url::parse(&http.url).map_err(|e| { + err!(Request(InvalidParam( + warn!(%url, "HTTP pusher URL is not a valid URL: {e}") + ))) + })?; + + if ["http", "https"] + .iter() + .all(|&scheme| scheme != url.scheme().to_lowercase()) + { + return Err!(Request(InvalidParam( + warn!(%url, "HTTP pusher URL is not a valid HTTP/HTTPS URL") + ))); + } + + if let Ok(ip) = + IPAddress::parse(url.host_str().expect("URL previously validated")) + { + if !self.services.client.valid_cidr_range(&ip) { + return Err!(Request(InvalidParam( + warn!(%url, "HTTP pusher URL is a forbidden remote address") + ))); + } + } + } + let key = (sender, 
data.pusher.ids.pushkey.as_str()); self.db.senderkey_pusher.put(key, Json(pusher)); }, @@ -330,16 +360,42 @@ impl Service { pusher: &Pusher, tweaks: Vec, event: &PduEvent, - ) -> Result<()> { + ) -> Result { // TODO: email match &pusher.kind { | PusherKind::Http(http) => { + let url = &http.url; + let url = url::Url::parse(&http.url).map_err(|e| { + err!(Request(InvalidParam( + warn!(%url, "HTTP pusher URL is not a valid URL: {e}") + ))) + })?; + + if ["http", "https"] + .iter() + .all(|&scheme| scheme != url.scheme().to_lowercase()) + { + return Err!(Request(InvalidParam( + warn!(%url, "HTTP pusher URL is not a valid HTTP/HTTPS URL") + ))); + } + + if let Ok(ip) = + IPAddress::parse(url.host_str().expect("URL previously validated")) + { + if !self.services.client.valid_cidr_range(&ip) { + return Err!(Request(InvalidParam( + warn!(%url, "HTTP pusher URL is a forbidden remote address") + ))); + } + } + // TODO (timo): can pusher/devices have conflicting formats let event_id_only = http.format == Some(PushFormat::EventIdOnly); let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); - device.data.default_payload = http.default_payload.clone(); + device.data.data.clone_from(&http.data); device.data.format.clone_from(&http.format); // Tweaks are only added if the format is NOT event_id_only @@ -352,8 +408,17 @@ impl Service { notifi.event_id = Some((*event.event_id).to_owned()); notifi.room_id = Some((*event.room_id).to_owned()); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); + if http + .data + .get("org.matrix.msc4076.disable_badge_count") + .is_none() && http.data.get("disable_badge_count").is_none() + { + notifi.counts = NotificationCounts::new(unread, uint!(0)); + } else { + // counts will not be serialised if it's the default (0, 0) + // skip_serializing_if = "NotificationCounts::is_default" + notifi.counts = NotificationCounts::default(); + } if event_id_only { self.send_request( From 
468071336b1d8e834d7db4c55d1a3c5e0c84b211 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 14:12:00 -0500 Subject: [PATCH 0402/1248] ping online presence on read updates like synapse Signed-off-by: strawberry --- src/api/client/read_marker.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 2e98afbc..89fe003a 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -49,6 +49,14 @@ pub(crate) async fn set_read_marker_route( .reset_notification_counts(sender_user, &body.room_id); } + // ping presence + if services.globals.allow_local_presence() { + services + .presence + .ping_presence(sender_user, &ruma::presence::PresenceState::Online) + .await?; + } + if let Some(event) = &body.read_receipt { let receipt_content = BTreeMap::from_iter([( event.to_owned(), @@ -117,6 +125,14 @@ pub(crate) async fn create_receipt_route( .reset_notification_counts(sender_user, &body.room_id); } + // ping presence + if services.globals.allow_local_presence() { + services + .presence + .ping_presence(sender_user, &ruma::presence::PresenceState::Online) + .await?; + } + match body.receipt_type { | create_receipt::v3::ReceiptType::FullyRead => { let fully_read_event = ruma::events::fully_read::FullyReadEvent { From 2ba0400758b21eec043f7c75cb8642017c01de20 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 14:23:13 -0500 Subject: [PATCH 0403/1248] return proper error for attempting to update m.room.create Signed-off-by: strawberry --- src/api/client/state.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 6a65f0f9..f56444c7 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -207,6 +207,11 @@ async fn allowed_to_send_state_event( json: &Raw, ) -> Result { match event_type { + | StateEventType::RoomCreate => { + return Err!(Request(BadJson( + "You cannot update 
m.room.create after a room has been created." + ))); + }, // Forbid m.room.encryption if encryption is disabled | StateEventType::RoomEncryption => if !services.globals.allow_encryption() { From 74c5bfd3118822035ff242274d8bcefd70afa453 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 14:31:11 -0500 Subject: [PATCH 0404/1248] dont allow m.room.server_acl to be redacted Signed-off-by: strawberry --- src/service/rooms/state_accessor/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 6ddf198d..fd132798 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -512,6 +512,16 @@ impl Service { return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); } + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) + { + return Err!(Request(Forbidden( + "Redacting m.room.server_acl will result in the room being inaccessible for \ + everyone (empty allow key), forbidding." 
+ ))); + } + if let Ok(pl_event_content) = self .room_state_get_content::( room_id, From aeae67a7ee975c1987b33a91aca4e9d5ae29c2f2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 15:20:28 -0500 Subject: [PATCH 0405/1248] bump ruwuma to stop sending empty content rulesets Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f7c7079f..c86904d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3162,7 +3162,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "assign", "js_int", @@ -3184,7 +3184,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "js_int", "ruma-common", @@ -3196,7 +3196,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "as_variant", "assign", @@ -3219,7 +3219,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "as_variant", "base64 0.22.1", @@ -3249,7 +3249,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3273,7 +3273,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "bytes", "http", @@ -3291,7 +3291,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "js_int", "thiserror 2.0.7", @@ -3300,7 +3300,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "js_int", 
"ruma-common", @@ -3310,7 +3310,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3325,7 +3325,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "js_int", "ruma-common", @@ -3337,7 +3337,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "headers", "http", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3366,7 +3366,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5a826d31a32b6473671a5b9f813ad2e4b47676b4#5a826d31a32b6473671a5b9f813ad2e4b47676b4" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index ea9cfa3c..cb2ab916 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -334,7 +334,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "5a826d31a32b6473671a5b9f813ad2e4b47676b4" +rev = "a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" features = [ "compat", "rand", From 004671b437020e4463ed4defe26ad2882138aa13 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 15:41:19 -0500 Subject: [PATCH 0406/1248] dont copy join_authorized_via_users_server on leaving rooms Signed-off-by: strawberry --- src/api/client/membership.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 0ddcab32..3eb52138 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -7,7 +7,7 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, debug_info, debug_warn, err, error, info, + debug, debug_info, debug_warn, err, info, pdu::{self, gen_event_id_canonical_json, PduBuilder}, result::FlatOk, trace, @@ -1621,7 +1621,9 @@ pub async fn leave_room( .await else { // Fix for broken rooms - error!("Trying to leave a room you are not a member of."); + warn!( + "Trying to leave a room you are not a member of, marking room as left locally." 
+ ); services .rooms @@ -1647,6 +1649,8 @@ pub async fn leave_room( PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { membership: MembershipState::Leave, reason, + join_authorized_via_users_server: None, + is_direct: None, ..event }), user_id, From c8c9d73b20f698a8c5cc5e2aec9f04b35e0de680 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 17:24:45 -0500 Subject: [PATCH 0407/1248] enable edns0 on hickory resolver config Signed-off-by: strawberry --- src/service/resolver/dns.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index 633b397a..c331dfba 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -57,6 +57,7 @@ impl Resolver { opts.attempts = config.dns_attempts as usize; opts.try_tcp_on_error = config.dns_tcp_fallback; opts.num_concurrent_reqs = 1; + opts.edns0 = true; opts.shuffle_dns_servers = true; opts.rotate = true; opts.ip_strategy = match config.ip_lookup_strategy { From 7ad710d96ae2860b2a7f15cd0574d58ea4cfd3f9 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 18:00:49 -0500 Subject: [PATCH 0408/1248] bump complement Signed-off-by: strawberry --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 25d6de81..28437890 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1733767359, - "narHash": "sha256-3ZBFqvmTSE5Rtbb0Co89aut7q4ERBAeP7HntLN6sFik=", + "lastModified": 1734303596, + "narHash": "sha256-HjDRyLR4MBqQ3IjfMM6eE+8ayztXlbz3gXdyDmFla68=", "owner": "girlbossceo", "repo": "complement", - "rev": "6ec9a3a28fb2baec9afb4fc8b9974a5f5d6525fc", + "rev": "14cc5be797b774f1a2b9f826f38181066d4952b8", "type": "github" }, "original": { From 8dcdb4b09a3b6f5347ebbb1a26b7e591e2154cb9 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 22:46:56 -0500 Subject: [PATCH 0409/1248] update some more docs 
Signed-off-by: strawberry --- debian/README.md | 2 ++ docs/deploying/docker.md | 3 ++ docs/deploying/generic.md | 43 ++++++++++++++++++++------- docs/deploying/nixos.md | 6 ++++ docs/maintenance.md | 62 +++++++++++++++++++++++++++++++-------- docs/troubleshooting.md | 10 ------- 6 files changed, 93 insertions(+), 33 deletions(-) diff --git a/debian/README.md b/debian/README.md index 89354469..800a2e09 100644 --- a/debian/README.md +++ b/debian/README.md @@ -9,6 +9,8 @@ It is recommended to see the [generic deployment guide](../deploying/generic.md) for further information if needed as usage of the Debian package is generally related. +No `apt` repository is currently offered yet, it is in the works/development. + ### Configuration When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml` diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index d0aa13b3..bdbfb59c 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -24,6 +24,9 @@ OCI images for conduwuit are available in the registries listed below. [shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest [shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main +OCI image `.tar.gz` files are also hosted directly at when uploaded by CI with a +commit hash/revision or a tagged release: + Use ```bash diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 398ba67f..cc50544e 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -1,6 +1,6 @@ # Generic deployment documentation -> ## Getting help +> ### Getting help > > If you run into any problems while setting up conduwuit, ask us in > `#conduwuit:puppygock.gay` or [open an issue on @@ -8,29 +8,50 @@ ## Installing conduwuit -You may simply download the binary that fits your machine. Run `uname -m` to see -what you need. 
+### Static prebuilt binary + +You may simply download the binary that fits your machine architecture (x86_64 +or aarch64). Run `uname -m` to see what you need. Prebuilt fully static musl binaries can be downloaded from the latest tagged release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or -`main` CI branch workflow artifact output. These also include Debian/Ubuntu packages. +`main` CI branch workflow artifact output. These also include Debian/Ubuntu +packages. + +Binaries are also available on my website directly at: + +These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit +hash/revision, and `releases` are tagged releases. Sort by descending last +modified for the latest. These binaries have jemalloc and io_uring statically linked and included with them, so no additional dynamic dependencies need to be installed. +For the **best** performance; if using an `x86_64` CPU made in the last ~15 years, +we recommend using the `-haswell-` optimised binaries. This sets +`-march=haswell` which is the most compatible and highest performance with +optimised binaries. The database backend, RocksDB, most benefits from this as it +will then use hardware accelerated CRC32 hashing/checksumming which is critical +for performance. + +### Compiling + Alternatively, you may compile the binary yourself. We recommend using -Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most guaranteed -reproducibiltiy and easiest to get a build environment and output going. This also -allows easy cross-compilation. +Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most +guaranteed reproducibiltiy and easiest to get a build environment and output +going. This also allows easy cross-compilation. 
You can run the `nix build -L .#static-x86_64-linux-musl-all-features` or `nix build -L .#static-aarch64-linux-musl-all-features` commands based on architecture to cross-compile the necessary static binary located at -`result/bin/conduwuit`. This is reproducible with the static binaries produced in our CI. +`result/bin/conduwuit`. This is reproducible with the static binaries produced +in our CI. -Otherwise, follow standard Rust project build guides (installing git and cloning -the repo, getting the Rust toolchain via rustup, installing LLVM toolchain + -libclang for RocksDB, installing liburing for io_uring and RocksDB, etc). +If wanting to build using standard Rust toolchains, make sure you install: +- `liburing-dev` on the compiling machine, and `liburing` on the target host +- LLVM and libclang for RocksDB + +You can build conduwuit using `cargo build --release --all-features` ## Migrating from Conduit diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index 0372228d..3c5b0e69 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -29,6 +29,12 @@ conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg= conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw= ``` +If needed, we have a binary cache on Cachix but it is only limited to 5GB: + +``` +https://conduwuit.cachix.org +conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= +``` If specifying a Git remote URL in your flake, you can use any remotes that are specified on the README (the mirrors), such as the GitHub: `github:girlbossceo/conduwuit` diff --git a/docs/maintenance.md b/docs/maintenance.md index c8df95af..5c8c853a 100644 --- a/docs/maintenance.md +++ b/docs/maintenance.md @@ -22,23 +22,59 @@ conduwuit has moderation admin commands for: Any commands with `-list` in them will require a codeblock in the message with each object being newline delimited. 
An example of doing this is: -```` !admin rooms moderation ban-list-of-rooms ``` !roomid1:server.name -!roomid2:server.name !roomid3:server.name ``` ```` +```` +!admin rooms moderation ban-list-of-rooms +``` +!roomid1:server.name +#badroomalias1:server.name +!roomid2:server.name +!roomid3:server.name +#badroomalias2:server.name +``` +```` -## Database +## Database (RocksDB) -If using RocksDB, there's very little you need to do. Compaction is ran -automatically based on various defined thresholds tuned for conduwuit to be high -performance with the least I/O amplifcation or overhead. Manually running -compaction is not recommended, or compaction via a timer. RocksDB is built with -io_uring support via liburing for async read I/O. +Generally there is very little you need to do. [Compaction][rocksdb-compaction] +is ran automatically based on various defined thresholds tuned for conduwuit to +be high performance with the least I/O amplifcation or overhead. Manually +running compaction is not recommended, or compaction via a timer, due to +creating unnecessary I/O amplification. RocksDB is built with io_uring support +via liburing for improved read performance. + +RocksDB troubleshooting can be found [in the RocksDB section of troubleshooting](troubleshooting.md). + +### Compression Some RocksDB settings can be adjusted such as the compression method chosen. See -the RocksDB section in the [example config](configuration/examples.md). btrfs -users may benefit from disabling compression on RocksDB if CoW is in use. +the RocksDB section in the [example config](configuration/examples.md). -RocksDB troubleshooting can be found [in the RocksDB section of -troubleshooting](troubleshooting.md). +btrfs users have reported that database compression does not need to be disabled +on conduwuit as the filesystem already does not attempt to compress. 
This can be +validated by using `filefrag -v` on a `.SST` file in your database, and ensure +the `physical_offset` matches (no filesystem compression). It is very important +to ensure no additional filesystem compression takes place as this can render +unbuffered Direct IO inoperable, significantly slowing down read and write +performance. See + +> Compression is done using the COW mechanism so it’s incompatible with +> nodatacow. Direct IO read works on compressed files but will fall back to +> buffered writes and leads to no compression even if force compression is set. +> Currently nodatasum and compression don’t work together. + +### Files in database + +Do not touch any of the files in the database directory. This must be said due +to users being mislead by the `.log` files in the RocksDB directory, thinking +they're server logs or database logs, however they are critical RocksDB files +related to WAL tracking. + +The only safe files that can be deleted are the `LOG` files (all caps). These +are the real RocksDB telemetry/log files, however conduwuit has already +configured to only store up to 3 RocksDB `LOG` files due to generall being +useless for average users unless troubleshooting something low-level. If you +would like to store nearly none at all, see the `rocksdb_max_log_files` +config option. ## Backups @@ -95,3 +131,5 @@ Built-in S3 support is also planned, but for now using a "S3 filesystem" on `media/` works. conduwuit also sends a `Cache-Control` header of 1 year and immutable for all media requests (download and thumbnail) to reduce unnecessary media requests from browsers, reduce bandwidth usage, and reduce load. + +[rocksdb-compaction]: https://github.com/facebook/rocksdb/wiki/Compaction diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 283cdeee..d25c9762 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -91,16 +91,6 @@ reliability at a slight performance cost due to TCP overhead. 
## RocksDB / database issues -#### Direct IO - -Some filesystems may not like RocksDB using [Direct -IO](https://github.com/facebook/rocksdb/wiki/Direct-IO). Direct IO is for -non-buffered I/O which improves conduwuit performance and reduces system CPU -usage, but at least FUSE and possibly ZFS are filesystems potentially known -to not like this. See the [example config](configuration/examples.md) for -disabling it if needed. Issues from Direct IO on unsupported filesystems are -usually shown as startup errors. - #### Database corruption If your database is corrupted *and* is failing to start (e.g. checksum From 5813419f4b3a2cce7d55182a2181206ec9b0e906 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 15 Dec 2024 22:49:34 -0500 Subject: [PATCH 0410/1248] free up ci runner space on both jobs Signed-off-by: strawberry --- .github/workflows/ci.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 66c8f635..d0d78238 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -266,6 +266,15 @@ jobs: - target: aarch64-linux-musl - target: x86_64-linux-musl steps: + - name: Free up a bit of runner space + run: | + set +o pipefail + sudo docker image prune --all --force || true + sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true + sudo apt clean + sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku + set -o pipefail + - name: Sync repository uses: actions/checkout@v4 with: From 63c4975483446d14b08c431c6378d2024cd4401a Mon Sep 17 00:00:00 2001 From: AsenHu <78863300+AsenHu@users.noreply.github.com> Date: Tue, 17 Dec 2024 02:08:52 +0000 Subject: [PATCH 0411/1248] fix bug --- 
.github/workflows/release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b0b1ec8d..511d6a0e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,7 +2,7 @@ name: Upload Release Assets on: release: - types: [published, prereleased] + types: [published] workflow_dispatch: inputs: tag: @@ -51,7 +51,7 @@ jobs: exit 0 fi - tag="${GH_TAG}}" + tag="${GH_TAG}" fi echo "ci_id=$id" >> "$GITHUB_OUTPUT" From 9ec35cf68483b14857684c69076e95d905090f90 Mon Sep 17 00:00:00 2001 From: AsenHu <78863300+AsenHu@users.noreply.github.com> Date: Tue, 17 Dec 2024 03:01:40 +0000 Subject: [PATCH 0412/1248] skip uploading JSON files in release workflow --- .github/workflows/release.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 511d6a0e..cfe72d2a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -77,8 +77,10 @@ jobs: TAG: ${{ steps.get_ci_id.outputs.tag }} run: | for file in $(find . -type f); do - echo "Uploading $file..." - gh release upload $TAG "$file" --clobber --repo="${GH_REPOSITORY}" || echo "Something went wrong, skipping." + case "$file" in + *json*) echo "Skipping $file...";; + *) echo "Uploading $file..."; gh release upload $TAG "$file" --clobber --repo="${GH_REPOSITORY}" || echo "Something went wrong, skipping.";; + esac done - name: upload release assets to website @@ -109,6 +111,8 @@ jobs: ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/" for file in $(find . 
-type f); do - echo "Uploading $file to website" - scp $file website:/var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/$file + case "$file" in + *json*) echo "Skipping $file...";; + *) echo "Uploading $file to website"; scp $file website:/var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/$file;; + esac done From 5b6ff3869c118c4d5cde7b43c48f0b4be0d406ae Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 17 Dec 2024 19:29:46 +0000 Subject: [PATCH 0413/1248] Fix typo in test from rename --- src/core/utils/string.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index 6baa9c35..cc692c14 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -84,7 +84,7 @@ where /// Find the common prefix from a collection of strings and return a slice /// ``` -/// use conduwiit_core::utils::string::common_prefix; +/// use conduwuit_core::utils::string::common_prefix; /// let input = ["conduwuit", "conduit", "construct"]; /// common_prefix(&input) == "con"; /// ``` From 3675c941f8fe4b92b5426f24a667e0b03cabd9da Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 18 Dec 2024 03:04:39 +0000 Subject: [PATCH 0414/1248] Send read reciept and typing indicator EDUs to appservices with receive_ephemeral --- Cargo.lock | 26 ++++++------- Cargo.toml | 4 +- src/api/client/read_marker.rs | 14 +++---- src/api/server/send.rs | 2 +- src/service/rooms/read_receipt/mod.rs | 18 +++++++-- src/service/rooms/typing/mod.rs | 54 +++++++++++++++++++++++---- src/service/sending/mod.rs | 48 +++++++++++++++++++++++- src/service/sending/sender.rs | 15 +++----- 8 files changed, 135 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c86904d7..2c0ae75c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3162,7 +3162,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "assign", "js_int", @@ -3184,7 +3184,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "js_int", "ruma-common", @@ -3196,7 +3196,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "as_variant", "assign", @@ -3219,7 +3219,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "as_variant", "base64 0.22.1", @@ -3249,7 +3249,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3273,7 
+3273,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "bytes", "http", @@ -3291,7 +3291,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "js_int", "thiserror 2.0.7", @@ -3300,7 +3300,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "js_int", "ruma-common", @@ -3310,7 +3310,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3325,7 +3325,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "js_int", "ruma-common", @@ -3337,7 +3337,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "headers", "http", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3366,7 +3366,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index cb2ab916..38d6d729 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -334,7 +334,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +rev = "112ccc24cb14de26757715d611285d0806d5d91f" features = [ "compat", "rand", @@ -350,7 +350,6 @@ features = [ "compat-upload-signatures", "identifiers-validation", "unstable-unspecified", - "unstable-msc2409", "unstable-msc2448", "unstable-msc2666", "unstable-msc2867", @@ 
-366,6 +365,7 @@ features = [ "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", + "unstable-msc4203", # sending to-device events to appservices "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", ] diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 89fe003a..ab7cc6ad 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -72,14 +72,10 @@ pub(crate) async fn set_read_marker_route( services .rooms .read_receipt - .readreceipt_update( - sender_user, - &body.room_id, - &ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - ) + .readreceipt_update(sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }) .await; } @@ -171,7 +167,7 @@ pub(crate) async fn create_receipt_route( .readreceipt_update( sender_user, &body.room_id, - &ruma::events::receipt::ReceiptEvent { + ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), }, diff --git a/src/api/server/send.rs b/src/api/server/send.rs index c5fc7118..db6fd748 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -275,7 +275,7 @@ async fn handle_edu_receipt( services .rooms .read_receipt - .readreceipt_update(&user_id, &room_id, &event) + .readreceipt_update(&user_id, &room_id, event) .await; } } else { diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 53e64957..4075c447 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,9 +2,10 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; +use conduwuit::{debug, err, result::LogErr, warn, PduCount, PduId, RawPduId, 
Result}; use futures::{try_join, Stream, TryFutureExt}; use ruma::{ + api::appservice::event::push_events::v1::EphemeralData, events::{ receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, @@ -48,14 +49,25 @@ impl Service { &self, user_id: &UserId, room_id: &RoomId, - event: &ReceiptEvent, + event: ReceiptEvent, ) { - self.db.readreceipt_update(user_id, room_id, event).await; + self.db.readreceipt_update(user_id, room_id, &event).await; self.services .sending .flush_room(room_id) .await .expect("room flush failed"); + // update appservices + let edu = EphemeralData::Receipt(event); + let _ = self + .services + .sending + .send_edu_appservice_room( + room_id, + serde_json::to_vec(&edu).expect("Serialized EphemeralData::Receipt"), + ) + .await + .log_err(); } /// Gets the latest private read receipt from the user in the room diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index a6123322..31ea40ae 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -7,8 +7,11 @@ use conduwuit::{ }; use futures::StreamExt; use ruma::{ - api::federation::transactions::edu::{Edu, TypingContent}, - events::SyncEphemeralRoomEvent, + api::{ + appservice::event::push_events::v1::EphemeralData, + federation::transactions::edu::{Edu, TypingContent}, + }, + events::{typing::TypingEventContent, EphemeralRoomEvent, SyncEphemeralRoomEvent}, OwnedRoomId, OwnedUserId, RoomId, UserId, }; use tokio::sync::{broadcast, RwLock}; @@ -76,6 +79,9 @@ impl Service { trace!("receiver found what it was looking for and is no longer interested"); } + // update appservices + self.appservice_send(room_id).await?; + // update federation if self.services.globals.user_is_local(user_id) { self.federation_send(room_id, user_id, true).await?; @@ -103,7 +109,8 @@ impl Service { if self.typing_update_sender.send(room_id.to_owned()).is_err() { trace!("receiver found what it was looking for and is no longer 
interested"); } - + // update appservices + self.appservice_send(room_id).await?; // update federation if self.services.globals.user_is_local(user_id) { self.federation_send(room_id, user_id, false).await?; @@ -157,6 +164,9 @@ impl Service { trace!("receiver found what it was looking for and is no longer interested"); } + // update appservices + self.appservice_send(room_id).await?; + // update federation for user in &removable { if self.services.globals.user_is_local(user) { @@ -180,17 +190,30 @@ impl Service { .unwrap_or(0)) } + /// Returns a new typing EDU. + pub async fn typings_content(&self, room_id: &RoomId) -> Result { + let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); + + let Some(typing_indicators) = room_typing_indicators else { + return Ok(TypingEventContent { user_ids: Vec::new() }); + }; + + let user_ids: Vec<_> = typing_indicators.into_keys().collect(); + + Ok(TypingEventContent { user_ids }) + } + /// Returns a new typing EDU. pub async fn typings_all( &self, room_id: &RoomId, sender_user: &UserId, - ) -> Result> { + ) -> Result> { let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); let Some(typing_indicators) = room_typing_indicators else { return Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { user_ids: Vec::new() }, + content: TypingEventContent { user_ids: Vec::new() }, }); }; @@ -208,9 +231,7 @@ impl Service { .collect() .await; - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { user_ids }, - }) + Ok(SyncEphemeralRoomEvent { content: TypingEventContent { user_ids } }) } async fn federation_send( @@ -237,4 +258,21 @@ impl Service { Ok(()) } + + async fn appservice_send(&self, room_id: &RoomId) -> Result<()> { + let edu = EphemeralData::Typing(EphemeralRoomEvent { + content: self.typings_content(room_id).await?, + room_id: room_id.into(), + }); + + self.services + .sending + .send_edu_appservice_room( + room_id, + 
serde_json::to_vec(&edu).expect("Serialized EphemeralData::Typing"), + ) + .await?; + + Ok(()) + } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 2038f4eb..2b571034 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -25,7 +25,10 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, client, globals, presence, pusher, resolver, rooms, rooms::timeline::RawPduId, + account_data, + appservice::NamespaceRegex, + client, globals, presence, pusher, resolver, + rooms::{self, timeline::RawPduId}, server_keys, users, Dep, }; @@ -38,6 +41,7 @@ pub struct Service { } struct Services { + alias: Dep, client: Dep, globals: Dep, resolver: Dep, @@ -76,6 +80,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { server: args.server.clone(), services: Services { + alias: args.depend::("rooms::alias"), client: args.depend::("client"), globals: args.depend::("globals"), resolver: args.depend::("resolver"), @@ -184,6 +189,47 @@ impl Service { }) } + #[tracing::instrument(skip(self, serialized), level = "debug")] + pub fn send_edu_appservice(&self, appservice_id: String, serialized: Vec) -> Result { + let dest = Destination::Appservice(appservice_id); + let event = SendingEvent::Edu(serialized); + let _cork = self.db.db.cork(); + let keys = self.db.queue_requests(once((&event, &dest))); + self.dispatch(Msg { + dest, + event, + queue_id: keys.into_iter().next().expect("request queue key"), + }) + } + + #[tracing::instrument(skip(self, room_id, serialized), level = "debug")] + pub async fn send_edu_appservice_room( + &self, + room_id: &RoomId, + serialized: Vec, + ) -> Result<()> { + for appservice in self.services.appservice.read().await.values() { + let matching_aliases = |aliases: NamespaceRegex| { + self.services + .alias + .local_aliases_for_room(room_id) + .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) + }; + + if appservice.rooms.is_match(room_id.as_str()) + || 
matching_aliases(appservice.aliases.clone()).await + || self + .services + .state_cache + .appservice_in_room(room_id, appservice) + .await + { + self.send_edu_appservice(appservice.registration.id.clone(), serialized.clone())?; + } + } + Ok(()) + } + #[tracing::instrument(skip(self, room_id, serialized), level = "debug")] pub async fn send_edu_room(&self, room_id: &RoomId, serialized: Vec) -> Result<()> { let servers = self diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 1f462f39..1589101b 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -1,3 +1,4 @@ +use core::str; use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, @@ -21,7 +22,7 @@ use futures::{ }; use ruma::{ api::{ - appservice::event::push_events::v1::Edu as RumaEdu, + appservice::event::push_events::v1::EphemeralData, federation::transactions::{ edu::{ DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, @@ -587,7 +588,7 @@ impl Service { .filter(|event| matches!(event, SendingEvent::Pdu(_))) .count(), ); - let mut edu_jsons: Vec = Vec::with_capacity( + let mut edu_jsons: Vec = Vec::with_capacity( events .iter() .filter(|event| matches!(event, SendingEvent::Edu(_))) @@ -600,16 +601,12 @@ impl Service { pdu_jsons.push(pdu.to_room_event()); } }, - | SendingEvent::Edu(edu) => { - if appservice - .receive_ephemeral - .is_some_and(|receive_edus| receive_edus) - { + | SendingEvent::Edu(edu) => + if appservice.receive_ephemeral { if let Ok(edu) = serde_json::from_slice(edu) { edu_jsons.push(edu); } - } - }, + }, | SendingEvent::Flush => {}, // flush only; no new content } } From 4dd809fdc4c84725d87f23f55bad510b63c27f1b Mon Sep 17 00:00:00 2001 From: strawberry Date: Tue, 17 Dec 2024 23:06:04 -0500 Subject: [PATCH 0415/1248] misc typing cleanup Signed-off-by: strawberry --- src/api/client/sync/v3.rs | 2 +- src/service/rooms/read_receipt/mod.rs | 2 +- src/service/rooms/typing/mod.rs | 72 
+++++++++++++-------------- src/service/sending/mod.rs | 6 +-- 4 files changed, 40 insertions(+), 42 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a05bcf98..fe3877aa 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -722,7 +722,7 @@ async fn load_joined_room( .rooms .typing .typings_all(room_id, sender_user) - .await?; + .await; Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) }) diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 4075c447..04c1426e 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -59,7 +59,7 @@ impl Service { .expect("room flush failed"); // update appservices let edu = EphemeralData::Receipt(event); - let _ = self + _ = self .services .sending .send_edu_appservice_room( diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index 31ea40ae..1de91e5e 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -146,32 +146,34 @@ impl Service { } }; - if !removable.is_empty() { - let typing = &mut self.typing.write().await; - let room = typing.entry(room_id.to_owned()).or_default(); - for user in &removable { - debug_info!("typing timeout {user:?} in {room_id:?}"); - room.remove(user); - } + if removable.is_empty() { + return Ok(()); + } - // update clients - self.last_typing_update - .write() - .await - .insert(room_id.to_owned(), self.services.globals.next_count()?); + let typing = &mut self.typing.write().await; + let room = typing.entry(room_id.to_owned()).or_default(); + for user in &removable { + debug_info!("typing timeout {user:?} in {room_id:?}"); + room.remove(user); + } - if self.typing_update_sender.send(room_id.to_owned()).is_err() { - trace!("receiver found what it was looking for and is no longer interested"); - } + // update clients + self.last_typing_update + .write() + .await + .insert(room_id.to_owned(), 
self.services.globals.next_count()?); - // update appservices - self.appservice_send(room_id).await?; + if self.typing_update_sender.send(room_id.to_owned()).is_err() { + trace!("receiver found what it was looking for and is no longer interested"); + } - // update federation - for user in &removable { - if self.services.globals.user_is_local(user) { - self.federation_send(room_id, user, false).await?; - } + // update appservices + self.appservice_send(room_id).await?; + + // update federation + for user in &removable { + if self.services.globals.user_is_local(user) { + self.federation_send(room_id, user, false).await?; } } @@ -190,17 +192,17 @@ impl Service { .unwrap_or(0)) } - /// Returns a new typing EDU. - pub async fn typings_content(&self, room_id: &RoomId) -> Result { + /// Returns a new typing EDU's content. + pub async fn typings_content(&self, room_id: &RoomId) -> TypingEventContent { let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); let Some(typing_indicators) = room_typing_indicators else { - return Ok(TypingEventContent { user_ids: Vec::new() }); + return TypingEventContent { user_ids: Vec::new() }; }; let user_ids: Vec<_> = typing_indicators.into_keys().collect(); - Ok(TypingEventContent { user_ids }) + TypingEventContent { user_ids } } /// Returns a new typing EDU. 
@@ -208,13 +210,13 @@ impl Service { &self, room_id: &RoomId, sender_user: &UserId, - ) -> Result> { + ) -> SyncEphemeralRoomEvent { let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); let Some(typing_indicators) = room_typing_indicators else { - return Ok(SyncEphemeralRoomEvent { + return SyncEphemeralRoomEvent { content: TypingEventContent { user_ids: Vec::new() }, - }); + }; }; let user_ids: Vec<_> = typing_indicators @@ -231,7 +233,7 @@ impl Service { .collect() .await; - Ok(SyncEphemeralRoomEvent { content: TypingEventContent { user_ids } }) + SyncEphemeralRoomEvent { content: TypingEventContent { user_ids } } } async fn federation_send( @@ -254,14 +256,12 @@ impl Service { self.services .sending .send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing")) - .await?; - - Ok(()) + .await } async fn appservice_send(&self, room_id: &RoomId) -> Result<()> { let edu = EphemeralData::Typing(EphemeralRoomEvent { - content: self.typings_content(room_id).await?, + content: self.typings_content(room_id).await, room_id: room_id.into(), }); @@ -271,8 +271,6 @@ impl Service { room_id, serde_json::to_vec(&edu).expect("Serialized EphemeralData::Typing"), ) - .await?; - - Ok(()) + .await } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 2b571034..a100367c 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -190,8 +190,8 @@ impl Service { } #[tracing::instrument(skip(self, serialized), level = "debug")] - pub fn send_edu_appservice(&self, appservice_id: String, serialized: Vec) -> Result { - let dest = Destination::Appservice(appservice_id); + pub fn send_edu_appservice(&self, appservice_id: &str, serialized: Vec) -> Result { + let dest = Destination::Appservice(appservice_id.to_owned()); let event = SendingEvent::Edu(serialized); let _cork = self.db.db.cork(); let keys = self.db.queue_requests(once((&event, &dest))); @@ -224,7 +224,7 @@ impl Service { 
.appservice_in_room(room_id, appservice) .await { - self.send_edu_appservice(appservice.registration.id.clone(), serialized.clone())?; + self.send_edu_appservice(&appservice.registration.id, serialized.clone())?; } } Ok(()) From 9040ad054e172661bb9869b955cae02a51f6abae Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 18 Dec 2024 11:26:04 -0500 Subject: [PATCH 0416/1248] Revert "misc typing cleanup" This reverts commit 4dd809fdc4c84725d87f23f55bad510b63c27f1b. --- src/api/client/sync/v3.rs | 2 +- src/service/rooms/read_receipt/mod.rs | 2 +- src/service/rooms/typing/mod.rs | 72 ++++++++++++++------------- src/service/sending/mod.rs | 6 +-- 4 files changed, 42 insertions(+), 40 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index fe3877aa..a05bcf98 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -722,7 +722,7 @@ async fn load_joined_room( .rooms .typing .typings_all(room_id, sender_user) - .await; + .await?; Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) }) diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 04c1426e..4075c447 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -59,7 +59,7 @@ impl Service { .expect("room flush failed"); // update appservices let edu = EphemeralData::Receipt(event); - _ = self + let _ = self .services .sending .send_edu_appservice_room( diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index 1de91e5e..31ea40ae 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -146,34 +146,32 @@ impl Service { } }; - if removable.is_empty() { - return Ok(()); - } + if !removable.is_empty() { + let typing = &mut self.typing.write().await; + let room = typing.entry(room_id.to_owned()).or_default(); + for user in &removable { + debug_info!("typing timeout {user:?} in {room_id:?}"); + room.remove(user); + } - let typing = 
&mut self.typing.write().await; - let room = typing.entry(room_id.to_owned()).or_default(); - for user in &removable { - debug_info!("typing timeout {user:?} in {room_id:?}"); - room.remove(user); - } + // update clients + self.last_typing_update + .write() + .await + .insert(room_id.to_owned(), self.services.globals.next_count()?); - // update clients - self.last_typing_update - .write() - .await - .insert(room_id.to_owned(), self.services.globals.next_count()?); + if self.typing_update_sender.send(room_id.to_owned()).is_err() { + trace!("receiver found what it was looking for and is no longer interested"); + } - if self.typing_update_sender.send(room_id.to_owned()).is_err() { - trace!("receiver found what it was looking for and is no longer interested"); - } + // update appservices + self.appservice_send(room_id).await?; - // update appservices - self.appservice_send(room_id).await?; - - // update federation - for user in &removable { - if self.services.globals.user_is_local(user) { - self.federation_send(room_id, user, false).await?; + // update federation + for user in &removable { + if self.services.globals.user_is_local(user) { + self.federation_send(room_id, user, false).await?; + } } } @@ -192,17 +190,17 @@ impl Service { .unwrap_or(0)) } - /// Returns a new typing EDU's content. - pub async fn typings_content(&self, room_id: &RoomId) -> TypingEventContent { + /// Returns a new typing EDU. + pub async fn typings_content(&self, room_id: &RoomId) -> Result { let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); let Some(typing_indicators) = room_typing_indicators else { - return TypingEventContent { user_ids: Vec::new() }; + return Ok(TypingEventContent { user_ids: Vec::new() }); }; let user_ids: Vec<_> = typing_indicators.into_keys().collect(); - TypingEventContent { user_ids } + Ok(TypingEventContent { user_ids }) } /// Returns a new typing EDU. 
@@ -210,13 +208,13 @@ impl Service { &self, room_id: &RoomId, sender_user: &UserId, - ) -> SyncEphemeralRoomEvent { + ) -> Result> { let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); let Some(typing_indicators) = room_typing_indicators else { - return SyncEphemeralRoomEvent { + return Ok(SyncEphemeralRoomEvent { content: TypingEventContent { user_ids: Vec::new() }, - }; + }); }; let user_ids: Vec<_> = typing_indicators @@ -233,7 +231,7 @@ impl Service { .collect() .await; - SyncEphemeralRoomEvent { content: TypingEventContent { user_ids } } + Ok(SyncEphemeralRoomEvent { content: TypingEventContent { user_ids } }) } async fn federation_send( @@ -256,12 +254,14 @@ impl Service { self.services .sending .send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing")) - .await + .await?; + + Ok(()) } async fn appservice_send(&self, room_id: &RoomId) -> Result<()> { let edu = EphemeralData::Typing(EphemeralRoomEvent { - content: self.typings_content(room_id).await, + content: self.typings_content(room_id).await?, room_id: room_id.into(), }); @@ -271,6 +271,8 @@ impl Service { room_id, serde_json::to_vec(&edu).expect("Serialized EphemeralData::Typing"), ) - .await + .await?; + + Ok(()) } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index a100367c..2b571034 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -190,8 +190,8 @@ impl Service { } #[tracing::instrument(skip(self, serialized), level = "debug")] - pub fn send_edu_appservice(&self, appservice_id: &str, serialized: Vec) -> Result { - let dest = Destination::Appservice(appservice_id.to_owned()); + pub fn send_edu_appservice(&self, appservice_id: String, serialized: Vec) -> Result { + let dest = Destination::Appservice(appservice_id); let event = SendingEvent::Edu(serialized); let _cork = self.db.db.cork(); let keys = self.db.queue_requests(once((&event, &dest))); @@ -224,7 +224,7 @@ impl Service { 
.appservice_in_room(room_id, appservice) .await { - self.send_edu_appservice(&appservice.registration.id, serialized.clone())?; + self.send_edu_appservice(appservice.registration.id.clone(), serialized.clone())?; } } Ok(()) From f54a62dda0e989eda9a28f09121a142f692638c2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 18 Dec 2024 11:26:18 -0500 Subject: [PATCH 0417/1248] Revert "Send read reciept and typing indicator EDUs to appservices with receive_ephemeral" This reverts commit 3675c941f8fe4b92b5426f24a667e0b03cabd9da. --- Cargo.lock | 26 ++++++------- Cargo.toml | 4 +- src/api/client/read_marker.rs | 14 ++++--- src/api/server/send.rs | 2 +- src/service/rooms/read_receipt/mod.rs | 18 ++------- src/service/rooms/typing/mod.rs | 54 ++++----------------------- src/service/sending/mod.rs | 48 +----------------------- src/service/sending/sender.rs | 15 +++++--- 8 files changed, 46 insertions(+), 135 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2c0ae75c..c86904d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3162,7 +3162,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "assign", "js_int", @@ -3184,7 +3184,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "js_int", "ruma-common", @@ -3196,7 +3196,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "as_variant", "assign", @@ -3219,7 +3219,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "as_variant", "base64 0.22.1", @@ -3249,7 +3249,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3273,7 +3273,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "bytes", "http", @@ -3291,7 +3291,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "js_int", "thiserror 2.0.7", @@ 
-3300,7 +3300,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "js_int", "ruma-common", @@ -3310,7 +3310,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3325,7 +3325,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "js_int", "ruma-common", @@ -3337,7 +3337,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "headers", "http", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3366,7 +3366,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 38d6d729..cb2ab916 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -334,7 +334,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "112ccc24cb14de26757715d611285d0806d5d91f" +rev = "a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" features = [ "compat", "rand", @@ -350,6 +350,7 @@ features = [ "compat-upload-signatures", "identifiers-validation", "unstable-unspecified", + "unstable-msc2409", "unstable-msc2448", "unstable-msc2666", "unstable-msc2867", @@ -365,7 +366,6 @@ features = [ "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", - "unstable-msc4203", # sending to-device events to appservices "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", ] diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index ab7cc6ad..89fe003a 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -72,10 +72,14 @@ pub(crate) async fn set_read_marker_route( services .rooms .read_receipt - .readreceipt_update(sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }) + .readreceipt_update( + sender_user, + &body.room_id, + &ruma::events::receipt::ReceiptEvent { + content: 
ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + ) .await; } @@ -167,7 +171,7 @@ pub(crate) async fn create_receipt_route( .readreceipt_update( sender_user, &body.room_id, - ruma::events::receipt::ReceiptEvent { + &ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), }, diff --git a/src/api/server/send.rs b/src/api/server/send.rs index db6fd748..c5fc7118 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -275,7 +275,7 @@ async fn handle_edu_receipt( services .rooms .read_receipt - .readreceipt_update(&user_id, &room_id, event) + .readreceipt_update(&user_id, &room_id, &event) .await; } } else { diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 4075c447..53e64957 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,10 +2,9 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{debug, err, result::LogErr, warn, PduCount, PduId, RawPduId, Result}; +use conduwuit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; use futures::{try_join, Stream, TryFutureExt}; use ruma::{ - api::appservice::event::push_events::v1::EphemeralData, events::{ receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, @@ -49,25 +48,14 @@ impl Service { &self, user_id: &UserId, room_id: &RoomId, - event: ReceiptEvent, + event: &ReceiptEvent, ) { - self.db.readreceipt_update(user_id, room_id, &event).await; + self.db.readreceipt_update(user_id, room_id, event).await; self.services .sending .flush_room(room_id) .await .expect("room flush failed"); - // update appservices - let edu = EphemeralData::Receipt(event); - let _ = self - .services - .sending - .send_edu_appservice_room( - room_id, - serde_json::to_vec(&edu).expect("Serialized EphemeralData::Receipt"), - ) - .await - 
.log_err(); } /// Gets the latest private read receipt from the user in the room diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index 31ea40ae..a6123322 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -7,11 +7,8 @@ use conduwuit::{ }; use futures::StreamExt; use ruma::{ - api::{ - appservice::event::push_events::v1::EphemeralData, - federation::transactions::edu::{Edu, TypingContent}, - }, - events::{typing::TypingEventContent, EphemeralRoomEvent, SyncEphemeralRoomEvent}, + api::federation::transactions::edu::{Edu, TypingContent}, + events::SyncEphemeralRoomEvent, OwnedRoomId, OwnedUserId, RoomId, UserId, }; use tokio::sync::{broadcast, RwLock}; @@ -79,9 +76,6 @@ impl Service { trace!("receiver found what it was looking for and is no longer interested"); } - // update appservices - self.appservice_send(room_id).await?; - // update federation if self.services.globals.user_is_local(user_id) { self.federation_send(room_id, user_id, true).await?; @@ -109,8 +103,7 @@ impl Service { if self.typing_update_sender.send(room_id.to_owned()).is_err() { trace!("receiver found what it was looking for and is no longer interested"); } - // update appservices - self.appservice_send(room_id).await?; + // update federation if self.services.globals.user_is_local(user_id) { self.federation_send(room_id, user_id, false).await?; @@ -164,9 +157,6 @@ impl Service { trace!("receiver found what it was looking for and is no longer interested"); } - // update appservices - self.appservice_send(room_id).await?; - // update federation for user in &removable { if self.services.globals.user_is_local(user) { @@ -190,30 +180,17 @@ impl Service { .unwrap_or(0)) } - /// Returns a new typing EDU. 
- pub async fn typings_content(&self, room_id: &RoomId) -> Result { - let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); - - let Some(typing_indicators) = room_typing_indicators else { - return Ok(TypingEventContent { user_ids: Vec::new() }); - }; - - let user_ids: Vec<_> = typing_indicators.into_keys().collect(); - - Ok(TypingEventContent { user_ids }) - } - /// Returns a new typing EDU. pub async fn typings_all( &self, room_id: &RoomId, sender_user: &UserId, - ) -> Result> { + ) -> Result> { let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); let Some(typing_indicators) = room_typing_indicators else { return Ok(SyncEphemeralRoomEvent { - content: TypingEventContent { user_ids: Vec::new() }, + content: ruma::events::typing::TypingEventContent { user_ids: Vec::new() }, }); }; @@ -231,7 +208,9 @@ impl Service { .collect() .await; - Ok(SyncEphemeralRoomEvent { content: TypingEventContent { user_ids } }) + Ok(SyncEphemeralRoomEvent { + content: ruma::events::typing::TypingEventContent { user_ids }, + }) } async fn federation_send( @@ -258,21 +237,4 @@ impl Service { Ok(()) } - - async fn appservice_send(&self, room_id: &RoomId) -> Result<()> { - let edu = EphemeralData::Typing(EphemeralRoomEvent { - content: self.typings_content(room_id).await?, - room_id: room_id.into(), - }); - - self.services - .sending - .send_edu_appservice_room( - room_id, - serde_json::to_vec(&edu).expect("Serialized EphemeralData::Typing"), - ) - .await?; - - Ok(()) - } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 2b571034..2038f4eb 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -25,10 +25,7 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, - appservice::NamespaceRegex, - client, globals, presence, pusher, resolver, - rooms::{self, timeline::RawPduId}, + account_data, client, globals, presence, pusher, resolver, rooms, rooms::timeline::RawPduId, 
server_keys, users, Dep, }; @@ -41,7 +38,6 @@ pub struct Service { } struct Services { - alias: Dep, client: Dep, globals: Dep, resolver: Dep, @@ -80,7 +76,6 @@ impl crate::Service for Service { Ok(Arc::new(Self { server: args.server.clone(), services: Services { - alias: args.depend::("rooms::alias"), client: args.depend::("client"), globals: args.depend::("globals"), resolver: args.depend::("resolver"), @@ -189,47 +184,6 @@ impl Service { }) } - #[tracing::instrument(skip(self, serialized), level = "debug")] - pub fn send_edu_appservice(&self, appservice_id: String, serialized: Vec) -> Result { - let dest = Destination::Appservice(appservice_id); - let event = SendingEvent::Edu(serialized); - let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(once((&event, &dest))); - self.dispatch(Msg { - dest, - event, - queue_id: keys.into_iter().next().expect("request queue key"), - }) - } - - #[tracing::instrument(skip(self, room_id, serialized), level = "debug")] - pub async fn send_edu_appservice_room( - &self, - room_id: &RoomId, - serialized: Vec, - ) -> Result<()> { - for appservice in self.services.appservice.read().await.values() { - let matching_aliases = |aliases: NamespaceRegex| { - self.services - .alias - .local_aliases_for_room(room_id) - .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) - }; - - if appservice.rooms.is_match(room_id.as_str()) - || matching_aliases(appservice.aliases.clone()).await - || self - .services - .state_cache - .appservice_in_room(room_id, appservice) - .await - { - self.send_edu_appservice(appservice.registration.id.clone(), serialized.clone())?; - } - } - Ok(()) - } - #[tracing::instrument(skip(self, room_id, serialized), level = "debug")] pub async fn send_edu_room(&self, room_id: &RoomId, serialized: Vec) -> Result<()> { let servers = self diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 1589101b..1f462f39 100644 --- a/src/service/sending/sender.rs +++ 
b/src/service/sending/sender.rs @@ -1,4 +1,3 @@ -use core::str; use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, @@ -22,7 +21,7 @@ use futures::{ }; use ruma::{ api::{ - appservice::event::push_events::v1::EphemeralData, + appservice::event::push_events::v1::Edu as RumaEdu, federation::transactions::{ edu::{ DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, @@ -588,7 +587,7 @@ impl Service { .filter(|event| matches!(event, SendingEvent::Pdu(_))) .count(), ); - let mut edu_jsons: Vec = Vec::with_capacity( + let mut edu_jsons: Vec = Vec::with_capacity( events .iter() .filter(|event| matches!(event, SendingEvent::Edu(_))) @@ -601,12 +600,16 @@ impl Service { pdu_jsons.push(pdu.to_room_event()); } }, - | SendingEvent::Edu(edu) => - if appservice.receive_ephemeral { + | SendingEvent::Edu(edu) => { + if appservice + .receive_ephemeral + .is_some_and(|receive_edus| receive_edus) + { if let Ok(edu) = serde_json::from_slice(edu) { edu_jsons.push(edu); } - }, + } + }, | SendingEvent::Flush => {}, // flush only; no new content } } From ad8cbcaac1ab77b0b074132c477c7d1943c93919 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 17 Dec 2024 10:43:14 +0000 Subject: [PATCH 0418/1248] check cache prior to offloading iterator seek Signed-off-by: Jason Volk --- src/database/map/keys_from.rs | 5 +++++ src/database/map/rev_keys_from.rs | 5 +++++ src/database/map/rev_stream.rs | 17 +++++++++++++++++ src/database/map/rev_stream_from.rs | 23 +++++++++++++++++++++++ src/database/map/stream.rs | 17 +++++++++++++++++ src/database/map/stream_from.rs | 20 ++++++++++++++++++++ src/database/stream.rs | 14 +++++++++----- 7 files changed, 96 insertions(+), 5 deletions(-) diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 2ffc68df..95c6611b 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -5,6 +5,7 @@ use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use 
rocksdb::Direction; use serde::{Deserialize, Serialize}; +use super::stream_from::is_cached; use crate::{ keyval::{result_deserialize_key, serialize_key, Key}, stream, @@ -54,6 +55,10 @@ where let opts = super::iter_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); + if is_cached(self, from) { + return stream::Keys::<'_>::from(state.init_fwd(from.as_ref().into())).boxed(); + } + let seek = Seek { map: self.clone(), dir: Direction::Forward, diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index a398f315..e208c505 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -5,6 +5,7 @@ use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; +use super::rev_stream_from::is_cached; use crate::{ keyval::{result_deserialize_key, serialize_key, Key}, stream, @@ -62,6 +63,10 @@ where let opts = super::iter_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); + if is_cached(self, from) { + return stream::KeysRev::<'_>::from(state.init_rev(from.as_ref().into())).boxed(); + } + let seek = Seek { map: self.clone(), dir: Direction::Reverse, diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index 81359800..7f58582f 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -26,3 +26,20 @@ pub fn rev_raw_stream(&self) -> impl Stream>> + Send { let opts = super::iter_options_default(); stream::ItemsRev::new(&self.db, &self.cf, opts).init(None) } + +#[tracing::instrument( + name = "cached", + level = "trace", + skip_all, + fields(%map), +)] +pub(super) fn _is_cached

    (map: &super::Map) -> bool +where + P: AsRef<[u8]> + ?Sized, +{ + let opts = super::cache_read_options_default(); + let mut state = stream::State::new(&map.db, &map.cf, opts); + + state.seek_rev(); + !state.is_incomplete() +} diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 6ddb9bc7..d166aa0f 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -11,6 +11,7 @@ use serde::{Deserialize, Serialize}; use crate::{ keyval::{result_deserialize, serialize_key, KeyVal}, stream, + util::is_incomplete, }; /// Iterate key-value entries in the map starting from upper-bound. @@ -83,6 +84,10 @@ where let opts = super::iter_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); + if is_cached(self, from) { + return stream::ItemsRev::<'_>::from(state.init_rev(from.as_ref().into())).boxed(); + }; + let seek = Seek { map: self.clone(), dir: Direction::Reverse, @@ -99,3 +104,21 @@ where .try_flatten() .boxed() } + +#[tracing::instrument( + name = "cached", + level = "trace", + skip(map, from), + fields(%map), +)] +pub(super) fn is_cached

    (map: &Arc, from: &P) -> bool +where + P: AsRef<[u8]> + ?Sized, +{ + let cache_opts = super::cache_read_options_default(); + let cache_status = stream::State::new(&map.db, &map.cf, cache_opts) + .init_rev(from.as_ref().into()) + .status(); + + !matches!(cache_status, Some(e) if is_incomplete(&e)) +} diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index c2d9b6b8..1a90b8fb 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -25,3 +25,20 @@ pub fn raw_stream(&self) -> impl Stream>> + Send { let opts = super::iter_options_default(); stream::Items::new(&self.db, &self.cf, opts).init(None) } + +#[tracing::instrument( + name = "cached", + level = "trace", + skip_all, + fields(%map), +)] +pub(super) fn _is_cached

    (map: &super::Map) -> bool +where + P: AsRef<[u8]> + ?Sized, +{ + let opts = super::cache_read_options_default(); + let mut state = stream::State::new(&map.db, &map.cf, opts); + + state.seek_fwd(); + !state.is_incomplete() +} diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 1dae9d78..107ce4b1 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -81,6 +81,10 @@ where let opts = super::read_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); + if is_cached(self, from) { + return stream::Items::<'_>::from(state.init_fwd(from.as_ref().into())).boxed(); + }; + let seek = Seek { map: self.clone(), dir: Direction::Forward, @@ -97,3 +101,19 @@ where .try_flatten() .boxed() } + +#[tracing::instrument( + name = "cached", + level = "trace", + skip(map, from), + fields(%map), +)] +pub(super) fn is_cached

    (map: &Arc, from: &P) -> bool +where + P: AsRef<[u8]> + ?Sized, +{ + let opts = super::cache_read_options_default(); + let state = stream::State::new(&map.db, &map.cf, opts).init_fwd(from.as_ref().into()); + + !state.is_incomplete() +} diff --git a/src/database/stream.rs b/src/database/stream.rs index 775fb930..f849d08f 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -5,14 +5,14 @@ mod keys_rev; use std::sync::Arc; -use conduwuit::{utils::exchange, Error, Result}; +use conduwuit::{utils::exchange, Result}; use rocksdb::{ColumnFamily, DBRawIteratorWithThreadMode, ReadOptions}; pub(crate) use self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; use crate::{ engine::Db, keyval::{Key, KeyVal, Val}, - util::map_err, + util::{is_incomplete, map_err}, Engine, Slice, }; @@ -34,7 +34,7 @@ pub(crate) trait Cursor<'a, T> { fn get(&self) -> Option> { self.fetch() .map(Ok) - .or_else(|| self.state().status().map(Err)) + .or_else(|| self.state().status().map(map_err).map(Err)) } fn seek_and_get(&mut self) -> Option> { @@ -91,16 +91,20 @@ impl<'a> State<'a> { } } + pub(super) fn is_incomplete(&self) -> bool { + matches!(self.status(), Some(e) if is_incomplete(&e)) + } + fn fetch_key(&self) -> Option> { self.inner.key().map(Key::from) } fn _fetch_val(&self) -> Option> { self.inner.value().map(Val::from) } fn fetch(&self) -> Option> { self.inner.item().map(KeyVal::from) } - fn status(&self) -> Option { self.inner.status().map_err(map_err).err() } + pub(super) fn status(&self) -> Option { self.inner.status().err() } #[inline] - fn valid(&self) -> bool { self.inner.valid() } + pub(super) fn valid(&self) -> bool { self.inner.valid() } } fn keyval_longevity<'a, 'b: 'a>(item: KeyVal<'a>) -> KeyVal<'b> { From f78104a95914acb3f8a8761cd1d5853c6cdfee82 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 17 Dec 2024 14:52:12 +0000 Subject: [PATCH 0419/1248] relax Send on input iterator Signed-off-by: Jason Volk --- src/core/utils/set.rs | 4 ++-- 
1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/utils/set.rs b/src/core/utils/set.rs index 8eac7157..032a9835 100644 --- a/src/core/utils/set.rs +++ b/src/core/utils/set.rs @@ -17,7 +17,7 @@ pub fn intersection(mut input: Iters) -> impl Iterator + Clone + Send, Iter: Iterator + Send, - Item: Eq + Send, + Item: Eq, { input.next().into_iter().flat_map(move |first| { let input = input.clone(); @@ -38,7 +38,7 @@ pub fn intersection_sorted( where Iters: Iterator + Clone + Send, Iter: Iterator + Send, - Item: Eq + Ord + Send, + Item: Eq + Ord, { input.next().into_iter().flat_map(move |first| { let mut input = input.clone().collect::>(); From 5f7636f177ddde0c630f24f347a8f638a7304278 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 17 Dec 2024 14:51:21 +0000 Subject: [PATCH 0420/1248] set dbpool thread name (gated by tokio_unstable) Signed-off-by: Jason Volk Signed-off-by: strawberry --- Cargo.toml | 1 + src/database/pool.rs | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index cb2ab916..cffbebfa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,6 +242,7 @@ features = [ "time", "rt-multi-thread", "io-util", + "tracing", ] [workspace.dependencies.tokio-metrics] diff --git a/src/database/pool.rs b/src/database/pool.rs index e4d78897..65012527 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -122,8 +122,16 @@ async fn spawn_until(self: &Arc, recv: Receiver, max: usize) -> Resul fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: Receiver) -> Result { let id = workers.len(); let self_ = self.clone(); + + #[cfg(not(tokio_unstable))] let _abort = workers.spawn_blocking_on(move || self_.worker(id, recv), self.server.runtime()); + #[cfg(tokio_unstable)] + let _abort = workers + .build_task() + .name("conduwuit:dbpool") + .spawn_blocking_on(move || self_.worker(id, recv), self.server.runtime()); + Ok(()) } From 71673b2a886cd7ec1693f09bff77c3f12f7a7aa2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: 
Tue, 17 Dec 2024 14:32:54 +0000 Subject: [PATCH 0421/1248] add worker_affinity feature split runtime init from main.rs Signed-off-by: Jason Volk --- Cargo.lock | 12 ++++ Cargo.toml | 3 + src/main/Cargo.toml | 57 ++++++++-------- src/main/main.rs | 29 ++------ src/main/runtime.rs | 163 ++++++++++++++++++++++++++++++++++++++++++++ src/main/server.rs | 2 +- 6 files changed, 212 insertions(+), 54 deletions(-) create mode 100644 src/main/runtime.rs diff --git a/Cargo.lock b/Cargo.lock index c86904d7..f05da4eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -631,6 +631,7 @@ dependencies = [ "conduwuit_service", "console-subscriber", "const-str", + "core_affinity", "hardened_malloc-rs", "log", "opentelemetry", @@ -933,6 +934,17 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "core_affinity" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622892f5635ce1fc38c8f16dfc938553ed64af482edb5e150bf4caedbfcb2304" +dependencies = [ + "libc", + "num_cpus", + "winapi", +] + [[package]] name = "cpufeatures" version = "0.2.16" diff --git a/Cargo.toml b/Cargo.toml index cffbebfa..05fc3bc5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -494,6 +494,9 @@ version = "1.0.89" [workspace.dependencies.bytesize] version = "1.3.0" +[workspace.dependencies.core_affinity] +version = "0.8.1" + # # Patches # diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 99d41614..fe24d4c1 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -129,6 +129,9 @@ tokio_console = [ "dep:console-subscriber", "tokio/tracing", ] +worker_affinity = [ + "dep:core_affinity", +] zstd_compression = [ "conduwuit-api/zstd_compression", "conduwuit-core/zstd_compression", @@ -144,36 +147,34 @@ conduwuit-database.workspace = true conduwuit-router.workspace = true conduwuit-service.workspace = true -tokio.workspace = true -log.workspace = 
true -tracing.workspace = true -tracing-subscriber.workspace = true clap.workspace = true -const-str.workspace = true - -opentelemetry.workspace = true -opentelemetry.optional = true -tracing-flame.workspace = true -tracing-flame.optional = true -tracing-opentelemetry.workspace = true -tracing-opentelemetry.optional = true -opentelemetry_sdk.workspace = true -opentelemetry_sdk.optional = true -opentelemetry-jaeger.workspace = true -opentelemetry-jaeger.optional = true - -sentry.workspace = true -sentry.optional = true -sentry-tracing.workspace = true -sentry-tracing.optional = true -sentry-tower.workspace = true -sentry-tower.optional = true - -tokio-metrics.workspace = true -tokio-metrics.optional = true - -console-subscriber.workspace = true console-subscriber.optional = true +console-subscriber.workspace = true +const-str.workspace = true +core_affinity.optional = true +core_affinity.workspace = true +log.workspace = true +opentelemetry-jaeger.optional = true +opentelemetry-jaeger.workspace = true +opentelemetry.optional = true +opentelemetry.workspace = true +opentelemetry_sdk.optional = true +opentelemetry_sdk.workspace = true +sentry-tower.optional = true +sentry-tower.workspace = true +sentry-tracing.optional = true +sentry-tracing.workspace = true +sentry.optional = true +sentry.workspace = true +tokio-metrics.optional = true +tokio-metrics.workspace = true +tokio.workspace = true +tracing-flame.optional = true +tracing-flame.workspace = true +tracing-opentelemetry.optional = true +tracing-opentelemetry.workspace = true +tracing-subscriber.workspace = true +tracing.workspace = true [target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies] hardened_malloc-rs.workspace = true diff --git a/src/main/main.rs b/src/main/main.rs index 0946e835..e7aaf3fc 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -2,45 +2,24 @@ pub(crate) mod clap; mod logging; mod mods; mod restart; +mod runtime; mod sentry; mod server; mod signal; extern crate 
conduwuit_core as conduwuit; -use std::{ - sync::{atomic::Ordering, Arc}, - time::Duration, -}; +use std::sync::{atomic::Ordering, Arc}; use conduwuit::{debug_info, error, rustc_flags_capture, Error, Result}; use server::Server; -use tokio::runtime; - -const WORKER_NAME: &str = "conduwuit:worker"; -const WORKER_MIN: usize = 2; -const WORKER_KEEPALIVE: u64 = 36; -const GLOBAL_QUEUE_INTERVAL: u32 = 192; -const SYSTEM_QUEUE_INTERVAL: u32 = 256; -const SYSTEM_EVENTS_PER_TICK: usize = 512; rustc_flags_capture! {} fn main() -> Result<(), Error> { let args = clap::parse(); - let runtime = runtime::Builder::new_multi_thread() - .enable_io() - .enable_time() - .thread_name(WORKER_NAME) - .worker_threads(args.worker_threads.max(WORKER_MIN)) - .thread_keep_alive(Duration::from_secs(WORKER_KEEPALIVE)) - .global_queue_interval(GLOBAL_QUEUE_INTERVAL) - .event_interval(SYSTEM_QUEUE_INTERVAL) - .max_io_events_per_tick(SYSTEM_EVENTS_PER_TICK) - .build() - .expect("built runtime"); - - let server: Arc = Server::build(&args, Some(runtime.handle()))?; + let runtime = runtime::new(&args)?; + let server = Server::new(&args, Some(runtime.handle()))?; runtime.spawn(signal::signal(server.clone())); runtime.block_on(async_main(&server))?; diff --git a/src/main/runtime.rs b/src/main/runtime.rs new file mode 100644 index 00000000..ad0c3cde --- /dev/null +++ b/src/main/runtime.rs @@ -0,0 +1,163 @@ +use std::{thread, time::Duration}; + +use conduwuit::Result; +use tokio::runtime::Builder; + +use crate::clap::Args; + +const WORKER_NAME: &str = "conduwuit:worker"; +const WORKER_MIN: usize = 2; +const WORKER_KEEPALIVE: u64 = 36; +const GLOBAL_QUEUE_INTERVAL: u32 = 192; +const KERNEL_QUEUE_INTERVAL: u32 = 256; +const KERNEL_EVENTS_PER_TICK: usize = 512; + +pub(super) fn new(args: &Args) -> Result { + let mut builder = Builder::new_multi_thread(); + + builder + .enable_io() + .enable_time() + .thread_name(WORKER_NAME) + .worker_threads(args.worker_threads.max(WORKER_MIN)) + 
.thread_keep_alive(Duration::from_secs(WORKER_KEEPALIVE)) + .max_io_events_per_tick(KERNEL_EVENTS_PER_TICK) + .event_interval(KERNEL_QUEUE_INTERVAL) + .global_queue_interval(GLOBAL_QUEUE_INTERVAL) + .on_thread_start(thread_start) + .on_thread_stop(thread_stop) + .on_thread_unpark(thread_unpark) + .on_thread_park(thread_park); + + #[cfg(tokio_unstable)] + builder + .on_task_spawn(task_spawn) + .on_task_terminate(task_terminate); + + #[cfg(tokio_unstable)] + enable_histogram(&mut builder); + + builder.build().map_err(Into::into) +} + +#[cfg(tokio_unstable)] +fn enable_histogram(builder: &mut Builder) { + use tokio::runtime::{HistogramConfiguration, LogHistogram}; + + let config = LogHistogram::builder() + .min_value(Duration::from_micros(10)) + .max_value(Duration::from_millis(1)) + .max_error(0.5) + .max_buckets(32) + .expect("erroneous histogram configuration"); + + builder + .enable_metrics_poll_time_histogram() + .metrics_poll_time_histogram_configuration(HistogramConfiguration::log(config)); +} + +#[tracing::instrument( + name = "fork", + level = "debug", + skip_all, + fields( + id = ?thread::current().id(), + name = %thread::current().name().unwrap_or("None"), + ), +)] +fn thread_start() { + #[cfg(feature = "worker_affinity")] + set_worker_affinity(); +} + +#[cfg(feature = "worker_affinity")] +fn set_worker_affinity() { + use std::sync::{ + atomic::{AtomicUsize, Ordering}, + LazyLock, + }; + + static CORES_OCCUPIED: AtomicUsize = AtomicUsize::new(0); + static CORES_AVAILABLE: LazyLock>> = LazyLock::new(|| { + core_affinity::get_core_ids().map(|mut cores| { + cores.sort_unstable(); + cores + }) + }); + + let Some(cores) = CORES_AVAILABLE.as_ref() else { + return; + }; + + if thread::current().name() != Some(WORKER_NAME) { + return; + } + + let handle = tokio::runtime::Handle::current(); + let num_workers = handle.metrics().num_workers(); + let i = CORES_OCCUPIED.fetch_add(1, Ordering::Relaxed); + if i >= num_workers { + return; + } + + let Some(id) = 
cores.get(i) else { + return; + }; + + let _set = core_affinity::set_for_current(*id); +} + +#[tracing::instrument( + name = "join", + level = "debug", + skip_all, + fields( + id = ?thread::current().id(), + name = %thread::current().name().unwrap_or("None"), + ), +)] +fn thread_stop() {} + +#[tracing::instrument( + name = "work", + level = "trace", + skip_all, + fields( + id = ?thread::current().id(), + name = %thread::current().name().unwrap_or("None"), + ), +)] +fn thread_unpark() {} + +#[tracing::instrument( + name = "park", + level = "trace", + skip_all, + fields( + id = ?thread::current().id(), + name = %thread::current().name().unwrap_or("None"), + ), +)] +fn thread_park() {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "spawn", + level = "trace", + skip_all, + fields( + id = %meta.id(), + ), +)] +fn task_spawn(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "finish", + level = "trace", + skip_all, + fields( + id = %meta.id() + ), +)] +fn task_terminate(meta: &tokio::runtime::TaskMeta<'_>) {} diff --git a/src/main/server.rs b/src/main/server.rs index 00c7a6cc..bc2cff85 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -23,7 +23,7 @@ pub(crate) struct Server { } impl Server { - pub(crate) fn build( + pub(crate) fn new( args: &Args, runtime: Option<&runtime::Handle>, ) -> Result, Error> { From 6c42ac2726fe8fa0fbb09ac9ffd9753965459881 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 17 Dec 2024 22:34:09 +0000 Subject: [PATCH 0422/1248] add num_workers() convenience to core metrics Signed-off-by: Jason Volk --- src/core/metrics/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/core/metrics/mod.rs b/src/core/metrics/mod.rs index d5f937d7..f2022166 100644 --- a/src/core/metrics/mod.rs +++ b/src/core/metrics/mod.rs @@ -66,8 +66,16 @@ impl Metrics { .expect("next interval") } + #[inline] pub fn task_root(&self) -> Option<&TaskMonitor> { self.task_monitor.as_ref() } + 
#[inline] + pub fn num_workers(&self) -> usize { + self.runtime_metrics() + .map_or(0, runtime::RuntimeMetrics::num_workers) + } + + #[inline] pub fn runtime_metrics(&self) -> Option<&runtime::RuntimeMetrics> { self.runtime_metrics.as_ref() } From 7b8320e0eb369e74149a30e1b55fbe871a075d0b Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 18 Dec 2024 13:41:30 -0500 Subject: [PATCH 0423/1248] bump rocksdb to v9.9.3 Signed-off-by: strawberry --- Cargo.lock | 8 ++++---- deps/rust-rocksdb/Cargo.toml | 2 +- flake.lock | 8 ++++---- flake.nix | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f05da4eb..ad22c8c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3392,8 +3392,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.30.0+9.8.4" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=fcb772d84572c63b1defc57baf9cdbdf5577c6a4#fcb772d84572c63b1defc57baf9cdbdf5577c6a4" +version = "0.31.0+9.9.3" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=123d6302fed23fc706344becb2f19623265a83f8#123d6302fed23fc706344becb2f19623265a83f8" dependencies = [ "bindgen", "bzip2-sys", @@ -3409,8 +3409,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.34.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=fcb772d84572c63b1defc57baf9cdbdf5577c6a4#fcb772d84572c63b1defc57baf9cdbdf5577c6a4" +version = "0.35.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=123d6302fed23fc706344becb2f19623265a83f8#123d6302fed23fc706344becb2f19623265a83f8" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index e89c26d7..f06c44e8 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" 
-rev = "fcb772d84572c63b1defc57baf9cdbdf5577c6a4" +rev = "123d6302fed23fc706344becb2f19623265a83f8" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index 28437890..94cef4dd 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1733704887, - "narHash": "sha256-4ijNmXACyTJWKRcTdlgObdbOVm2oN3Zefg55/4UPIL0=", + "lastModified": 1734469478, + "narHash": "sha256-IcQ4N8xADYal79K+ONmNq4RLlIwdgUqgrVzgNgiIaG8=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "9c656e58c9f969aea28f25b22bc52ea03109677a", + "rev": "8b4808e7de2fbb5d119d8d72cdca76d8ab84bc47", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.8.4", + "ref": "v9.9.3", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 08120ffd..d8ad47a8 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.8.4"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 4d46df2af5f678bdf1566b9a9509f8b628c58921 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 18 Dec 2024 03:29:42 +0000 Subject: [PATCH 0424/1248] abort tasks for non-async pool shudown Signed-off-by: Jason Volk --- src/database/engine.rs | 3 ++- src/database/pool.rs | 14 ++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index 63a6087d..73ea559d 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -347,7 +347,8 @@ impl Drop for Engine { fn drop(&mut self) { const BLOCKING: bool = true; - debug_assert!(!self.pool.close(), "request pool was not closed"); + debug!("Closing frontend pool"); 
+ self.pool.close(); debug!("Waiting for background tasks to finish..."); self.db.cancel_all_background_work(BLOCKING); diff --git a/src/database/pool.rs b/src/database/pool.rs index 65012527..28eb38bd 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -2,7 +2,7 @@ use std::{ mem::take, sync::{ atomic::{AtomicUsize, Ordering}, - Arc, + Arc, Mutex, }, }; @@ -11,7 +11,7 @@ use conduwuit::{debug, debug_warn, defer, err, implement, result::DebugInspect, use futures::{channel::oneshot, TryFutureExt}; use oneshot::Sender as ResultSender; use rocksdb::Direction; -use tokio::{sync::Mutex, task::JoinSet}; +use tokio::task::JoinSet; use crate::{keyval::KeyBuf, stream, Handle, Map}; @@ -79,7 +79,7 @@ pub(crate) async fn new(server: &Arc, opts: &Opts) -> Result> pub(crate) async fn shutdown(self: &Arc) { self.close(); - let workers = take(&mut *self.workers.lock().await); + let workers = take(&mut *self.workers.lock().expect("locked")); debug!(workers = workers.len(), "Waiting for workers to join..."); workers.join_all().await; @@ -92,7 +92,13 @@ pub(crate) fn close(&self) -> bool { return false; } + let mut workers = take(&mut *self.workers.lock().expect("locked")); + debug!(workers = workers.len(), "Waiting for workers to join..."); + workers.abort_all(); + drop(workers); + std::thread::yield_now(); + debug_assert!(self.queue.is_empty(), "channel is not empty"); debug!( senders = self.queue.sender_count(), receivers = self.queue.receiver_count(), @@ -104,7 +110,7 @@ pub(crate) fn close(&self) -> bool { #[implement(Pool)] async fn spawn_until(self: &Arc, recv: Receiver, max: usize) -> Result { - let mut workers = self.workers.lock().await; + let mut workers = self.workers.lock().expect("locked"); while workers.len() < max { self.spawn_one(&mut workers, recv.clone())?; } From 60a952508e0591c380e293d71c442a02584ae08d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 18 Dec 2024 03:32:58 +0000 Subject: [PATCH 0425/1248] use macro for error constructions 
Signed-off-by: Jason Volk --- src/service/rooms/alias/mod.rs | 35 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 0790c376..9dcf9d8e 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -5,12 +5,11 @@ use std::sync::Arc; use conduwuit::{ err, utils::{stream::TryIgnore, ReadyExt}, - Err, Error, Result, + Err, Result, }; use database::{Deserialized, Ignore, Interfix, Map}; -use futures::{Stream, StreamExt}; +use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ - api::client::error::ErrorKind, events::{ room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, StateEventType, @@ -72,10 +71,7 @@ impl Service { if alias == self.services.globals.admin_alias && user_id != self.services.globals.server_user { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Only the server user can set this alias", - )); + return Err!(Request(Forbidden("Only the server user can set this alias"))); } // Comes first as we don't want a stuck alias @@ -220,7 +216,7 @@ impl Service { } // Checking whether the user is able to change canonical aliases of the room - if let Ok(content) = self + if let Ok(power_levels) = self .services .state_accessor .room_state_get_content::( @@ -228,10 +224,12 @@ impl Service { &StateEventType::RoomPowerLevels, "", ) + .map_ok(RoomPowerLevels::from) .await { - return Ok(RoomPowerLevels::from(content) - .user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)); + return Ok( + power_levels.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias) + ); } // If there is no power levels event, only the room creator can change @@ -291,18 +289,12 @@ impl Service { .globals .server_is_ours(room_alias.server_name()) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Alias is from another server.", - )); + return Err!(Request(InvalidParam("Alias is from another 
server."))); } - if let Some(ref info) = appservice_info { + if let Some(info) = appservice_info { if !info.aliases.is_match(room_alias.as_str()) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "Room alias is not in namespace.", - )); + return Err!(Request(Exclusive("Room alias is not in namespace."))); } } else if self .services @@ -310,10 +302,7 @@ impl Service { .is_exclusive_alias(room_alias) .await { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "Room alias reserved by appservice.", - )); + return Err!(Request(Exclusive("Room alias reserved by appservice."))); } Ok(()) From 802395bdb7f83b04a54c0d4b7a356ae2c4919254 Mon Sep 17 00:00:00 2001 From: "admin@dimensionproject.net" Date: Wed, 18 Dec 2024 20:46:01 -0500 Subject: [PATCH 0426/1248] add password argument to reset-password admin cmd Signed-off-by: strawberry --- src/admin/user/commands.rs | 8 ++++++-- src/admin/user/mod.rs | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 803fbcf2..1cbbf856 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -242,7 +242,11 @@ pub(super) async fn deactivate( } #[admin_command] -pub(super) async fn reset_password(&self, username: String) -> Result { +pub(super) async fn reset_password( + &self, + username: String, + password: Option, +) -> Result { let user_id = parse_local_user_id(self.services, &username)?; if user_id == self.services.globals.server_user { @@ -252,7 +256,7 @@ pub(super) async fn reset_password(&self, username: String) -> Result, }, /// - Deactivate a user From 8f73caae0b673ac3a69a0ef5f47b4c8f60ea9481 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 19 Dec 2024 18:56:58 +0000 Subject: [PATCH 0427/1248] fix github actions runs on forks This commit contains two separate fixes: 1) use lowercased github username/repository for the docker repository name. 
This is because image registries only accept lowercase in image repository names, but github stores the repository and username in a case-sensitive manner. This broke image uploads for me, as my username has uppercase chars. 2) change run conditions for some steps. It will no longer attempt to set up SSH web publishing if the SSH private key is not set. It will also run the image registry upload steps if registry usernames are missing, instead skipping individual uploads where the token for that registry is missing. Finally, it simplifies the sccache run conditions to use the `SCCACHE_GHA_ENABLED` env variable, rather than duplicating that logic. Signed-off-by: Jade Ellis --- .github/workflows/ci.yml | 179 +++++++++++++++++++++++++-------------- 1 file changed, 114 insertions(+), 65 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d0d78238..6c2c2822 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,7 +64,9 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Setup SSH web publish - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + env: + web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | mkdir -p -v ~/.ssh @@ -184,7 +186,7 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') + if: (env.SCCACHE_GHA_ENABLED == 'true') uses: mozilla-actions/sccache-action@main 
with: version: "v0.8.2" @@ -281,7 +283,9 @@ jobs: persist-credentials: false - name: Setup SSH web publish - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + env: + web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | mkdir -p -v ~/.ssh @@ -360,7 +364,7 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') + if: (env.SCCACHE_GHA_ENABLED == 'true') uses: mozilla-actions/sccache-action@main with: version: "v0.8.2" @@ -492,7 +496,7 @@ jobs: fi - name: Upload static-${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! 
-z $WEB_UPLOAD_SSH_USERNAME ]; then scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }} @@ -506,7 +510,7 @@ jobs: fi - name: Upload static deb ${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}.deb @@ -528,14 +532,14 @@ jobs: compression-level: 0 - name: Upload static-${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! 
-z $WEB_UPLOAD_SSH_USERNAME ]; then scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }}-debug fi - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}-debug.deb @@ -584,14 +588,14 @@ jobs: fi - name: Upload OCI image ${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! 
-z $WEB_UPLOAD_SSH_USERNAME ]; then scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}-debug.tar.gz @@ -610,7 +614,9 @@ jobs: persist-credentials: false - name: Setup SSH web publish - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + env: + web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | mkdir -p -v ~/.ssh @@ -647,7 +653,7 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') + if: (env.SCCACHE_GHA_ENABLED == 'true') uses: mozilla-actions/sccache-action@main # use 
rust-cache @@ -711,24 +717,35 @@ jobs: name: conduwuit-macos-arm64 path: conduwuit-macos-arm64 if-no-files-found: error - + variables: + outputs: + github_repository: ${{ steps.var.outputs.github_repository }} + runs-on: "ubuntu-latest" + steps: + - name: Setting global variables + uses: actions/github-script@v7 + id: var + with: + script: | + console.log('${{ github.repository }}'.toLowerCase()) + core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase()) docker: name: Docker publish runs-on: ubuntu-24.04 - needs: build + needs: [build, variables] permissions: packages: write contents: read - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && github.event.pull_request.user.login != 'renovate[bot]' + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' env: - DOCKER_ARM64: docker.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 - DOCKER_AMD64: docker.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 - DOCKER_TAG: docker.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - DOCKER_BRANCH: docker.io/${{ github.repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} - GHCR_ARM64: ghcr.io/${{ 
github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 - GHCR_AMD64: ghcr.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 - GHCR_TAG: ghcr.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - GHCR_BRANCH: ghcr.io/${{ github.repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} + DOCKER_ARM64: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 + DOCKER_AMD64: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 + DOCKER_TAG: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} + DOCKER_BRANCH: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} + GHCR_ARM64: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', 
github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 + GHCR_AMD64: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 + GHCR_TAG: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} + GHCR_BRANCH: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} GLCR_ARM64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 GLCR_AMD64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 GLCR_TAG: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} @@ -737,6 +754,12 @@ jobs: DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} steps: + - name: log variables + uses: actions/github-script@v7 + id: var + with: + script: | + console.log(`${{ toJson(needs) }}`) - name: Login to GitHub Container Registry uses: docker/login-action@v3 with: @@ -771,95 +794,121 @@ jobs: mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz - name: Load and push amd64 image - if: ${{ (vars.DOCKER_USERNAME != '') && 
(env.DOCKERHUB_TOKEN != '') }} run: | docker load -i oci-image-amd64.tar.gz - docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64} + if [ ! -z $DOCKERHUB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64} + docker push ${DOCKER_AMD64} + fi docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64} - docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64} - docker push ${DOCKER_AMD64} docker push ${GHCR_AMD64} - docker push ${GLCR_AMD64} + if [ ! -z $GITLAB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64} + docker push ${GLCR_AMD64} + fi - name: Load and push arm64 image - if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | docker load -i oci-image-arm64v8.tar.gz - docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64} + if [ ! -z $DOCKERHUB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64} + docker push ${DOCKER_ARM64} + fi docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64} - docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64} - docker push ${DOCKER_ARM64} docker push ${GHCR_ARM64} - docker push ${GLCR_ARM64} + if [ ! -z $GITLAB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64} + docker push ${GLCR_ARM64} + fi - name: Load and push amd64 debug image - if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | docker load -i oci-image-amd64-debug.tar.gz - docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64}-debug + if [ ! -z $DOCKERHUB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64}-debug + docker push ${DOCKER_AMD64}-debug + fi docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64}-debug - docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64}-debug - docker push ${DOCKER_AMD64}-debug docker push ${GHCR_AMD64}-debug - docker push ${GLCR_AMD64}-debug + if [ ! 
-z $GITLAB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64}-debug + docker push ${GLCR_AMD64}-debug + fi - name: Load and push arm64 debug image - if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | docker load -i oci-image-arm64v8-debug.tar.gz - docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64}-debug + if [ ! -z $DOCKERHUB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64}-debug + docker push ${DOCKER_ARM64}-debug + fi docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64}-debug - docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64}-debug - docker push ${DOCKER_ARM64}-debug docker push ${GHCR_ARM64}-debug - docker push ${GLCR_ARM64}-debug + if [ ! -z $GITLAB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64}-debug + docker push ${GLCR_ARM64}-debug + fi - name: Create Docker combined manifests run: | # Dockerhub Container Registry - docker manifest create ${DOCKER_TAG} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} - docker manifest create ${DOCKER_BRANCH} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} + if [ ! -z $DOCKERHUB_TOKEN ]; then + docker manifest create ${DOCKER_TAG} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} + docker manifest create ${DOCKER_BRANCH} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} + fi # GitHub Container Registry docker manifest create ${GHCR_TAG} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} docker manifest create ${GHCR_BRANCH} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} # GitLab Container Registry - docker manifest create ${GLCR_TAG} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} - docker manifest create ${GLCR_BRANCH} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} + if [ ! 
-z $GITLAB_TOKEN ]; then + docker manifest create ${GLCR_TAG} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} + docker manifest create ${GLCR_BRANCH} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} + fi - name: Create Docker combined debug manifests run: | # Dockerhub Container Registry - docker manifest create ${DOCKER_TAG}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug - docker manifest create ${DOCKER_BRANCH}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug + if [ ! -z $DOCKERHUB_TOKEN ]; then + docker manifest create ${DOCKER_TAG}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug + docker manifest create ${DOCKER_BRANCH}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug + fi # GitHub Container Registry docker manifest create ${GHCR_TAG}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug docker manifest create ${GHCR_BRANCH}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug # GitLab Container Registry - docker manifest create ${GLCR_TAG}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug - docker manifest create ${GLCR_BRANCH}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug + if [ ! -z $GITLAB_TOKEN ]; then + docker manifest create ${GLCR_TAG}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug + docker manifest create ${GLCR_BRANCH}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug + fi - name: Push manifests to Docker registries - if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | - docker manifest push ${DOCKER_TAG} - docker manifest push ${DOCKER_BRANCH} + if [ ! 
-z $DOCKERHUB_TOKEN ]; then + docker manifest push ${DOCKER_TAG} + docker manifest push ${DOCKER_BRANCH} + docker manifest push ${DOCKER_TAG}-debug + docker manifest push ${DOCKER_BRANCH}-debug + fi docker manifest push ${GHCR_TAG} docker manifest push ${GHCR_BRANCH} - docker manifest push ${GLCR_TAG} - docker manifest push ${GLCR_BRANCH} - docker manifest push ${DOCKER_TAG}-debug - docker manifest push ${DOCKER_BRANCH}-debug docker manifest push ${GHCR_TAG}-debug docker manifest push ${GHCR_BRANCH}-debug - docker manifest push ${GLCR_TAG}-debug - docker manifest push ${GLCR_BRANCH}-debug + if [ ! -z $GITLAB_TOKEN ]; then + docker manifest push ${GLCR_TAG} + docker manifest push ${GLCR_BRANCH} + docker manifest push ${GLCR_TAG}-debug + docker manifest push ${GLCR_BRANCH}-debug + fi - name: Add Image Links to Job Summary - if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} run: | - echo "- \`docker pull ${DOCKER_TAG}\`" >> $GITHUB_STEP_SUMMARY + if [ ! -z $DOCKERHUB_TOKEN ]; then + echo "- \`docker pull ${DOCKER_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + fi echo "- \`docker pull ${GHCR_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY echo "- \`docker pull ${GHCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + if [ ! 
-z $GITLAB_TOKEN ]; then + echo "- \`docker pull ${GLCR_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + fi \ No newline at end of file From 0602e38ada1ef310671f4806b920732b76f0aa0a Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 19 Dec 2024 19:07:50 +0000 Subject: [PATCH 0428/1248] remove logging --- .github/workflows/ci.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6c2c2822..ec06323a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -727,7 +727,6 @@ jobs: id: var with: script: | - console.log('${{ github.repository }}'.toLowerCase()) core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase()) docker: name: Docker publish @@ -754,12 +753,6 @@ jobs: DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} steps: - - name: log variables - uses: actions/github-script@v7 - id: var - with: - script: | - console.log(`${{ toJson(needs) }}`) - name: Login to GitHub Container Registry uses: docker/login-action@v3 with: From 025950139e733907a7b5ccd80a056378d6fb0c65 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 19 Dec 2024 22:11:26 +0000 Subject: [PATCH 0429/1248] do not try to push to GHCR on pull requests from forks --- .github/workflows/ci.yml | 53 ++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ec06323a..5b20a43e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -752,6 +752,7 @@ jobs: DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} + GHCR_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}" steps: - name: Login to GitHub Container Registry uses: docker/login-action@v3 @@ -793,8 +794,10 @@ jobs: docker tag $(docker 
images -q conduwuit:main) ${DOCKER_AMD64} docker push ${DOCKER_AMD64} fi - docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64} - docker push ${GHCR_AMD64} + if [ $GHCR_ENABLED = "true" ]; then + docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64} + docker push ${GHCR_AMD64} + fi if [ ! -z $GITLAB_TOKEN ]; then docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64} docker push ${GLCR_AMD64} @@ -807,8 +810,10 @@ jobs: docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64} docker push ${DOCKER_ARM64} fi - docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64} - docker push ${GHCR_ARM64} + if [ $GHCR_ENABLED = "true" ]; then + docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64} + docker push ${GHCR_ARM64} + fi if [ ! -z $GITLAB_TOKEN ]; then docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64} docker push ${GLCR_ARM64} @@ -821,8 +826,10 @@ jobs: docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64}-debug docker push ${DOCKER_AMD64}-debug fi - docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64}-debug - docker push ${GHCR_AMD64}-debug + if [ $GHCR_ENABLED = "true" ]; then + docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64}-debug + docker push ${GHCR_AMD64}-debug + fi if [ ! -z $GITLAB_TOKEN ]; then docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64}-debug docker push ${GLCR_AMD64}-debug @@ -835,8 +842,10 @@ jobs: docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64}-debug docker push ${DOCKER_ARM64}-debug fi - docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64}-debug - docker push ${GHCR_ARM64}-debug + if [ $GHCR_ENABLED = "true" ]; then + docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64}-debug + docker push ${GHCR_ARM64}-debug + fi if [ ! 
-z $GITLAB_TOKEN ]; then docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64}-debug docker push ${GLCR_ARM64}-debug @@ -850,8 +859,10 @@ jobs: docker manifest create ${DOCKER_BRANCH} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} fi # GitHub Container Registry - docker manifest create ${GHCR_TAG} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} - docker manifest create ${GHCR_BRANCH} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} + if [ $GHCR_ENABLED = "true" ]; then + docker manifest create ${GHCR_TAG} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} + docker manifest create ${GHCR_BRANCH} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} + fi # GitLab Container Registry if [ ! -z $GITLAB_TOKEN ]; then docker manifest create ${GLCR_TAG} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} @@ -866,8 +877,10 @@ jobs: docker manifest create ${DOCKER_BRANCH}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug fi # GitHub Container Registry - docker manifest create ${GHCR_TAG}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug - docker manifest create ${GHCR_BRANCH}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug + if [ $GHCR_ENABLED = "true" ]; then + docker manifest create ${GHCR_TAG}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug + docker manifest create ${GHCR_BRANCH}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug + fi # GitLab Container Registry if [ ! 
-z $GITLAB_TOKEN ]; then docker manifest create ${GLCR_TAG}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug @@ -882,10 +895,12 @@ jobs: docker manifest push ${DOCKER_TAG}-debug docker manifest push ${DOCKER_BRANCH}-debug fi - docker manifest push ${GHCR_TAG} - docker manifest push ${GHCR_BRANCH} - docker manifest push ${GHCR_TAG}-debug - docker manifest push ${GHCR_BRANCH}-debug + if [ $GHCR_ENABLED = "true" ]; then + docker manifest push ${GHCR_TAG} + docker manifest push ${GHCR_BRANCH} + docker manifest push ${GHCR_TAG}-debug + docker manifest push ${GHCR_BRANCH}-debug + fi if [ ! -z $GITLAB_TOKEN ]; then docker manifest push ${GLCR_TAG} docker manifest push ${GLCR_BRANCH} @@ -899,8 +914,10 @@ jobs: echo "- \`docker pull ${DOCKER_TAG}\`" >> $GITHUB_STEP_SUMMARY echo "- \`docker pull ${DOCKER_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY fi - echo "- \`docker pull ${GHCR_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + if [ $GHCR_ENABLED = "true" ]; then + echo "- \`docker pull ${GHCR_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + fi if [ ! 
-z $GITLAB_TOKEN ]; then echo "- \`docker pull ${GLCR_TAG}\`" >> $GITHUB_STEP_SUMMARY echo "- \`docker pull ${GLCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY From 5b5735f653e1169ebf5eeaa7add51070fd7cd1cc Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 19 Dec 2024 20:19:05 +0000 Subject: [PATCH 0430/1248] update ruwuma --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 4 ++-- src/service/sending/sender.rs | 14 +++++--------- 3 files changed, 20 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ad22c8c2..d65ae18f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3174,7 +3174,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "assign", "js_int", @@ -3196,7 +3196,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "js_int", "ruma-common", @@ -3208,7 +3208,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "as_variant", "assign", @@ -3231,7 +3231,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "as_variant", "base64 0.22.1", @@ -3261,7 +3261,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3285,7 +3285,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "bytes", "http", @@ -3303,7 +3303,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "js_int", "thiserror 2.0.7", @@ -3312,7 +3312,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "js_int", 
"ruma-common", @@ -3322,7 +3322,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3337,7 +3337,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "js_int", "ruma-common", @@ -3349,7 +3349,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "headers", "http", @@ -3362,7 +3362,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3378,7 +3378,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2#a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 05fc3bc5..15f054bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -335,7 +335,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "a204cb56dbc20f72a1cbd0e9d6c827bbfd4082f2" +rev = "112ccc24cb14de26757715d611285d0806d5d91f" features = [ "compat", "rand", @@ -351,7 +351,6 @@ features = [ "compat-upload-signatures", "identifiers-validation", "unstable-unspecified", - "unstable-msc2409", "unstable-msc2448", "unstable-msc2666", "unstable-msc2867", @@ -367,6 +366,7 @@ features = [ "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", + "unstable-msc4203", # sending to-device events to appservices "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", ] diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 1f462f39..f6b83e83 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -21,7 +21,7 @@ use futures::{ }; use ruma::{ api::{ - appservice::event::push_events::v1::Edu as RumaEdu, + appservice::event::push_events::v1::EphemeralData, federation::transactions::{ edu::{ DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, @@ -587,7 +587,7 @@ impl Service { .filter(|event| matches!(event, SendingEvent::Pdu(_))) .count(), ); - let mut edu_jsons: Vec = Vec::with_capacity( + let mut edu_jsons: Vec = Vec::with_capacity( events .iter() .filter(|event| matches!(event, SendingEvent::Edu(_))) @@ -600,16 +600,12 @@ impl Service { pdu_jsons.push(pdu.to_room_event()); } }, - | SendingEvent::Edu(edu) => { - if appservice - .receive_ephemeral - .is_some_and(|receive_edus| receive_edus) - { + | SendingEvent::Edu(edu) => + if appservice.receive_ephemeral { if let Ok(edu) = 
serde_json::from_slice(edu) { edu_jsons.push(edu); } - } - }, + }, | SendingEvent::Flush => {}, // flush only; no new content } } From 7f645ff0e9111cc6e05e3abc1abad7d0b1f6a5a9 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Thu, 19 Dec 2024 23:06:01 +0000 Subject: [PATCH 0431/1248] Make some doc changes to `admin media delete-past-remote-media` Adjust before/after doc to clarify internationally Courtesy of @Aranjedeath Run `cargo fmt`, properly Fix stupid doc command issue Signed-off-by: strawberry --- src/admin/media/mod.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index 046be556..d212aab4 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -26,23 +26,23 @@ pub(super) enum MediaCommand { /// filesystem. This will always ignore errors. DeleteList, - /// - Deletes all remote media in the last/after "X" time using filesystem - /// metadata first created at date, or fallback to last modified date. - /// This will always ignore errors by default. - /// - /// Synapse + /// - Deletes all remote (and optionally local) media created before or + /// after \[duration] time using filesystem metadata first created at + /// date, or fallback to last modified date. This will always ignore + /// errors by default. DeletePastRemoteMedia { - /// - The duration (at or after/before), e.g. "5m" to delete all media - /// in the past or up to 5 minutes + /// - The relative time (e.g. 
30s, 5m, 7d) within which to search duration: String, + /// - Only delete media created more recently than \[duration] ago #[arg(long, short)] before: bool, + /// - Only delete media created after \[duration] ago #[arg(long, short)] after: bool, - /// Long argument to delete local media + /// - Long argument to additionally delete local media #[arg(long)] yes_i_want_to_delete_local_media: bool, }, From c53e9e07f23756d92ecd124da10a9a970756977c Mon Sep 17 00:00:00 2001 From: Neek <33990922+neektwothousand@users.noreply.github.com> Date: Fri, 27 Dec 2024 19:41:16 +0100 Subject: [PATCH 0432/1248] update cohost link added an archive.org link to the cohost post as it will fully shut down at the end of the year --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e6ad1981..796f96f8 100644 --- a/README.md +++ b/README.md @@ -97,7 +97,7 @@ heavily appreciated! 💜🥺 Original repo and Matrix room picture was from bran (<3). Current banner image and logo is directly from [this cohost -post](https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). +post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). #### Is it conduwuit or Conduwuit? From 5bce0a3a4630c234fc7c64eba10a49127bd77d65 Mon Sep 17 00:00:00 2001 From: Neil Svedberg Date: Tue, 31 Dec 2024 17:19:28 -0500 Subject: [PATCH 0433/1248] Improve documentation for configuration Although the configuration file was mostly wrapped to a line-width of 80, some lines were wrapped slightly shorter. I fixed this. In general, all sentences were changed to start with a capital letter and end with a period or other punctuation mark. Many of the documentation commets read as, "config option to do XYZ". I shortened these to simply "do XYZ". 
--- clippy.toml | 2 +- conduwuit-example.toml | 487 +++++++++++++++++++++------------------- src/core/config/mod.rs | 499 ++++++++++++++++++++++------------------- 3 files changed, 530 insertions(+), 458 deletions(-) diff --git a/clippy.toml b/clippy.toml index d9dd99ca..42427101 100644 --- a/clippy.toml +++ b/clippy.toml @@ -15,5 +15,5 @@ disallowed-macros = [ ] disallowed-methods = [ - { path = "tokio::spawn", reason = "use and pass conduuwit_core::server::Server::runtime() to spawn from"}, + { path = "tokio::spawn", reason = "use and pass conduuwit_core::server::Server::runtime() to spawn from" }, ] diff --git a/conduwuit-example.toml b/conduwuit-example.toml index e179ce30..a82d8f69 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1,25 +1,28 @@ ### conduwuit Configuration ### -### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL -### BE OVERWRITTEN! +### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE +### OVERWRITTEN! ### -### You should rename this file before configuring your server. Changes -### to documentation and defaults can be contributed in source code at +### You should rename this file before configuring your server. Changes to +### documentation and defaults can be contributed in source code at ### src/core/config/mod.rs. This file is generated when building. ### ### Any values pre-populated are the default values for said config option. ### ### At the minimum, you MUST edit all the config options to your environment ### that say "YOU NEED TO EDIT THIS". -### See https://conduwuit.puppyirl.gay/configuration.html for ways to -### configure conduwuit +### +### For more information, see: +### https://conduwuit.puppyirl.gay/configuration.html [global] # The server_name is the pretty name of this server. It is used as a # suffix for user and room IDs/aliases. 
# -# See the docs for reverse proxying and delegation: https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy +# See the docs for reverse proxying and delegation: +# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy +# # Also see the `[global.well_known]` config section at the very bottom. # # Examples of delegation: @@ -33,7 +36,7 @@ # #server_name = -# default address (IPv4 or IPv6) conduwuit will listen on. +# The default address (IPv4 or IPv6) conduwuit will listen on. # # If you are using Docker or a container NAT networking setup, this must # be "0.0.0.0". @@ -43,22 +46,26 @@ # #address = ["127.0.0.1", "::1"] -# The port(s) conduwuit will be running on. +# The port(s) conduwuit will listen on. # -# See https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy for reverse proxying. +# For reverse proxying, see: +# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy # -# Docker users: Don't change this, you'll need to map an external port to -# this. +# If you are using Docker, don't change this, you'll need to map an +# external port to this. # # To listen on multiple ports, specify a vector e.g. [8080, 8448] # #port = 8008 -# Uncomment unix_socket_path to listen on a UNIX socket at the specified -# path. If listening on a UNIX socket, you MUST remove/comment the -# 'address' key if definedm AND add your reverse proxy to the 'conduwuit' -# group, unless world RW permissions are specified with unix_socket_perms -# (666 minimum). +# The UNIX socket conduwuit will listen on. +# +# conduwuit cannot listen on both an IP address and a UNIX socket. If +# listening on a UNIX socket, you MUST remove/comment the `address` key. +# +# Remember to make sure that your reverse proxy has access to this socket +# file, either by adding your reverse proxy to the 'conduwuit' group or +# granting world R/W permissions with `unix_socket_perms` (666 minimum). 
# # example: "/run/conduwuit/conduwuit.sock" # @@ -69,8 +76,7 @@ #unix_socket_perms = 660 # This is the only directory where conduwuit will save its data, including -# media. -# Note: this was previously "/var/lib/matrix-conduit" +# media. Note: this was previously "/var/lib/matrix-conduit". # # YOU NEED TO EDIT THIS. # @@ -82,7 +88,8 @@ # API. To use this, set a database backup path that conduwuit can write # to. # -# See https://conduwuit.puppyirl.gay/maintenance.html#backups for more information. +# For more information, see: +# https://conduwuit.puppyirl.gay/maintenance.html#backups # # example: "/opt/conduwuit-db-backups" # @@ -103,14 +110,15 @@ # Similar to the individual LRU caches, this is scaled up with your CPU # core count. # -# This defaults to 128.0 + (64.0 * CPU core count) +# This defaults to 128.0 + (64.0 * CPU core count). # #db_cache_capacity_mb = varies by system -# Option to control adding arbitrary text to the end of the user's -# displayname upon registration with a space before the text. This was the -# lightning bolt emoji option, just replaced with support for adding your -# own custom text or emojis. To disable, set this to "" (an empty string). +# Text which will be added to the end of the user's displayname upon +# registration with a space before the text. In Conduit, this was the +# lightning bolt emoji. +# +# To disable, set this to "" (an empty string). # # The default is the trans pride flag. # @@ -191,8 +199,8 @@ # Maximum entries stored in DNS memory-cache. The size of an entry may # vary so please take care if raising this value excessively. Only -# decrease this when using an external DNS cache. Please note -# that systemd-resolved does *not* count as an external cache, even when +# decrease this when using an external DNS cache. Please note that +# systemd-resolved does *not* count as an external cache, even when # configured to do so. 
# #dns_cache_entries = 32768 @@ -207,8 +215,8 @@ # Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. # This value is critical for the server to federate efficiently. -# NXDOMAIN's are assumed to not be returning to the federation -# and aggressively cached rather than constantly rechecked. +# NXDOMAIN's are assumed to not be returning to the federation and +# aggressively cached rather than constantly rechecked. # # Defaults to 3 days as these are *very rarely* false negatives. # @@ -237,10 +245,12 @@ # #query_all_nameservers = true -# Enables using *only* TCP for querying your specified nameservers instead +# Enable using *only* TCP for querying your specified nameservers instead # of UDP. # -# If you are running conduwuit in a container environment, this config option may need to be enabled. See https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker for more details. +# If you are running conduwuit in a container environment, this config +# option may need to be enabled. For more details, see: +# https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker # #query_over_tcp_only = false @@ -288,8 +298,8 @@ # Default/base request total timeout (seconds). The time limit for a whole # request. This is set very high to not cancel healthy requests while -# serving as a backstop. This is used only by URL previews and -# update/news endpoint checks. +# serving as a backstop. This is used only by URL previews and update/news +# endpoint checks. # #request_total_timeout = 320 @@ -304,11 +314,11 @@ # #request_idle_per_host = 1 -# Federation well-known resolution connection timeout (seconds) +# Federation well-known resolution connection timeout (seconds). # #well_known_conn_timeout = 6 -# Federation HTTP well-known resolution request timeout (seconds) +# Federation HTTP well-known resolution request timeout (seconds). 
# #well_known_timeout = 10 @@ -318,12 +328,12 @@ # #federation_timeout = 300 -# Federation client idle connection pool timeout (seconds) +# Federation client idle connection pool timeout (seconds). # #federation_idle_timeout = 25 # Federation client max idle connections per host. Defaults to 1 as -# generally the same open connection can be re-used +# generally the same open connection can be re-used. # #federation_idle_per_host = 1 @@ -332,11 +342,11 @@ # #sender_timeout = 180 -# Federation sender idle connection pool timeout (seconds) +# Federation sender idle connection pool timeout (seconds). # #sender_idle_timeout = 180 -# Federation sender transaction retry backoff limit (seconds) +# Federation sender transaction retry backoff limit (seconds). # #sender_retry_backoff_limit = 86400 @@ -345,11 +355,11 @@ # #appservice_timeout = 35 -# Appservice URL idle connection pool timeout (seconds) +# Appservice URL idle connection pool timeout (seconds). # #appservice_idle_timeout = 300 -# Notification gateway pusher idle connection pool timeout +# Notification gateway pusher idle connection pool timeout. # #pusher_idle_timeout = 15 @@ -357,9 +367,8 @@ # server. # # If set to true without a token configured, users can register with no -# form of 2nd-step only if you set -# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to -# true in your config. +# form of 2nd-step only if you set the following option to true: +# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` # # If you would like registration only via token reg, please configure # `registration_token` or `registration_token_file`. @@ -413,8 +422,8 @@ # Set this to true to allow your server's public room directory to be # federated. Set this to false to protect against /publicRooms spiders, # but will forbid external users from viewing your server's public room -# directory. If federation is disabled entirely (`allow_federation`), -# this is inherently false. 
+# directory. If federation is disabled entirely (`allow_federation`), this +# is inherently false. # #allow_public_room_directory_over_federation = false @@ -424,10 +433,10 @@ # #allow_public_room_directory_without_auth = false -# allow guests/unauthenticated users to access TURN credentials +# Allow guests/unauthenticated users to access TURN credentials. # -# this is the equivalent of Synapse's `turn_allow_guests` config option. -# this allows any unauthenticated user to call the endpoint +# This is the equivalent of Synapse's `turn_allow_guests` config option. +# This allows any unauthenticated user to call the endpoint # `/_matrix/client/v3/voip/turnServer`. # # It is unlikely you need to enable this as all major clients support @@ -462,24 +471,24 @@ # #allow_inbound_profile_lookup_federation_requests = true -# controls whether standard users are allowed to create rooms. appservices -# and admins are always allowed to create rooms +# Allow standard users to create rooms. Appservices and admins are always +# allowed to create rooms # #allow_room_creation = true # Set to false to disable users from joining or creating room versions -# that aren't 100% officially supported by conduwuit. +# that aren't officially supported by conduwuit. # # conduwuit officially supports room versions 6 - 11. # # conduwuit has slightly experimental (though works fine in practice) -# support for versions 3 - 5 +# support for versions 3 - 5. # #allow_unstable_room_versions = true -# default room version conduwuit will create rooms with. +# Default room version conduwuit will create rooms with. # -# per spec, room version 10 is the default. +# Per spec, room version 10 is the default. 
# #default_room_version = 10 @@ -510,22 +519,28 @@ #tracing_flame_output_path = "./tracing.folded" # Examples: +# # - No proxy (default): -# proxy ="none" +# +# proxy = "none" # # - For global proxy, create the section at the bottom of this file: -# [global.proxy] -# global = { url = "socks5h://localhost:9050" } +# +# [global.proxy] +# global = { url = "socks5h://localhost:9050" } # # - To proxy some domains: -# [global.proxy] -# [[global.proxy.by_domain]] -# url = "socks5h://localhost:9050" -# include = ["*.onion", "matrix.myspecial.onion"] -# exclude = ["*.myspecial.onion"] +# +# [global.proxy] +# [[global.proxy.by_domain]] +# url = "socks5h://localhost:9050" +# include = ["*.onion", "matrix.myspecial.onion"] +# exclude = ["*.myspecial.onion"] # # Include vs. Exclude: +# # - If include is an empty list, it is assumed to be `["*"]`. +# # - If a domain matches both the exclude and include list, the proxy will # only be used if it was included because of a more specific rule than # it was excluded. In the above example, the proxy would be used for @@ -542,7 +557,7 @@ # (notary trusted key servers). # # Currently, conduwuit doesn't support inbound batched key requests, so -# this list should only contain other Synapse servers +# this list should only contain other Synapse servers. # # example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] # @@ -563,10 +578,10 @@ # compromised trusted server to room joins only. The join operation # requires gathering keys from many origin servers which can cause # significant delays. Therefor this defaults to true to mitigate -# unexpected delays out-of-the-box. The security-paranoid or those -# willing to tolerate delays are advised to set this to false. Note that -# setting query_trusted_key_servers_first to true causes this option to -# be ignored. +# unexpected delays out-of-the-box. The security-paranoid or those willing +# to tolerate delays are advised to set this to false. 
Note that setting +# query_trusted_key_servers_first to true causes this option to be +# ignored. # #query_trusted_key_servers_first_on_join = true @@ -582,8 +597,10 @@ # #trusted_server_batch_size = 1024 -# max log level for conduwuit. allows debug, info, warn, or error -# see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives +# Max log level for conduwuit. Allows debug, info, warn, or error. +# +# See also: +# https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives # # **Caveat**: # For release builds, the tracing crate is configured to only implement @@ -593,48 +610,48 @@ # #log = "info" -# controls whether logs will be outputted with ANSI colours +# Output logs with ANSI colours. # #log_colors = true -# configures the span events which will be outputted with the log +# Configures the span events which will be outputted with the log. # #log_span_events = "none" -# configures whether CONDUWUIT_LOG EnvFilter matches values using regular +# Configures whether CONDUWUIT_LOG EnvFilter matches values using regular # expressions. See the tracing_subscriber documentation on Directives. # #log_filter_regex = true -# toggles the display of ThreadId in tracing log output +# Toggles the display of ThreadId in tracing log output. # #log_thread_ids = false -# OpenID token expiration/TTL in seconds +# OpenID token expiration/TTL in seconds. # # These are the OpenID tokens that are primarily used for Matrix account # integrations (e.g. Vector Integrations in Element), *not* OIDC/OpenID -# Connect/etc +# Connect/etc. # #openid_token_ttl = 3600 -# static TURN username to provide the client if not using a shared secret +# Static TURN username to provide the client if not using a shared secret # ("turn_secret"), It is recommended to use a shared secret over static # credentials. 
# #turn_username = false -# static TURN password to provide the client if not using a shared secret +# Static TURN password to provide the client if not using a shared secret # ("turn_secret"). It is recommended to use a shared secret over static # credentials. # #turn_password = false -# vector list of TURN URIs/servers to use +# Vector list of TURN URIs/servers to use. # -# replace "example.turn.uri" with your TURN domain, such as the coturn -# "realm" config option. if using TURN over TLS, replace the URI prefix -# "turn:" with "turns:" +# Replace "example.turn.uri" with your TURN domain, such as the coturn +# "realm" config option. If using TURN over TLS, replace the URI prefix +# "turn:" with "turns:". # # example: ["turn:example.turn.uri?transport=udp", # "turn:example.turn.uri?transport=tcp"] @@ -642,29 +659,29 @@ #turn_uris = [] # TURN secret to use for generating the HMAC-SHA1 hash apart of username -# and password generation +# and password generation. # -# this is more secure, but if needed you can use traditional -# static username/password credentials. +# This is more secure, but if needed you can use traditional static +# username/password credentials. # #turn_secret = false -# TURN secret to use that's read from the file path specified +# TURN secret to use that's read from the file path specified. # -# this takes priority over "turn_secret" first, and falls back to +# This takes priority over "turn_secret" first, and falls back to # "turn_secret" if invalid or failed to open. # # example: "/etc/conduwuit/.turn_secret" # #turn_secret_file = -# TURN TTL in seconds +# TURN TTL, in seconds. # #turn_ttl = 86400 # List/vector of room IDs or room aliases that conduwuit will make newly -# registered users join. The rooms specified must be rooms that you -# have joined at least once on the server, and must be public. +# registered users join. The rooms specified must be rooms that you have +# joined at least once on the server, and must be public. 
# # example: ["#conduwuit:puppygock.gay", # "!eoIzvAvVwY23LPDay8:puppygock.gay"] @@ -686,7 +703,7 @@ # room invites) are ignored here. # # Defaults to false as rooms can be banned for non-moderation-related -# reasons and this performs a full user deactivation +# reasons and this performs a full user deactivation. # #auto_deactivate_banned_room_attempts = false @@ -711,7 +728,7 @@ #rocksdb_log_time_to_roll = 0 # Set this to true to use RocksDB config options that are tailored to HDDs -# (slower device storage) +# (slower device storage). # # It is worth noting that by default, conduwuit will use RocksDB with # Direct IO enabled. *Generally* speaking this improves performance as it @@ -724,13 +741,15 @@ # feel free to report in the conduwuit Matrix room if this option fixes # your DB issues. # -# See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information. +# For more information, see: +# https://github.com/facebook/rocksdb/wiki/Direct-IO # #rocksdb_optimize_for_spinning_disks = false # Enables direct-io to increase database performance via unbuffered I/O. # -# See https://github.com/facebook/rocksdb/wiki/Direct-IO for more details about Direct IO and RocksDB. +# For more details about direct I/O and RockDB, see: +# https://github.com/facebook/rocksdb/wiki/Direct-IO # # Set this option to false if the database resides on a filesystem which # does not support direct-io like FUSE, or any form of complex filesystem @@ -739,25 +758,28 @@ #rocksdb_direct_io = true # Amount of threads that RocksDB will use for parallelism on database -# operatons such as cleanup, sync, flush, compaction, etc. Set to 0 to use -# all your logical threads. Defaults to your CPU logical thread count. +# operations such as cleanup, sync, flush, compaction, etc. Set to 0 to +# use all your logical threads. Defaults to your CPU logical thread count. # #rocksdb_parallelism_threads = varies by system # Maximum number of LOG files RocksDB will keep. This must *not* be set to # 0. 
It must be at least 1. Defaults to 3 as these are not very useful -# unless troubleshooting/debugging a RocksDB bug. +# unless troubleshooting/debugging a RocksDB bug. # #rocksdb_max_log_files = 3 # Type of RocksDB database compression to use. # -# Available options are "zstd", "zlib", "bz2", "lz4", or "none" +# Available options are "zstd", "zlib", "bz2", "lz4", or "none". # # It is best to use ZSTD as an overall good balance between -# speed/performance, storage, IO amplification, and CPU usage. -# For more performance but less compression (more storage used) and less -# CPU usage, use LZ4. See https://github.com/facebook/rocksdb/wiki/Compression for more details. +# speed/performance, storage, IO amplification, and CPU usage. For more +# performance but less compression (more storage used) and less CPU usage, +# use LZ4. +# +# For more details, see: +# https://github.com/facebook/rocksdb/wiki/Compression # # "none" will disable compression. # @@ -766,22 +788,21 @@ # Level of compression the specified compression algorithm for RocksDB to # use. # -# Default is 32767, which is internally read by RocksDB as the -# default magic number and translated to the library's default -# compression level as they all differ. -# See their `kDefaultCompressionLevel`. +# Default is 32767, which is internally read by RocksDB as the default +# magic number and translated to the library's default compression level +# as they all differ. See their `kDefaultCompressionLevel`. # #rocksdb_compression_level = 32767 # Level of compression the specified compression algorithm for the # bottommost level/data for RocksDB to use. Default is 32767, which is -# internally read by RocksDB as the default magic number and translated -# to the library's default compression level as they all differ. -# See their `kDefaultCompressionLevel`. +# internally read by RocksDB as the default magic number and translated to +# the library's default compression level as they all differ. 
See their +# `kDefaultCompressionLevel`. # # Since this is the bottommost level (generally old and least used data), # it may be desirable to have a very high compression level here as it's -# lesss likely for this data to be used. Research your chosen compression +# less likely for this data to be used. Research your chosen compression # algorithm. # #rocksdb_bottommost_compression_level = 32767 @@ -797,7 +818,7 @@ # #rocksdb_bottommost_compression = false -# Database recovery mode (for RocksDB WAL corruption) +# Database recovery mode (for RocksDB WAL corruption). # # Use this option when the server reports corruption and refuses to start. # Set mode 2 (PointInTime) to cleanly recover from this corruption. The @@ -805,8 +826,8 @@ # minutes prior to the crash. Clients may have to run "clear-cache & # reload" to account for the rollback. Upon success, you may reset the # mode back to default and restart again. Please note in some cases the -# corruption error may not be cleared for at least 30 minutes of -# operation in PointInTime mode. +# corruption error may not be cleared for at least 30 minutes of operation +# in PointInTime mode. # # As a very last ditch effort, if PointInTime does not fix or resolve # anything, you can try mode 3 (SkipAnyCorruptedRecord) but this will @@ -824,9 +845,11 @@ # 2 = PointInTime (use me if trying to recover) # 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) # -# See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information on these modes. +# For more information on these modes, see: +# https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes # -# See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. 
+# For more details on recovering a corrupt database, see: +# https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption # #rocksdb_recovery_mode = 1 @@ -834,11 +857,12 @@ # database consistency at a potential performance impact due to further # safety checks ran. # -# See https://github.com/facebook/rocksdb/wiki/Online-Verification#columnfamilyoptionsparanoid_file_checks for more information. +# For more information, see: +# https://github.com/facebook/rocksdb/wiki/Online-Verification#columnfamilyoptionsparanoid_file_checks # #rocksdb_paranoid_file_checks = false -# Database repair mode (for RocksDB SST corruption) +# Database repair mode (for RocksDB SST corruption). # # Use this option when the server reports corruption while running or # panics. If the server refuses to start use the recovery mode options @@ -847,6 +871,7 @@ # # - Backing up your database directory is recommended prior to running the # repair. +# # - Disabling repair mode and restarting the server is recommended after # running the repair. # @@ -874,10 +899,10 @@ # #rocksdb_compaction_ioprio_idle = true -# Config option to disable RocksDB compaction. You should never ever have -# to disable this. If you for some reason find yourself needing to disable -# this as part of troubleshooting or a bug, please reach out to us in the -# conduwuit Matrix room with information and details. +# Disables RocksDB compaction. You should never ever have to set this +# option to true. If you for some reason find yourself needing to use this +# option as part of troubleshooting or a bug, please reach out to us in +# the conduwuit Matrix room with information and details. # # Disabling compaction will lead to a significantly bloated and # explosively large database, gradually poor performance, unnecessarily @@ -916,79 +941,76 @@ # #notification_push_path = "/_matrix/push/v1/notify" -# Config option to control local (your server only) presence -# updates/requests. 
Note that presence on conduwuit is -# very fast unlike Synapse's. If using outgoing presence, this MUST be -# enabled. +# Allow local (your server only) presence updates/requests. +# +# Note that presence on conduwuit is very fast unlike Synapse's. If using +# outgoing presence, this MUST be enabled. # #allow_local_presence = true -# Config option to control incoming federated presence updates/requests. +# Allow incoming federated presence updates/requests. # -# This option receives presence updates from other -# servers, but does not send any unless `allow_outgoing_presence` is true. -# Note that presence on conduwuit is very fast unlike Synapse's. +# This option receives presence updates from other servers, but does not +# send any unless `allow_outgoing_presence` is true. Note that presence on +# conduwuit is very fast unlike Synapse's. # #allow_incoming_presence = true -# Config option to control outgoing presence updates/requests. +# Allow outgoing presence updates/requests. # # This option sends presence updates to other servers, but does not -# receive any unless `allow_incoming_presence` is true. -# Note that presence on conduwuit is very fast unlike Synapse's. -# If using outgoing presence, you MUST enable `allow_local_presence` as -# well. +# receive any unless `allow_incoming_presence` is true. Note that presence +# on conduwuit is very fast unlike Synapse's. If using outgoing presence, +# you MUST enable `allow_local_presence` as well. # #allow_outgoing_presence = true -# Config option to control how many seconds before presence updates that -# you are idle. Defaults to 5 minutes. +# How many seconds without presence updates before you become idle. +# Defaults to 5 minutes. # #presence_idle_timeout_s = 300 -# Config option to control how many seconds before presence updates that -# you are offline. Defaults to 30 minutes. +# How many seconds without presence updates before you become offline. +# Defaults to 30 minutes. 
# #presence_offline_timeout_s = 1800 -# Config option to enable the presence idle timer for remote users. +# Enable the presence idle timer for remote users. +# # Disabling is offered as an optimization for servers participating in # many large rooms or when resources are limited. Disabling it may cause -# incorrect presence states (i.e. stuck online) to be seen for some -# remote users. +# incorrect presence states (i.e. stuck online) to be seen for some remote +# users. # #presence_timeout_remote_users = true -# Config option to control whether we should receive remote incoming read -# receipts. +# Allow receiving incoming read receipts from remote servers. # #allow_incoming_read_receipts = true -# Config option to control whether we should send read receipts to remote -# servers. +# Allow sending read receipts to remote servers. # #allow_outgoing_read_receipts = true -# Config option to control outgoing typing updates to federation. +# Allow outgoing typing updates to federation. # #allow_outgoing_typing = true -# Config option to control incoming typing updates from federation. +# Allow incoming typing updates from federation. # #allow_incoming_typing = true -# Config option to control maximum time federation user can indicate -# typing. +# Maximum time federation user can indicate typing. # #typing_federation_timeout_s = 30 -# Config option to control minimum time local client can indicate typing. -# This does not override a client's request to stop typing. It only -# enforces a minimum value in case of no stop request. +# Minimum time local client can indicate typing. This does not override a +# client's request to stop typing. It only enforces a minimum value in +# case of no stop request. # #typing_client_timeout_min_s = 15 -# Config option to control maximum time local client can indicate typing. +# Maximum time local client can indicate typing. # #typing_client_timeout_max_s = 45 @@ -1017,8 +1039,8 @@ # brotli. 
This option does nothing if conduwuit was not built with # `brotli_compression` feature. Please be aware that enabling HTTP # compression may weaken TLS. Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before -# deciding to enable this. +# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH +# before deciding to enable this. # #brotli_compression = false @@ -1037,8 +1059,8 @@ # #allow_guests_auto_join_rooms = false -# Config option to control whether the legacy unauthenticated Matrix media -# repository endpoints will be enabled. These endpoints consist of: +# Enable the legacy unauthenticated Matrix media repository endpoints. +# These endpoints consist of: # - /_matrix/media/*/config # - /_matrix/media/*/upload # - /_matrix/media/*/preview_url @@ -1056,8 +1078,8 @@ # #freeze_legacy_media = true -# Checks consistency of the media directory at startup: -# 1. When `media_compat_file_link` is enbled, this check will upgrade +# Check consistency of the media directory at startup: +# 1. When `media_compat_file_link` is enabled, this check will upgrade # media when switching back and forth between Conduit and conduwuit. # Both options must be enabled to handle this. # 2. When media is deleted from the directory, this check will also delete @@ -1070,18 +1092,22 @@ #media_startup_check = true # Enable backward-compatibility with Conduit's media directory by creating -# symlinks of media. This option is only necessary if you plan on using -# Conduit again. Otherwise setting this to false reduces filesystem -# clutter and overhead for managing these symlinks in the directory. This -# is now disabled by default. You may still return to upstream Conduit -# but you have to run conduwuit at least once with this set to true and -# allow the media_startup_check to take place before shutting -# down to return to Conduit. +# symlinks of media. 
+# +# This option is only necessary if you plan on using Conduit again. +# Otherwise setting this to false reduces filesystem clutter and overhead +# for managing these symlinks in the directory. This is now disabled by +# default. You may still return to upstream Conduit but you have to run +# conduwuit at least once with this set to true and allow the +# media_startup_check to take place before shutting down to return to +# Conduit. # #media_compat_file_link = false -# Prunes missing media from the database as part of the media startup -# checks. This means if you delete files from the media directory the +# Prune missing media from the database as part of the media startup +# checks. +# +# This means if you delete files from the media directory the # corresponding entries will be removed from the database. This is # disabled by default because if the media directory is accidentally moved # or inaccessible, the metadata entries in the database will be lost with @@ -1146,44 +1172,47 @@ #url_preview_bound_interface = # Vector list of domains allowed to send requests to for URL previews. -# Defaults to none. Note: this is a *contains* match, not an explicit -# match. Putting "google.com" will match "https://google.com" and +# +# This is a *contains* match, not an explicit match. Putting "google.com" +# will match "https://google.com" and # "http://mymaliciousdomainexamplegoogle.com" Setting this to "*" will # allow all URL previews. Please note that this opens up significant -# attack surface to your server, you are expected to be aware of the -# risks by doing so. +# attack surface to your server, you are expected to be aware of the risks +# by doing so. # #url_preview_domain_contains_allowlist = [] # Vector list of explicit domains allowed to send requests to for URL -# previews. Defaults to none. Note: This is an *explicit* match, not a -# contains match. Putting "google.com" will match "https://google.com", -# "http://google.com", but not +# previews. 
+# +# This is an *explicit* match, not a contains match. Putting "google.com" +# will match "https://google.com", "http://google.com", but not # "https://mymaliciousdomainexamplegoogle.com". Setting this to "*" will # allow all URL previews. Please note that this opens up significant -# attack surface to your server, you are expected to be aware of the -# risks by doing so. +# attack surface to your server, you are expected to be aware of the risks +# by doing so. # #url_preview_domain_explicit_allowlist = [] # Vector list of explicit domains not allowed to send requests to for URL -# previews. Defaults to none. Note: This is an *explicit* match, not a -# contains match. Putting "google.com" will match "https://google.com", -# "http://google.com", but not +# previews. +# +# This is an *explicit* match, not a contains match. Putting "google.com" +# will match "https://google.com", "http://google.com", but not # "https://mymaliciousdomainexamplegoogle.com". The denylist is checked # first before allowlist. Setting this to "*" will not do anything. # #url_preview_domain_explicit_denylist = [] # Vector list of URLs allowed to send requests to for URL previews. -# Defaults to none. Note that this is a *contains* match, not an -# explicit match. Putting "google.com" will match -# "https://google.com/", +# +# Note that this is a *contains* match, not an explicit match. Putting +# "google.com" will match "https://google.com/", # "https://google.com/url?q=https://mymaliciousdomainexample.com", and -# "https://mymaliciousdomainexample.com/hi/google.com" Setting this to -# "*" will allow all URL previews. Please note that this opens up -# significant attack surface to your server, you are expected to be -# aware of the risks by doing so. +# "https://mymaliciousdomainexample.com/hi/google.com" Setting this to "*" +# will allow all URL previews. Please note that this opens up significant +# attack surface to your server, you are expected to be aware of the risks +# by doing so. 
# #url_preview_url_contains_allowlist = [] @@ -1196,21 +1225,20 @@ # checks (contains and explicit) on the root domain or not. Does not apply # to URL contains allowlist. Defaults to false. # -# Example usecase: If this is -# enabled and you have "wikipedia.org" allowed in the explicit and/or -# contains domain allowlist, it will allow all subdomains under -# "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is -# checked and matched. Useful if the domain contains allowlist is still -# too broad for you but you still want to allow all the subdomains under a -# root domain. +# Example usecase: If this is enabled and you have "wikipedia.org" allowed +# in the explicit and/or contains domain allowlist, it will allow all +# subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the +# root domain is checked and matched. Useful if the domain contains +# allowlist is still too broad for you but you still want to allow all the +# subdomains under a root domain. # #url_preview_check_root_domain = false # List of forbidden room aliases and room IDs as strings of regex # patterns. # -# Regex can be used or explicit contains matches can be done by -# just specifying the words (see example). +# Regex can be used or explicit contains matches can be done by just +# specifying the words (see example). # # This is checked upon room alias creation, custom room ID creation if # used, and startup as warnings if any room aliases in your database have @@ -1234,15 +1262,15 @@ #forbidden_usernames = [] # Retry failed and incomplete messages to remote servers immediately upon -# startup. This is called bursting. If this is disabled, said messages -# may not be delivered until more messages are queued for that server. Do -# not change this option unless server resources are extremely limited or -# the scale of the server's deployment is huge. Do not disable this -# unless you know what you are doing. +# startup. This is called bursting. 
If this is disabled, said messages may +# not be delivered until more messages are queued for that server. Do not +# change this option unless server resources are extremely limited or the +# scale of the server's deployment is huge. Do not disable this unless you +# know what you are doing. # #startup_netburst = true -# messages are dropped and not reattempted. The `startup_netburst` option +# Messages are dropped and not reattempted. The `startup_netburst` option # must be enabled for this value to have any effect. Do not change this # value unless you know what you are doing. Set this value to -1 to # reattempt every message without trimming the queues; this may consume @@ -1251,44 +1279,44 @@ # #startup_netburst_keep = 50 -# controls whether non-admin local users are forbidden from sending room -# invites (local and remote), and if non-admin users can receive remote -# room invites. admins are always allowed to send and receive all room -# invites. +# Block non-admin local users from sending room invites (local and +# remote), and block non-admin users from receiving remote room invites. +# +# Admins are always allowed to send and receive all room invites. # #block_non_admin_invites = false -# Allows admins to enter commands in rooms other than "#admins" (admin -# room) by prefixing your message with "\!admin" or "\\!admin" followed -# up a normal conduwuit admin command. The reply will be publicly visible -# to the room, originating from the sender. +# Allow admins to enter commands in rooms other than "#admins" (admin +# room) by prefixing your message with "\!admin" or "\\!admin" followed by +# a normal conduwuit admin command. The reply will be publicly visible to +# the room, originating from the sender. # # example: \\!admin debug ping puppygock.gay # #admin_escape_commands = true -# Controls whether the conduwuit admin room console / CLI will immediately -# activate on startup. This option can also be enabled with `--console` -# conduwuit argument. 
+# Automatically activate the conduwuit admin room console / CLI on +# startup. This option can also be enabled with `--console` conduwuit +# argument. # #admin_console_automatic = false -# Controls what admin commands will be executed on startup. This is a -# vector list of strings of admin commands to run. -# +# List of admin commands to execute on startup. # # This option can also be configured with the `--execute` conduwuit # argument and can take standard shell commands and environment variables # -# Such example could be: `./conduwuit --execute "server admin-notice -# conduwuit has started up at $(date)"` +# For example: `./conduwuit --execute "server admin-notice conduwuit has +# started up at $(date)"` # # example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` # #admin_execute = [] -# Controls whether conduwuit should error and fail to start if an admin -# execute command (`--execute` / `admin_execute`) fails. +# Ignore errors in startup commands. +# +# If false, conduwuit will error and fail to start if an admin execute +# command (`--execute` / `admin_execute`) fails. # #admin_execute_errors_ignore = false @@ -1309,19 +1337,20 @@ # Sentry.io crash/panic reporting, performance monitoring/metrics, etc. # This is NOT enabled by default. conduwuit's default Sentry reporting -# endpoint domain is o4506996327251968.ingest.us.sentry.io +# endpoint domain is `o4506996327251968.ingest.us.sentry.io`. # #sentry = false -# Sentry reporting URL if a custom one is desired +# Sentry reporting URL, if a custom one is desired. # #sentry_endpoint = "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" -# Report your conduwuit server_name in Sentry.io crash reports and metrics +# Report your conduwuit server_name in Sentry.io crash reports and +# metrics. # #sentry_send_server_name = false -# Performance monitoring/tracing sample rate for Sentry.io +# Performance monitoring/tracing sample rate for Sentry.io. 
# # Note that too high values may impact performance, and can be disabled by # setting it to 0.0 (0%) This value is read as a percentage to Sentry, @@ -1333,8 +1362,8 @@ # #sentry_attach_stacktrace = false -# Send panics to sentry. This is true by default, but sentry has to be -# enabled. The global "sentry" config option must be enabled to send any +# Send panics to Sentry. This is true by default, but Sentry has to be +# enabled. The global `sentry` config option must be enabled to send any # data. # #sentry_send_panic = true @@ -1351,7 +1380,9 @@ #sentry_filter = "info" # Enable the tokio-console. This option is only relevant to developers. -# See https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console for more information. +# +# For more information, see: +# https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console # #tokio_console = false @@ -1360,8 +1391,8 @@ #test = false # Controls whether admin room notices like account registrations, password -# changes, account deactivations, room directory publications, etc will -# be sent to the admin room. Update notices and normal admin command +# changes, account deactivations, room directory publications, etc will be +# sent to the admin room. Update notices and normal admin command # responses will still be sent. # #admin_room_notices = true diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 44b66f41..8fd5621f 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -28,7 +28,7 @@ pub use self::check::check; use self::proxy::ProxyConfig; use crate::{err, error::Error, utils::sys, Result}; -/// all the config options for conduwuit +/// All the config options for conduwuit. 
#[allow(clippy::struct_excessive_bools)] #[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] #[derive(Clone, Debug, Deserialize)] @@ -36,19 +36,32 @@ use crate::{err, error::Error, utils::sys, Result}; filename = "conduwuit-example.toml", section = "global", undocumented = "# This item is undocumented. Please contribute documentation for it.", - header = "### conduwuit Configuration\n###\n### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL\n### BE \ - OVERWRITTEN!\n###\n### You should rename this file before configuring your server. Changes\n### to \ - documentation and defaults can be contributed in source code at\n### src/core/config/mod.rs. This file \ - is generated when building.\n###\n### Any values pre-populated are the default values for said config \ - option.\n###\n### At the minimum, you MUST edit all the config options to your environment\n### that say \ - \"YOU NEED TO EDIT THIS\".\n### See https://conduwuit.puppyirl.gay/configuration.html for ways to\n### configure conduwuit\n", + header = r#"### conduwuit Configuration +### +### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE +### OVERWRITTEN! +### +### You should rename this file before configuring your server. Changes to +### documentation and defaults can be contributed in source code at +### src/core/config/mod.rs. This file is generated when building. +### +### Any values pre-populated are the default values for said config option. +### +### At the minimum, you MUST edit all the config options to your environment +### that say "YOU NEED TO EDIT THIS". +### +### For more information, see: +### https://conduwuit.puppyirl.gay/configuration.html +"#, ignore = "catchall well_known tls" )] pub struct Config { /// The server_name is the pretty name of this server. It is used as a /// suffix for user and room IDs/aliases. 
/// - /// See the docs for reverse proxying and delegation: https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy + /// See the docs for reverse proxying and delegation: + /// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy + /// /// Also see the `[global.well_known]` config section at the very bottom. /// /// Examples of delegation: @@ -61,7 +74,7 @@ pub struct Config { /// example: "conduwuit.woof" pub server_name: OwnedServerName, - /// default address (IPv4 or IPv6) conduwuit will listen on. + /// The default address (IPv4 or IPv6) conduwuit will listen on. /// /// If you are using Docker or a container NAT networking setup, this must /// be "0.0.0.0". @@ -73,12 +86,13 @@ pub struct Config { #[serde(default = "default_address")] address: ListeningAddr, - /// The port(s) conduwuit will be running on. + /// The port(s) conduwuit will listen on. /// - /// See https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy for reverse proxying. + /// For reverse proxying, see: + /// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy /// - /// Docker users: Don't change this, you'll need to map an external port to - /// this. + /// If you are using Docker, don't change this, you'll need to map an + /// external port to this. /// /// To listen on multiple ports, specify a vector e.g. [8080, 8448] /// @@ -90,11 +104,14 @@ pub struct Config { #[serde(default)] pub tls: TlsConfig, - /// Uncomment unix_socket_path to listen on a UNIX socket at the specified - /// path. If listening on a UNIX socket, you MUST remove/comment the - /// 'address' key if definedm AND add your reverse proxy to the 'conduwuit' - /// group, unless world RW permissions are specified with unix_socket_perms - /// (666 minimum). + /// The UNIX socket conduwuit will listen on. + /// + /// conduwuit cannot listen on both an IP address and a UNIX socket. 
If + /// listening on a UNIX socket, you MUST remove/comment the `address` key. + /// + /// Remember to make sure that your reverse proxy has access to this socket + /// file, either by adding your reverse proxy to the 'conduwuit' group or + /// granting world R/W permissions with `unix_socket_perms` (666 minimum). /// /// example: "/run/conduwuit/conduwuit.sock" pub unix_socket_path: Option, @@ -106,8 +123,7 @@ pub struct Config { pub unix_socket_perms: u32, /// This is the only directory where conduwuit will save its data, including - /// media. - /// Note: this was previously "/var/lib/matrix-conduit" + /// media. Note: this was previously "/var/lib/matrix-conduit". /// /// YOU NEED TO EDIT THIS. /// @@ -118,7 +134,8 @@ pub struct Config { /// API. To use this, set a database backup path that conduwuit can write /// to. /// - /// See https://conduwuit.puppyirl.gay/maintenance.html#backups for more information. + /// For more information, see: + /// https://conduwuit.puppyirl.gay/maintenance.html#backups /// /// example: "/opt/conduwuit-db-backups" pub database_backup_path: Option, @@ -140,16 +157,17 @@ pub struct Config { /// Similar to the individual LRU caches, this is scaled up with your CPU /// core count. /// - /// This defaults to 128.0 + (64.0 * CPU core count) + /// This defaults to 128.0 + (64.0 * CPU core count). /// /// default: varies by system #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, - /// Option to control adding arbitrary text to the end of the user's - /// displayname upon registration with a space before the text. This was the - /// lightning bolt emoji option, just replaced with support for adding your - /// own custom text or emojis. To disable, set this to "" (an empty string). + /// Text which will be added to the end of the user's displayname upon + /// registration with a space before the text. In Conduit, this was the + /// lightning bolt emoji. 
+ /// + /// To disable, set this to "" (an empty string). /// /// The default is the trans pride flag. /// @@ -237,8 +255,8 @@ pub struct Config { /// Maximum entries stored in DNS memory-cache. The size of an entry may /// vary so please take care if raising this value excessively. Only - /// decrease this when using an external DNS cache. Please note - /// that systemd-resolved does *not* count as an external cache, even when + /// decrease this when using an external DNS cache. Please note that + /// systemd-resolved does *not* count as an external cache, even when /// configured to do so. /// /// default: 32768 @@ -257,8 +275,8 @@ pub struct Config { /// Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. /// This value is critical for the server to federate efficiently. - /// NXDOMAIN's are assumed to not be returning to the federation - /// and aggressively cached rather than constantly rechecked. + /// NXDOMAIN's are assumed to not be returning to the federation and + /// aggressively cached rather than constantly rechecked. /// /// Defaults to 3 days as these are *very rarely* false negatives. /// @@ -293,10 +311,12 @@ pub struct Config { #[serde(default = "true_fn")] pub query_all_nameservers: bool, - /// Enables using *only* TCP for querying your specified nameservers instead + /// Enable using *only* TCP for querying your specified nameservers instead /// of UDP. /// - /// If you are running conduwuit in a container environment, this config option may need to be enabled. See https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker for more details. + /// If you are running conduwuit in a container environment, this config + /// option may need to be enabled. 
For more details, see: + /// https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker #[serde(default)] pub query_over_tcp_only: bool, @@ -352,8 +372,8 @@ pub struct Config { /// Default/base request total timeout (seconds). The time limit for a whole /// request. This is set very high to not cancel healthy requests while - /// serving as a backstop. This is used only by URL previews and - /// update/news endpoint checks. + /// serving as a backstop. This is used only by URL previews and update/news + /// endpoint checks. /// /// default: 320 #[serde(default = "default_request_total_timeout")] @@ -374,13 +394,13 @@ pub struct Config { #[serde(default = "default_request_idle_per_host")] pub request_idle_per_host: u16, - /// Federation well-known resolution connection timeout (seconds) + /// Federation well-known resolution connection timeout (seconds). /// /// default: 6 #[serde(default = "default_well_known_conn_timeout")] pub well_known_conn_timeout: u64, - /// Federation HTTP well-known resolution request timeout (seconds) + /// Federation HTTP well-known resolution request timeout (seconds). /// /// default: 10 #[serde(default = "default_well_known_timeout")] @@ -394,14 +414,14 @@ pub struct Config { #[serde(default = "default_federation_timeout")] pub federation_timeout: u64, - /// Federation client idle connection pool timeout (seconds) + /// Federation client idle connection pool timeout (seconds). /// /// default: 25 #[serde(default = "default_federation_idle_timeout")] pub federation_idle_timeout: u64, /// Federation client max idle connections per host. Defaults to 1 as - /// generally the same open connection can be re-used + /// generally the same open connection can be re-used. 
/// /// default: 1 #[serde(default = "default_federation_idle_per_host")] @@ -414,13 +434,13 @@ pub struct Config { #[serde(default = "default_sender_timeout")] pub sender_timeout: u64, - /// Federation sender idle connection pool timeout (seconds) + /// Federation sender idle connection pool timeout (seconds). /// /// default: 180 #[serde(default = "default_sender_idle_timeout")] pub sender_idle_timeout: u64, - /// Federation sender transaction retry backoff limit (seconds) + /// Federation sender transaction retry backoff limit (seconds). /// /// default: 86400 #[serde(default = "default_sender_retry_backoff_limit")] @@ -433,13 +453,13 @@ pub struct Config { #[serde(default = "default_appservice_timeout")] pub appservice_timeout: u64, - /// Appservice URL idle connection pool timeout (seconds) + /// Appservice URL idle connection pool timeout (seconds). /// /// default: 300 #[serde(default = "default_appservice_idle_timeout")] pub appservice_idle_timeout: u64, - /// Notification gateway pusher idle connection pool timeout + /// Notification gateway pusher idle connection pool timeout. /// /// default: 15 #[serde(default = "default_pusher_idle_timeout")] @@ -449,9 +469,8 @@ pub struct Config { /// server. /// /// If set to true without a token configured, users can register with no - /// form of 2nd-step only if you set - /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to - /// true in your config. + /// form of 2nd-step only if you set the following option to true: + /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` /// /// If you would like registration only via token reg, please configure /// `registration_token` or `registration_token_file`. @@ -501,8 +520,8 @@ pub struct Config { /// Set this to true to allow your server's public room directory to be /// federated. 
Set this to false to protect against /publicRooms spiders, /// but will forbid external users from viewing your server's public room - /// directory. If federation is disabled entirely (`allow_federation`), - /// this is inherently false. + /// directory. If federation is disabled entirely (`allow_federation`), this + /// is inherently false. #[serde(default)] pub allow_public_room_directory_over_federation: bool, @@ -512,10 +531,10 @@ pub struct Config { #[serde(default)] pub allow_public_room_directory_without_auth: bool, - /// allow guests/unauthenticated users to access TURN credentials + /// Allow guests/unauthenticated users to access TURN credentials. /// - /// this is the equivalent of Synapse's `turn_allow_guests` config option. - /// this allows any unauthenticated user to call the endpoint + /// This is the equivalent of Synapse's `turn_allow_guests` config option. + /// This allows any unauthenticated user to call the endpoint /// `/_matrix/client/v3/voip/turnServer`. /// /// It is unlikely you need to enable this as all major clients support @@ -550,24 +569,24 @@ pub struct Config { #[serde(default = "true_fn", alias = "allow_profile_lookup_federation_requests")] pub allow_inbound_profile_lookup_federation_requests: bool, - /// controls whether standard users are allowed to create rooms. appservices - /// and admins are always allowed to create rooms + /// Allow standard users to create rooms. Appservices and admins are always + /// allowed to create rooms #[serde(default = "true_fn")] pub allow_room_creation: bool, /// Set to false to disable users from joining or creating room versions - /// that aren't 100% officially supported by conduwuit. + /// that aren't officially supported by conduwuit. /// /// conduwuit officially supports room versions 6 - 11. /// /// conduwuit has slightly experimental (though works fine in practice) - /// support for versions 3 - 5 + /// support for versions 3 - 5. 
#[serde(default = "true_fn")] pub allow_unstable_room_versions: bool, - /// default room version conduwuit will create rooms with. + /// Default room version conduwuit will create rooms with. /// - /// per spec, room version 10 is the default. + /// Per spec, room version 10 is the default. /// /// default: 10 #[serde(default = "default_default_room_version")] @@ -603,22 +622,28 @@ pub struct Config { pub tracing_flame_output_path: String, /// Examples: + /// /// - No proxy (default): - /// proxy ="none" + /// + /// proxy = "none" /// /// - For global proxy, create the section at the bottom of this file: - /// [global.proxy] - /// global = { url = "socks5h://localhost:9050" } + /// + /// [global.proxy] + /// global = { url = "socks5h://localhost:9050" } /// /// - To proxy some domains: - /// [global.proxy] - /// [[global.proxy.by_domain]] - /// url = "socks5h://localhost:9050" - /// include = ["*.onion", "matrix.myspecial.onion"] - /// exclude = ["*.myspecial.onion"] + /// + /// [global.proxy] + /// [[global.proxy.by_domain]] + /// url = "socks5h://localhost:9050" + /// include = ["*.onion", "matrix.myspecial.onion"] + /// exclude = ["*.myspecial.onion"] /// /// Include vs. Exclude: + /// /// - If include is an empty list, it is assumed to be `["*"]`. + /// /// - If a domain matches both the exclude and include list, the proxy will /// only be used if it was included because of a more specific rule than /// it was excluded. In the above example, the proxy would be used for @@ -635,7 +660,7 @@ pub struct Config { /// (notary trusted key servers). /// /// Currently, conduwuit doesn't support inbound batched key requests, so - /// this list should only contain other Synapse servers + /// this list should only contain other Synapse servers. /// /// example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] /// @@ -658,10 +683,10 @@ pub struct Config { /// compromised trusted server to room joins only. 
The join operation /// requires gathering keys from many origin servers which can cause /// significant delays. Therefor this defaults to true to mitigate - /// unexpected delays out-of-the-box. The security-paranoid or those - /// willing to tolerate delays are advised to set this to false. Note that - /// setting query_trusted_key_servers_first to true causes this option to - /// be ignored. + /// unexpected delays out-of-the-box. The security-paranoid or those willing + /// to tolerate delays are advised to set this to false. Note that setting + /// query_trusted_key_servers_first to true causes this option to be + /// ignored. #[serde(default = "true_fn")] pub query_trusted_key_servers_first_on_join: bool, @@ -679,8 +704,10 @@ pub struct Config { #[serde(default = "default_trusted_server_batch_size")] pub trusted_server_batch_size: usize, - /// max log level for conduwuit. allows debug, info, warn, or error - /// see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives + /// Max log level for conduwuit. Allows debug, info, warn, or error. + /// + /// See also: + /// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives /// /// **Caveat**: /// For release builds, the tracing crate is configured to only implement @@ -692,56 +719,56 @@ pub struct Config { #[serde(default = "default_log")] pub log: String, - /// controls whether logs will be outputted with ANSI colours + /// Output logs with ANSI colours. #[serde(default = "true_fn", alias = "log_colours")] pub log_colors: bool, - /// configures the span events which will be outputted with the log + /// Configures the span events which will be outputted with the log. 
/// /// default: "none" #[serde(default = "default_log_span_events")] pub log_span_events: String, - /// configures whether CONDUWUIT_LOG EnvFilter matches values using regular + /// Configures whether CONDUWUIT_LOG EnvFilter matches values using regular /// expressions. See the tracing_subscriber documentation on Directives. /// /// default: true #[serde(default = "true_fn")] pub log_filter_regex: bool, - /// toggles the display of ThreadId in tracing log output + /// Toggles the display of ThreadId in tracing log output. /// /// default: false #[serde(default)] pub log_thread_ids: bool, - /// OpenID token expiration/TTL in seconds + /// OpenID token expiration/TTL in seconds. /// /// These are the OpenID tokens that are primarily used for Matrix account /// integrations (e.g. Vector Integrations in Element), *not* OIDC/OpenID - /// Connect/etc + /// Connect/etc. /// /// default: 3600 #[serde(default = "default_openid_token_ttl")] pub openid_token_ttl: u64, - /// static TURN username to provide the client if not using a shared secret + /// Static TURN username to provide the client if not using a shared secret /// ("turn_secret"), It is recommended to use a shared secret over static /// credentials. #[serde(default)] pub turn_username: String, - /// static TURN password to provide the client if not using a shared secret + /// Static TURN password to provide the client if not using a shared secret /// ("turn_secret"). It is recommended to use a shared secret over static /// credentials. #[serde(default)] pub turn_password: String, - /// vector list of TURN URIs/servers to use + /// Vector list of TURN URIs/servers to use. /// - /// replace "example.turn.uri" with your TURN domain, such as the coturn - /// "realm" config option. if using TURN over TLS, replace the URI prefix - /// "turn:" with "turns:" + /// Replace "example.turn.uri" with your TURN domain, such as the coturn + /// "realm" config option. 
If using TURN over TLS, replace the URI prefix + /// "turn:" with "turns:". /// /// example: ["turn:example.turn.uri?transport=udp", /// "turn:example.turn.uri?transport=tcp"] @@ -751,30 +778,30 @@ pub struct Config { pub turn_uris: Vec, /// TURN secret to use for generating the HMAC-SHA1 hash apart of username - /// and password generation + /// and password generation. /// - /// this is more secure, but if needed you can use traditional - /// static username/password credentials. + /// This is more secure, but if needed you can use traditional static + /// username/password credentials. #[serde(default)] pub turn_secret: String, - /// TURN secret to use that's read from the file path specified + /// TURN secret to use that's read from the file path specified. /// - /// this takes priority over "turn_secret" first, and falls back to + /// This takes priority over "turn_secret" first, and falls back to /// "turn_secret" if invalid or failed to open. /// /// example: "/etc/conduwuit/.turn_secret" pub turn_secret_file: Option, - /// TURN TTL in seconds + /// TURN TTL, in seconds. /// /// default: 86400 #[serde(default = "default_turn_ttl")] pub turn_ttl: u64, /// List/vector of room IDs or room aliases that conduwuit will make newly - /// registered users join. The rooms specified must be rooms that you - /// have joined at least once on the server, and must be public. + /// registered users join. The rooms specified must be rooms that you have + /// joined at least once on the server, and must be public. /// /// example: ["#conduwuit:puppygock.gay", /// "!eoIzvAvVwY23LPDay8:puppygock.gay"] @@ -798,7 +825,7 @@ pub struct Config { /// room invites) are ignored here. /// /// Defaults to false as rooms can be banned for non-moderation-related - /// reasons and this performs a full user deactivation + /// reasons and this performs a full user deactivation. 
 #[serde(default)] pub auto_deactivate_banned_room_attempts: bool, @@ -828,7 +855,7 @@ pub struct Config { pub rocksdb_log_time_to_roll: usize, /// Set this to true to use RocksDB config options that are tailored to HDDs - /// (slower device storage) + /// (slower device storage). /// /// It is worth noting that by default, conduwuit will use RocksDB with /// Direct IO enabled. *Generally* speaking this improves performance as it @@ -841,13 +868,15 @@ pub struct Config { /// feel free to report in the conduwuit Matrix room if this option fixes /// your DB issues. /// - /// See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information. + /// For more information, see: + /// https://github.com/facebook/rocksdb/wiki/Direct-IO #[serde(default)] pub rocksdb_optimize_for_spinning_disks: bool, /// Enables direct-io to increase database performance via unbuffered I/O. /// - /// See https://github.com/facebook/rocksdb/wiki/Direct-IO for more details about Direct IO and RocksDB. + /// For more details about direct I/O and RocksDB, see: + /// https://github.com/facebook/rocksdb/wiki/Direct-IO /// /// Set this option to false if the database resides on a filesystem which /// does not support direct-io like FUSE, or any form of complex filesystem @@ -856,8 +885,8 @@ pub struct Config { pub rocksdb_direct_io: bool, /// Amount of threads that RocksDB will use for parallelism on database - /// operatons such as cleanup, sync, flush, compaction, etc. Set to 0 to use - /// all your logical threads. Defaults to your CPU logical thread count. + /// operations such as cleanup, sync, flush, compaction, etc. Set to 0 to + /// use all your logical threads. Defaults to your CPU logical thread count. /// /// default: varies by system #[serde(default = "default_rocksdb_parallelism_threads")] pub rocksdb_parallelism_threads: usize, /// Maximum number of LOG files RocksDB will keep. This must *not* be set to /// 0. It must be at least 1. 
Defaults to 3 as these are not very useful - /// unless troubleshooting/debugging a RocksDB bug. + /// unless troubleshooting/debugging a RocksDB bug. /// /// default: 3 #[serde(default = "default_rocksdb_max_log_files")] @@ -873,12 +902,15 @@ pub struct Config { /// Type of RocksDB database compression to use. /// - /// Available options are "zstd", "zlib", "bz2", "lz4", or "none" + /// Available options are "zstd", "zlib", "bz2", "lz4", or "none". /// /// It is best to use ZSTD as an overall good balance between - /// speed/performance, storage, IO amplification, and CPU usage. - /// For more performance but less compression (more storage used) and less - /// CPU usage, use LZ4. See https://github.com/facebook/rocksdb/wiki/Compression for more details. + /// speed/performance, storage, IO amplification, and CPU usage. For more + /// performance but less compression (more storage used) and less CPU usage, + /// use LZ4. + /// + /// For more details, see: + /// https://github.com/facebook/rocksdb/wiki/Compression /// /// "none" will disable compression. /// @@ -889,10 +921,9 @@ pub struct Config { /// Level of compression the specified compression algorithm for RocksDB to /// use. /// - /// Default is 32767, which is internally read by RocksDB as the - /// default magic number and translated to the library's default - /// compression level as they all differ. - /// See their `kDefaultCompressionLevel`. + /// Default is 32767, which is internally read by RocksDB as the default + /// magic number and translated to the library's default compression level + /// as they all differ. See their `kDefaultCompressionLevel`. /// /// default: 32767 #[serde(default = "default_rocksdb_compression_level")] @@ -900,13 +931,13 @@ pub struct Config { /// Level of compression the specified compression algorithm for the /// bottommost level/data for RocksDB to use. 
Default is 32767, which is - /// internally read by RocksDB as the default magic number and translated - /// to the library's default compression level as they all differ. - /// See their `kDefaultCompressionLevel`. + /// internally read by RocksDB as the default magic number and translated to + /// the library's default compression level as they all differ. See their + /// `kDefaultCompressionLevel`. /// /// Since this is the bottommost level (generally old and least used data), /// it may be desirable to have a very high compression level here as it's - /// lesss likely for this data to be used. Research your chosen compression + /// less likely for this data to be used. Research your chosen compression /// algorithm. /// /// default: 32767 @@ -924,7 +955,7 @@ pub struct Config { #[serde(default)] pub rocksdb_bottommost_compression: bool, - /// Database recovery mode (for RocksDB WAL corruption) + /// Database recovery mode (for RocksDB WAL corruption). /// /// Use this option when the server reports corruption and refuses to start. /// Set mode 2 (PointInTime) to cleanly recover from this corruption. The @@ -932,8 +963,8 @@ pub struct Config { /// minutes prior to the crash. Clients may have to run "clear-cache & /// reload" to account for the rollback. Upon success, you may reset the /// mode back to default and restart again. Please note in some cases the - /// corruption error may not be cleared for at least 30 minutes of - /// operation in PointInTime mode. + /// corruption error may not be cleared for at least 30 minutes of operation + /// in PointInTime mode. 
/// /// As a very last ditch effort, if PointInTime does not fix or resolve /// anything, you can try mode 3 (SkipAnyCorruptedRecord) but this will @@ -951,9 +982,11 @@ pub struct Config { /// 2 = PointInTime (use me if trying to recover) /// 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) /// - /// See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information on these modes. + /// For more information on these modes, see: + /// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes /// - /// See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. + /// For more details on recovering a corrupt database, see: + /// https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption /// /// default: 1 #[serde(default = "default_rocksdb_recovery_mode")] @@ -963,11 +996,12 @@ pub struct Config { /// database consistency at a potential performance impact due to further /// safety checks ran. /// - /// See https://github.com/facebook/rocksdb/wiki/Online-Verification#columnfamilyoptionsparanoid_file_checks for more information. + /// For more information, see: + /// https://github.com/facebook/rocksdb/wiki/Online-Verification#columnfamilyoptionsparanoid_file_checks #[serde(default)] pub rocksdb_paranoid_file_checks: bool, - /// Database repair mode (for RocksDB SST corruption) + /// Database repair mode (for RocksDB SST corruption). /// /// Use this option when the server reports corruption while running or /// panics. If the server refuses to start use the recovery mode options @@ -976,6 +1010,7 @@ pub struct Config { /// /// - Backing up your database directory is recommended prior to running the /// repair. + /// /// - Disabling repair mode and restarting the server is recommended after /// running the repair. 
/// @@ -1001,10 +1036,10 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_compaction_ioprio_idle: bool, - /// Config option to disable RocksDB compaction. You should never ever have - /// to disable this. If you for some reason find yourself needing to disable - /// this as part of troubleshooting or a bug, please reach out to us in the - /// conduwuit Matrix room with information and details. + /// Disables RocksDB compaction. You should never ever have to set this + /// option to true. If you for some reason find yourself needing to use this + /// option as part of troubleshooting or a bug, please reach out to us in + /// the conduwuit Matrix room with information and details. /// /// Disabling compaction will lead to a significantly bloated and /// explosively large database, gradually poor performance, unnecessarily @@ -1044,87 +1079,84 @@ pub struct Config { #[serde(default = "default_notification_push_path")] pub notification_push_path: String, - /// Config option to control local (your server only) presence - /// updates/requests. Note that presence on conduwuit is - /// very fast unlike Synapse's. If using outgoing presence, this MUST be - /// enabled. + /// Allow local (your server only) presence updates/requests. + /// + /// Note that presence on conduwuit is very fast unlike Synapse's. If using + /// outgoing presence, this MUST be enabled. #[serde(default = "true_fn")] pub allow_local_presence: bool, - /// Config option to control incoming federated presence updates/requests. + /// Allow incoming federated presence updates/requests. /// - /// This option receives presence updates from other - /// servers, but does not send any unless `allow_outgoing_presence` is true. - /// Note that presence on conduwuit is very fast unlike Synapse's. + /// This option receives presence updates from other servers, but does not + /// send any unless `allow_outgoing_presence` is true. Note that presence on + /// conduwuit is very fast unlike Synapse's. 
#[serde(default = "true_fn")] pub allow_incoming_presence: bool, - /// Config option to control outgoing presence updates/requests. + /// Allow outgoing presence updates/requests. /// /// This option sends presence updates to other servers, but does not - /// receive any unless `allow_incoming_presence` is true. - /// Note that presence on conduwuit is very fast unlike Synapse's. - /// If using outgoing presence, you MUST enable `allow_local_presence` as - /// well. + /// receive any unless `allow_incoming_presence` is true. Note that presence + /// on conduwuit is very fast unlike Synapse's. If using outgoing presence, + /// you MUST enable `allow_local_presence` as well. #[serde(default = "true_fn")] pub allow_outgoing_presence: bool, - /// Config option to control how many seconds before presence updates that - /// you are idle. Defaults to 5 minutes. + /// How many seconds without presence updates before you become idle. + /// Defaults to 5 minutes. /// /// default: 300 #[serde(default = "default_presence_idle_timeout_s")] pub presence_idle_timeout_s: u64, - /// Config option to control how many seconds before presence updates that - /// you are offline. Defaults to 30 minutes. + /// How many seconds without presence updates before you become offline. + /// Defaults to 30 minutes. /// /// default: 1800 #[serde(default = "default_presence_offline_timeout_s")] pub presence_offline_timeout_s: u64, - /// Config option to enable the presence idle timer for remote users. + /// Enable the presence idle timer for remote users. + /// /// Disabling is offered as an optimization for servers participating in /// many large rooms or when resources are limited. Disabling it may cause - /// incorrect presence states (i.e. stuck online) to be seen for some - /// remote users. + /// incorrect presence states (i.e. stuck online) to be seen for some remote + /// users. 
#[serde(default = "true_fn")] pub presence_timeout_remote_users: bool, - /// Config option to control whether we should receive remote incoming read - /// receipts. + /// Allow receiving incoming read receipts from remote servers. #[serde(default = "true_fn")] pub allow_incoming_read_receipts: bool, - /// Config option to control whether we should send read receipts to remote - /// servers. + /// Allow sending read receipts to remote servers. #[serde(default = "true_fn")] pub allow_outgoing_read_receipts: bool, - /// Config option to control outgoing typing updates to federation. + /// Allow outgoing typing updates to federation. #[serde(default = "true_fn")] pub allow_outgoing_typing: bool, - /// Config option to control incoming typing updates from federation. + /// Allow incoming typing updates from federation. #[serde(default = "true_fn")] pub allow_incoming_typing: bool, - /// Config option to control maximum time federation user can indicate - /// typing. + /// Maximum time federation user can indicate typing. /// /// default: 30 #[serde(default = "default_typing_federation_timeout_s")] pub typing_federation_timeout_s: u64, - /// Config option to control minimum time local client can indicate typing. - /// This does not override a client's request to stop typing. It only - /// enforces a minimum value in case of no stop request. + /// Minimum time local client can indicate typing. This does not override a + /// client's request to stop typing. It only enforces a minimum value in + /// case of no stop request. /// /// default: 15 #[serde(default = "default_typing_client_timeout_min_s")] pub typing_client_timeout_min_s: u64, - /// Config option to control maximum time local client can indicate typing. + /// Maximum time local client can indicate typing. /// /// default: 45 #[serde(default = "default_typing_client_timeout_max_s")] @@ -1155,8 +1187,8 @@ pub struct Config { /// brotli. 
This option does nothing if conduwuit was not built with /// `brotli_compression` feature. Please be aware that enabling HTTP /// compression may weaken TLS. Most users should not need to enable this. - /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before - /// deciding to enable this. + /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH + /// before deciding to enable this. #[serde(default)] pub brotli_compression: bool, @@ -1175,8 +1207,8 @@ pub struct Config { #[serde(default)] pub allow_guests_auto_join_rooms: bool, - /// Config option to control whether the legacy unauthenticated Matrix media - /// repository endpoints will be enabled. These endpoints consist of: + /// Enable the legacy unauthenticated Matrix media repository endpoints. + /// These endpoints consist of: /// - /_matrix/media/*/config /// - /_matrix/media/*/upload /// - /_matrix/media/*/preview_url @@ -1193,8 +1225,8 @@ pub struct Config { #[serde(default = "true_fn")] pub freeze_legacy_media: bool, - /// Checks consistency of the media directory at startup: - /// 1. When `media_compat_file_link` is enbled, this check will upgrade + /// Check consistency of the media directory at startup: + /// 1. When `media_compat_file_link` is enabled, this check will upgrade /// media when switching back and forth between Conduit and conduwuit. /// Both options must be enabled to handle this. /// 2. When media is deleted from the directory, this check will also delete @@ -1207,18 +1239,22 @@ pub struct Config { pub media_startup_check: bool, /// Enable backward-compatibility with Conduit's media directory by creating - /// symlinks of media. This option is only necessary if you plan on using - /// Conduit again. Otherwise setting this to false reduces filesystem - /// clutter and overhead for managing these symlinks in the directory. This - /// is now disabled by default. 
You may still return to upstream Conduit - /// but you have to run conduwuit at least once with this set to true and - /// allow the media_startup_check to take place before shutting - /// down to return to Conduit. + /// symlinks of media. + /// + /// This option is only necessary if you plan on using Conduit again. + /// Otherwise setting this to false reduces filesystem clutter and overhead + /// for managing these symlinks in the directory. This is now disabled by + /// default. You may still return to upstream Conduit but you have to run + /// conduwuit at least once with this set to true and allow the + /// media_startup_check to take place before shutting down to return to + /// Conduit. #[serde(default)] pub media_compat_file_link: bool, - /// Prunes missing media from the database as part of the media startup - /// checks. This means if you delete files from the media directory the + /// Prune missing media from the database as part of the media startup + /// checks. + /// + /// This means if you delete files from the media directory the /// corresponding entries will be removed from the database. This is /// disabled by default because if the media directory is accidentally moved /// or inaccessible, the metadata entries in the database will be lost with @@ -1291,34 +1327,37 @@ pub struct Config { pub url_preview_bound_interface: Option>, /// Vector list of domains allowed to send requests to for URL previews. - /// Defaults to none. Note: this is a *contains* match, not an explicit - /// match. Putting "google.com" will match "https://google.com" and + /// + /// This is a *contains* match, not an explicit match. Putting "google.com" + /// will match "https://google.com" and /// "http://mymaliciousdomainexamplegoogle.com" Setting this to "*" will /// allow all URL previews. Please note that this opens up significant - /// attack surface to your server, you are expected to be aware of the - /// risks by doing so. 
+ /// attack surface to your server, you are expected to be aware of the risks + /// by doing so. /// /// default: [] #[serde(default)] pub url_preview_domain_contains_allowlist: Vec, /// Vector list of explicit domains allowed to send requests to for URL - /// previews. Defaults to none. Note: This is an *explicit* match, not a - /// contains match. Putting "google.com" will match "https://google.com", - /// "http://google.com", but not + /// previews. + /// + /// This is an *explicit* match, not a contains match. Putting "google.com" + /// will match "https://google.com", "http://google.com", but not /// "https://mymaliciousdomainexamplegoogle.com". Setting this to "*" will /// allow all URL previews. Please note that this opens up significant - /// attack surface to your server, you are expected to be aware of the - /// risks by doing so. + /// attack surface to your server, you are expected to be aware of the risks + /// by doing so. /// /// default: [] #[serde(default)] pub url_preview_domain_explicit_allowlist: Vec, /// Vector list of explicit domains not allowed to send requests to for URL - /// previews. Defaults to none. Note: This is an *explicit* match, not a - /// contains match. Putting "google.com" will match "https://google.com", - /// "http://google.com", but not + /// previews. + /// + /// This is an *explicit* match, not a contains match. Putting "google.com" + /// will match "https://google.com", "http://google.com", but not /// "https://mymaliciousdomainexamplegoogle.com". The denylist is checked /// first before allowlist. Setting this to "*" will not do anything. /// @@ -1327,14 +1366,14 @@ pub struct Config { pub url_preview_domain_explicit_denylist: Vec, /// Vector list of URLs allowed to send requests to for URL previews. - /// Defaults to none. Note that this is a *contains* match, not an - /// explicit match. 
Putting "google.com" will match - /// "https://google.com/", + /// + /// Note that this is a *contains* match, not an explicit match. Putting + /// "google.com" will match "https://google.com/", /// "https://google.com/url?q=https://mymaliciousdomainexample.com", and - /// "https://mymaliciousdomainexample.com/hi/google.com" Setting this to - /// "*" will allow all URL previews. Please note that this opens up - /// significant attack surface to your server, you are expected to be - /// aware of the risks by doing so. + /// "https://mymaliciousdomainexample.com/hi/google.com" Setting this to "*" + /// will allow all URL previews. Please note that this opens up significant + /// attack surface to your server, you are expected to be aware of the risks + /// by doing so. /// /// default: [] #[serde(default)] @@ -1351,21 +1390,20 @@ pub struct Config { /// checks (contains and explicit) on the root domain or not. Does not apply /// to URL contains allowlist. Defaults to false. /// - /// Example usecase: If this is - /// enabled and you have "wikipedia.org" allowed in the explicit and/or - /// contains domain allowlist, it will allow all subdomains under - /// "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is - /// checked and matched. Useful if the domain contains allowlist is still - /// too broad for you but you still want to allow all the subdomains under a - /// root domain. + /// Example usecase: If this is enabled and you have "wikipedia.org" allowed + /// in the explicit and/or contains domain allowlist, it will allow all + /// subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the + /// root domain is checked and matched. Useful if the domain contains + /// allowlist is still too broad for you but you still want to allow all the + /// subdomains under a root domain. #[serde(default)] pub url_preview_check_root_domain: bool, /// List of forbidden room aliases and room IDs as strings of regex /// patterns. 
/// - /// Regex can be used or explicit contains matches can be done by - /// just specifying the words (see example). + /// Regex can be used or explicit contains matches can be done by just + /// specifying the words (see example). /// /// This is checked upon room alias creation, custom room ID creation if /// used, and startup as warnings if any room aliases in your database have @@ -1395,15 +1433,15 @@ pub struct Config { pub forbidden_usernames: RegexSet, /// Retry failed and incomplete messages to remote servers immediately upon - /// startup. This is called bursting. If this is disabled, said messages - /// may not be delivered until more messages are queued for that server. Do - /// not change this option unless server resources are extremely limited or - /// the scale of the server's deployment is huge. Do not disable this - /// unless you know what you are doing. + /// startup. This is called bursting. If this is disabled, said messages may + /// not be delivered until more messages are queued for that server. Do not + /// change this option unless server resources are extremely limited or the + /// scale of the server's deployment is huge. Do not disable this unless you + /// know what you are doing. #[serde(default = "true_fn")] pub startup_netburst: bool, - /// messages are dropped and not reattempted. The `startup_netburst` option + /// Messages are dropped and not reattempted. The `startup_netburst` option /// must be enabled for this value to have any effect. Do not change this /// value unless you know what you are doing. Set this value to -1 to /// reattempt every message without trimming the queues; this may consume @@ -1414,37 +1452,35 @@ pub struct Config { #[serde(default = "default_startup_netburst_keep")] pub startup_netburst_keep: i64, - /// controls whether non-admin local users are forbidden from sending room - /// invites (local and remote), and if non-admin users can receive remote - /// room invites. 
admins are always allowed to send and receive all room - /// invites. + /// Block non-admin local users from sending room invites (local and + /// remote), and block non-admin users from receiving remote room invites. + /// + /// Admins are always allowed to send and receive all room invites. #[serde(default)] pub block_non_admin_invites: bool, - /// Allows admins to enter commands in rooms other than "#admins" (admin - /// room) by prefixing your message with "\!admin" or "\\!admin" followed - /// up a normal conduwuit admin command. The reply will be publicly visible - /// to the room, originating from the sender. + /// Allow admins to enter commands in rooms other than "#admins" (admin + /// room) by prefixing your message with "\!admin" or "\\!admin" followed up + /// a normal conduwuit admin command. The reply will be publicly visible to + /// the room, originating from the sender. /// /// example: \\!admin debug ping puppygock.gay #[serde(default = "true_fn")] pub admin_escape_commands: bool, - /// Controls whether the conduwuit admin room console / CLI will immediately - /// activate on startup. This option can also be enabled with `--console` - /// conduwuit argument. + /// Automatically activate the conduwuit admin room console / CLI on + /// startup. This option can also be enabled with `--console` conduwuit + /// argument. #[serde(default)] pub admin_console_automatic: bool, - /// Controls what admin commands will be executed on startup. This is a - /// vector list of strings of admin commands to run. - /// + /// List of admin commands to execute on startup. 
/// /// This option can also be configured with the `--execute` conduwuit /// argument and can take standard shell commands and environment variables /// - /// Such example could be: `./conduwuit --execute "server admin-notice - /// conduwuit has started up at $(date)"` + /// For example: `./conduwuit --execute "server admin-notice conduwuit has + /// started up at $(date)"` /// /// example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` /// @@ -1452,8 +1488,10 @@ pub struct Config { #[serde(default)] pub admin_execute: Vec, - /// Controls whether conduwuit should error and fail to start if an admin - /// execute command (`--execute` / `admin_execute`) fails. + /// Ignore errors in startup commands. + /// + /// If false, conduwuit will error and fail to start if an admin execute + /// command (`--execute` / `admin_execute`) fails. #[serde(default)] pub admin_execute_errors_ignore: bool, @@ -1478,21 +1516,22 @@ pub struct Config { /// Sentry.io crash/panic reporting, performance monitoring/metrics, etc. /// This is NOT enabled by default. conduwuit's default Sentry reporting - /// endpoint domain is o4506996327251968.ingest.us.sentry.io + /// endpoint domain is `o4506996327251968.ingest.us.sentry.io`. #[serde(default)] pub sentry: bool, - /// Sentry reporting URL if a custom one is desired + /// Sentry reporting URL, if a custom one is desired. /// /// default: "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, - /// Report your conduwuit server_name in Sentry.io crash reports and metrics + /// Report your conduwuit server_name in Sentry.io crash reports and + /// metrics. #[serde(default)] pub sentry_send_server_name: bool, - /// Performance monitoring/tracing sample rate for Sentry.io + /// Performance monitoring/tracing sample rate for Sentry.io. 
/// /// Note that too high values may impact performance, and can be disabled by /// setting it to 0.0 (0%) This value is read as a percentage to Sentry, @@ -1506,8 +1545,8 @@ pub struct Config { #[serde(default)] pub sentry_attach_stacktrace: bool, - /// Send panics to sentry. This is true by default, but sentry has to be - /// enabled. The global "sentry" config option must be enabled to send any + /// Send panics to Sentry. This is true by default, but Sentry has to be + /// enabled. The global `sentry` config option must be enabled to send any /// data. #[serde(default = "true_fn")] pub sentry_send_panic: bool, @@ -1526,7 +1565,9 @@ pub struct Config { pub sentry_filter: String, /// Enable the tokio-console. This option is only relevant to developers. - /// See https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console for more information. + /// + /// For more information, see: + /// https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console #[serde(default)] pub tokio_console: bool, @@ -1534,8 +1575,8 @@ pub struct Config { pub test: BTreeSet, /// Controls whether admin room notices like account registrations, password - /// changes, account deactivations, room directory publications, etc will - /// be sent to the admin room. Update notices and normal admin command + /// changes, account deactivations, room directory publications, etc will be + /// sent to the admin room. Update notices and normal admin command /// responses will still be sent. 
#[serde(default = "true_fn")] pub admin_room_notices: bool, From 14341bb906c0428269f59aae6c10071da55d7608 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 18 Dec 2024 22:54:16 +0000 Subject: [PATCH 0434/1248] simplify and improve db iter State init interface Signed-off-by: Jason Volk --- src/database/pool.rs | 23 ----------------------- src/database/stream.rs | 14 ++++++++++++-- 2 files changed, 12 insertions(+), 25 deletions(-) diff --git a/src/database/pool.rs b/src/database/pool.rs index 28eb38bd..4f018a38 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -273,29 +273,6 @@ fn handle_iter(&self, mut cmd: Seek) { let _chan_sent = chan_result.is_ok(); } -#[implement(Pool)] -#[tracing::instrument( - name = "seek", - level = "trace", - skip_all, - fields(%cmd.map), -)] -fn _handle_seek(&self, mut cmd: Seek) { - let chan = cmd.res.take().expect("missing result channel"); - - if chan.is_canceled() { - return; - } - - match cmd.dir { - | Direction::Forward => cmd.state.seek_fwd(), - | Direction::Reverse => cmd.state.seek_rev(), - }; - - let chan_result = chan.send(into_send_seek(cmd.state)); - let _chan_sent = chan_result.is_ok(); -} - #[implement(Pool)] #[tracing::instrument( name = "get", diff --git a/src/database/stream.rs b/src/database/stream.rs index f849d08f..f8e6733d 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -56,20 +56,30 @@ impl<'a> State<'a> { } pub(super) fn init_fwd(mut self, from: From<'_>) -> Self { + debug_assert!(self.init, "init must be set to make this call"); + debug_assert!(!self.seek, "seek must not be set to make this call"); + if let Some(key) = from { self.inner.seek(key); - self.seek = true; + } else { + self.inner.seek_to_first(); } + self.seek = true; self } pub(super) fn init_rev(mut self, from: From<'_>) -> Self { + debug_assert!(self.init, "init must be set to make this call"); + debug_assert!(!self.seek, "seek must not be set to make this call"); + if let Some(key) = from { 
self.inner.seek_for_prev(key); - self.seek = true; + } else { + self.inner.seek_to_last(); } + self.seek = true; self } From 98e6c81e4962791a71ec479cf4f43c63bd119e13 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 18 Dec 2024 22:56:53 +0000 Subject: [PATCH 0435/1248] offload remaining db iterator initial seeks on cache miss consume task budget on cache hit Signed-off-by: Jason Volk --- src/database/map/count.rs | 4 +- src/database/map/keys.rs | 45 +++++++++++++++++---- src/database/map/keys_prefix.rs | 6 +-- src/database/map/rev_keys.rs | 45 +++++++++++++++++---- src/database/map/rev_keys_prefix.rs | 6 +-- src/database/map/rev_stream.rs | 56 ++++++++++++++++++++------- src/database/map/rev_stream_from.rs | 13 ++++--- src/database/map/rev_stream_prefix.rs | 6 +-- src/database/map/stream.rs | 56 ++++++++++++++++++++------- src/database/map/stream_from.rs | 13 ++++--- src/database/map/stream_prefix.rs | 6 +-- src/database/stream.rs | 2 - src/database/stream/items.rs | 18 ++------- src/database/stream/items_rev.rs | 18 ++------- src/database/stream/keys.rs | 18 ++------- src/database/stream/keys_rev.rs | 18 ++------- 16 files changed, 199 insertions(+), 131 deletions(-) diff --git a/src/database/map/count.rs b/src/database/map/count.rs index b9b34613..22b298b9 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -7,7 +7,9 @@ use serde::Serialize; /// Count the total number of entries in the map. #[implement(super::Map)] #[inline] -pub fn count(&self) -> impl Future + Send + '_ { self.raw_keys().count() } +pub fn count(self: &Arc) -> impl Future + Send + '_ { + self.raw_keys().count() +} /// Count the number of entries in the map starting from a lower-bound. 
/// diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs index 3ab5bacc..7d09f3da 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -1,11 +1,16 @@ -use conduwuit::{implement, Result}; -use futures::{Stream, StreamExt}; -use serde::Deserialize; +use std::sync::Arc; -use crate::{keyval, keyval::Key, stream, stream::Cursor}; +use conduwuit::{implement, Result}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use rocksdb::Direction; +use serde::Deserialize; +use tokio::task; + +use super::stream::is_cached; +use crate::{keyval, keyval::Key, stream}; #[implement(super::Map)] -pub fn keys<'a, K>(&'a self) -> impl Stream>> + Send +pub fn keys<'a, K>(self: &'a Arc) -> impl Stream>> + Send where K: Deserialize<'a> + Send, { @@ -14,7 +19,33 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn raw_keys(&self) -> impl Stream>> + Send { +pub fn raw_keys(self: &Arc) -> impl Stream>> + Send { + use crate::pool::Seek; + let opts = super::iter_options_default(); - stream::Keys::new(&self.db, &self.cf, opts).init(None) + let state = stream::State::new(&self.db, &self.cf, opts); + if is_cached(self) { + let state = state.init_fwd(None); + return task::consume_budget() + .map(move |()| stream::Keys::<'_>::from(state)) + .into_stream() + .flatten() + .boxed(); + } + + let seek = Seek { + map: self.clone(), + dir: Direction::Forward, + state: crate::pool::into_send_seek(state), + key: None, + res: None, + }; + + self.db + .pool + .execute_iter(seek) + .ok_into::>() + .into_stream() + .try_flatten() + .boxed() } diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 32a1f04c..28bc7ccd 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -1,11 +1,7 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{implement, Result}; -use futures::{ - future, - stream::{Stream, StreamExt}, - TryStreamExt, -}; 
+use futures::{future, Stream, StreamExt, TryStreamExt}; use serde::{Deserialize, Serialize}; use crate::keyval::{result_deserialize_key, serialize_key, Key}; diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 7eb4ce63..0ca6ad0f 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -1,11 +1,16 @@ -use conduwuit::{implement, Result}; -use futures::{Stream, StreamExt}; -use serde::Deserialize; +use std::sync::Arc; -use crate::{keyval, keyval::Key, stream, stream::Cursor}; +use conduwuit::{implement, Result}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use rocksdb::Direction; +use serde::Deserialize; +use tokio::task; + +use super::rev_stream::is_cached; +use crate::{keyval, keyval::Key, stream}; #[implement(super::Map)] -pub fn rev_keys<'a, K>(&'a self) -> impl Stream>> + Send +pub fn rev_keys<'a, K>(self: &'a Arc) -> impl Stream>> + Send where K: Deserialize<'a> + Send, { @@ -14,7 +19,33 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_raw_keys(&self) -> impl Stream>> + Send { +pub fn rev_raw_keys(self: &Arc) -> impl Stream>> + Send { + use crate::pool::Seek; + let opts = super::iter_options_default(); - stream::KeysRev::new(&self.db, &self.cf, opts).init(None) + let state = stream::State::new(&self.db, &self.cf, opts); + if is_cached(self) { + let state = state.init_rev(None); + return task::consume_budget() + .map(move |()| stream::KeysRev::<'_>::from(state)) + .into_stream() + .flatten() + .boxed(); + } + + let seek = Seek { + map: self.clone(), + dir: Direction::Reverse, + state: crate::pool::into_send_seek(state), + key: None, + res: None, + }; + + self.db + .pool + .execute_iter(seek) + .ok_into::>() + .into_stream() + .try_flatten() + .boxed() } diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index 9fda49a0..fb29acaf 100644 --- a/src/database/map/rev_keys_prefix.rs +++ 
b/src/database/map/rev_keys_prefix.rs @@ -1,11 +1,7 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{implement, Result}; -use futures::{ - future, - stream::{Stream, StreamExt}, - TryStreamExt, -}; +use futures::{future, Stream, StreamExt, TryStreamExt}; use serde::{Deserialize, Serialize}; use crate::keyval::{result_deserialize_key, serialize_key, Key}; diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index 7f58582f..d882dd91 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -1,14 +1,20 @@ -use conduwuit::{implement, Result}; -use futures::stream::{Stream, StreamExt}; -use serde::Deserialize; +use std::sync::Arc; -use crate::{keyval, keyval::KeyVal, stream, stream::Cursor}; +use conduwuit::{implement, Result}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use rocksdb::Direction; +use serde::Deserialize; +use tokio::task; + +use crate::{keyval, keyval::KeyVal, stream}; /// Iterate key-value entries in the map from the end. 
/// /// - Result is deserialized #[implement(super::Map)] -pub fn rev_stream<'a, K, V>(&'a self) -> impl Stream>> + Send +pub fn rev_stream<'a, K, V>( + self: &'a Arc, +) -> impl Stream>> + Send where K: Deserialize<'a> + Send, V: Deserialize<'a> + Send, @@ -22,9 +28,35 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_raw_stream(&self) -> impl Stream>> + Send { - let opts = super::iter_options_default(); - stream::ItemsRev::new(&self.db, &self.cf, opts).init(None) +pub fn rev_raw_stream(self: &Arc) -> impl Stream>> + Send { + use crate::pool::Seek; + + let opts = super::read_options_default(); + let state = stream::State::new(&self.db, &self.cf, opts); + if is_cached(self) { + let state = state.init_rev(None); + return task::consume_budget() + .map(move |()| stream::ItemsRev::<'_>::from(state)) + .into_stream() + .flatten() + .boxed(); + }; + + let seek = Seek { + map: self.clone(), + dir: Direction::Reverse, + state: crate::pool::into_send_seek(state), + key: None, + res: None, + }; + + self.db + .pool + .execute_iter(seek) + .ok_into::>() + .into_stream() + .try_flatten() + .boxed() } #[tracing::instrument( @@ -33,13 +65,9 @@ pub fn rev_raw_stream(&self) -> impl Stream>> + Send { skip_all, fields(%map), )] -pub(super) fn _is_cached

    (map: &super::Map) -> bool -where - P: AsRef<[u8]> + ?Sized, -{ +pub(super) fn is_cached(map: &super::Map) -> bool { let opts = super::cache_read_options_default(); - let mut state = stream::State::new(&map.db, &map.cf, opts); + let state = stream::State::new(&map.db, &map.cf, opts).init_rev(None); - state.seek_rev(); !state.is_incomplete() } diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index d166aa0f..72fc739c 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -1,12 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{implement, Result}; -use futures::{ - stream::{Stream, StreamExt}, - FutureExt, TryFutureExt, TryStreamExt, -}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; +use tokio::task; use crate::{ keyval::{result_deserialize, serialize_key, KeyVal}, @@ -85,7 +83,12 @@ where let opts = super::iter_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); if is_cached(self, from) { - return stream::ItemsRev::<'_>::from(state.init_rev(from.as_ref().into())).boxed(); + let state = state.init_rev(from.as_ref().into()); + return task::consume_budget() + .map(move |()| stream::ItemsRev::<'_>::from(state)) + .into_stream() + .flatten() + .boxed(); }; let seek = Seek { diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 857aa3a5..22a2ce53 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -1,11 +1,7 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{implement, Result}; -use futures::{ - future, - stream::{Stream, StreamExt}, - TryStreamExt, -}; +use futures::{future, Stream, StreamExt, TryStreamExt}; use serde::{Deserialize, Serialize}; use crate::keyval::{result_deserialize, serialize_key, KeyVal}; diff --git a/src/database/map/stream.rs 
b/src/database/map/stream.rs index 1a90b8fb..11b0676c 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -1,14 +1,20 @@ -use conduwuit::{implement, Result}; -use futures::stream::{Stream, StreamExt}; -use serde::Deserialize; +use std::sync::Arc; -use crate::{keyval, keyval::KeyVal, stream, stream::Cursor}; +use conduwuit::{implement, Result}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use rocksdb::Direction; +use serde::Deserialize; +use tokio::task; + +use crate::{keyval, keyval::KeyVal, stream}; /// Iterate key-value entries in the map from the beginning. /// /// - Result is deserialized #[implement(super::Map)] -pub fn stream<'a, K, V>(&'a self) -> impl Stream>> + Send +pub fn stream<'a, K, V>( + self: &'a Arc, +) -> impl Stream>> + Send where K: Deserialize<'a> + Send, V: Deserialize<'a> + Send, @@ -21,9 +27,35 @@ where /// - Result is raw #[implement(super::Map)] #[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn raw_stream(&self) -> impl Stream>> + Send { - let opts = super::iter_options_default(); - stream::Items::new(&self.db, &self.cf, opts).init(None) +pub fn raw_stream(self: &Arc) -> impl Stream>> + Send { + use crate::pool::Seek; + + let opts = super::read_options_default(); + let state = stream::State::new(&self.db, &self.cf, opts); + if is_cached(self) { + let state = state.init_fwd(None); + return task::consume_budget() + .map(move |()| stream::Items::<'_>::from(state)) + .into_stream() + .flatten() + .boxed(); + }; + + let seek = Seek { + map: self.clone(), + dir: Direction::Forward, + state: crate::pool::into_send_seek(state), + key: None, + res: None, + }; + + self.db + .pool + .execute_iter(seek) + .ok_into::>() + .into_stream() + .try_flatten() + .boxed() } #[tracing::instrument( @@ -32,13 +64,9 @@ pub fn raw_stream(&self) -> impl Stream>> + Send { skip_all, fields(%map), )] -pub(super) fn _is_cached

    (map: &super::Map) -> bool -where - P: AsRef<[u8]> + ?Sized, -{ +pub(super) fn is_cached(map: &super::Map) -> bool { let opts = super::cache_read_options_default(); - let mut state = stream::State::new(&map.db, &map.cf, opts); + let state = stream::State::new(&map.db, &map.cf, opts).init_fwd(None); - state.seek_fwd(); !state.is_incomplete() } diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 107ce4b1..79ea8f51 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -1,12 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{implement, Result}; -use futures::{ - stream::{Stream, StreamExt}, - FutureExt, TryFutureExt, TryStreamExt, -}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; +use tokio::task; use crate::{ keyval::{result_deserialize, serialize_key, KeyVal}, @@ -82,7 +80,12 @@ where let opts = super::read_options_default(); let state = stream::State::new(&self.db, &self.cf, opts); if is_cached(self, from) { - return stream::Items::<'_>::from(state.init_fwd(from.as_ref().into())).boxed(); + let state = state.init_fwd(from.as_ref().into()); + return task::consume_budget() + .map(move |()| stream::Items::<'_>::from(state)) + .into_stream() + .flatten() + .boxed(); }; let seek = Seek { diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index a05e2fc5..adacfc81 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -1,11 +1,7 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{implement, Result}; -use futures::{ - future, - stream::{Stream, StreamExt}, - TryStreamExt, -}; +use futures::{future, Stream, StreamExt, TryStreamExt}; use serde::{Deserialize, Serialize}; use crate::keyval::{result_deserialize, serialize_key, KeyVal}; diff --git a/src/database/stream.rs b/src/database/stream.rs index 
f8e6733d..d7cb16c6 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -29,8 +29,6 @@ pub(crate) trait Cursor<'a, T> { fn seek(&mut self); - fn init(self, from: From<'a>) -> Self; - fn get(&self) -> Option> { self.fetch() .map(Ok) diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index 2a38d97e..cd81b4a0 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -1,4 +1,4 @@ -use std::{convert, pin::Pin, sync::Arc}; +use std::pin::Pin; use conduwuit::Result; use futures::{ @@ -6,22 +6,15 @@ use futures::{ task::{Context, Poll}, Stream, }; -use rocksdb::{ColumnFamily, ReadOptions}; -use super::{keyval_longevity, Cursor, From, State}; -use crate::{keyval::KeyVal, Engine}; +use super::{keyval_longevity, Cursor, State}; +use crate::keyval::KeyVal; pub(crate) struct Items<'a> { state: State<'a>, } -impl<'a> Items<'a> { - pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { - Self { state: State::new(db, cf, opts) } - } -} - -impl<'a> convert::From> for Items<'a> { +impl<'a> From> for Items<'a> { fn from(state: State<'a>) -> Self { Self { state } } } @@ -32,9 +25,6 @@ impl<'a> Cursor<'a, KeyVal<'a>> for Items<'a> { #[inline] fn seek(&mut self) { self.state.seek_fwd(); } - - #[inline] - fn init(self, from: From<'a>) -> Self { Self { state: self.state.init_fwd(from) } } } impl<'a> Stream for Items<'a> { diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index c3a6cc7f..c6cf9b53 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -1,4 +1,4 @@ -use std::{convert, pin::Pin, sync::Arc}; +use std::pin::Pin; use conduwuit::Result; use futures::{ @@ -6,22 +6,15 @@ use futures::{ task::{Context, Poll}, Stream, }; -use rocksdb::{ColumnFamily, ReadOptions}; -use super::{keyval_longevity, Cursor, From, State}; -use crate::{keyval::KeyVal, Engine}; +use super::{keyval_longevity, Cursor, State}; +use crate::keyval::KeyVal; pub(crate) struct 
ItemsRev<'a> { state: State<'a>, } -impl<'a> ItemsRev<'a> { - pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { - Self { state: State::new(db, cf, opts) } - } -} - -impl<'a> convert::From> for ItemsRev<'a> { +impl<'a> From> for ItemsRev<'a> { fn from(state: State<'a>) -> Self { Self { state } } } @@ -32,9 +25,6 @@ impl<'a> Cursor<'a, KeyVal<'a>> for ItemsRev<'a> { #[inline] fn seek(&mut self) { self.state.seek_rev(); } - - #[inline] - fn init(self, from: From<'a>) -> Self { Self { state: self.state.init_rev(from) } } } impl<'a> Stream for ItemsRev<'a> { diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index 0696781d..9bf27507 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -1,4 +1,4 @@ -use std::{convert, pin::Pin, sync::Arc}; +use std::pin::Pin; use conduwuit::Result; use futures::{ @@ -6,22 +6,15 @@ use futures::{ task::{Context, Poll}, Stream, }; -use rocksdb::{ColumnFamily, ReadOptions}; -use super::{slice_longevity, Cursor, From, State}; -use crate::{keyval::Key, Engine}; +use super::{slice_longevity, Cursor, State}; +use crate::keyval::Key; pub(crate) struct Keys<'a> { state: State<'a>, } -impl<'a> Keys<'a> { - pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { - Self { state: State::new(db, cf, opts) } - } -} - -impl<'a> convert::From> for Keys<'a> { +impl<'a> From> for Keys<'a> { fn from(state: State<'a>) -> Self { Self { state } } } @@ -33,9 +26,6 @@ impl<'a> Cursor<'a, Key<'a>> for Keys<'a> { #[inline] fn seek(&mut self) { self.state.seek_fwd(); } - - #[inline] - fn init(self, from: From<'a>) -> Self { Self { state: self.state.init_fwd(from) } } } impl<'a> Stream for Keys<'a> { diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index 42706d9f..8657df0f 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -1,4 +1,4 @@ -use std::{convert, pin::Pin, sync::Arc}; +use std::pin::Pin; use conduwuit::Result; 
use futures::{ @@ -6,22 +6,15 @@ use futures::{ task::{Context, Poll}, Stream, }; -use rocksdb::{ColumnFamily, ReadOptions}; -use super::{slice_longevity, Cursor, From, State}; -use crate::{keyval::Key, Engine}; +use super::{slice_longevity, Cursor, State}; +use crate::keyval::Key; pub(crate) struct KeysRev<'a> { state: State<'a>, } -impl<'a> KeysRev<'a> { - pub(crate) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { - Self { state: State::new(db, cf, opts) } - } -} - -impl<'a> convert::From> for KeysRev<'a> { +impl<'a> From> for KeysRev<'a> { fn from(state: State<'a>) -> Self { Self { state } } } @@ -33,9 +26,6 @@ impl<'a> Cursor<'a, Key<'a>> for KeysRev<'a> { #[inline] fn seek(&mut self) { self.state.seek_rev(); } - - #[inline] - fn init(self, from: From<'a>) -> Self { Self { state: self.state.init_rev(from) } } } impl<'a> Stream for KeysRev<'a> { From af3d6a2e372b7054126d8f31e34eb42d51aa8829 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 18 Dec 2024 03:34:56 +0000 Subject: [PATCH 0436/1248] shard sender into multiple task workers by destination hash rename Destination::Normal variant tracing instruments Signed-off-by: Jason Volk --- conduwuit-example.toml | 7 ++ src/admin/query/sending.rs | 4 +- src/core/config/mod.rs | 9 ++ src/service/resolver/actual.rs | 3 +- src/service/sending/data.rs | 2 +- src/service/sending/dest.rs | 4 +- src/service/sending/mod.rs | 119 ++++++++++++++---- src/service/sending/send.rs | 5 +- src/service/sending/sender.rs | 217 +++++++++++++++++++++++---------- 9 files changed, 275 insertions(+), 95 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index a82d8f69..3669961a 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1410,6 +1410,13 @@ # #db_pool_queue_size = 256 +# Number of sender task workers; determines sender parallelism. Default is +# '0' which means the value is determined internally, likely matching the +# number of tokio worker-threads or number of cores, etc. 
Override by +# setting a non-zero value. +# +#sender_workers = 0 + [global.tls] # Path to a valid TLS certificate file. diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 696067b7..3edbbe87 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -113,7 +113,7 @@ pub(super) async fn process( | (None, Some(server_name), None, None) => services .sending .db - .queued_requests(&Destination::Normal(server_name.into())), + .queued_requests(&Destination::Federation(server_name.into())), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { return Ok(RoomMessageEventContent::text_plain( @@ -183,7 +183,7 @@ pub(super) async fn process( | (None, Some(server_name), None, None) => services .sending .db - .active_requests_for(&Destination::Normal(server_name.into())), + .active_requests_for(&Destination::Federation(server_name.into())), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 8fd5621f..23feb0ca 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1598,6 +1598,15 @@ pub struct Config { #[serde(default = "default_db_pool_queue_size")] pub db_pool_queue_size: usize, + /// Number of sender task workers; determines sender parallelism. Default is + /// '0' which means the value is determined internally, likely matching the + /// number of tokio worker-threads or number of cores, etc. Override by + /// setting a non-zero value. 
+ /// + /// default: 0 + #[serde(default)] + pub sender_workers: usize, + #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 63de6539..3f609b95 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -4,6 +4,7 @@ use std::{ }; use conduwuit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; +use futures::FutureExt; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; @@ -32,7 +33,7 @@ impl super::Service { (result, true) } else { self.validate_dest(server_name)?; - (self.resolve_actual_dest(server_name, true).await?, false) + (self.resolve_actual_dest(server_name, true).boxed().await?, false) }; let CachedDest { dest, host, .. } = result; diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index ac06424f..a699b8ee 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -246,7 +246,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se })?; ( - Destination::Normal(ServerName::parse(server).map_err(|_| { + Destination::Federation(ServerName::parse(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), if value.is_empty() { diff --git a/src/service/sending/dest.rs b/src/service/sending/dest.rs index 2c6063cc..4099d372 100644 --- a/src/service/sending/dest.rs +++ b/src/service/sending/dest.rs @@ -7,14 +7,14 @@ use ruma::{OwnedServerName, OwnedUserId}; pub enum Destination { Appservice(String), Push(OwnedUserId, String), // user and pushkey - Normal(OwnedServerName), + Federation(OwnedServerName), } #[implement(Destination)] #[must_use] pub(super) fn get_prefix(&self) -> Vec { match self { - | Self::Normal(server) => { + | Self::Federation(server) => { let len = server.as_bytes().len().saturating_add(1); let mut p 
= Vec::with_capacity(len); diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 2038f4eb..5ccba249 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -4,20 +4,25 @@ mod dest; mod send; mod sender; -use std::{fmt::Debug, iter::once, sync::Arc}; +use std::{ + fmt::Debug, + hash::{DefaultHasher, Hash, Hasher}, + iter::once, + sync::Arc, +}; use async_trait::async_trait; use conduwuit::{ - debug_warn, err, - utils::{ReadyExt, TryReadyExt}, + debug, debug_warn, err, error, + utils::{available_parallelism, math::usize_from_u64_truncated, ReadyExt, TryReadyExt}, warn, Result, Server, }; -use futures::{Stream, StreamExt}; +use futures::{FutureExt, Stream, StreamExt}; use ruma::{ api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, }; -use tokio::sync::Mutex; +use tokio::task::JoinSet; use self::data::Data; pub use self::{ @@ -30,11 +35,10 @@ use crate::{ }; pub struct Service { + pub db: Data, server: Arc, services: Services, - pub db: Data, - sender: loole::Sender, - receiver: Mutex>, + channels: Vec<(loole::Sender, loole::Receiver)>, } struct Services { @@ -72,8 +76,9 @@ pub enum SendingEvent { #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { - let (sender, receiver) = loole::unbounded(); + let num_senders = num_senders(&args); Ok(Arc::new(Self { + db: Data::new(&args), server: args.server.clone(), services: Services { client: args.depend::("client"), @@ -91,20 +96,41 @@ impl crate::Service for Service { pusher: args.depend::("pusher"), server_keys: args.depend::("server_keys"), }, - db: Data::new(&args), - sender, - receiver: Mutex::new(receiver), + channels: (0..num_senders).map(|_| loole::unbounded()).collect(), })) } - async fn worker(self: Arc) -> Result<()> { - // trait impl can't be split between files so this just glues to mod sender - self.sender().await + async fn worker(self: Arc) -> Result { + let mut senders = + self.channels + .iter() + 
.enumerate() + .fold(JoinSet::new(), |mut joinset, (id, _)| { + let self_ = self.clone(); + let runtime = self.server.runtime(); + let _abort = joinset.spawn_on(self_.sender(id).boxed(), runtime); + joinset + }); + + while let Some(ret) = senders.join_next_with_id().await { + match ret { + | Ok((id, _)) => { + debug!(?id, "sender worker finished"); + }, + | Err(error) => { + error!(id = ?error.id(), ?error, "sender worker finished"); + }, + }; + } + + Ok(()) } fn interrupt(&self) { - if !self.sender.is_closed() { - self.sender.close(); + for (sender, _) in &self.channels { + if !sender.is_closed() { + sender.close(); + } } } @@ -157,7 +183,7 @@ impl Service { let _cork = self.db.db.cork(); let requests = servers .map(|server| { - (Destination::Normal(server.into()), SendingEvent::Pdu(pdu_id.to_owned())) + (Destination::Federation(server.into()), SendingEvent::Pdu(pdu_id.to_owned())) }) .collect::>() .await; @@ -173,7 +199,7 @@ impl Service { #[tracing::instrument(skip(self, server, serialized), level = "debug")] pub fn send_edu_server(&self, server: &ServerName, serialized: Vec) -> Result<()> { - let dest = Destination::Normal(server.to_owned()); + let dest = Destination::Federation(server.to_owned()); let event = SendingEvent::Edu(serialized); let _cork = self.db.db.cork(); let keys = self.db.queue_requests(once((&event, &dest))); @@ -203,7 +229,10 @@ impl Service { let _cork = self.db.db.cork(); let requests = servers .map(|server| { - (Destination::Normal(server.to_owned()), SendingEvent::Edu(serialized.clone())) + ( + Destination::Federation(server.to_owned()), + SendingEvent::Edu(serialized.clone()), + ) }) .collect::>() .await; @@ -235,7 +264,7 @@ impl Service { { servers .map(ToOwned::to_owned) - .map(Destination::Normal) + .map(Destination::Federation) .map(Ok) .ready_try_for_each(|dest| { self.dispatch(Msg { @@ -327,9 +356,49 @@ impl Service { } } - fn dispatch(&self, msg: Msg) -> Result<()> { - debug_assert!(!self.sender.is_full(), "channel full"); - 
debug_assert!(!self.sender.is_closed(), "channel closed"); - self.sender.send(msg).map_err(|e| err!("{e}")) + fn dispatch(&self, msg: Msg) -> Result { + let shard = self.shard_id(&msg.dest); + let sender = &self + .channels + .get(shard) + .expect("missing sender worker channels") + .0; + + debug_assert!(!sender.is_full(), "channel full"); + debug_assert!(!sender.is_closed(), "channel closed"); + sender.send(msg).map_err(|e| err!("{e}")) + } + + pub(super) fn shard_id(&self, dest: &Destination) -> usize { + if self.channels.len() <= 1 { + return 0; + } + + let mut hash = DefaultHasher::default(); + dest.hash(&mut hash); + + let hash: u64 = hash.finish(); + let hash = usize_from_u64_truncated(hash); + + let chans = self.channels.len().max(1); + hash.overflowing_rem(chans).0 } } + +fn num_senders(args: &crate::Args<'_>) -> usize { + const MIN_SENDERS: usize = 1; + // Limit the number of senders to the number of workers threads or number of + // cores, conservatively. + let max_senders = args + .server + .metrics + .num_workers() + .min(available_parallelism()); + + // If the user doesn't override the default 0, this is intended to then default + // to 1 for now as multiple senders is experimental. 
+ args.server + .config + .sender_workers + .clamp(MIN_SENDERS, max_senders) +} diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 81467c16..3d13a3b0 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -24,7 +24,10 @@ use crate::{ }; impl super::Service { - #[tracing::instrument(skip_all, level = "debug")] + #[tracing::instrument( + level = "debug", + skip(self, client, request), + )] pub async fn send( &self, client: &Client, diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index f6b83e83..4e806ce8 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -1,7 +1,10 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, - sync::atomic::{AtomicU64, AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, time::{Duration, Instant}, }; @@ -66,29 +69,56 @@ pub const PDU_LIMIT: usize = 50; pub const EDU_LIMIT: usize = 100; impl Service { - #[tracing::instrument(skip_all, level = "debug")] - pub(super) async fn sender(&self) -> Result<()> { + #[tracing::instrument(skip(self), level = "debug")] + pub(super) async fn sender(self: Arc<Self>, id: usize) -> Result<()> { let mut statuses: CurTransactionStatus = CurTransactionStatus::new(); let mut futures: SendingFutures<'_> = FuturesUnordered::new(); - let receiver = self.receiver.lock().await; - self.initial_requests(&mut futures, &mut statuses).await; - while !receiver.is_closed() { - tokio::select!
{ - request = receiver.recv_async() => match request { - Ok(request) => self.handle_request(request, &mut futures, &mut statuses).await, - Err(_) => break, - }, - Some(response) = futures.next() => { - self.handle_response(response, &mut futures, &mut statuses).await; - }, - } - } - self.finish_responses(&mut futures).await; + self.startup_netburst(id, &mut futures, &mut statuses) + .boxed() + .await; + + self.work_loop(id, &mut futures, &mut statuses).await; + + self.finish_responses(&mut futures).boxed().await; Ok(()) } + #[tracing::instrument( + name = "work", + level = "trace" + skip_all, + fields( + futures = %futures.len(), + statuses = %statuses.len(), + ), + )] + async fn work_loop<'a>( + &'a self, + id: usize, + futures: &mut SendingFutures<'a>, + statuses: &mut CurTransactionStatus, + ) { + let receiver = self + .channels + .get(id) + .map(|(_, receiver)| receiver.clone()) + .expect("Missing channel for sender worker"); + loop { + tokio::select! { + Some(response) = futures.next() => { + self.handle_response(response, futures, statuses).await; + }, + request = receiver.recv_async() => match request { + Ok(request) => self.handle_request(request, futures, statuses).await, + Err(_) => return, + }, + } + } + } + + #[tracing::instrument(name = "response", level = "debug", skip_all)] async fn handle_response<'a>( &'a self, response: SendingResult, @@ -138,13 +168,14 @@ impl Service { self.db.mark_as_active(new_events.iter()); let new_events_vec = new_events.into_iter().map(|(_, event)| event).collect(); - futures.push(self.send_events(dest.clone(), new_events_vec).boxed()); + futures.push(self.send_events(dest.clone(), new_events_vec)); } else { statuses.remove(dest); } } #[allow(clippy::needless_pass_by_ref_mut)] + #[tracing::instrument(name = "request", level = "debug", skip_all)] async fn handle_request<'a>( &'a self, msg: Msg, @@ -154,13 +185,19 @@ impl Service { let iv = vec![(msg.queue_id, msg.event)]; if let Ok(Some(events)) = 
self.select_events(&msg.dest, iv, statuses).await { if !events.is_empty() { - futures.push(self.send_events(msg.dest, events).boxed()); + futures.push(self.send_events(msg.dest, events)); } else { statuses.remove(&msg.dest); } } } + #[tracing::instrument( + name = "finish", + level = "info", + skip_all, + fields(futures = %futures.len()), + )] async fn finish_responses<'a>(&'a self, futures: &mut SendingFutures<'a>) { use tokio::{ select, @@ -183,9 +220,16 @@ } } + #[tracing::instrument( + name = "netburst", + level = "debug", + skip_all, + fields(futures = %futures.len()), + )] #[allow(clippy::needless_pass_by_ref_mut)] - async fn initial_requests<'a>( + async fn startup_netburst<'a>( &'a self, + id: usize, futures: &mut SendingFutures<'a>, statuses: &mut CurTransactionStatus, ) { @@ -195,6 +239,10 @@ let mut active = self.db.active_requests().boxed(); while let Some((key, event, dest)) = active.next().await { + if self.shard_id(&dest) != id { + continue; + } + let entry = txns.entry(dest.clone()).or_default(); if self.server.config.startup_netburst_keep >= 0 && entry.len() >= keep { warn!("Dropping unsent event {dest:?} {:?}", String::from_utf8_lossy(&key)); @@ -207,19 +255,27 @@ for (dest, events) in txns { if self.server.config.startup_netburst && !events.is_empty() { statuses.insert(dest.clone(), TransactionStatus::Running); - futures.push(self.send_events(dest.clone(), events).boxed()); + futures.push(self.send_events(dest.clone(), events)); } } } - #[tracing::instrument(skip_all, level = "debug")] + #[tracing::instrument( + name = "select", + level = "debug", + skip_all, + fields( + ?dest, + new_events = %new_events.len(), + ) + )] async fn select_events( &self, dest: &Destination, new_events: Vec, // Events we want to send: event and full key statuses: &mut CurTransactionStatus, ) -> Result>> { - let (allow, retry) =
self.select_events_current(dest, statuses)?; // Nothing can be done for this remote, bail out. if !allow { @@ -249,7 +305,7 @@ } // Add EDU's into the transaction - if let Destination::Normal(server_name) = dest { + if let Destination::Federation(server_name) = dest { if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); events.extend(select_edus.into_iter().map(SendingEvent::Edu)); @@ -260,10 +316,9 @@ Ok(Some(events)) } - #[tracing::instrument(skip_all, level = "debug")] fn select_events_current( &self, - dest: Destination, + dest: &Destination, statuses: &mut CurTransactionStatus, ) -> Result<(bool, bool)> { let (mut allow, mut retry) = (true, false); @@ -292,7 +347,11 @@ Ok((allow, retry)) } - #[tracing::instrument(skip_all, level = "debug")] + #[tracing::instrument( + name = "edus", + level = "debug", + skip_all, + )] async fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { // selection window let since = self.db.get_latest_educount(server_name).await; @@ -329,7 +388,12 @@ Ok((events, max_edu_count.load(Ordering::Acquire))) } - /// Look for presence + /// Look for device changes + #[tracing::instrument( + name = "device_changes", + level = "trace", + skip(self, server_name, max_edu_count) + )] async fn select_edus_device_changes( &self, server_name: &ServerName, @@ -386,6 +450,11 @@ } /// Look for read receipts in this room + #[tracing::instrument( + name = "receipts", + level = "trace", + skip(self, server_name, max_edu_count) + )] async fn select_edus_receipts( &self, server_name: &ServerName, @@ -420,6 +489,7 @@ } /// Look for read receipts in this room + #[tracing::instrument(name = "receipts", level = "trace", skip(self, since, max_edu_count))] async fn select_edus_receipts_room( &self, room_id: &RoomId, @@ -484,6 +554,11 @@ } /// Look
for presence + #[tracing::instrument( + name = "presence", + level = "trace", + skip(self, server_name, max_edu_count) + )] async fn select_edus_presence( &self, server_name: &ServerName, @@ -554,29 +629,33 @@ impl Service { Some(presence_content) } - async fn send_events(&self, dest: Destination, events: Vec) -> SendingResult { + fn send_events(&self, dest: Destination, events: Vec) -> SendingFuture<'_> { //debug_assert!(!events.is_empty(), "sending empty transaction"); match dest { - | Destination::Normal(ref server) => - self.send_events_dest_normal(&dest, server, events).await, - | Destination::Appservice(ref id) => - self.send_events_dest_appservice(&dest, id, events).await, - | Destination::Push(ref userid, ref pushkey) => - self.send_events_dest_push(&dest, userid, pushkey, events) - .await, + | Destination::Federation(server) => + self.send_events_dest_federation(server, events).boxed(), + | Destination::Appservice(id) => self.send_events_dest_appservice(id, events).boxed(), + | Destination::Push(user_id, pushkey) => + self.send_events_dest_push(user_id, pushkey, events).boxed(), } } - #[tracing::instrument(skip(self, dest, events), name = "appservice")] + #[tracing::instrument( + name = "appservice", + level = "debug", + skip(self, events), + fields( + events = %events.len(), + ), + )] async fn send_events_dest_appservice( &self, - dest: &Destination, - id: &str, + id: String, events: Vec, ) -> SendingResult { - let Some(appservice) = self.services.appservice.get_registration(id).await else { + let Some(appservice) = self.services.appservice.get_registration(&id).await else { return Err(( - dest.clone(), + Destination::Appservice(id.clone()), err!(Database(warn!(?id, "Missing appservice registration"))), )); }; @@ -633,23 +712,29 @@ impl Service { ) .await { - | Ok(_) => Ok(dest.clone()), - | Err(e) => Err((dest.clone(), e)), + | Ok(_) => Ok(Destination::Appservice(id)), + | Err(e) => Err((Destination::Appservice(id), e)), } } - 
#[tracing::instrument(skip(self, dest, events), name = "push")] + #[tracing::instrument( + name = "push", + level = "info", + skip(self, events), + fields( + events = %events.len(), + ), + )] async fn send_events_dest_push( &self, - dest: &Destination, - userid: &OwnedUserId, - pushkey: &str, + user_id: OwnedUserId, + pushkey: String, events: Vec, ) -> SendingResult { - let Ok(pusher) = self.services.pusher.get_pusher(userid, pushkey).await else { + let Ok(pusher) = self.services.pusher.get_pusher(&user_id, &pushkey).await else { return Err(( - dest.clone(), - err!(Database(error!(?userid, ?pushkey, "Missing pusher"))), + Destination::Push(user_id.clone(), pushkey.clone()), + err!(Database(error!(?user_id, ?pushkey, "Missing pusher"))), )); }; @@ -677,17 +762,17 @@ impl Service { let rules_for_user = self .services .account_data - .get_global(userid, GlobalAccountDataEventType::PushRules) + .get_global(&user_id, GlobalAccountDataEventType::PushRules) .await .map_or_else( - |_| push::Ruleset::server_default(userid), + |_| push::Ruleset::server_default(&user_id), |ev: PushRulesEvent| ev.content.global, ); let unread: UInt = self .services .user - .notification_count(userid, &pdu.room_id) + .notification_count(&user_id, &pdu.room_id) .await .try_into() .expect("notification count can't go that high"); @@ -695,19 +780,25 @@ impl Service { let _response = self .services .pusher - .send_push_notice(userid, unread, &pusher, rules_for_user, &pdu) + .send_push_notice(&user_id, unread, &pusher, rules_for_user, &pdu) .await - .map_err(|e| (dest.clone(), e)); + .map_err(|e| (Destination::Push(user_id.clone(), pushkey.clone()), e)); } - Ok(dest.clone()) + Ok(Destination::Push(user_id, pushkey)) } - #[tracing::instrument(skip(self, dest, events), name = "", level = "debug")] - async fn send_events_dest_normal( + #[tracing::instrument( + name = "fed", + level = "debug", + skip(self, events), + fields( + events = %events.len(), + ), + )] + async fn send_events_dest_federation( 
&self, - dest: &Destination, - server: &OwnedServerName, + server: OwnedServerName, events: Vec, ) -> SendingResult { let mut pdu_jsons = Vec::with_capacity( @@ -759,7 +850,7 @@ impl Service { }; let client = &self.services.client.sender; - self.send(client, server, request) + self.send(client, &server, request) .await .inspect(|response| { response @@ -770,8 +861,8 @@ impl Service { |(pdu_id, res)| warn!(%txn_id, %server, "error sending PDU {pdu_id} to remote server: {res:?}"), ); }) - .map(|_| dest.clone()) - .map_err(|e| (dest.clone(), e)) + .map_err(|e| (Destination::Federation(server.clone()), e)) + .map(|_| Destination::Federation(server)) } /// This does not return a full `Pdu` it is only to satisfy ruma's types. From 674acc86571a78ea722a03200babdabb78a21100 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 19 Dec 2024 07:01:15 +0000 Subject: [PATCH 0437/1248] add program options for tokio histogram settings Signed-off-by: Jason Volk --- src/main/clap.rs | 12 ++++++++++++ src/main/runtime.rs | 18 +++++++----------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/main/clap.rs b/src/main/clap.rs index 7e70bd80..28577356 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -38,6 +38,18 @@ pub(crate) struct Args { /// Override the tokio worker_thread count. #[arg(long, hide(true), env = "TOKIO_WORKER_THREADS", default_value = available_parallelism().to_string())] pub(crate) worker_threads: usize, + + /// Set the histogram bucket size, in microseconds (tokio_unstable). Default + /// is 25 microseconds. If the values of the histogram don't approach zero + /// with the exception of the last bucket, try increasing this value to e.g. + /// 50 or 100. Inversely, decrease to 10 etc if the histogram lacks + /// resolution. + #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL", default_value = "25")] + pub(crate) worker_histogram_interval: u64, + + /// Set the histogram bucket count (tokio_unstable). Default is 20. 
+ #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS", default_value = "20")] + pub(crate) worker_histogram_buckets: usize, } /// Parse commandline arguments into structured data diff --git a/src/main/runtime.rs b/src/main/runtime.rs index ad0c3cde..6fc405a7 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -35,25 +35,21 @@ pub(super) fn new(args: &Args) -> Result { .on_task_terminate(task_terminate); #[cfg(tokio_unstable)] - enable_histogram(&mut builder); + enable_histogram(&mut builder, args); builder.build().map_err(Into::into) } #[cfg(tokio_unstable)] -fn enable_histogram(builder: &mut Builder) { - use tokio::runtime::{HistogramConfiguration, LogHistogram}; - - let config = LogHistogram::builder() - .min_value(Duration::from_micros(10)) - .max_value(Duration::from_millis(1)) - .max_error(0.5) - .max_buckets(32) - .expect("erroneous histogram configuration"); +fn enable_histogram(builder: &mut Builder, args: &Args) { + use tokio::runtime::HistogramConfiguration; + let buckets = args.worker_histogram_buckets; + let interval = Duration::from_micros(args.worker_histogram_interval); + let linear = HistogramConfiguration::linear(interval, buckets); builder .enable_metrics_poll_time_histogram() - .metrics_poll_time_histogram_configuration(HistogramConfiguration::log(config)); + .metrics_poll_time_histogram_configuration(linear); } #[tracing::instrument( From 503210c3bffbc90a5fbcabf4c12a6e5fa9036d8a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 19 Dec 2024 07:33:29 +0000 Subject: [PATCH 0438/1248] toggle worker_affinity feature from program argument Signed-off-by: Jason Volk --- src/main/clap.rs | 15 ++++++++++++++- src/main/runtime.rs | 19 +++++++++++++++---- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/src/main/clap.rs b/src/main/clap.rs index 28577356..c447deae 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -2,7 +2,7 @@ use std::path::PathBuf; -use clap::Parser; +use clap::{ArgAction, Parser}; use 
conduwuit::{ config::{Figment, FigmentValue}, err, toml, @@ -50,6 +50,19 @@ pub(crate) struct Args { /// Set the histogram bucket count (tokio_unstable). Default is 20. #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS", default_value = "20")] pub(crate) worker_histogram_buckets: usize, + + /// Toggles worker affinity feature. + #[arg( + long, + hide(true), + env = "CONDUWUIT_RUNTIME_WORKER_AFFINITY", + action = ArgAction::Set, + num_args = 0..=1, + require_equals(false), + default_value = "true", + default_missing_value = "true", + )] + pub(crate) worker_affinity: bool, } /// Parse commandline arguments into structured data diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 6fc405a7..ff8ab322 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,4 +1,4 @@ -use std::{thread, time::Duration}; +use std::{sync::OnceLock, thread, time::Duration}; use conduwuit::Result; use tokio::runtime::Builder; @@ -12,9 +12,14 @@ const GLOBAL_QUEUE_INTERVAL: u32 = 192; const KERNEL_QUEUE_INTERVAL: u32 = 256; const KERNEL_EVENTS_PER_TICK: usize = 512; -pub(super) fn new(args: &Args) -> Result { - let mut builder = Builder::new_multi_thread(); +static WORKER_AFFINITY: OnceLock = OnceLock::new(); +pub(super) fn new(args: &Args) -> Result { + WORKER_AFFINITY + .set(args.worker_affinity) + .expect("set WORKER_AFFINITY from program argument"); + + let mut builder = Builder::new_multi_thread(); builder .enable_io() .enable_time() @@ -63,7 +68,13 @@ fn enable_histogram(builder: &mut Builder, args: &Args) { )] fn thread_start() { #[cfg(feature = "worker_affinity")] - set_worker_affinity(); + if WORKER_AFFINITY + .get() + .copied() + .expect("WORKER_AFFINITY initialized by runtime::new()") + { + set_worker_affinity(); + } } #[cfg(feature = "worker_affinity")] From aefc4b4e69e17bac99b8608a5c02fbcfd66c8f78 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 19 Dec 2024 09:42:28 +0000 Subject: [PATCH 0439/1248] add more program options for tokio 
Signed-off-by: Jason Volk --- src/main/clap.rs | 19 ++++++++++++++++++- src/main/runtime.rs | 11 +++++------ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/src/main/clap.rs b/src/main/clap.rs index c447deae..ad5c815a 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -36,9 +36,26 @@ pub(crate) struct Args { pub(crate) test: Vec, /// Override the tokio worker_thread count. - #[arg(long, hide(true), env = "TOKIO_WORKER_THREADS", default_value = available_parallelism().to_string())] + #[arg( + long, + hide(true), + env = "TOKIO_WORKER_THREADS", + default_value = available_parallelism().to_string(), + )] pub(crate) worker_threads: usize, + /// Override the tokio global_queue_interval. + #[arg(long, hide(true), env = "TOKIO_GLOBAL_QUEUE_INTERVAL", default_value = "192")] + pub(crate) global_event_interval: u32, + + /// Override the tokio event_interval. + #[arg(long, hide(true), env = "TOKIO_EVENT_INTERVAL", default_value = "512")] + pub(crate) kernel_event_interval: u32, + + /// Override the tokio max_io_events_per_tick. + #[arg(long, hide(true), env = "TOKIO_MAX_IO_EVENTS_PER_TICK", default_value = "512")] + pub(crate) kernel_events_per_tick: usize, + /// Set the histogram bucket size, in microseconds (tokio_unstable). Default /// is 25 microseconds. If the values of the histogram don't approach zero /// with the exception of the last bucket, try increasing this value to e.g. 
diff --git a/src/main/runtime.rs b/src/main/runtime.rs index ff8ab322..e98e73d6 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -8,9 +8,7 @@ use crate::clap::Args; const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; -const GLOBAL_QUEUE_INTERVAL: u32 = 192; -const KERNEL_QUEUE_INTERVAL: u32 = 256; -const KERNEL_EVENTS_PER_TICK: usize = 512; +const MAX_BLOCKING_THREADS: usize = 1024; static WORKER_AFFINITY: OnceLock = OnceLock::new(); @@ -25,10 +23,11 @@ pub(super) fn new(args: &Args) -> Result { .enable_time() .thread_name(WORKER_NAME) .worker_threads(args.worker_threads.max(WORKER_MIN)) + .max_blocking_threads(MAX_BLOCKING_THREADS) .thread_keep_alive(Duration::from_secs(WORKER_KEEPALIVE)) - .max_io_events_per_tick(KERNEL_EVENTS_PER_TICK) - .event_interval(KERNEL_QUEUE_INTERVAL) - .global_queue_interval(GLOBAL_QUEUE_INTERVAL) + .global_queue_interval(args.global_event_interval) + .event_interval(args.kernel_event_interval) + .max_io_events_per_tick(args.kernel_events_per_tick) .on_thread_start(thread_start) .on_thread_stop(thread_stop) .on_thread_unpark(thread_unpark) From a3d87be22ff394fb3060502ac1952883b8b58783 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 21 Dec 2024 21:32:10 +0000 Subject: [PATCH 0440/1248] enable atomic_flush for database columns Signed-off-by: Jason Volk --- src/database/opts.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/database/opts.rs b/src/database/opts.rs index 933830bd..24128f14 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -39,8 +39,9 @@ pub(crate) fn db_options( } // IO + opts.set_atomic_flush(true); opts.set_manual_wal_flush(true); - opts.set_enable_pipelined_write(true); + opts.set_enable_pipelined_write(false); if config.rocksdb_direct_io { opts.set_use_direct_reads(true); opts.set_use_direct_io_for_flush_and_compaction(true); From 749f29aaaba1ca117c3cd8a5bb4ebb3a031bd84f Mon Sep 17 00:00:00 2001 From: Jason 
Volk Date: Sun, 22 Dec 2024 07:03:32 +0000 Subject: [PATCH 0441/1248] add and/and_then to BoolExt Signed-off-by: Jason Volk --- src/core/utils/bool.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/core/utils/bool.rs b/src/core/utils/bool.rs index b4192daf..b16f63e9 100644 --- a/src/core/utils/bool.rs +++ b/src/core/utils/bool.rs @@ -2,6 +2,10 @@ /// Boolean extensions and chain.starters pub trait BoolExt { + fn and(self, t: Option) -> Option; + + fn and_then Option>(self, f: F) -> Option; + #[must_use] fn clone_or(self, err: T, t: &T) -> T; @@ -39,6 +43,12 @@ pub trait BoolExt { } impl BoolExt for bool { + #[inline] + fn and(self, t: Option) -> Option { self.then_some(t).flatten() } + + #[inline] + fn and_then Option>(self, f: F) -> Option { self.then(f).flatten() } + #[inline] fn clone_or(self, err: T, t: &T) -> T { self.map_or(err, || t.clone()) } From 5fdb8895b1834fa77f59bd0e44a9705bab3288f3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 21 Dec 2024 22:12:42 +0000 Subject: [PATCH 0442/1248] add utils to detect device io characteristics Signed-off-by: Jason Volk --- Cargo.lock | 76 +++++++++++++++++++++-- Cargo.toml | 5 ++ src/core/Cargo.toml | 2 + src/core/utils/mod.rs | 2 +- src/core/utils/sys.rs | 23 +++---- src/core/utils/sys/compute.rs | 74 ++++++++++++++++++++++ src/core/utils/sys/storage.rs | 112 ++++++++++++++++++++++++++++++++++ src/main/Cargo.toml | 5 -- src/main/runtime.rs | 36 +++++------ 9 files changed, 288 insertions(+), 47 deletions(-) create mode 100644 src/core/utils/sys/compute.rs create mode 100644 src/core/utils/sys/storage.rs diff --git a/Cargo.lock b/Cargo.lock index d65ae18f..6522aa55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -631,7 +631,6 @@ dependencies = [ "conduwuit_service", "console-subscriber", "const-str", - "core_affinity", "hardened_malloc-rs", "log", "opentelemetry", @@ -716,6 +715,7 @@ dependencies = [ "clap", "conduwuit_macros", "const-str", + "core_affinity", "ctor", "cyborgtime", "either", @@ 
-740,6 +740,7 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", + "sysinfo", "thiserror 2.0.7", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", @@ -1675,7 +1676,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows", + "windows 0.52.0", ] [[package]] @@ -4075,6 +4076,18 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "sysinfo" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "948512566b1895f93b1592c7574baeb2de842f224f2aab158799ecadb8ebbb46" +dependencies = [ + "core-foundation-sys", + "libc", + "serde", + "windows 0.57.0", +] + [[package]] name = "tendril" version = "0.4.3" @@ -4932,7 +4945,17 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core", + "windows-core 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +dependencies = [ + "windows-core 0.57.0", "windows-targets 0.52.6", ] @@ -4945,17 +4968,60 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "windows-registry" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" dependencies = [ - "windows-result", + "windows-result 0.2.0", "windows-strings", "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-result" version = "0.2.0" @@ -4971,7 +5037,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result", + "windows-result 0.2.0", "windows-targets 0.52.6", ] diff --git a/Cargo.toml b/Cargo.toml index 15f054bf..689f2b5c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -497,6 +497,11 @@ version = "1.3.0" [workspace.dependencies.core_affinity] version = "0.8.1" +[workspace.dependencies.sysinfo] +version = "0.33.0" +default-features = false +features = ["disk", "serde"] + # # Patches # diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 27c6da52..d249f647 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -64,6 +64,7 @@ chrono.workspace = true clap.workspace = true conduwuit-macros.workspace = true const-str.workspace = true +core_affinity.workspace = true ctor.workspace = true cyborgtime.workspace = true either.workspace = true @@ -86,6 +87,7 @@ serde_json.workspace = true serde_regex.workspace = true serde_yaml.workspace = true serde.workspace = true +sysinfo.workspace = true thiserror.workspace = true 
tikv-jemallocator.optional = true tikv-jemallocator.workspace = true diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index a9b73fb6..38232820 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -36,7 +36,7 @@ pub use self::{ rand::{shuffle, string as random_string}, stream::{IterStream, ReadyExt, Tools as StreamTools, TryReadyExt}, string::{str_from_bytes, string_from_bytes}, - sys::available_parallelism, + sys::compute::parallelism as available_parallelism, time::{now_millis as millis_since_unix_epoch, timepoint_ago, timepoint_from_now}, }; diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index 05ef12ca..5c5564c4 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -1,3 +1,10 @@ +pub mod compute; +pub mod storage; + +use std::path::PathBuf; + +pub use compute::parallelism as available_parallelism; + use crate::{debug, Result}; /// This is needed for opening lots of file descriptors, which tends to @@ -21,18 +28,6 @@ pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { Ok(()) } -/// Get the number of threads which could execute in parallel based on the -/// hardware and administrative constraints of this system. This value should be -/// used to hint the size of thread-pools and divide-and-conquer algorithms. -/// -/// * -#[must_use] -pub fn available_parallelism() -> usize { - std::thread::available_parallelism() - .expect("Unable to query for available parallelism.") - .get() -} - /// Return a possibly corrected std::env::current_exe() even if the path is /// marked deleted. /// @@ -40,9 +35,7 @@ pub fn available_parallelism() -> usize { /// This function is declared unsafe because the original result was altered for /// security purposes, and altering it back ignores those urposes and should be /// understood by the user. 
-pub unsafe fn current_exe() -> Result { - use std::path::PathBuf; - +pub unsafe fn current_exe() -> Result { let exe = std::env::current_exe()?; match exe.to_str() { | None => Ok(exe), diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs new file mode 100644 index 00000000..4e9ef743 --- /dev/null +++ b/src/core/utils/sys/compute.rs @@ -0,0 +1,74 @@ +//! System utilities related to compute/processing + +use std::{cell::Cell, sync::LazyLock}; + +use crate::is_equal_to; + +/// The list of cores available to the process (at startup) +static CORES_AVAILABLE: LazyLock = LazyLock::new(|| { + core_affinity::get_core_ids() + .unwrap_or_default() + .into_iter() + .map(|core_id| core_id.id) + .inspect(|&id| assert!(id < 128, "Core ID must be < 128 at least for now")) + .fold(0_u128, |mask, id| mask | (1 << id)) +}); + +thread_local! { + /// Tracks the affinity for this thread. This is updated when affinities + /// are set via our set_affinity() interface. + static CORE_AFFINITY: Cell = Cell::default(); +} + +/// Set the core affinity for this thread. The ID should be listed in +/// CORES_AVAILABLE. Empty input is a no-op; prior affinity unchanged. +pub fn set_affinity(ids: I) +where + I: Iterator, +{ + use core_affinity::{set_for_current, CoreId}; + + let mask: u128 = ids.fold(0, |mask, id| { + debug_assert!(is_core_available(id), "setting affinity to unavailable core"); + set_for_current(CoreId { id }); + mask | (1 << id) + }); + + if mask.count_ones() > 0 { + CORE_AFFINITY.replace(mask); + } +} + +/// Get the core affinity for this thread. +pub fn get_affinity() -> impl Iterator { + (0..128).filter(|&i| ((CORE_AFFINITY.get() & (1 << i)) != 0)) +} + +/// Gets the ID of the nth core available. This bijects our sequence of cores to +/// actual ID's which may have gaps for cores which are not available. 
+#[inline] +#[must_use] +pub fn get_core_available(i: usize) -> Option { cores_available().nth(i) } + +/// Determine if core (by id) is available to the process. +#[inline] +#[must_use] +pub fn is_core_available(id: usize) -> bool { cores_available().any(is_equal_to!(id)) } + +/// Get the list of cores available. The values were recorded at program start. +#[inline] +pub fn cores_available() -> impl Iterator { + (0..128).filter(|&i| ((*CORES_AVAILABLE & (1 << i)) != 0)) +} + +/// Get the number of threads which could execute in parallel based on the +/// hardware and administrative constraints of this system. This value should be +/// used to hint the size of thread-pools and divide-and-conquer algorithms. +/// +/// * +#[must_use] +pub fn parallelism() -> usize { + std::thread::available_parallelism() + .expect("Unable to query for available parallelism.") + .get() +} diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs new file mode 100644 index 00000000..8dc75236 --- /dev/null +++ b/src/core/utils/sys/storage.rs @@ -0,0 +1,112 @@ +//! System utilities related to devices/peripherals + +use std::{ + ffi::{OsStr, OsString}, + fs, + fs::{read_to_string, FileType}, + iter::IntoIterator, + path::Path, +}; + +use crate::{result::FlatOk, Result}; + +/// Device characteristics useful for random access throughput +#[derive(Clone, Debug, Default)] +pub struct Parallelism { + /// Number of requests for the device. + pub nr_requests: Option, + + /// Individual queue characteristics. + pub mq: Vec, +} + +/// Device queue characteristics +#[derive(Clone, Debug, Default)] +pub struct Queue { + /// Queue's indice. + pub id: usize, + + /// Number of requests for the queue. + pub nr_tags: Option, + + /// CPU affinities for the queue. + pub cpu_list: Vec, +} + +/// Get device characteristics useful for random access throughput by name. 
+#[must_use] +pub fn parallelism(name: &OsStr) -> Parallelism { + let name = name + .to_str() + .expect("device name expected to be utf-8 representable"); + + let block_path = Path::new("/").join("sys/").join("block/"); + + let mq_path = Path::new(&block_path).join(format!("{name}/mq/")); + + let nr_requests_path = Path::new(&block_path).join(format!("{name}/queue/nr_requests")); + + Parallelism { + nr_requests: read_to_string(&nr_requests_path) + .ok() + .as_deref() + .map(str::trim) + .map(str::parse) + .flat_ok(), + + mq: fs::read_dir(&mq_path) + .into_iter() + .flat_map(IntoIterator::into_iter) + .filter_map(Result::ok) + .filter(|entry| entry.file_type().as_ref().is_ok_and(FileType::is_dir)) + .map(|dir| queue_parallelism(&dir.path())) + .collect(), + } +} + +/// Get device queue characteristics by mq path on sysfs(5) +fn queue_parallelism(dir: &Path) -> Queue { + let queue_id = dir.file_name(); + + let nr_tags_path = dir.join("nr_tags"); + + let cpu_list_path = dir.join("cpu_list"); + + Queue { + id: queue_id + .and_then(OsStr::to_str) + .map(str::parse) + .flat_ok() + .expect("queue has some numerical identifier"), + + nr_tags: read_to_string(&nr_tags_path) + .ok() + .as_deref() + .map(str::trim) + .map(str::parse) + .flat_ok(), + + cpu_list: read_to_string(&cpu_list_path) + .iter() + .flat_map(|list| list.trim().split(',')) + .map(str::trim) + .map(str::parse) + .filter_map(Result::ok) + .collect(), + } +} + +/// Get the name of the device on which Path is mounted. 
+#[must_use] +pub fn name_from_path(path: &Path) -> Option { + sysinfo::Disks::new_with_refreshed_list() + .into_iter() + .filter(|disk| path.starts_with(disk.mount_point())) + .max_by(|a, b| { + let a = a.mount_point().ancestors().count(); + let b = b.mount_point().ancestors().count(); + a.cmp(&b) + }) + .map(|disk| Path::new(disk.name())) + .and_then(|path| path.file_name().map(ToOwned::to_owned)) +} diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index fe24d4c1..eeb6f2bc 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -129,9 +129,6 @@ tokio_console = [ "dep:console-subscriber", "tokio/tracing", ] -worker_affinity = [ - "dep:core_affinity", -] zstd_compression = [ "conduwuit-api/zstd_compression", "conduwuit-core/zstd_compression", @@ -151,8 +148,6 @@ clap.workspace = true console-subscriber.optional = true console-subscriber.workspace = true const-str.workspace = true -core_affinity.optional = true -core_affinity.workspace = true log.workspace = true opentelemetry-jaeger.optional = true opentelemetry-jaeger.workspace = true diff --git a/src/main/runtime.rs b/src/main/runtime.rs index e98e73d6..cee093ea 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,6 +1,17 @@ -use std::{sync::OnceLock, thread, time::Duration}; +use std::{ + iter::once, + sync::{ + atomic::{AtomicUsize, Ordering}, + OnceLock, + }, + thread, + time::Duration, +}; -use conduwuit::Result; +use conduwuit::{ + utils::sys::compute::{get_core_available, set_affinity}, + Result, +}; use tokio::runtime::Builder; use crate::clap::Args; @@ -66,7 +77,6 @@ fn enable_histogram(builder: &mut Builder, args: &Args) { ), )] fn thread_start() { - #[cfg(feature = "worker_affinity")] if WORKER_AFFINITY .get() .copied() @@ -76,24 +86,8 @@ fn thread_start() { } } -#[cfg(feature = "worker_affinity")] fn set_worker_affinity() { - use std::sync::{ - atomic::{AtomicUsize, Ordering}, - LazyLock, - }; - static CORES_OCCUPIED: AtomicUsize = AtomicUsize::new(0); - static CORES_AVAILABLE: 
LazyLock>> = LazyLock::new(|| { - core_affinity::get_core_ids().map(|mut cores| { - cores.sort_unstable(); - cores - }) - }); - - let Some(cores) = CORES_AVAILABLE.as_ref() else { - return; - }; if thread::current().name() != Some(WORKER_NAME) { return; @@ -106,11 +100,11 @@ fn set_worker_affinity() { return; } - let Some(id) = cores.get(i) else { + let Some(id) = get_core_available(i) else { return; }; - let _set = core_affinity::set_for_current(*id); + set_affinity(once(id)); } #[tracing::instrument( From 03f2ac9cafd9b070c36ca6fdf0fccf3d5fb8e953 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 22 Dec 2024 22:58:37 +0000 Subject: [PATCH 0443/1248] simplify usage of mpmc channels which don't require receiver lock Signed-off-by: Jason Volk --- src/service/admin/mod.rs | 19 +++++++++---------- src/service/presence/mod.rs | 21 +++++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 59639e58..399055aa 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -20,14 +20,13 @@ use ruma::{ events::room::message::{Relation, RoomMessageEventContent}, OwnedEventId, OwnedRoomId, RoomId, UserId, }; -use tokio::sync::{Mutex, RwLock}; +use tokio::sync::RwLock; use crate::{account_data, globals, rooms, rooms::state::RoomMutexGuard, Dep}; pub struct Service { services: Services, - sender: Sender, - receiver: Mutex>, + channel: (Sender, Receiver), pub handle: RwLock>, pub complete: StdRwLock>, #[cfg(feature = "console")] @@ -78,7 +77,6 @@ const COMMAND_QUEUE_LIMIT: usize = 512; #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { - let (sender, receiver) = loole::bounded(COMMAND_QUEUE_LIMIT); Ok(Arc::new(Self { services: Services { server: args.server.clone(), @@ -90,8 +88,7 @@ impl crate::Service for Service { account_data: args.depend::("account_data"), services: None.into(), }, - sender, - receiver: Mutex::new(receiver), + channel: 
loole::bounded(COMMAND_QUEUE_LIMIT), handle: RwLock::new(None), complete: StdRwLock::new(None), #[cfg(feature = "console")] @@ -100,8 +97,8 @@ impl crate::Service for Service { } async fn worker(self: Arc) -> Result<()> { - let receiver = self.receiver.lock().await; let mut signals = self.services.server.signal.subscribe(); + let receiver = self.channel.1.clone(); self.startup_execute().await?; self.console_auto_start().await; @@ -128,8 +125,9 @@ impl crate::Service for Service { #[cfg(feature = "console")] self.console.interrupt(); - if !self.sender.is_closed() { - self.sender.close(); + let (sender, _) = &self.channel; + if !sender.is_closed() { + sender.close(); } } @@ -159,7 +157,8 @@ impl Service { /// will take place on the service worker's task asynchronously. Errors if /// the queue is full. pub fn command(&self, command: String, reply_id: Option) -> Result<()> { - self.sender + self.channel + .0 .send(CommandInput { command, reply_id }) .map_err(|e| err!("Failed to enqueue admin command: {e:?}")) } diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index 1f9f63d9..bf5258e1 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -6,15 +6,15 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; use conduwuit::{checked, debug, error, result::LogErr, Error, Result, Server}; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; +use loole::{Receiver, Sender}; use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; -use tokio::{sync::Mutex, time::sleep}; +use tokio::time::sleep; use self::{data::Data, presence::Presence}; use crate::{globals, users, Dep}; pub struct Service { - timer_sender: loole::Sender, - timer_receiver: Mutex>, + timer_channel: (Sender, Receiver), timeout_remote_users: bool, idle_timeout: u64, offline_timeout: u64, @@ -36,10 +36,8 @@ impl crate::Service for Service { let config = &args.server.config; let idle_timeout_s = 
config.presence_idle_timeout_s; let offline_timeout_s = config.presence_offline_timeout_s; - let (timer_sender, timer_receiver) = loole::unbounded(); Ok(Arc::new(Self { - timer_sender, - timer_receiver: Mutex::new(timer_receiver), + timer_channel: loole::unbounded(), timeout_remote_users: config.presence_timeout_remote_users, idle_timeout: checked!(idle_timeout_s * 1_000)?, offline_timeout: checked!(offline_timeout_s * 1_000)?, @@ -53,8 +51,9 @@ impl crate::Service for Service { } async fn worker(self: Arc) -> Result<()> { + let receiver = self.timer_channel.1.clone(); + let mut presence_timers = FuturesUnordered::new(); - let receiver = self.timer_receiver.lock().await; while !receiver.is_closed() { tokio::select! { Some(user_id) = presence_timers.next() => { @@ -74,8 +73,9 @@ impl crate::Service for Service { } fn interrupt(&self) { - if !self.timer_sender.is_closed() { - self.timer_sender.close(); + let (timer_sender, _) = &self.timer_channel; + if !timer_sender.is_closed() { + timer_sender.close(); } } @@ -150,7 +150,8 @@ impl Service { | _ => self.services.server.config.presence_offline_timeout_s, }; - self.timer_sender + self.timer_channel + .0 .send((user_id.to_owned(), Duration::from_secs(timeout))) .map_err(|e| { error!("Failed to add presence timer: {}", e); From e5a1309583c3f280308ddb368d4b1a72cfcb30ad Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 22 Dec 2024 22:59:43 +0000 Subject: [PATCH 0444/1248] additional interruption points to hasten shutdown Signed-off-by: Jason Volk --- src/router/serve/unix.rs | 2 +- src/service/resolver/actual.rs | 8 +++++++- src/service/sending/send.rs | 1 + src/service/sending/sender.rs | 3 ++- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index bee3c8c7..f5018455 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -43,7 +43,7 @@ pub(super) async fn serve( let app = app.into_make_service_with_connect_info::(); let builder = 
server::conn::auto::Builder::new(executor); let listener = init(server).await?; - loop { + while server.running() { let app = app.clone(); let builder = builder.clone(); tokio::select! { diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 3f609b95..0506be2c 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -232,11 +232,12 @@ impl super::Service { #[tracing::instrument(skip_all, name = "well-known")] async fn request_well_known(&self, dest: &str) -> Result> { - trace!("Requesting well known for {dest}"); if !self.has_cached_override(dest) { self.query_and_cache_override(dest, dest, 8448).await?; } + self.services.server.check_running()?; + trace!("Requesting well known for {dest}"); let response = self .services .client @@ -304,6 +305,9 @@ impl super::Service { hostname: &'_ str, port: u16, ) -> Result<()> { + self.services.server.check_running()?; + + debug!("querying IP for {overname:?} ({hostname:?}:{port})"); match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { | Err(e) => Self::handle_resolve_error(&e, hostname), | Ok(override_ip) => { @@ -328,6 +332,8 @@ impl super::Service { [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; for hostname in hostnames { + self.services.server.check_running()?; + debug!("querying SRV for {hostname:?}"); let hostname = hostname.trim_end_matches('.'); match self.resolver.resolver.srv_lookup(hostname).await { diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 3d13a3b0..e2981068 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -93,6 +93,7 @@ impl super::Service { let request = Request::try_from(request)?; self.validate_url(request.url())?; + self.server.check_running()?; Ok(request) } diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 4e806ce8..339236a5 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -105,7 
+105,8 @@ impl Service { .get(id) .map(|(_, receiver)| receiver.clone()) .expect("Missing channel for sender worker"); - loop { + + while !receiver.is_closed() { tokio::select! { Some(response) = futures.next() => { self.handle_response(response, futures, statuses).await; From b1951070535674b4bcae429c8dee740e72fe104f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 22 Dec 2024 15:09:30 +0000 Subject: [PATCH 0445/1248] optimize for multi-queue storage topologies with affinity Signed-off-by: Jason Volk --- conduwuit-example.toml | 32 ++++++- src/core/config/mod.rs | 52 ++++++++-- src/database/engine.rs | 12 +-- src/database/pool.rs | 170 ++++++++++++++++++++++----------- src/database/pool/configure.rs | 106 ++++++++++++++++++++ src/main/runtime.rs | 2 +- 6 files changed, 297 insertions(+), 77 deletions(-) create mode 100644 src/database/pool/configure.rs diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3669961a..111acb05 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1397,18 +1397,42 @@ # #admin_room_notices = true +# Enable database pool affinity support. On supporting systems, block +# device queue topologies are detected and the request pool is optimized +# for the hardware; db_pool_workers is determined automatically. +# +#db_pool_affinity = true + # Sets the number of worker threads in the frontend-pool of the database. # This number should reflect the I/O capabilities of the system, -# specifically the queue-depth or the number of simultaneous requests in +# such as the queue-depth or the number of simultaneous requests in # flight. Defaults to 32 or four times the number of CPU cores, whichever # is greater. # +# Note: This value is only used if db_pool_affinity is disabled or not +# detected on the system, otherwise it is determined automatically. +# #db_pool_workers = 32 -# Size of the queue feeding the database's frontend-pool. Defaults to 256 -# or eight times the number of CPU cores, whichever is greater. 
+# When db_pool_affinity is enabled and detected, the size of any worker +# group will not exceed the determined value. This is necessary when +# thread-pooling approach does not scale to the full capabilities of +# high-end hardware; using detected values without limitation could +# degrade performance. # -#db_pool_queue_size = 256 +# The value is multiplied by the number of cores which share a device +# queue, since group workers can be scheduled on any of those cores. +# +#db_pool_workers_limit = 64 + +# Determines the size of the queues feeding the database's frontend-pool. +# The size of the queue is determined by multiplying this value with the +# number of pool workers. When this queue is full, tokio tasks conducting +# requests will yield until space is available; this is good for +# flow-control by avoiding buffer-bloat, but can inhibit throughput if +# too low. +# +#db_pool_queue_mult = 4 # Number of sender task workers; determines sender parallelism. Default is # '0' which means the value is determined internally, likely matching the diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 23feb0ca..3772aa16 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1581,22 +1581,50 @@ pub struct Config { #[serde(default = "true_fn")] pub admin_room_notices: bool, + /// Enable database pool affinity support. On supporting systems, block + /// device queue topologies are detected and the request pool is optimized + /// for the hardware; db_pool_workers is determined automatically. + /// + /// default: true + #[serde(default = "true_fn")] + pub db_pool_affinity: bool, + /// Sets the number of worker threads in the frontend-pool of the database. /// This number should reflect the I/O capabilities of the system, - /// specifically the queue-depth or the number of simultaneous requests in + /// such as the queue-depth or the number of simultaneous requests in /// flight. 
Defaults to 32 or four times the number of CPU cores, whichever /// is greater. /// + /// Note: This value is only used if db_pool_affinity is disabled or not + /// detected on the system, otherwise it is determined automatically. + /// /// default: 32 #[serde(default = "default_db_pool_workers")] pub db_pool_workers: usize, - /// Size of the queue feeding the database's frontend-pool. Defaults to 256 - /// or eight times the number of CPU cores, whichever is greater. + /// When db_pool_affinity is enabled and detected, the size of any worker + /// group will not exceed the determined value. This is necessary when + /// thread-pooling approach does not scale to the full capabilities of + /// high-end hardware; using detected values without limitation could + /// degrade performance. /// - /// default: 256 - #[serde(default = "default_db_pool_queue_size")] - pub db_pool_queue_size: usize, + /// The value is multiplied by the number of cores which share a device + /// queue, since group workers can be scheduled on any of those cores. + /// + /// default: 64 + #[serde(default = "default_db_pool_workers_limit")] + pub db_pool_workers_limit: usize, + + /// Determines the size of the queues feeding the database's frontend-pool. + /// The size of the queue is determined by multiplying this value with the + /// number of pool workers. When this queue is full, tokio tasks conducting + /// requests will yield until space is available; this is good for + /// flow-control by avoiding buffer-bloat, but can inhibit throughput if + /// too low. + /// + /// default: 4 + #[serde(default = "default_db_pool_queue_mult")] + pub db_pool_queue_mult: usize, /// Number of sender task workers; determines sender parallelism. 
Default is /// '0' which means the value is determined internally, likely matching the @@ -2399,8 +2427,12 @@ fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_p fn default_trusted_server_batch_size() -> usize { 256 } -fn default_db_pool_workers() -> usize { sys::available_parallelism().saturating_mul(4).max(32) } - -fn default_db_pool_queue_size() -> usize { - sys::available_parallelism().saturating_mul(8).max(256) +fn default_db_pool_workers() -> usize { + sys::available_parallelism() + .saturating_mul(4) + .clamp(32, 1024) } + +fn default_db_pool_workers_limit() -> usize { 64 } + +fn default_db_pool_queue_mult() -> usize { 4 } diff --git a/src/database/engine.rs b/src/database/engine.rs index 73ea559d..faf5b131 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -18,7 +18,7 @@ use rocksdb::{ use crate::{ opts::{cf_options, db_options}, - or_else, pool, + or_else, pool::Pool, result, util::map_err, @@ -87,8 +87,9 @@ impl Engine { .map(|(name, opts)| ColumnFamilyDescriptor::new(name, opts)) .collect::>(); - debug!("Opening database..."); let path = &config.database_path; + + debug!("Opening database..."); let res = if config.rocksdb_read_only { Db::open_cf_descriptors_read_only(&db_opts, path, cfds, false) } else if config.rocksdb_secondary { @@ -105,11 +106,6 @@ impl Engine { "Opened database." 
); - let pool_opts = pool::Opts { - queue_size: config.db_pool_queue_size, - worker_num: config.db_pool_workers, - }; - Ok(Arc::new(Self { server: server.clone(), row_cache, @@ -121,7 +117,7 @@ impl Engine { corks: AtomicU32::new(0), read_only: config.rocksdb_read_only, secondary: config.rocksdb_secondary, - pool: Pool::new(server, &pool_opts).await?, + pool: Pool::new(server).await?, })) } diff --git a/src/database/pool.rs b/src/database/pool.rs index 4f018a38..51e705ce 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -1,3 +1,5 @@ +mod configure; + use std::{ mem::take, sync::{ @@ -6,39 +8,50 @@ use std::{ }, }; -use async_channel::{bounded, Receiver, RecvError, Sender}; -use conduwuit::{debug, debug_warn, defer, err, implement, result::DebugInspect, Result, Server}; +use async_channel::{Receiver, RecvError, Sender}; +use conduwuit::{ + debug, debug_warn, defer, err, implement, + result::DebugInspect, + trace, + utils::sys::compute::{get_affinity, get_core_available, set_affinity}, + Result, Server, +}; use futures::{channel::oneshot, TryFutureExt}; use oneshot::Sender as ResultSender; use rocksdb::Direction; use tokio::task::JoinSet; +use self::configure::configure; use crate::{keyval::KeyBuf, stream, Handle, Map}; +/// Frontend thread-pool. Operating system threads are used to make database +/// requests which are not cached. These thread-blocking requests are offloaded +/// from the tokio async workers and executed on this threadpool. pub(crate) struct Pool { server: Arc, + queues: Vec>, workers: Mutex>, - queue: Sender, + topology: Vec, busy: AtomicUsize, queued_max: AtomicUsize, } -pub(crate) struct Opts { - pub(crate) queue_size: usize, - pub(crate) worker_num: usize, -} - +/// Operations which can be submitted to the pool. pub(crate) enum Cmd { Get(Get), Iter(Seek), } +/// Point-query pub(crate) struct Get { pub(crate) map: Arc, pub(crate) key: KeyBuf, pub(crate) res: Option>>>, } +/// Iterator-seek. 
+/// Note: only initial seek is supported at this time on the assumption rocksdb +/// prefetching prevents mid-iteration polls from blocking on I/O. pub(crate) struct Seek { pub(crate) map: Arc, pub(crate) state: stream::State<'static>, @@ -47,34 +60,44 @@ pub(crate) struct Seek { pub(crate) res: Option>>, } -const QUEUE_LIMIT: (usize, usize) = (1, 3072); -const WORKER_LIMIT: (usize, usize) = (1, 512); - -impl Drop for Pool { - fn drop(&mut self) { - debug_assert!(self.queue.is_empty(), "channel must be empty on drop"); - debug_assert!(self.queue.is_closed(), "channel should be closed on drop"); - } -} +const WORKER_LIMIT: (usize, usize) = (1, 1024); +const QUEUE_LIMIT: (usize, usize) = (1, 2048); #[implement(Pool)] -pub(crate) async fn new(server: &Arc, opts: &Opts) -> Result> { - let queue_size = opts.queue_size.clamp(QUEUE_LIMIT.0, QUEUE_LIMIT.1); - let (send, recv) = bounded(queue_size); +pub(crate) async fn new(server: &Arc) -> Result> { + let (total_workers, queue_sizes, topology) = configure(server); + + let (senders, receivers) = queue_sizes.into_iter().map(async_channel::bounded).unzip(); + let pool = Arc::new(Self { server: server.clone(), + + queues: senders, + workers: JoinSet::new().into(), - queue: send, + + topology, + busy: AtomicUsize::default(), + queued_max: AtomicUsize::default(), }); - let worker_num = opts.worker_num.clamp(WORKER_LIMIT.0, WORKER_LIMIT.1); - pool.spawn_until(recv, worker_num).await?; + pool.spawn_until(receivers, total_workers).await?; Ok(pool) } +impl Drop for Pool { + fn drop(&mut self) { + debug_assert!(self.queues.iter().all(Sender::is_empty), "channel must be empty on drop"); + debug_assert!( + self.queues.iter().all(Sender::is_closed), + "channel should be closed on drop" + ); + } +} + #[implement(Pool)] pub(crate) async fn shutdown(self: &Arc) { self.close(); @@ -83,36 +106,39 @@ pub(crate) async fn shutdown(self: &Arc) { debug!(workers = workers.len(), "Waiting for workers to join..."); workers.join_all().await; - 
debug_assert!(self.queue.is_empty(), "channel is not empty"); } #[implement(Pool)] -pub(crate) fn close(&self) -> bool { - if !self.queue.close() { - return false; - } +pub(crate) fn close(&self) { + let senders = self.queues.iter().map(Sender::sender_count).sum::(); - let mut workers = take(&mut *self.workers.lock().expect("locked")); - debug!(workers = workers.len(), "Waiting for workers to join..."); - workers.abort_all(); - drop(workers); + let receivers = self + .queues + .iter() + .map(Sender::receiver_count) + .sum::(); - std::thread::yield_now(); - debug_assert!(self.queue.is_empty(), "channel is not empty"); debug!( - senders = self.queue.sender_count(), - receivers = self.queue.receiver_count(), - "Closed pool channel" + queues = self.queues.len(), + workers = self.workers.lock().expect("locked").len(), + ?senders, + ?receivers, + "Closing pool..." ); - true + for queue in &self.queues { + queue.close(); + } + + self.workers.lock().expect("locked").abort_all(); + std::thread::yield_now(); } #[implement(Pool)] -async fn spawn_until(self: &Arc, recv: Receiver, max: usize) -> Result { +async fn spawn_until(self: &Arc, recv: Vec>, count: usize) -> Result { let mut workers = self.workers.lock().expect("locked"); - while workers.len() < max { - self.spawn_one(&mut workers, recv.clone())?; + while workers.len() < count { + self.spawn_one(&mut workers, &recv)?; } Ok(()) @@ -125,8 +151,13 @@ async fn spawn_until(self: &Arc, recv: Receiver, max: usize) -> Resul skip_all, fields(id = %workers.len()) )] -fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: Receiver) -> Result { +fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: &[Receiver]) -> Result { + debug_assert!(!self.queues.is_empty(), "Must have at least one queue"); + debug_assert!(!recv.is_empty(), "Must have at least one receiver"); + let id = workers.len(); + let group = id.overflowing_rem(self.queues.len()).0; + let recv = recv[group].clone(); let self_ = self.clone(); 
#[cfg(not(tokio_unstable))] @@ -146,7 +177,9 @@ fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: Receiver) - pub(crate) async fn execute_get(&self, mut cmd: Get) -> Result> { let (send, recv) = oneshot::channel(); _ = cmd.res.insert(send); - self.execute(Cmd::Get(cmd)) + + let queue = self.select_queue(); + self.execute(queue, Cmd::Get(cmd)) .and_then(|()| { recv.map_ok(into_recv_get_result) .map_err(|e| err!(error!("recv failed {e:?}"))) @@ -159,7 +192,9 @@ pub(crate) async fn execute_get(&self, mut cmd: Get) -> Result> { pub(crate) async fn execute_iter(&self, mut cmd: Seek) -> Result> { let (send, recv) = oneshot::channel(); _ = cmd.res.insert(send); - self.execute(Cmd::Iter(cmd)) + + let queue = self.select_queue(); + self.execute(queue, Cmd::Iter(cmd)) .and_then(|()| { recv.map_ok(into_recv_seek) .map_err(|e| err!(error!("recv failed {e:?}"))) @@ -167,6 +202,13 @@ pub(crate) async fn execute_iter(&self, mut cmd: Seek) -> Result &Sender { + let core_id = get_affinity().next().unwrap_or(0); + let chan_id = self.topology[core_id]; + self.queues.get(chan_id).unwrap_or_else(|| &self.queues[0]) +} + #[implement(Pool)] #[tracing::instrument( level = "trace", @@ -174,25 +216,24 @@ pub(crate) async fn execute_iter(&self, mut cmd: Seek) -> Result Result { +async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { if cfg!(debug_assertions) { - self.queued_max - .fetch_max(self.queue.len(), Ordering::Relaxed); + self.queued_max.fetch_max(queue.len(), Ordering::Relaxed); } - if self.queue.is_full() { + if queue.is_full() { debug_warn!( - capacity = ?self.queue.capacity(), + capacity = ?queue.capacity(), "pool queue is full" ); } - self.queue + queue .send(cmd) .await .map_err(|e| err!(error!("send failed {e:?}"))) @@ -208,12 +249,33 @@ async fn execute(&self, cmd: Cmd) -> Result { ), )] fn worker(self: Arc, id: usize, recv: Receiver) { - debug!("worker spawned"); - defer! {{ debug!("worker finished"); }} + defer! 
{{ trace!("worker finished"); }} + trace!("worker spawned"); + self.worker_init(id); self.worker_loop(&recv); } +#[implement(Pool)] +fn worker_init(&self, id: usize) { + let group = id.overflowing_rem(self.queues.len()).0; + let affinity = self + .topology + .iter() + .enumerate() + .filter(|_| self.queues.len() > 1) + .filter_map(|(core_id, &queue_id)| (group == queue_id).then_some(core_id)) + .filter_map(get_core_available); + + // affinity is empty (no-op) if there's only one queue + set_affinity(affinity.clone()); + debug!( + ?group, + affinity = ?affinity.collect::>(), + "worker ready" + ); +} + #[implement(Pool)] fn worker_loop(&self, recv: &Receiver) { // initial +1 needed prior to entering wait diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs new file mode 100644 index 00000000..8353a265 --- /dev/null +++ b/src/database/pool/configure.rs @@ -0,0 +1,106 @@ +use std::{ffi::OsStr, sync::Arc}; + +use conduwuit::{ + debug_info, + utils::{ + sys::{compute::is_core_available, storage}, + BoolExt, + }, + Server, +}; + +use super::{QUEUE_LIMIT, WORKER_LIMIT}; + +pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) { + let config = &server.config; + + // This finds the block device and gathers all the properties we need. + let (device_name, device_prop) = config + .db_pool_affinity + .and_then(|| storage::name_from_path(&config.database_path)) + .map(|device_name| (device_name.clone(), storage::parallelism(&device_name))) + .unzip(); + + // The default worker count is masked-on if we didn't find better information. + let default_worker_count = device_prop + .as_ref() + .is_none_or(|prop| prop.mq.is_empty()) + .then_some(config.db_pool_workers); + + // Determine the worker groupings. Each indice represents a hardware queue and + // contains the number of workers which will service it. 
+ let worker_counts: Vec<_> = device_prop + .iter() + .map(|dev| &dev.mq) + .flat_map(|mq| mq.iter()) + .filter(|mq| mq.cpu_list.iter().copied().any(is_core_available)) + .map(|mq| { + mq.nr_tags.unwrap_or_default().min( + config.db_pool_workers_limit.saturating_mul( + mq.cpu_list + .iter() + .filter(|&&id| is_core_available(id)) + .count() + .max(1), + ), + ) + }) + .chain(default_worker_count) + .collect(); + + // Determine our software queue size for each hardware queue. This is the mpmc + // between the tokio worker and the pool worker. + let queue_sizes: Vec<_> = worker_counts + .iter() + .map(|worker_count| { + worker_count + .saturating_mul(config.db_pool_queue_mult) + .clamp(QUEUE_LIMIT.0, QUEUE_LIMIT.1) + }) + .collect(); + + // Determine the CPU affinities of each hardware queue. Each indice is a cpu and + // each value is the associated hardware queue. There is a little shiftiness + // going on because cpu's which are not available to the process are filtered + // out, similar to the worker_counts. + let topology = device_prop + .iter() + .map(|dev| &dev.mq) + .flat_map(|mq| mq.iter()) + .fold(vec![0; 128], |mut topology, mq| { + mq.cpu_list + .iter() + .filter(|&&id| is_core_available(id)) + .for_each(|&id| { + topology[id] = mq.id; + }); + + topology + }); + + // Regardless of the capacity of all queues we establish some limit on the total + // number of workers; this is hopefully hinted by nr_requests. + let max_workers = device_prop + .as_ref() + .and_then(|prop| prop.nr_requests) + .unwrap_or(WORKER_LIMIT.1); + + // Determine the final worker count which we'll be spawning. 
+ let total_workers = worker_counts + .iter() + .sum::() + .clamp(WORKER_LIMIT.0, max_workers); + + debug_info!( + device_name = ?device_name + .as_deref() + .and_then(OsStr::to_str) + .unwrap_or("None"), + ?worker_counts, + ?queue_sizes, + ?total_workers, + "Frontend topology", + ); + + (total_workers, queue_sizes, topology) +} diff --git a/src/main/runtime.rs b/src/main/runtime.rs index cee093ea..bfd2ef74 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -19,7 +19,7 @@ use crate::clap::Args; const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; -const MAX_BLOCKING_THREADS: usize = 1024; +const MAX_BLOCKING_THREADS: usize = 2048; static WORKER_AFFINITY: OnceLock = OnceLock::new(); From 7a6d6575585cd001f0c889aa41c1a2663b2435d0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 23 Dec 2024 04:32:28 +0000 Subject: [PATCH 0446/1248] configurable dynamic stream concurrency scalar Signed-off-by: Jason Volk --- conduwuit-example.toml | 23 +++++++++++++++++ src/core/config/mod.rs | 31 +++++++++++++++++++++++ src/core/utils/stream/broadband.rs | 13 +++++----- src/core/utils/stream/mod.rs | 29 ++++++++++++++++++++++ src/core/utils/stream/try_broadband.rs | 5 ++-- src/core/utils/stream/wideband.rs | 9 +++---- src/database/map/get_batch.rs | 10 +++++--- src/database/pool/configure.rs | 34 +++++++++++++++++++++++++- src/main/server.rs | 10 +++++++- 9 files changed, 144 insertions(+), 20 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 111acb05..c64b18e8 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1434,6 +1434,29 @@ # #db_pool_queue_mult = 4 +# Sets the initial value for the concurrency of streams. This value simply +# allows overriding the default in the code. The default is 32, which is +# the same as the default in the code. 
Note this value is itself +# overridden by the computed stream_width_scale, unless that is disabled; +# this value can serve as a fixed-width instead. +# +#stream_width_default = 32 + +# Scales the stream width starting from a base value detected for the +# specific system. The base value is the database pool worker count +# determined from the hardware queue size (e.g. 32 for SSD or 64 or 128+ +# for NVMe). This float allows scaling the width up or down by multiplying +# it (e.g. 1.5, 2.0, etc). The maximum result can be the size of the pool +# queue (see: db_pool_queue_mult) as any larger value will stall the tokio +# task. The value can also be scaled down (e.g. 0.5) to improve +# responsiveness for many users at the cost of throughput for each. +# +# Setting this value to 0.0 causes the stream width to be fixed at the +# value of stream_width_default. The default is 1.0 to match the +# capabilities detected for the system. +# +#stream_width_scale = 1.0 + # Number of sender task workers; determines sender parallelism. Default is # '0' which means the value is determined internally, likely matching the # number of tokio worker-threads or number of cores, etc. Override by diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 3772aa16..e1f578c8 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1626,6 +1626,33 @@ pub struct Config { #[serde(default = "default_db_pool_queue_mult")] pub db_pool_queue_mult: usize, + /// Sets the initial value for the concurrency of streams. This value simply + /// allows overriding the default in the code. The default is 32, which is + /// the same as the default in the code. Note this value is itself + /// overridden by the computed stream_width_scale, unless that is disabled; + /// this value can serve as a fixed-width instead. 
+ /// + /// default: 32 + #[serde(default = "default_stream_width_default")] + pub stream_width_default: usize, + + /// Scales the stream width starting from a base value detected for the + /// specific system. The base value is the database pool worker count + /// determined from the hardware queue size (e.g. 32 for SSD or 64 or 128+ + /// for NVMe). This float allows scaling the width up or down by multiplying + /// it (e.g. 1.5, 2.0, etc). The maximum result can be the size of the pool + /// queue (see: db_pool_queue_mult) as any larger value will stall the tokio + /// task. The value can also be scaled down (e.g. 0.5) to improve + /// responsiveness for many users at the cost of throughput for each. + /// + /// Setting this value to 0.0 causes the stream width to be fixed at the + /// value of stream_width_default. The default is 1.0 to match the + /// capabilities detected for the system. + /// + /// default: 1.0 + #[serde(default = "default_stream_width_scale")] + pub stream_width_scale: f32, + /// Number of sender task workers; determines sender parallelism. Default is /// '0' which means the value is determined internally, likely matching the /// number of tokio worker-threads or number of cores, etc. Override by @@ -2436,3 +2463,7 @@ fn default_db_pool_workers() -> usize { fn default_db_pool_workers_limit() -> usize { 64 } fn default_db_pool_queue_mult() -> usize { 4 } + +fn default_stream_width_default() -> usize { 32 } + +fn default_stream_width_scale() -> f32 { 1.0 } diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs index 37416d63..6d1ff6fe 100644 --- a/src/core/utils/stream/broadband.rs +++ b/src/core/utils/stream/broadband.rs @@ -7,9 +7,7 @@ use futures::{ Future, }; -use super::ReadyExt; - -const WIDTH: usize = 32; +use super::{automatic_width, ReadyExt}; /// Concurrency extensions to augment futures::StreamExt. 
broad_ combinators /// produce out-of-order @@ -95,7 +93,7 @@ where Fut: Future + Send, { self.map(f) - .buffer_unordered(n.into().unwrap_or(WIDTH)) + .buffer_unordered(n.into().unwrap_or_else(automatic_width)) .ready_all(identity) } @@ -107,7 +105,7 @@ where Fut: Future + Send, { self.map(f) - .buffer_unordered(n.into().unwrap_or(WIDTH)) + .buffer_unordered(n.into().unwrap_or_else(automatic_width)) .ready_any(identity) } @@ -120,7 +118,7 @@ where U: Send, { self.map(f) - .buffer_unordered(n.into().unwrap_or(WIDTH)) + .buffer_unordered(n.into().unwrap_or_else(automatic_width)) .ready_filter_map(identity) } @@ -132,6 +130,7 @@ where Fut: Future + Send, U: Send, { - self.map(f).buffer_unordered(n.into().unwrap_or(WIDTH)) + self.map(f) + .buffer_unordered(n.into().unwrap_or_else(automatic_width)) } } diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index c9138116..4456784f 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -19,3 +19,32 @@ pub use tools::Tools; pub use try_broadband::TryBroadbandExt; pub use try_ready::TryReadyExt; pub use wideband::WidebandExt; + +/// Stream concurrency factor; this is a live value. +static WIDTH: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(32); + +/// Practicable limits on the stream width +pub const WIDTH_LIMIT: (usize, usize) = (1, 1024); + +/// Sets the live concurrency factor. The first return value is the previous +/// width which was replaced. The second return value is the value which was set +/// after any applied limits. +pub fn set_width(width: usize) -> (usize, usize) { + use std::sync::atomic::Ordering; + + let width = width.clamp(WIDTH_LIMIT.0, WIDTH_LIMIT.1); + (WIDTH.swap(width, Ordering::Relaxed), width) +} + +/// Used by stream operations where the concurrency factor hasn't been manually +/// supplied by the caller (most uses). Instead we provide a default value which +/// is adjusted at startup for the specific system and also dynamically. 
+#[inline] +pub fn automatic_width() -> usize { + use std::sync::atomic::Ordering; + + let width = WIDTH.load(Ordering::Relaxed); + debug_assert!(width >= WIDTH_LIMIT.0, "WIDTH should not be zero"); + debug_assert!(width <= WIDTH_LIMIT.1, "WIDTH is probably too large"); + width +} diff --git a/src/core/utils/stream/try_broadband.rs b/src/core/utils/stream/try_broadband.rs index d1213174..c72fcc2c 100644 --- a/src/core/utils/stream/try_broadband.rs +++ b/src/core/utils/stream/try_broadband.rs @@ -2,10 +2,9 @@ use futures::{TryFuture, TryStream, TryStreamExt}; +use super::automatic_width; use crate::Result; -const WIDTH: usize = 32; - /// Concurrency extensions to augment futures::TryStreamExt. broad_ combinators /// produce out-of-order pub trait TryBroadbandExt @@ -49,6 +48,6 @@ where Fut: TryFuture> + Send, { self.map_ok(f) - .try_buffer_unordered(n.into().unwrap_or(WIDTH)) + .try_buffer_unordered(n.into().unwrap_or_else(automatic_width)) } } diff --git a/src/core/utils/stream/wideband.rs b/src/core/utils/stream/wideband.rs index 053a351f..a8560bb4 100644 --- a/src/core/utils/stream/wideband.rs +++ b/src/core/utils/stream/wideband.rs @@ -7,9 +7,7 @@ use futures::{ Future, }; -use super::ReadyExt; - -const WIDTH: usize = 32; +use super::{automatic_width, ReadyExt}; /// Concurrency extensions to augment futures::StreamExt. wideband_ combinators /// produce in-order. 
@@ -66,7 +64,7 @@ where U: Send, { self.map(f) - .buffered(n.into().unwrap_or(WIDTH)) + .buffered(n.into().unwrap_or_else(automatic_width)) .ready_filter_map(identity) } @@ -78,6 +76,7 @@ where Fut: Future + Send, U: Send, { - self.map(f).buffered(n.into().unwrap_or(WIDTH)) + self.map(f) + .buffered(n.into().unwrap_or_else(automatic_width)) } } diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index 49cd5920..631692fe 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,6 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{err, implement, utils::IterStream, Result}; +use conduwuit::{ + err, implement, + utils::{stream::automatic_width, IterStream}, + Result, +}; use futures::{Stream, StreamExt}; use serde::Serialize; @@ -18,7 +22,7 @@ where { keys.stream() .map(move |key| self.aqry::(&key)) - .buffered(self.db.server.config.db_pool_workers.saturating_mul(2)) + .buffered(automatic_width()) } #[implement(super::Map)] @@ -33,7 +37,7 @@ where { keys.stream() .map(move |key| self.get(key)) - .buffered(self.db.server.config.db_pool_workers.saturating_mul(2)) + .buffered(automatic_width()) } #[implement(super::Map)] diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index 8353a265..9361a534 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -1,8 +1,11 @@ use std::{ffi::OsStr, sync::Arc}; use conduwuit::{ - debug_info, + debug, debug_info, expected, utils::{ + math::usize_from_f64, + stream, + stream::WIDTH_LIMIT, sys::{compute::is_core_available, storage}, BoolExt, }, @@ -91,6 +94,13 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) .sum::() .clamp(WORKER_LIMIT.0, max_workers); + // After computing all of the above we can update the global automatic stream + // width, hopefully with a better value tailored to this system. 
+ if config.stream_width_scale > 0.0 { + let num_queues = queue_sizes.len(); + update_stream_width(server, num_queues, total_workers); + } + debug_info!( device_name = ?device_name .as_deref() @@ -99,8 +109,30 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) ?worker_counts, ?queue_sizes, ?total_workers, + stream_width = ?stream::automatic_width(), "Frontend topology", ); (total_workers, queue_sizes, topology) } + +#[allow(clippy::as_conversions, clippy::cast_precision_loss)] +fn update_stream_width(server: &Arc, num_queues: usize, total_workers: usize) { + let config = &server.config; + let scale: f64 = config.stream_width_scale.min(100.0).into(); + let req_width = expected!(total_workers / num_queues).next_multiple_of(2); + let req_width = req_width as f64; + let req_width = usize_from_f64(req_width * scale) + .expect("failed to convert f64 to usize") + .clamp(WIDTH_LIMIT.0, WIDTH_LIMIT.1); + + let (old_width, new_width) = stream::set_width(req_width); + debug!( + scale = ?config.stream_width_scale, + ?num_queues, + ?req_width, + ?old_width, + ?new_width, + "Updated global stream width" + ); +} diff --git a/src/main/server.rs b/src/main/server.rs index bc2cff85..e1389f6d 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,6 +1,12 @@ use std::sync::Arc; -use conduwuit::{config::Config, info, log::Log, utils::sys, Error, Result}; +use conduwuit::{ + config::Config, + info, + log::Log, + utils::{stream, sys}, + Error, Result, +}; use tokio::{runtime, sync::Mutex}; use crate::{clap::Args, logging::TracingFlameGuard}; @@ -45,6 +51,8 @@ impl Server { sys::maximize_fd_limit() .expect("Unable to increase maximum soft and hard file descriptor limit"); + let (_old_width, _new_width) = stream::set_width(config.stream_width_default); + info!( server_name = %config.server_name, database_path = ?config.database_path, From a407909d09f09c6fe0f714a8ab6ae872c0b8ef6a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 24 Dec 2024 01:07:53 +0000 Subject: 
[PATCH 0447/1248] reduce unnecessary info-level spans to debug Signed-off-by: Jason Volk --- src/core/utils/sys/compute.rs | 2 +- src/service/resolver/actual.rs | 4 ++-- src/service/rooms/timeline/mod.rs | 6 +++--- src/service/sending/mod.rs | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs index 4e9ef743..e947b579 100644 --- a/src/core/utils/sys/compute.rs +++ b/src/core/utils/sys/compute.rs @@ -10,7 +10,7 @@ static CORES_AVAILABLE: LazyLock = LazyLock::new(|| { .unwrap_or_default() .into_iter() .map(|core_id| core_id.id) - .inspect(|&id| assert!(id < 128, "Core ID must be < 128 at least for now")) + .inspect(|&id| debug_assert!(id < 128, "Core ID must be < 128 at least for now")) .fold(0_u128, |mask, id| mask | (1 << id)) }); diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 0506be2c..f8bc0f6a 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -45,7 +45,7 @@ impl super::Service { /// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of /// specification - #[tracing::instrument(skip_all, name = "actual")] + #[tracing::instrument(skip(self, cache), name = "actual")] pub async fn resolve_actual_dest( &self, dest: &ServerName, @@ -298,7 +298,7 @@ impl super::Service { } } - #[tracing::instrument(skip_all, name = "ip")] + #[tracing::instrument(skip(self, overname, port), name = "ip")] async fn query_and_cache_override( &self, overname: &'_ str, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 028b270f..3b81e2e5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -859,7 +859,7 @@ impl Service { /// Creates a new persisted data unit and adds it to a room. This function /// takes a roomid_mutex_state, meaning that only this function is able to /// mutate the room state. 
- #[tracing::instrument(skip(self, state_lock))] + #[tracing::instrument(skip(self, state_lock), level = "debug")] pub async fn build_and_append_pdu( &self, pdu_builder: PduBuilder, @@ -1082,7 +1082,7 @@ impl Service { self.replace_pdu(&pdu_id, &obj, &pdu).await } - #[tracing::instrument(skip(self))] + #[tracing::instrument(skip(self), level = "debug")] pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { if self .services @@ -1194,7 +1194,7 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(self, pdu))] + #[tracing::instrument(skip(self, pdu), level = "debug")] pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { let (event_id, value, room_id) = self.services.event_handler.parse_incoming_pdu(&pdu).await?; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 5ccba249..e52bfb25 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -277,7 +277,7 @@ impl Service { } /// Sends a request to a federation server - #[tracing::instrument(skip_all, name = "request")] + #[tracing::instrument(skip_all, name = "request", level = "debug")] pub async fn send_federation_request( &self, dest: &ServerName, @@ -291,7 +291,7 @@ impl Service { } /// Like send_federation_request() but with a very large timeout - #[tracing::instrument(skip_all, name = "synapse")] + #[tracing::instrument(skip_all, name = "synapse", level = "debug")] pub async fn send_synapse_request( &self, dest: &ServerName, From 3b95af9a1887a07070f7b7918ce3cc91fb911918 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 24 Dec 2024 14:37:26 +0000 Subject: [PATCH 0448/1248] move stream width related into unit Signed-off-by: Jason Volk --- src/core/utils/stream/band.rs | 26 ++++++++++++++++++++++++++ src/core/utils/stream/mod.rs | 31 ++----------------------------- 2 files changed, 28 insertions(+), 29 deletions(-) create mode 100644 src/core/utils/stream/band.rs diff --git a/src/core/utils/stream/band.rs 
b/src/core/utils/stream/band.rs new file mode 100644 index 00000000..76f2a85a --- /dev/null +++ b/src/core/utils/stream/band.rs @@ -0,0 +1,26 @@ +use std::sync::atomic::{AtomicUsize, Ordering}; + +/// Stream concurrency factor; this is a live value. +static WIDTH: AtomicUsize = AtomicUsize::new(32); + +/// Practicable limits on the stream width +pub const WIDTH_LIMIT: (usize, usize) = (1, 1024); + +/// Sets the live concurrency factor. The first return value is the previous +/// width which was replaced. The second return value is the value which was set +/// after any applied limits. +pub fn set_width(width: usize) -> (usize, usize) { + let width = width.clamp(WIDTH_LIMIT.0, WIDTH_LIMIT.1); + (WIDTH.swap(width, Ordering::Relaxed), width) +} + +/// Used by stream operations where the concurrency factor hasn't been manually +/// supplied by the caller (most uses). Instead we provide a default value which +/// is adjusted at startup for the specific system and also dynamically. +#[inline] +pub fn automatic_width() -> usize { + let width = WIDTH.load(Ordering::Relaxed); + debug_assert!(width >= WIDTH_LIMIT.0, "WIDTH should not be zero"); + debug_assert!(width <= WIDTH_LIMIT.1, "WIDTH is probably too large"); + width +} diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 4456784f..a5ef17c5 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -1,3 +1,4 @@ +mod band; mod broadband; mod cloned; mod expect; @@ -9,6 +10,7 @@ mod try_broadband; mod try_ready; mod wideband; +pub use band::{automatic_width, set_width, WIDTH_LIMIT}; pub use broadband::BroadbandExt; pub use cloned::Cloned; pub use expect::TryExpect; @@ -19,32 +21,3 @@ pub use tools::Tools; pub use try_broadband::TryBroadbandExt; pub use try_ready::TryReadyExt; pub use wideband::WidebandExt; - -/// Stream concurrency factor; this is a live value. 
-static WIDTH: std::sync::atomic::AtomicUsize = std::sync::atomic::AtomicUsize::new(32); - -/// Practicable limits on the stream width -pub const WIDTH_LIMIT: (usize, usize) = (1, 1024); - -/// Sets the live concurrency factor. The first return value is the previous -/// width which was replaced. The second return value is the value which was set -/// after any applied limits. -pub fn set_width(width: usize) -> (usize, usize) { - use std::sync::atomic::Ordering; - - let width = width.clamp(WIDTH_LIMIT.0, WIDTH_LIMIT.1); - (WIDTH.swap(width, Ordering::Relaxed), width) -} - -/// Used by stream operations where the concurrency factor hasn't been manually -/// supplied by the caller (most uses). Instead we provide a default value which -/// is adjusted at startup for the specific system and also dynamically. -#[inline] -pub fn automatic_width() -> usize { - use std::sync::atomic::Ordering; - - let width = WIDTH.load(Ordering::Relaxed); - debug_assert!(width >= WIDTH_LIMIT.0, "WIDTH should not be zero"); - debug_assert!(width <= WIDTH_LIMIT.1, "WIDTH is probably too large"); - width -} From 5a335933b84c4598106d4b7b448b0ea4e8e48504 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 24 Dec 2024 14:43:28 +0000 Subject: [PATCH 0449/1248] pass stream width to ruma state res Signed-off-by: Jason Volk --- Cargo.lock | 26 +++++------ Cargo.toml | 2 +- .../rooms/event_handler/resolve_state.rs | 45 ++++++++++++------- .../rooms/event_handler/state_at_incoming.rs | 28 +++--------- 4 files changed, 48 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6522aa55..9134b290 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3175,7 +3175,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "assign", "js_int", @@ -3197,7 +3197,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "js_int", "ruma-common", @@ -3209,7 +3209,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "as_variant", "assign", @@ -3232,7 +3232,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "as_variant", "base64 0.22.1", @@ -3262,7 +3262,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3286,7 +3286,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "bytes", "http", @@ -3304,7 +3304,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "js_int", "thiserror 2.0.7", @@ -3313,7 +3313,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "js_int", "ruma-common", @@ -3323,7 +3323,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3338,7 +3338,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "js_int", 
"ruma-common", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "headers", "http", @@ -3363,7 +3363,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3379,7 +3379,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=112ccc24cb14de26757715d611285d0806d5d91f#112ccc24cb14de26757715d611285d0806d5d91f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 689f2b5c..1546df3a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -335,7 +335,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "112ccc24cb14de26757715d611285d0806d5d91f" +rev = "d3ed3194ebe96b921d06d1d3e607f0bf7873f792" features = [ "compat", "rand", diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index d507c9c3..108be18d 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -6,7 +6,7 @@ use std::{ use conduwuit::{ debug, err, implement, - utils::stream::{IterStream, 
WidebandExt}, + utils::stream::{automatic_width, IterStream, WidebandExt}, Result, }; use futures::{FutureExt, StreamExt, TryFutureExt}; @@ -75,22 +75,10 @@ pub async fn resolve_state( .await; debug!("Resolving state"); - let lock = self.services.globals.stateres_mutex.lock(); - - let event_fetch = |event_id| self.event_fetch(event_id); - let event_exists = |event_id| self.event_exists(event_id); - let state = state_res::resolve( - room_version_id, - &fork_states, - &auth_chain_sets, - &event_fetch, - &event_exists, - ) - .boxed() - .await - .map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?; - - drop(lock); + let state = self + .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .boxed() + .await?; debug!("State resolution done."); let state_events: Vec<_> = state @@ -119,3 +107,26 @@ pub async fn resolve_state( Ok(Arc::new(new_room_state)) } + +#[implement(super::Service)] +#[tracing::instrument(name = "ruma", level = "debug", skip_all)] +pub async fn state_resolution( + &self, + room_version: &RoomVersionId, + state_sets: &[StateMap>], + auth_chain_sets: &Vec>>, +) -> Result>> { + //TODO: ??? 
+ let _lock = self.services.globals.stateres_mutex.lock(); + + state_res::resolve( + room_version, + state_sets.iter(), + auth_chain_sets, + &|event_id| self.event_fetch(event_id), + &|event_id| self.event_exists(event_id), + automatic_width(), + ) + .await + .map_err(|e| err!(error!("State resolution failed: {e:?}"))) +} diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 51879d0d..658fb904 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -11,10 +11,7 @@ use conduwuit::{ PduEvent, Result, }; use futures::{FutureExt, StreamExt}; -use ruma::{ - state_res::{self, StateMap}, - EventId, RoomId, RoomVersionId, -}; +use ruma::{state_res::StateMap, EventId, RoomId, RoomVersionId}; // TODO: if we know the prev_events of the incoming event we can avoid the #[implement(super::Service)] @@ -157,24 +154,11 @@ pub(super) async fn state_at_incoming_resolved( fork_states.push(state); } - let lock = self.services.globals.stateres_mutex.lock(); - - let event_fetch = |event_id| self.event_fetch(event_id); - let event_exists = |event_id| self.event_exists(event_id); - let result = state_res::resolve( - room_version_id, - &fork_states, - &auth_chain_sets, - &event_fetch, - &event_exists, - ) - .boxed() - .await - .map_err(|e| err!(Database(warn!(?e, "State resolution on prev events failed.")))); - - drop(lock); - - let Ok(new_state) = result else { + let Ok(new_state) = self + .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .boxed() + .await + else { return Ok(None); }; From 6458f4b1957a54691dc660a1c70e8c89beea402c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 28 Dec 2024 00:57:02 +0000 Subject: [PATCH 0450/1248] refactor various Arc to OwnedEventId Signed-off-by: Jason Volk --- Cargo.lock | 28 +++++++++-------- Cargo.toml | 3 +- src/admin/debug/commands.rs | 10 +++--- src/admin/user/commands.rs | 2 +- 
src/api/client/redact.rs | 4 +-- src/api/client/send.rs | 2 +- src/api/client/state.rs | 9 ++---- src/api/client/sync/v3.rs | 2 +- src/api/server/send.rs | 11 +++++-- src/api/server/send_leave.rs | 8 ++++- src/core/pdu/builder.rs | 6 ++-- src/core/pdu/event.rs | 6 ++-- src/core/pdu/mod.rs | 14 ++++----- src/core/pdu/redact.rs | 10 +++--- src/service/rooms/auth_chain/mod.rs | 8 ++--- .../fetch_and_handle_outliers.rs | 17 +++++----- src/service/rooms/event_handler/fetch_prev.rs | 12 +++---- .../rooms/event_handler/fetch_state.rs | 31 +++++++------------ .../event_handler/handle_incoming_pdu.rs | 2 +- .../rooms/event_handler/handle_outlier_pdu.rs | 20 +++++------- .../rooms/event_handler/handle_prev_pdu.rs | 14 +++------ src/service/rooms/event_handler/mod.rs | 6 ++-- .../rooms/event_handler/resolve_state.rs | 14 ++++----- .../rooms/event_handler/state_at_incoming.rs | 12 +++---- .../event_handler/upgrade_outlier_pdu.rs | 2 +- src/service/rooms/pdu_metadata/data.rs | 6 +++- src/service/rooms/pdu_metadata/mod.rs | 6 ++-- src/service/rooms/read_receipt/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 27 ++++++---------- 29 files changed, 142 insertions(+), 152 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9134b290..8b41bf35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3175,7 +3175,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "assign", "js_int", @@ -3197,7 +3197,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "js_int", "ruma-common", @@ -3209,7 +3209,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "as_variant", "assign", @@ -3232,7 +3232,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "as_variant", "base64 0.22.1", @@ -3250,6 +3250,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", + "smallvec", "thiserror 2.0.7", "time", "tracing", @@ -3262,7 +3263,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3276,6 +3277,7 @@ dependencies = [ "ruma-macros", "serde", "serde_json", + "smallvec", "thiserror 2.0.7", "tracing", "url", @@ -3286,7 +3288,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "bytes", "http", @@ -3304,7 +3306,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "js_int", "thiserror 2.0.7", @@ -3313,7 +3315,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "js_int", "ruma-common", @@ -3323,7 +3325,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3338,7 +3340,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "js_int", "ruma-common", @@ -3350,7 +3352,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "headers", "http", @@ -3363,7 +3365,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3379,7 +3381,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d3ed3194ebe96b921d06d1d3e607f0bf7873f792#d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 1546df3a..ea153fda 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,7 @@ features = [ "const_generics", "const_new", "serde", + "union", "write", ] @@ -335,7 +336,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "d3ed3194ebe96b921d06d1d3e607f0bf7873f792" +rev = "54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" features = [ "compat", "rand", diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 0c5a7cbd..85a014b9 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -2,7 +2,6 @@ use std::{ collections::HashMap, fmt::Write, iter::once, - sync::Arc, time::{Instant, SystemTime}, }; @@ -13,7 +12,8 @@ use futures::{FutureExt, StreamExt}; use ruma::{ 
api::{client::error::ErrorKind, federation::event::get_room_state}, events::room::message::RoomMessageEventContent, - CanonicalJsonObject, EventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName, + CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, + ServerName, }; use service::rooms::state_compressor::HashSetCompressStateEvent; use tracing_subscriber::EnvFilter; @@ -598,14 +598,14 @@ pub(super) async fn force_set_room_state_from_server( let room_version = self.services.rooms.state.get_room_version(&room_id).await?; - let mut state: HashMap> = HashMap::new(); + let mut state: HashMap = HashMap::new(); let remote_state_response = self .services .sending .send_federation_request(&server_name, get_room_state::v1::Request { room_id: room_id.clone().into(), - event_id: first_pdu.event_id.clone().into(), + event_id: first_pdu.event_id.clone(), }) .await?; @@ -677,7 +677,7 @@ pub(super) async fn force_set_room_state_from_server( .services .rooms .event_handler - .resolve_state(room_id.clone().as_ref(), &room_version, state) + .resolve_state(&room_id, &room_version, state) .await?; info!("Forcing new room state"); diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 1cbbf856..5758d937 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -922,7 +922,7 @@ pub(super) async fn redact_event( PduBuilder { redacts: Some(event.event_id.clone()), ..PduBuilder::timeline(&RoomRedactionEventContent { - redacts: Some(event.event_id.clone().into()), + redacts: Some(event.event_id.clone()), reason: Some(reason), }) }, diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index 0b01238f..ba59a010 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -24,7 +24,7 @@ pub(crate) async fn redact_event_route( .timeline .build_and_append_pdu( PduBuilder { - redacts: Some(body.event_id.clone().into()), + redacts: Some(body.event_id.clone()), 
..PduBuilder::timeline(&RoomRedactionEventContent { redacts: Some(body.event_id.clone()), reason: body.reason.clone(), @@ -38,5 +38,5 @@ pub(crate) async fn redact_event_route( drop(state_lock); - Ok(redact_event::v3::Response { event_id: event_id.into() }) + Ok(redact_event::v3::Response { event_id }) } diff --git a/src/api/client/send.rs b/src/api/client/send.rs index 9afa7e8c..39340070 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -92,5 +92,5 @@ pub(crate) async fn send_message_event_route( drop(state_lock); - Ok(send_message_event::v3::Response { event_id: event_id.into() }) + Ok(send_message_event::v3::Response { event_id }) } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index f56444c7..e5a7dd28 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use axum::extract::State; use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, Error, PduEvent, Result}; use ruma::{ @@ -16,7 +14,7 @@ use ruma::{ AnyStateEventContent, StateEventType, }, serde::Raw, - EventId, RoomId, UserId, + OwnedEventId, RoomId, UserId, }; use service::Services; @@ -50,8 +48,7 @@ pub(crate) async fn send_state_event_for_key_route( None }, ) - .await? 
- .into(), + .await?, }) } @@ -177,7 +174,7 @@ async fn send_state_event_for_key_helper( json: &Raw, state_key: String, timestamp: Option, -) -> Result> { +) -> Result { allowed_to_send_state_event(services, room_id, event_type, json).await?; let state_lock = services.rooms.state.mutex.lock(room_id).await; let event_id = services diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a05bcf98..fbf42f92 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -441,7 +441,7 @@ async fn handle_left_room( // This is just a rejected invite, not a room we know // Insert a leave event anyways let event = PduEvent { - event_id: EventId::new(services.globals.server_name()).into(), + event_id: EventId::new(services.globals.server_name()), sender: sender_user.to_owned(), origin: None, origin_server_ts: utils::millis_since_unix_epoch() diff --git a/src/api/server/send.rs b/src/api/server/send.rs index c5fc7118..dbe0108f 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -6,7 +6,7 @@ use conduwuit::{ debug, debug_warn, err, error, result::LogErr, trace, utils::ReadyExt, warn, Err, Error, Result, }; -use futures::StreamExt; +use futures::{FutureExt, StreamExt}; use ruma::{ api::{ client::error::ErrorKind, @@ -74,8 +74,13 @@ pub(crate) async fn send_transaction_message_route( ); let resolved_map = - handle_pdus(&services, &client, &body.pdus, body.origin(), &txn_start_time).await?; - handle_edus(&services, &client, &body.edus, body.origin()).await; + handle_pdus(&services, &client, &body.pdus, body.origin(), &txn_start_time) + .boxed() + .await?; + + handle_edus(&services, &client, &body.edus, body.origin()) + .boxed() + .await; debug!( pdus = ?body.pdus.len(), diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index 016ed7fa..e955a267 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -2,6 +2,7 @@ use axum::extract::State; use conduwuit::{err, Err, Result}; +use 
futures::FutureExt; use ruma::{ api::federation::membership::create_leave_event, events::{ @@ -154,10 +155,15 @@ async fn create_leave_event( .rooms .event_handler .handle_incoming_pdu(origin, room_id, &event_id, value, true) + .boxed() .await? .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; drop(mutex_lock); - services.sending.send_pdu_room(room_id, &pdu_id).await + services + .sending + .send_pdu_room(room_id, &pdu_id) + .boxed() + .await } diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index db3bb20a..b25d4e9e 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -1,8 +1,8 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; use ruma::{ events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, - EventId, MilliSecondsSinceUnixEpoch, + MilliSecondsSinceUnixEpoch, OwnedEventId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -19,7 +19,7 @@ pub struct Builder { pub state_key: Option, - pub redacts: Option>, + pub redacts: Option, /// For timestamped messaging, should only be used for appservices. 
/// Will be set to current time if None diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index fb98d686..6a92afe8 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -1,13 +1,11 @@ -use std::sync::Arc; - pub use ruma::state_res::Event; -use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +use ruma::{events::TimelineEventType, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; use serde_json::value::RawValue as RawJsonValue; use super::Pdu; impl Event for Pdu { - type Id = Arc; + type Id = OwnedEventId; fn event_id(&self) -> &Self::Id { &self.event_id } diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index c90e174e..ba1d2ce1 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -12,11 +12,11 @@ mod strip; mod tests; mod unsigned; -use std::{cmp::Ordering, sync::Arc}; +use std::cmp::Ordering; use ruma::{ - events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedRoomId, - OwnedUserId, UInt, + events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, + OwnedRoomId, OwnedUserId, UInt, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; @@ -35,7 +35,7 @@ use crate::Result; /// Persistent Data Unit (Event) #[derive(Clone, Deserialize, Serialize, Debug)] pub struct Pdu { - pub event_id: Arc, + pub event_id: OwnedEventId, pub room_id: OwnedRoomId, pub sender: OwnedUserId, #[serde(skip_serializing_if = "Option::is_none")] @@ -46,11 +46,11 @@ pub struct Pdu { pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, - pub prev_events: Vec>, + pub prev_events: Vec, pub depth: UInt, - pub auth_events: Vec>, + pub auth_events: Vec, #[serde(skip_serializing_if = "Option::is_none")] - pub redacts: Option>, + pub redacts: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub unsigned: Option>, pub hashes: EventHash, diff --git a/src/core/pdu/redact.rs 
b/src/core/pdu/redact.rs index dc97bfa8..5d33eeca 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -1,9 +1,7 @@ -use std::sync::Arc; - use ruma::{ canonical_json::redact_content_in_place, events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, - EventId, RoomVersionId, + OwnedEventId, RoomVersionId, }; use serde::Deserialize; use serde_json::{ @@ -73,15 +71,15 @@ pub fn is_redacted(&self) -> bool { /// > such events over the Client-Server API. #[implement(super::Pdu)] #[must_use] -pub fn copy_redacts(&self) -> (Option>, Box) { +pub fn copy_redacts(&self) -> (Option, Box) { if self.kind == TimelineEventType::RoomRedaction { if let Ok(mut content) = serde_json::from_str::(self.content.get()) { if let Some(redacts) = content.redacts { - return (Some(redacts.into()), self.content.clone()); + return (Some(redacts), self.content.clone()); } else if let Some(redacts) = self.redacts.clone() { - content.redacts = Some(redacts.into()); + content.redacts = Some(redacts); return ( self.redacts.clone(), to_raw_value(&content).expect("Must be valid, we only added redacts field"), diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 87992f2d..796eb5d2 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -12,7 +12,7 @@ use conduwuit::{ validated, warn, Err, Result, }; use futures::{Stream, StreamExt}; -use ruma::{EventId, RoomId}; +use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; use crate::{rooms, rooms::short::ShortEventId, Dep}; @@ -46,7 +46,7 @@ impl Service { &'a self, room_id: &RoomId, starting_events: I, - ) -> Result> + Send + '_> + ) -> Result + Send + '_> where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { @@ -63,7 +63,7 @@ impl Service { &'a self, room_id: &RoomId, starting_events: I, - ) -> Result>> + ) -> Result> where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { @@ -185,7 +185,7 @@ impl Service { 
room_id: &RoomId, event_id: &EventId, ) -> Result> { - let mut todo = vec![Arc::from(event_id)]; + let mut todo = vec![event_id.to_owned()]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index af0ef67d..f980ca24 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -10,7 +10,7 @@ use conduwuit::{ }; use futures::TryFutureExt; use ruma::{ - api::federation::event::get_event, CanonicalJsonValue, EventId, RoomId, RoomVersionId, + api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId, ServerName, }; @@ -27,7 +27,7 @@ use ruma::{ pub(super) async fn fetch_and_handle_outliers<'a>( &self, origin: &'a ServerName, - events: &'a [Arc], + events: &'a [OwnedEventId], create_event: &'a PduEvent, room_id: &'a RoomId, room_version_id: &'a RoomVersionId, @@ -62,7 +62,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( // c. Ask origin server over federation // We also handle its auth chain here so we don't get a stack overflow in // handle_outlier_pdu. 
- let mut todo_auth_events = vec![Arc::clone(id)]; + let mut todo_auth_events = vec![id.clone()]; let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); let mut events_all = HashSet::with_capacity(todo_auth_events.len()); while let Some(next_id) = todo_auth_events.pop() { @@ -124,14 +124,15 @@ pub(super) async fn fetch_and_handle_outliers<'a>( ); } - if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) + if let Some(auth_events) = value + .get("auth_events") + .and_then(CanonicalJsonValue::as_array) { for auth_event in auth_events { if let Ok(auth_event) = - serde_json::from_value(auth_event.clone().into()) + serde_json::from_value::(auth_event.clone().into()) { - let a: Arc = auth_event; - todo_auth_events.push(a); + todo_auth_events.push(auth_event); } else { warn!("Auth event id is not valid"); } @@ -201,7 +202,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( }, | Err(e) => { warn!("Authentication of event {next_id} failed: {e:?}"); - back_off(next_id.into()); + back_off(next_id); }, } } diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index b271958f..ca93e11d 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -8,7 +8,7 @@ use futures::{future, FutureExt}; use ruma::{ int, state_res::{self}, - uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, + uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, ServerName, }; @@ -23,14 +23,14 @@ pub(super) async fn fetch_prev( create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, - initial_set: Vec>, + initial_set: Vec, ) -> Result<( - Vec>, - HashMap, (Arc, BTreeMap)>, + Vec, + HashMap, BTreeMap)>, )> { - let mut graph: HashMap, _> = HashMap::with_capacity(initial_set.len()); + let mut graph: HashMap = HashMap::with_capacity(initial_set.len()); let mut 
eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = initial_set; + let mut todo_outlier_stack: Vec = initial_set; let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 9ea3e081..edc47194 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -1,15 +1,14 @@ -use std::{ - collections::{hash_map, HashMap}, - sync::Arc, -}; +use std::collections::{hash_map, HashMap}; use conduwuit::{debug, implement, warn, Err, Error, PduEvent, Result}; use futures::FutureExt; use ruma::{ - api::federation::event::get_room_state_ids, events::StateEventType, EventId, RoomId, - RoomVersionId, ServerName, + api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId, + RoomId, RoomVersionId, ServerName, }; +use crate::rooms::short::ShortStateKey; + /// Call /state_ids to find out what the state at this pdu is. 
We trust the /// server's response to some extend (sic), but we still do a lot of checks /// on the events @@ -22,31 +21,25 @@ pub(super) async fn fetch_state( room_id: &RoomId, room_version_id: &RoomVersionId, event_id: &EventId, -) -> Result>>> { +) -> Result>> { debug!("Fetching state ids"); let res = self .services .sending .send_federation_request(origin, get_room_state_ids::v1::Request { room_id: room_id.to_owned(), - event_id: (*event_id).to_owned(), + event_id: event_id.to_owned(), }) .await .inspect_err(|e| warn!("Fetching state for event failed: {e}"))?; debug!("Fetching state events"); - let collect = res - .pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(); - let state_vec = self - .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id) + .fetch_and_handle_outliers(origin, &res.pdu_ids, create_event, room_id, room_version_id) .boxed() .await; - let mut state: HashMap<_, Arc> = HashMap::with_capacity(state_vec.len()); + let mut state: HashMap = HashMap::with_capacity(state_vec.len()); for (pdu, _) in state_vec { let state_key = pdu .state_key @@ -61,10 +54,10 @@ pub(super) async fn fetch_state( match state.entry(shortstatekey) { | hash_map::Entry::Vacant(v) => { - v.insert(Arc::from(&*pdu.event_id)); + v.insert(pdu.event_id.clone()); }, | hash_map::Entry::Occupied(_) => - return Err(Error::bad_database( + return Err!(Database( "State event's type and state_key combination exists multiple times.", )), } @@ -77,7 +70,7 @@ pub(super) async fn fetch_state( .get_shortstatekey(&StateEventType::RoomCreate, "") .await?; - if state.get(&create_shortstatekey).map(AsRef::as_ref) != Some(&create_event.event_id) { + if state.get(&create_shortstatekey) != Some(&create_event.event_id) { return Err!(Database("Incoming event refers to wrong create event.")); } diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index ca56228d..0e0409b4 100644 --- 
a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -147,7 +147,7 @@ pub async fn handle_incoming_pdu<'a>( .bad_event_ratelimiter .write() .expect("locked") - .entry(prev_id.into()) + .entry(prev_id) { | Entry::Vacant(e) => { e.insert((now, 1)); diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 59ef27ba..c3278329 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -79,19 +79,13 @@ pub(super) async fn handle_outlier_pdu<'a>( // the auth events are also rejected "due to auth events" // NOTE: Step 5 is not applied anymore because it failed too often debug!("Fetching auth events"); - Box::pin( - self.fetch_and_handle_outliers( - origin, - &incoming_pdu - .auth_events - .iter() - .map(|x| Arc::from(&**x)) - .collect::>>(), - create_event, - room_id, - &room_version_id, - ), - ) + Box::pin(self.fetch_and_handle_outliers( + origin, + &incoming_pdu.auth_events, + create_event, + room_id, + &room_version_id, + )) .await; } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index becaeb17..a8893160 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -5,9 +5,9 @@ use std::{ }; use conduwuit::{ - debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result, + debug, implement, utils::math::continue_exponential_backoff_secs, Err, PduEvent, Result, }; -use ruma::{api::client::error::ErrorKind, CanonicalJsonValue, EventId, RoomId, ServerName}; +use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName}; #[implement(super::Service)] #[allow(clippy::type_complexity)] @@ -22,7 +22,7 @@ pub(super) async fn handle_prev_pdu<'a>( event_id: &'a EventId, room_id: &'a RoomId, eventid_info: 
&mut HashMap< - Arc, + OwnedEventId, (Arc, BTreeMap), >, create_event: &Arc, @@ -31,14 +31,10 @@ pub(super) async fn handle_prev_pdu<'a>( ) -> Result { // Check for disabled again because it might have changed if self.services.metadata.is_disabled(room_id).await { - debug!( + return Err!(Request(Forbidden(debug_warn!( "Federaton of room {room_id} is currently disabled on this server. Request by \ origin {origin} and event ID {event_id}" - ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Federation of this room is currently disabled on this server.", - )); + )))); } if let Some((time, tries)) = self diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index bfc5a014..8bcbc48b 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -23,7 +23,7 @@ use conduwuit::{ }; use futures::TryFutureExt; use ruma::{ - events::room::create::RoomCreateEventContent, state_res::RoomVersion, EventId, OwnedEventId, + events::room::create::RoomCreateEventContent, state_res::RoomVersion, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, }; @@ -97,11 +97,11 @@ impl crate::Service for Service { } impl Service { - async fn event_exists(&self, event_id: Arc) -> bool { + async fn event_exists(&self, event_id: OwnedEventId) -> bool { self.services.timeline.pdu_exists(&event_id).await } - async fn event_fetch(&self, event_id: Arc) -> Option> { + async fn event_fetch(&self, event_id: OwnedEventId) -> Option> { self.services .timeline .get_pdu(&event_id) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 108be18d..b47111d9 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -12,7 +12,7 @@ use conduwuit::{ use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ state_res::{self, StateMap}, - EventId, RoomId, RoomVersionId, + OwnedEventId, RoomId, 
RoomVersionId, }; use crate::rooms::state_compressor::CompressedStateEvent; @@ -23,7 +23,7 @@ pub async fn resolve_state( &self, room_id: &RoomId, room_version_id: &RoomVersionId, - incoming_state: HashMap>, + incoming_state: HashMap, ) -> Result>> { debug!("Loading current room state ids"); let current_sstatehash = self @@ -44,7 +44,7 @@ pub async fn resolve_state( for state in &fork_states { let starting_events = state.values().map(Borrow::borrow); - let auth_chain: HashSet> = self + let auth_chain: HashSet = self .services .auth_chain .get_event_ids(room_id, starting_events) @@ -56,7 +56,7 @@ pub async fn resolve_state( } debug!("Loading fork states"); - let fork_states: Vec>> = fork_states + let fork_states: Vec> = fork_states .into_iter() .stream() .wide_then(|fork_state| { @@ -113,9 +113,9 @@ pub async fn resolve_state( pub async fn state_resolution( &self, room_version: &RoomVersionId, - state_sets: &[StateMap>], - auth_chain_sets: &Vec>>, -) -> Result>> { + state_sets: &[StateMap], + auth_chain_sets: &Vec>, +) -> Result> { //TODO: ??? 
let _lock = self.services.globals.stateres_mutex.lock(); diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 658fb904..fa2ce1cd 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -11,7 +11,7 @@ use conduwuit::{ PduEvent, Result, }; use futures::{FutureExt, StreamExt}; -use ruma::{state_res::StateMap, EventId, RoomId, RoomVersionId}; +use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; // TODO: if we know the prev_events of the incoming event we can avoid the #[implement(super::Service)] @@ -20,8 +20,8 @@ use ruma::{state_res::StateMap, EventId, RoomId, RoomVersionId}; pub(super) async fn state_at_incoming_degree_one( &self, incoming_pdu: &Arc, -) -> Result>>> { - let prev_event = &*incoming_pdu.prev_events[0]; +) -> Result>> { + let prev_event = &incoming_pdu.prev_events[0]; let Ok(prev_event_sstatehash) = self .services .state_accessor @@ -56,7 +56,7 @@ pub(super) async fn state_at_incoming_degree_one( .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) .await; - state.insert(shortstatekey, Arc::from(prev_event)); + state.insert(shortstatekey, prev_event.clone()); // Now it's the state after the pdu } @@ -72,7 +72,7 @@ pub(super) async fn state_at_incoming_resolved( incoming_pdu: &Arc, room_id: &RoomId, room_version_id: &RoomVersionId, -) -> Result>>> { +) -> Result>> { debug!("Calculating state at event using state res"); let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len()); @@ -142,7 +142,7 @@ pub(super) async fn state_at_incoming_resolved( starting_events.push(id.borrow()); } - let auth_chain: HashSet> = self + let auth_chain: HashSet = self .services .auth_chain .get_event_ids(room_id, starting_events.into_iter()) diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs 
index aa484cd6..8adf4246 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -282,7 +282,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( } trace!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone().into()); + extremities.insert(incoming_pdu.event_id.clone()); // Now that the event has passed all auth it is added into the timeline. // We use the `state_at_event` instead of `state_after` so we accurately diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 01950975..2e6ecbb5 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -88,7 +88,11 @@ impl Data { }) } - pub(super) fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) { + #[inline] + pub(super) fn mark_as_referenced<'a, I>(&self, room_id: &RoomId, event_ids: I) + where + I: Iterator, + { for prev in event_ids { let key = (room_id, prev); self.referencedevents.put_raw(key, []); diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index ac70dbc3..4cb14ebc 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -98,9 +98,11 @@ impl Service { pdus } - #[inline] #[tracing::instrument(skip_all, level = "debug")] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) { + pub fn mark_as_referenced<'a, I>(&self, room_id: &RoomId, event_ids: I) + where + I: Iterator, + { self.db.mark_as_referenced(room_id, event_ids); } diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 53e64957..9777faeb 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -77,7 +77,7 @@ impl Service { let pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await?; - let event_id: OwnedEventId = pdu.event_id.into(); + let event_id: OwnedEventId = 
pdu.event_id; let user_id: OwnedUserId = user_id.to_owned(); let content: BTreeMap = BTreeMap::from_iter([( event_id, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 3b81e2e5..a2f1ab1b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -10,7 +10,7 @@ use std::{ use conduwuit::{ debug, debug_warn, err, error, implement, info, - pdu::{EventHash, PduBuilder, PduCount, PduEvent}, + pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, utils::{self, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, validated, warn, Err, Error, Result, Server, }; @@ -371,7 +371,7 @@ impl Service { // We must keep track of all events that have been referenced. self.services .pdu_metadata - .mark_as_referenced(&pdu.room_id, &pdu.prev_events); + .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); self.services .state @@ -681,12 +681,12 @@ impl Service { timestamp, } = pdu_builder; - let prev_events: Vec<_> = self + let prev_events: Vec = self .services .state .get_forward_extremities(room_id) .take(20) - .map(Arc::from) + .map(Into::into) .collect() .await; @@ -834,17 +834,10 @@ impl Service { } // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + pdu.event_id = gen_event_id(&pdu_json, &room_version_id)?; - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); + pdu_json + .insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into())); // Generate short event id let _shorteventid = self @@ -867,7 +860,7 @@ impl Service { room_id: &RoomId, state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state * mutex */ - ) -> Result> { + ) -> Result { let (pdu, pdu_json) 
= self .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) .await?; @@ -987,7 +980,7 @@ impl Service { if soft_fail { self.services .pdu_metadata - .mark_as_referenced(&pdu.room_id, &pdu.prev_events); + .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); self.services .state @@ -1170,7 +1163,7 @@ impl Service { backfill_server, federation::backfill::get_backfill::v1::Request { room_id: room_id.to_owned(), - v: vec![first_pdu.1.event_id.as_ref().to_owned()], + v: vec![first_pdu.1.event_id.clone()], limit: uint!(100), }, ) From 74eb30c106dcd88e97ba1367a29bba9669e5d9d0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 25 Dec 2024 06:17:18 +0000 Subject: [PATCH 0451/1248] add LIFO scheduling for database frontend pool workers Signed-off-by: Jason Volk --- Cargo.lock | 6 ++---- Cargo.toml | 8 ++++++++ src/database/pool.rs | 9 +++++++-- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8b41bf35..0404f778 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,8 +95,7 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-channel" version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +source = "git+https://github.com/jevolk/async-channel?rev=fefa543ca5eddf21237d75776fce98b7e09e924a#fefa543ca5eddf21237d75776fce98b7e09e924a" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -1259,8 +1258,7 @@ dependencies = [ [[package]] name = "event-listener" version = "5.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +source = "git+https://github.com/jevolk/event-listener?rev=96d7e0fc026d8f708b19bc9267a382676a50354c#96d7e0fc026d8f708b19bc9267a382676a50354c" dependencies = [ "concurrent-queue", "parking", diff --git a/Cargo.toml 
b/Cargo.toml index ea153fda..36f6c1ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -529,6 +529,14 @@ rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" git = "https://github.com/girlbossceo/rustyline-async" rev = "deaeb0694e2083f53d363b648da06e10fc13900c" +# adds LIFO queue scheduling; this should be updated with PR progress. +[patch.crates-io.event-listener] +git = "https://github.com/jevolk/event-listener" +rev = "96d7e0fc026d8f708b19bc9267a382676a50354c" +[patch.crates-io.async-channel] +git = "https://github.com/jevolk/async-channel" +rev = "fefa543ca5eddf21237d75776fce98b7e09e924a" + # # Our crates # diff --git a/src/database/pool.rs b/src/database/pool.rs index 51e705ce..1c55c456 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -8,7 +8,7 @@ use std::{ }, }; -use async_channel::{Receiver, RecvError, Sender}; +use async_channel::{Receiver, RecvError, Sched, Sender}; use conduwuit::{ debug, debug_warn, defer, err, implement, result::DebugInspect, @@ -65,9 +65,14 @@ const QUEUE_LIMIT: (usize, usize) = (1, 2048); #[implement(Pool)] pub(crate) async fn new(server: &Arc) -> Result> { + const CHAN_SCHED: (Sched, Sched) = (Sched::Fifo, Sched::Lifo); + let (total_workers, queue_sizes, topology) = configure(server); - let (senders, receivers) = queue_sizes.into_iter().map(async_channel::bounded).unzip(); + let (senders, receivers) = queue_sizes + .into_iter() + .map(|cap| async_channel::bounded_with_sched(cap, CHAN_SCHED)) + .unzip(); let pool = Arc::new(Self { server: server.clone(), From e21403a4d4abba20d48c5801a703efa5cde82452 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 28 Dec 2024 02:51:30 +0000 Subject: [PATCH 0452/1248] use VecDeque for todo queues Signed-off-by: Jason Volk --- src/service/rooms/auth_chain/mod.rs | 9 +++++---- .../rooms/event_handler/fetch_and_handle_outliers.rs | 8 ++++---- src/service/rooms/event_handler/fetch_prev.rs | 8 ++++---- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git 
a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 796eb5d2..67883d01 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,7 +1,7 @@ mod data; use std::{ - collections::{BTreeSet, HashSet}, + collections::{BTreeSet, HashSet, VecDeque}, fmt::Debug, sync::Arc, }; @@ -185,10 +185,10 @@ impl Service { room_id: &RoomId, event_id: &EventId, ) -> Result> { - let mut todo = vec![event_id.to_owned()]; + let mut todo: VecDeque<_> = [event_id.to_owned()].into(); let mut found = HashSet::new(); - while let Some(event_id) = todo.pop() { + while let Some(event_id) = todo.pop_front() { trace!(?event_id, "processing auth event"); match self.services.timeline.get_pdu(&event_id).await { @@ -218,7 +218,8 @@ impl Service { ?auth_event, "adding auth event to processing queue" ); - todo.push(auth_event.clone()); + + todo.push_back(auth_event.clone()); } } }, diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index f980ca24..1bad4659 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, BTreeMap, HashSet}, + collections::{hash_map, BTreeMap, HashSet, VecDeque}, sync::Arc, time::Instant, }; @@ -62,10 +62,10 @@ pub(super) async fn fetch_and_handle_outliers<'a>( // c. Ask origin server over federation // We also handle its auth chain here so we don't get a stack overflow in // handle_outlier_pdu. 
- let mut todo_auth_events = vec![id.clone()]; + let mut todo_auth_events: VecDeque<_> = [id.clone()].into(); let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); let mut events_all = HashSet::with_capacity(todo_auth_events.len()); - while let Some(next_id) = todo_auth_events.pop() { + while let Some(next_id) = todo_auth_events.pop_front() { if let Some((time, tries)) = self .services .globals @@ -132,7 +132,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( if let Ok(auth_event) = serde_json::from_value::(auth_event.clone().into()) { - todo_auth_events.push(auth_event); + todo_auth_events.push_back(auth_event); } else { warn!("Auth event id is not valid"); } diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index ca93e11d..3f121f69 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet, VecDeque}, sync::Arc, }; @@ -30,13 +30,13 @@ pub(super) async fn fetch_prev( )> { let mut graph: HashMap = HashMap::with_capacity(initial_set.len()); let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec = initial_set; + let mut todo_outlier_stack: VecDeque = initial_set.into(); let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; let mut amount = 0; - while let Some(prev_event_id) = todo_outlier_stack.pop() { + while let Some(prev_event_id) = todo_outlier_stack.pop_front() { self.services.server.check_running()?; if let Some((pdu, mut json_opt)) = self @@ -74,7 +74,7 @@ pub(super) async fn fetch_prev( amount = amount.saturating_add(1); for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(prev_prev.clone()); + todo_outlier_stack.push_back(prev_prev.clone()); } } From 3a2c5be4f4d6b74527ae40ae05f9a5b0e786314e Mon Sep 17 00:00:00 2001 From: 
Jason Volk Date: Sat, 28 Dec 2024 04:51:19 +0000 Subject: [PATCH 0453/1248] reduce exponential backoff for prev fetch Signed-off-by: Jason Volk --- src/service/rooms/event_handler/fetch_and_handle_outliers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 1bad4659..5c561789 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -75,8 +75,8 @@ pub(super) async fn fetch_and_handle_outliers<'a>( .get(&*next_id) { // Exponential backoff - const MIN_DURATION: u64 = 5 * 60; - const MAX_DURATION: u64 = 60 * 60 * 24; + const MIN_DURATION: u64 = 60 * 2; + const MAX_DURATION: u64 = 60 * 60 * 8; if continue_exponential_backoff_secs( MIN_DURATION, MAX_DURATION, From d91570d0e61d7c06b0a4659b3803ced3e52ccf35 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 18 Dec 2024 20:48:40 +0000 Subject: [PATCH 0454/1248] add Error variant for FeatureDisabled Signed-off-by: Jason Volk --- src/core/error/mod.rs | 6 +++++- src/core/error/response.rs | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 12ba0797..f38178e6 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -98,6 +98,8 @@ pub enum Error { ContentDisposition(#[from] ruma::http_headers::ContentDispositionParseError), #[error("{0}")] Database(Cow<'static, str>), + #[error("Feature '{0}' is not available on this server.")] + FeatureDisabled(Cow<'static, str>), #[error("Remote server {0} responded with: {1}")] Federation(ruma::OwnedServerName, ruma::api::client::error::Error), #[error("{0} in {1}")] @@ -153,12 +155,13 @@ impl Error { /// Returns the Matrix error code / error kind #[inline] pub fn kind(&self) -> ruma::api::client::error::ErrorKind { - use ruma::api::client::error::ErrorKind::Unknown; + use 
ruma::api::client::error::ErrorKind::{FeatureDisabled, Unknown}; match self { | Self::Federation(_, error) | Self::Ruma(error) => response::ruma_error_kind(error).clone(), | Self::BadRequest(kind, ..) | Self::Request(kind, ..) => kind.clone(), + | Self::FeatureDisabled(..) => FeatureDisabled, | _ => Unknown, } } @@ -172,6 +175,7 @@ impl Error { | Self::Federation(_, error) | Self::Ruma(error) => error.status_code, | Self::Request(kind, _, code) => response::status_code(kind, *code), | Self::BadRequest(kind, ..) => response::bad_request_code(kind), + | Self::FeatureDisabled(..) => response::bad_request_code(&self.kind()), | Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), | Self::Conflict(_) => StatusCode::CONFLICT, | _ => StatusCode::INTERNAL_SERVER_ERROR, diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 568238c3..335fddab 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -66,7 +66,7 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode { | Unrecognized => StatusCode::METHOD_NOT_ALLOWED, // 404 - | NotFound => StatusCode::NOT_FOUND, + | NotFound | NotImplemented | FeatureDisabled => StatusCode::NOT_FOUND, // 403 | GuestAccessForbidden From 0a9b6c136fcc29d4f72996dc7eaf7ed4948e820a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 28 Dec 2024 23:31:24 +0000 Subject: [PATCH 0455/1248] refactor for ruma identifiers optimizations Signed-off-by: Jason Volk --- Cargo.lock | 26 ++++++------ Cargo.toml | 2 +- src/admin/debug/commands.rs | 2 +- src/admin/room/alias.rs | 5 +-- src/admin/room/moderation.rs | 28 ++++++------- src/api/client/room/create.rs | 40 +++++++++---------- src/api/client/session.rs | 6 +-- src/api/client/sync/v3.rs | 10 ++--- src/api/client/sync/v4.rs | 4 +- src/api/router/auth.rs | 2 +- src/api/server/invite.rs | 4 +- src/service/globals/mod.rs | 6 +-- src/service/rooms/alias/mod.rs | 6 +-- .../rooms/event_handler/parse_incoming_pdu.rs | 4 +- 
src/service/rooms/state/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 10 ++--- src/service/sending/data.rs | 8 ++-- src/service/sending/sender.rs | 2 +- src/service/users/mod.rs | 2 +- 19 files changed, 81 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0404f778..5b00a313 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3173,7 +3173,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "assign", "js_int", @@ -3195,7 +3195,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "js_int", "ruma-common", @@ -3207,7 +3207,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "as_variant", "assign", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "as_variant", "base64 
0.22.1", @@ -3261,7 +3261,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3286,7 +3286,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "bytes", "http", @@ -3304,7 +3304,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "js_int", "thiserror 2.0.7", @@ -3313,7 +3313,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "js_int", "ruma-common", @@ -3323,7 +3323,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3338,7 +3338,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "js_int", "ruma-common", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "headers", "http", @@ -3363,7 +3363,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3379,7 +3379,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a#54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 36f6c1ad..24e6eb79 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -336,7 +336,7 @@ version = "0.1.2" 
[workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "54da128bbe09a0c6d14c9a6bf7b6b54a2d7c835a" +rev = "30a08ff7be779df6858036c3f01f3ba64c1ee785" features = [ "compat", "rand", diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 85a014b9..5bc65d9b 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -75,7 +75,7 @@ pub(super) async fn parse_pdu(&self) -> Result { match serde_json::from_str(&string) { | Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { | Ok(hash) => { - let event_id = EventId::parse(format!("${hash}")); + let event_id = OwnedEventId::parse(format!("${hash}")); match serde_json::from_value::( serde_json::to_value(value).expect("value is json"), diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 835138b2..4490600d 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -4,8 +4,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, - RoomId, + events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomId, }; use crate::{escape_html, Command}; @@ -58,7 +57,7 @@ pub(super) async fn process( | RoomAliasCommand::Which { ref room_alias_localpart } => { let room_alias_str = format!("#{}:{}", room_alias_localpart, services.globals.server_name()); - let room_alias = match RoomAliasId::parse_box(room_alias_str) { + let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { | Ok(alias) => alias, | Err(err) => return Ok(RoomMessageEventContent::text_plain(format!( diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index b16aff1f..bf54505e 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -105,10 +105,9 @@ async fn ban_room( }; debug!("Room specified is a room ID, banning room ID"); + 
self.services.rooms.metadata.ban_room(room_id, true); - self.services.rooms.metadata.ban_room(&room_id, true); - - room_id + room_id.to_owned() } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, @@ -129,7 +128,7 @@ async fn ban_room( .services .rooms .alias - .resolve_local_alias(&room_alias) + .resolve_local_alias(room_alias) .await { room_id @@ -143,20 +142,20 @@ async fn ban_room( .services .rooms .alias - .resolve_alias(&room_alias, None) + .resolve_alias(room_alias, None) .await { | Ok((room_id, servers)) => { debug!( ?room_id, ?servers, - "Got federation response fetching room ID for {room}" + "Got federation response fetching room ID for {room_id}" ); room_id }, | Err(e) => { return Ok(RoomMessageEventContent::notice_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" + "Failed to resolve room alias {room_alias} to a room ID: {e}" ))); }, } @@ -316,7 +315,7 @@ async fn ban_list_of_rooms( }, }; - room_ids.push(room_id); + room_ids.push(room_id.to_owned()); } if room_alias_or_id.is_room_alias_id() { @@ -326,7 +325,7 @@ async fn ban_list_of_rooms( .services .rooms .alias - .resolve_local_alias(&room_alias) + .resolve_local_alias(room_alias) .await { room_id @@ -340,7 +339,7 @@ async fn ban_list_of_rooms( .services .rooms .alias - .resolve_alias(&room_alias, None) + .resolve_alias(room_alias, None) .await { | Ok((room_id, servers)) => { @@ -519,10 +518,9 @@ async fn unban_room( }; debug!("Room specified is a room ID, unbanning room ID"); + self.services.rooms.metadata.ban_room(room_id, false); - self.services.rooms.metadata.ban_room(&room_id, false); - - room_id + room_id.to_owned() } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, @@ -543,7 +541,7 @@ async fn unban_room( .services .rooms .alias - .resolve_local_alias(&room_alias) + .resolve_local_alias(room_alias) .await { room_id @@ -557,7 +555,7 @@ async 
fn unban_room( .services .rooms .alias - .resolve_alias(&room_alias, None) + .resolve_alias(room_alias, None) .await { | Ok((room_id, servers)) => { diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 89cddc0f..a78242ca 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -1,7 +1,9 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{debug_info, debug_warn, error, info, pdu::PduBuilder, warn, Err, Error, Result}; +use conduwuit::{ + debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, +}; use futures::FutureExt; use ruma::{ api::client::{ @@ -24,8 +26,7 @@ use ruma::{ }, int, serde::{JsonObject, Raw}, - CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, - RoomVersionId, + CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; use service::{appservice::RegistrationInfo, Services}; @@ -554,14 +555,15 @@ async fn room_alias_check( return Err(Error::BadRequest(ErrorKind::Unknown, "Room alias name is forbidden.")); } - let full_room_alias = RoomAliasId::parse(format!( - "#{}:{}", - room_alias_name, services.globals.config.server_name - )) - .map_err(|e| { - info!("Failed to parse room alias {room_alias_name}: {e}"); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid room alias specified.") - })?; + let server_name = services.globals.server_name(); + let full_room_alias = OwnedRoomAliasId::parse(format!("#{room_alias_name}:{server_name}")) + .map_err(|e| { + err!(Request(InvalidParam(debug_error!( + ?e, + ?room_alias_name, + "Failed to parse room alias.", + )))) + })?; if services .rooms @@ -620,15 +622,11 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result { // A new user joined an encrypted room - if !share_encrypted_room(services, sender_user, &user_id, Some(room_id)) + if 
!share_encrypted_room(services, sender_user, user_id, Some(room_id)) .await { - device_list_updates.insert(user_id); + device_list_updates.insert(user_id.into()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.into()); }, | _ => {}, } diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 0c6ea650..9915752e 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -30,7 +30,7 @@ use ruma::{ TimelineEventType::{self, *}, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, UInt, UserId, + uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, UInt, }; use service::{rooms::read_receipt::pack_receipts, Services}; @@ -243,7 +243,7 @@ pub(crate) async fn sync_events_v4_route( if pdu.kind == RoomMember { if let Some(state_key) = &pdu.state_key { let user_id = - UserId::parse(state_key.clone()).map_err(|_| { + OwnedUserId::parse(state_key.clone()).map_err(|_| { Error::bad_database("Invalid UserId in member PDU.") })?; diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index c5b040e0..dd25e091 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -212,7 +212,7 @@ async fn auth_appservice( .query .user_id .clone() - .map_or_else(user_id_default, UserId::parse) + .map_or_else(user_id_default, OwnedUserId::parse) else { return Err!(Request(InvalidUsername("Username is invalid."))); }; diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index a5b4a11c..6d3be04c 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -6,7 +6,7 @@ use ruma::{ api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, - CanonicalJsonValue, EventId, OwnedUserId, UserId, + CanonicalJsonValue, OwnedEventId, OwnedUserId, UserId, }; use crate::Ruma; @@ 
-86,7 +86,7 @@ pub(crate) async fn create_invite_route( .map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?; // Generate event id - let event_id = EventId::parse(format!( + let event_id = OwnedEventId::parse(format!( "${}", ruma::signatures::reference_hash(&signed_event, &body.room_version) .expect("ruma can calculate reference hashes") diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 88199f5f..3f98babe 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -10,9 +10,7 @@ use std::{ use conduwuit::{error, Config, Result}; use data::Data; use regex::RegexSet; -use ruma::{ - OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomAliasId, ServerName, UserId, -}; +use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; use tokio::sync::Mutex; use crate::service; @@ -73,7 +71,7 @@ impl crate::Service for Service { jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), stateres_mutex: Arc::new(Mutex::new(())), - admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name)) + admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &config.server_name)) .expect("#admins:server_name is valid alias name"), server_user: UserId::parse_with_server_name( String::from("conduit"), diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 9dcf9d8e..0acbb116 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -129,10 +129,10 @@ impl Service { servers: Option>, ) -> Result<(OwnedRoomId, Vec)> { if room.is_room_id() { - let room_id = RoomId::parse(room).expect("valid RoomId"); - Ok((room_id, servers.unwrap_or_default())) + let room_id: &RoomId = room.try_into().expect("valid RoomId"); + Ok((room_id.to_owned(), servers.unwrap_or_default())) } else { - let alias = &RoomAliasId::parse(room).expect("valid RoomAliasId"); + let alias: &RoomAliasId = 
room.try_into().expect("valid RoomAliasId"); self.resolve_alias(alias, servers).await } } diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index f3c75f36..0c11314d 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,5 +1,5 @@ use conduwuit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; -use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, RoomId}; +use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; use serde_json::value::RawValue as RawJsonValue; #[implement(super::Service)] @@ -14,7 +14,7 @@ pub async fn parse_incoming_pdu( let room_id: OwnedRoomId = value .get("room_id") .and_then(CanonicalJsonValue::as_str) - .map(RoomId::parse) + .map(OwnedRoomId::parse) .flat_ok_or(err!(Request(InvalidParam("Invalid room_id in pdu"))))?; let room_version_id = self diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index a7f79e94..2769beb8 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -139,7 +139,7 @@ impl Service { .state_cache .update_membership( room_id, - &user_id, + user_id, membership_event, &pdu.sender, None, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a2f1ab1b..81df7b35 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -35,7 +35,7 @@ use ruma::{ push::{Action, Ruleset, Tweak}, state_res::{self, Event, RoomVersion}, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, + OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -424,7 +424,7 @@ impl Service { if pdu.kind == 
TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { - let target_user_id = UserId::parse(state_key.clone())?; + let target_user_id = OwnedUserId::parse(state_key)?; if self.services.users.is_active_local(&target_user_id).await { push_target.insert(target_user_id); @@ -534,7 +534,7 @@ impl Service { | TimelineEventType::RoomMember => { if let Some(state_key) = &pdu.state_key { // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) + let target_user_id = UserId::parse(state_key) .expect("This state_key was previously validated"); let content: RoomMemberEventContent = pdu.get_content()?; @@ -550,7 +550,7 @@ impl Service { .state_cache .update_membership( &pdu.room_id, - &target_user_id, + target_user_id, content, &pdu.sender, invite_state, @@ -627,7 +627,7 @@ impl Service { .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) { let appservice_uid = appservice.registration.sender_localpart.as_str(); - if state_key_uid == appservice_uid { + if state_key_uid == &appservice_uid { self.services .sending .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index a699b8ee..436f633e 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -7,7 +7,7 @@ use conduwuit::{ }; use database::{Database, Deserialized, Map}; use futures::{Stream, StreamExt}; -use ruma::{ServerName, UserId}; +use ruma::{OwnedServerName, ServerName, UserId}; use super::{Destination, SendingEvent}; use crate::{globals, Dep}; @@ -209,7 +209,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se let mut parts = key[1..].splitn(3, |&b| b == 0xFF); let user = parts.next().expect("splitn always returns one element"); - let user_string = utils::string_from_bytes(user) + let user_string = utils::str_from_bytes(user) .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; let user_id = 
UserId::parse(user_string) .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; @@ -225,7 +225,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; ( - Destination::Push(user_id, pushkey_string), + Destination::Push(user_id.to_owned(), pushkey_string), if value.is_empty() { SendingEvent::Pdu(event.into()) } else { @@ -246,7 +246,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se })?; ( - Destination::Federation(ServerName::parse(server).map_err(|_| { + Destination::Federation(OwnedServerName::parse(&server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), if value.is_empty() { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 339236a5..bf4ebafb 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -883,7 +883,7 @@ impl Service { .get("room_id") .and_then(|val| RoomId::parse(val.as_str()?).ok()) { - match self.services.state.get_room_version(&room_id).await { + match self.services.state.get_room_version(room_id).await { | Ok(room_version_id) => match room_version_id { | RoomVersionId::V1 | RoomVersionId::V2 => {}, | _ => _ = pdu_json.remove("event_id"), diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 1d7f4248..fe064d9c 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -937,7 +937,7 @@ impl Service { let user_string = utils::string_from_bytes(user_bytes) .map_err(|e| err!(Database("User ID in openid_userid is invalid unicode. {e}")))?; - UserId::parse(user_string) + OwnedUserId::try_from(user_string) .map_err(|e| err!(Database("User ID in openid_userid is invalid. 
{e}"))) } From 9eb99f8070db4711845592287fd0a793fdd7cd7f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 29 Dec 2024 02:21:22 +0000 Subject: [PATCH 0456/1248] abstract async shutdown waiting loop Signed-off-by: Jason Volk --- src/core/server.rs | 12 +++++++++++- src/service/sync/watch.rs | 9 ++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index 2a70ae4b..8a4d9f66 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -1,5 +1,8 @@ use std::{ - sync::atomic::{AtomicBool, Ordering}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, time::SystemTime, }; @@ -102,6 +105,13 @@ impl Server { Ok(()) } + #[inline] + pub async fn until_shutdown(self: Arc) { + while self.running() { + self.signal.subscribe().recv().await.ok(); + } + } + #[inline] pub fn runtime(&self) -> &runtime::Handle { self.runtime diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs index 50959ded..2b351c3a 100644 --- a/src/service/sync/watch.rs +++ b/src/service/sync/watch.rs @@ -97,13 +97,8 @@ pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result { ); // Server shutdown - let server_shutdown = async move { - while self.services.server.running() { - self.services.server.signal.subscribe().recv().await.ok(); - } - }; - - futures.push(server_shutdown.boxed()); + let server_shutdown = self.services.server.clone().until_shutdown().boxed(); + futures.push(server_shutdown); if !self.services.server.running() { return Ok(()); } From 7c8eeaf4ea117645de6af9c8e9c1cd5a13703835 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 29 Dec 2024 04:23:12 +0000 Subject: [PATCH 0457/1248] simplify multi_get_or_create/related stream implementations Signed-off-by: Jason Volk --- src/service/rooms/short/mod.rs | 12 +++++------- src/service/rooms/state_compressor/mod.rs | 7 +------ 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 
f814411b..00c1d16c 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId}; -use conduwuit::{err, implement, utils, utils::stream::ReadyExt, Result}; +use conduwuit::{err, implement, utils, utils::IterStream, Result}; use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; @@ -65,16 +65,13 @@ pub fn multi_get_or_create_shorteventid<'a, I>( event_ids: I, ) -> impl Stream + Send + '_ where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, - ::Item: AsRef<[u8]> + Send + Sync + 'a, + I: Iterator + Clone + Debug + Send + 'a, { self.db .eventid_shorteventid .get_batch(event_ids.clone()) - .ready_scan(event_ids, |event_ids, result| { - event_ids.next().map(|event_id| (event_id, result)) - }) - .map(|(event_id, result)| match result { + .zip(event_ids.into_iter().stream()) + .map(|(result, event_id)| match result { | Ok(ref short) => utils::u64_from_u8(short), | Err(_) => self.create_shorteventid(event_id), }) @@ -90,6 +87,7 @@ fn create_shorteventid(&self, event_id: &EventId) -> ShortEventId { self.db .eventid_shorteventid .raw_aput::(event_id, short); + self.db .shorteventid_eventid .aput_raw::(short, event_id); diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index dbe0a386..a61a66a1 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -187,12 +187,7 @@ impl Service { state: I, ) -> impl Stream + Send + 'a where - I: Iterator - + Clone - + Debug - + ExactSizeIterator - + Send - + 'a, + I: Iterator + Clone + Debug + Send + 'a, { let event_ids = state.clone().map(at!(1)); From b56e480b3cd039b89af6295f68654b1d16efe9eb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Dec 2024 08:57:39 +0000 Subject: [PATCH 0458/1248] 
integrate some std io error kinds with our status code abstraction associate rocksdb error kinds with std io error kinds Signed-off-by: Jason Volk --- src/core/error/mod.rs | 1 + src/core/error/response.rs | 14 ++++++++++++++ src/database/util.rs | 26 ++++++++++++++++++++++++-- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index f38178e6..f1e3b924 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -178,6 +178,7 @@ impl Error { | Self::FeatureDisabled(..) => response::bad_request_code(&self.kind()), | Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), | Self::Conflict(_) => StatusCode::CONFLICT, + | Self::Io(error) => response::io_error_code(error.kind()), | _ => StatusCode::INTERNAL_SERVER_ERROR, } } diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 335fddab..ede1a05d 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -95,3 +95,17 @@ pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> Str pub(super) fn ruma_error_kind(e: &ruma::api::client::error::Error) -> &ErrorKind { e.error_kind().unwrap_or(&ErrorKind::Unknown) } + +pub(super) fn io_error_code(kind: std::io::ErrorKind) -> StatusCode { + use std::io::ErrorKind; + + match kind { + | ErrorKind::InvalidInput => StatusCode::BAD_REQUEST, + | ErrorKind::PermissionDenied => StatusCode::FORBIDDEN, + | ErrorKind::NotFound => StatusCode::NOT_FOUND, + | ErrorKind::TimedOut => StatusCode::GATEWAY_TIMEOUT, + | ErrorKind::FileTooLarge => StatusCode::PAYLOAD_TOO_LARGE, + | ErrorKind::StorageFull => StatusCode::INSUFFICIENT_STORAGE, + | _ => StatusCode::INTERNAL_SERVER_ERROR, + } +} diff --git a/src/database/util.rs b/src/database/util.rs index c2a020e3..caef94f1 100644 --- a/src/database/util.rs +++ b/src/database/util.rs @@ -1,4 +1,4 @@ -use conduwuit::{err, Result}; +use conduwuit::Result; use rocksdb::{Direction, ErrorKind, IteratorMode}; 
//#[cfg(debug_assertions)] @@ -51,6 +51,28 @@ pub(crate) fn or_else(e: rocksdb::Error) -> Result { Err pub(crate) fn is_incomplete(e: &rocksdb::Error) -> bool { e.kind() == ErrorKind::Incomplete } pub(crate) fn map_err(e: rocksdb::Error) -> conduwuit::Error { + let kind = io_error_kind(&e.kind()); let string = e.into_string(); - err!(Database(error!("{string}"))) + + std::io::Error::new(kind, string).into() +} + +fn io_error_kind(e: &ErrorKind) -> std::io::ErrorKind { + use std::io; + + match e { + | ErrorKind::NotFound => io::ErrorKind::NotFound, + | ErrorKind::Corruption => io::ErrorKind::InvalidData, + | ErrorKind::InvalidArgument => io::ErrorKind::InvalidInput, + | ErrorKind::Aborted => io::ErrorKind::Interrupted, + | ErrorKind::NotSupported => io::ErrorKind::Unsupported, + | ErrorKind::CompactionTooLarge => io::ErrorKind::FileTooLarge, + | ErrorKind::MergeInProgress | ErrorKind::Busy => io::ErrorKind::ResourceBusy, + | ErrorKind::Expired | ErrorKind::TimedOut => io::ErrorKind::TimedOut, + | ErrorKind::Incomplete | ErrorKind::TryAgain => io::ErrorKind::WouldBlock, + | ErrorKind::ColumnFamilyDropped + | ErrorKind::ShutdownInProgress + | ErrorKind::IOError + | ErrorKind::Unknown => io::ErrorKind::Other, + } } From a1fc4d49ac3c0c830d7d3b6edf0db37924e8088b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Dec 2024 12:35:31 +0000 Subject: [PATCH 0459/1248] reduce non-debug startup warnings Signed-off-by: Jason Volk --- src/core/config/check.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/config/check.rs b/src/core/config/check.rs index c242e2fd..06ae5ebb 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -4,12 +4,12 @@ use either::Either; use figment::Figment; use super::DEPRECATED_KEYS; -use crate::{debug, debug_info, error, info, warn, Config, Err, Result}; +use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result}; #[allow(clippy::cognitive_complexity)] pub fn check(config: &Config) 
-> Result<()> { if cfg!(debug_assertions) { - info!("Note: conduwuit was built without optimisations (i.e. debug build)"); + warn!("Note: conduwuit was built without optimisations (i.e. debug build)"); } warn_deprecated(config); @@ -23,7 +23,7 @@ pub fn check(config: &Config) -> Result<()> { } if cfg!(all(feature = "hardened_malloc", feature = "jemalloc")) { - info!( + debug_warn!( "hardened_malloc and jemalloc compile-time features are both enabled, this causes \ jemalloc to be used." ); From 9c6b5b44070c16a72a5149b4a0f104d2ef6ba9dd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Dec 2024 13:56:21 +0000 Subject: [PATCH 0460/1248] add faster interruption to resolver (#649) Signed-off-by: Jason Volk --- src/service/resolver/dns.rs | 38 +++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index c331dfba..5c9018ab 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -2,7 +2,7 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; use conduwuit::{err, Result, Server}; use futures::FutureExt; -use hickory_resolver::TokioAsyncResolver; +use hickory_resolver::{lookup_ip::LookupIp, TokioAsyncResolver}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use super::cache::{Cache, CachedOverride}; @@ -10,11 +10,13 @@ use super::cache::{Cache, CachedOverride}; pub struct Resolver { pub(crate) resolver: Arc, pub(crate) hooked: Arc, + server: Arc, } pub(crate) struct Hooked { resolver: Arc, cache: Arc, + server: Arc, } type ResolvingResult = Result>; @@ -72,14 +74,15 @@ impl Resolver { let resolver = Arc::new(TokioAsyncResolver::tokio(conf, opts)); Ok(Arc::new(Self { resolver: resolver.clone(), - hooked: Arc::new(Hooked { resolver, cache }), + hooked: Arc::new(Hooked { resolver, cache, server: server.clone() }), + server: server.clone(), })) } } impl Resolve for Resolver { fn resolve(&self, name: Name) -> Resolving { - 
resolve_to_reqwest(self.resolver.clone(), name).boxed() + resolve_to_reqwest(self.server.clone(), self.resolver.clone(), name).boxed() } } @@ -94,12 +97,29 @@ impl Resolve for Hooked { .cloned(); cached.map_or_else( - || resolve_to_reqwest(self.resolver.clone(), name).boxed(), + || resolve_to_reqwest(self.server.clone(), self.resolver.clone(), name).boxed(), |cached| cached_to_reqwest(cached).boxed(), ) } } +async fn resolve_to_reqwest( + server: Arc, + resolver: Arc, + name: Name, +) -> ResolvingResult { + use std::{io, io::ErrorKind::Interrupted}; + + let handle_shutdown = || Box::new(io::Error::new(Interrupted, "Server shutting down")); + let handle_results = + |results: LookupIp| Box::new(results.into_iter().map(|ip| SocketAddr::new(ip, 0))); + + tokio::select! { + results = resolver.lookup_ip(name.as_str()) => Ok(handle_results(results?)), + () = server.until_shutdown() => Err(handle_shutdown()), + } +} + async fn cached_to_reqwest(cached: CachedOverride) -> ResolvingResult { let addrs = cached .ips @@ -108,13 +128,3 @@ async fn cached_to_reqwest(cached: CachedOverride) -> ResolvingResult { Ok(Box::new(addrs)) } - -async fn resolve_to_reqwest(resolver: Arc, name: Name) -> ResolvingResult { - let results = resolver - .lookup_ip(name.as_str()) - .await? 
- .into_iter() - .map(|ip| SocketAddr::new(ip, 0)); - - Ok(Box::new(results)) -} From 203cf57fdf060c10d955fd6a57ade5ba9fcbb834 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Dec 2024 12:28:18 +0000 Subject: [PATCH 0461/1248] add console log event format hook Signed-off-by: Jason Volk --- src/core/debug.rs | 2 +- src/core/log/console.rs | 94 +++++++++++++++++++++++++++++++++++++++++ src/core/log/mod.rs | 2 + src/main/logging.rs | 12 +++--- 4 files changed, 104 insertions(+), 6 deletions(-) create mode 100644 src/core/log/console.rs diff --git a/src/core/debug.rs b/src/core/debug.rs index 123cf820..c904f23c 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -17,7 +17,7 @@ pub use crate::{result::DebugInspect, utils::debug::*}; macro_rules! debug_event { ( $level:expr, $($x:tt)+ ) => { if $crate::debug::logging() { - ::tracing::event!( $level, $($x)+ ) + ::tracing::event!( $level, _debug = true, $($x)+ ) } else { ::tracing::debug!( $($x)+ ) } diff --git a/src/core/log/console.rs b/src/core/log/console.rs new file mode 100644 index 00000000..0bc44fa7 --- /dev/null +++ b/src/core/log/console.rs @@ -0,0 +1,94 @@ +use tracing::{ + field::{Field, Visit}, + Event, Level, Subscriber, +}; +use tracing_subscriber::{ + field::RecordFields, + fmt, + fmt::{ + format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, + FmtContext, FormatEvent, FormatFields, + }, + registry::LookupSpan, +}; + +use crate::{Config, Result}; + +pub struct ConsoleFormat { + _compact: Format, + full: Format, + pretty: Format, +} + +struct ConsoleVisitor<'a> { + visitor: DefaultVisitor<'a>, +} + +impl ConsoleFormat { + #[must_use] + pub fn new(config: &Config) -> Self { + Self { + _compact: fmt::format().compact(), + + full: Format::::default() + .with_thread_ids(config.log_thread_ids) + .with_ansi(config.log_colors), + + pretty: fmt::format() + .pretty() + .with_ansi(config.log_colors) + .with_thread_names(true) + .with_thread_ids(true) + .with_target(true) + .with_file(true) + 
.with_line_number(true) + .with_source_location(true), + } + } +} + +impl FormatEvent for ConsoleFormat +where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, +{ + fn format_event( + &self, + ctx: &FmtContext<'_, S, N>, + writer: Writer<'_>, + event: &Event<'_>, + ) -> Result<(), std::fmt::Error> { + let is_debug = + cfg!(debug_assertions) && event.fields().any(|field| field.name() == "_debug"); + + match *event.metadata().level() { + | Level::ERROR if !is_debug => self.pretty.format_event(ctx, writer, event), + | _ => self.full.format_event(ctx, writer, event), + } + } +} + +impl<'writer> FormatFields<'writer> for ConsoleFormat { + fn format_fields(&self, writer: Writer<'writer>, fields: R) -> Result<(), std::fmt::Error> + where + R: RecordFields, + { + let mut visitor = ConsoleVisitor { + visitor: DefaultVisitor::<'_>::new(writer, true), + }; + + fields.record(&mut visitor); + + Ok(()) + } +} + +impl Visit for ConsoleVisitor<'_> { + fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { + if field.name().starts_with('_') { + return; + } + + self.visitor.record_debug(field, value); + } +} diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 48b7f0f3..0c51a383 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -2,12 +2,14 @@ pub mod capture; pub mod color; +mod console; pub mod fmt; pub mod fmt_span; mod reload; mod suppress; pub use capture::Capture; +pub use console::ConsoleFormat; pub use reload::{LogLevelReloadHandles, ReloadHandle}; pub use suppress::Suppress; pub use tracing::Level; diff --git a/src/main/logging.rs b/src/main/logging.rs index e8a18b10..85945e8a 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -3,11 +3,11 @@ use std::sync::Arc; use conduwuit::{ config::Config, debug_warn, err, - log::{capture, fmt_span, LogLevelReloadHandles}, + log::{capture, fmt_span, ConsoleFormat, LogLevelReloadHandles}, result::UnwrapOrErr, Result, }; -use 
tracing_subscriber::{layer::SubscriberExt, reload, EnvFilter, Layer, Registry}; +use tracing_subscriber::{fmt, layer::SubscriberExt, reload, EnvFilter, Layer, Registry}; #[cfg(feature = "perf_measurements")] pub(crate) type TracingFlameGuard = @@ -26,10 +26,12 @@ pub(crate) fn init( .with_regex(config.log_filter_regex) .parse(&config.log) .map_err(|e| err!(Config("log", "{e}.")))?; - let console_layer = tracing_subscriber::fmt::Layer::new() - .with_ansi(config.log_colors) + let console_layer = fmt::Layer::new() .with_span_events(console_span_events) - .with_thread_ids(config.log_thread_ids); + .event_format(ConsoleFormat::new(config)) + .fmt_fields(ConsoleFormat::new(config)) + .map_writer(|w| w); + let (console_reload_filter, console_reload_handle) = reload::Layer::new(console_filter.clone()); reload_handles.add("console", Box::new(console_reload_handle)); From 2bbb28bb888a12644e9f19843b740294f55f87b8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Dec 2024 09:06:47 +0000 Subject: [PATCH 0462/1248] get device info with libc using major/minor Signed-off-by: Jason Volk --- Cargo.lock | 75 +++------------------------------- Cargo.toml | 6 +-- src/core/Cargo.toml | 2 +- src/core/utils/sys/storage.rs | 72 +++++++++++++++++++++----------- src/database/pool/configure.rs | 12 ++++-- 5 files changed, 64 insertions(+), 103 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5b00a313..0c1890c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -726,6 +726,7 @@ dependencies = [ "image", "ipaddress", "itertools 0.13.0", + "libc", "libloading", "log", "nix", @@ -739,7 +740,6 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - "sysinfo", "thiserror 2.0.7", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", @@ -1674,7 +1674,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows 0.52.0", + "windows", ] [[package]] @@ -4076,18 +4076,6 @@ dependencies = [ "syn 2.0.90", ] -[[package]] -name = "sysinfo" 
-version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "948512566b1895f93b1592c7574baeb2de842f224f2aab158799ecadb8ebbb46" -dependencies = [ - "core-foundation-sys", - "libc", - "serde", - "windows 0.57.0", -] - [[package]] name = "tendril" version = "0.4.3" @@ -4945,17 +4933,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core 0.52.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows" -version = "0.57.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" -dependencies = [ - "windows-core 0.57.0", + "windows-core", "windows-targets 0.52.6", ] @@ -4968,60 +4946,17 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-core" -version = "0.57.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" -dependencies = [ - "windows-implement", - "windows-interface", - "windows-result 0.1.2", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-implement" -version = "0.57.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.90", -] - -[[package]] -name = "windows-interface" -version = "0.57.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.90", -] - [[package]] name = "windows-registry" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" dependencies = 
[ - "windows-result 0.2.0", + "windows-result", "windows-strings", "windows-targets 0.52.6", ] -[[package]] -name = "windows-result" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-result" version = "0.2.0" @@ -5037,7 +4972,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result 0.2.0", + "windows-result", "windows-targets 0.52.6", ] diff --git a/Cargo.toml b/Cargo.toml index 24e6eb79..022baaa3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -498,10 +498,8 @@ version = "1.3.0" [workspace.dependencies.core_affinity] version = "0.8.1" -[workspace.dependencies.sysinfo] -version = "0.33.0" -default-features = false -features = ["disk", "serde"] +[workspace.dependencies.libc] +version = "0.2" # # Patches diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index d249f647..2873a05d 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -75,6 +75,7 @@ http.workspace = true image.workspace = true ipaddress.workspace = true itertools.workspace = true +libc.workspace = true libloading.workspace = true log.workspace = true rand.workspace = true @@ -87,7 +88,6 @@ serde_json.workspace = true serde_regex.workspace = true serde_yaml.workspace = true serde.workspace = true -sysinfo.workspace = true thiserror.workspace = true tikv-jemallocator.optional = true tikv-jemallocator.workspace = true diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index 8dc75236..25b17904 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -1,14 +1,20 @@ //! 
System utilities related to devices/peripherals use std::{ - ffi::{OsStr, OsString}, + ffi::OsStr, fs, fs::{read_to_string, FileType}, iter::IntoIterator, - path::Path, + path::{Path, PathBuf}, }; -use crate::{result::FlatOk, Result}; +use libc::dev_t; + +use crate::{ + result::FlatOk, + utils::{result::LogDebugErr, string::SplitInfallible}, + Result, +}; /// Device characteristics useful for random access throughput #[derive(Clone, Debug, Default)] @@ -35,16 +41,12 @@ pub struct Queue { /// Get device characteristics useful for random access throughput by name. #[must_use] -pub fn parallelism(name: &OsStr) -> Parallelism { - let name = name - .to_str() - .expect("device name expected to be utf-8 representable"); +pub fn parallelism(path: &Path) -> Parallelism { + let dev_id = dev_from_path(path).log_debug_err().unwrap_or_default(); - let block_path = Path::new("/").join("sys/").join("block/"); + let mq_path = block_path(dev_id).join("mq/"); - let mq_path = Path::new(&block_path).join(format!("{name}/mq/")); - - let nr_requests_path = Path::new(&block_path).join(format!("{name}/queue/nr_requests")); + let nr_requests_path = block_path(dev_id).join("queue/nr_requests"); Parallelism { nr_requests: read_to_string(&nr_requests_path) @@ -96,17 +98,39 @@ fn queue_parallelism(dir: &Path) -> Queue { } } -/// Get the name of the device on which Path is mounted. -#[must_use] -pub fn name_from_path(path: &Path) -> Option { - sysinfo::Disks::new_with_refreshed_list() - .into_iter() - .filter(|disk| path.starts_with(disk.mount_point())) - .max_by(|a, b| { - let a = a.mount_point().ancestors().count(); - let b = b.mount_point().ancestors().count(); - a.cmp(&b) - }) - .map(|disk| Path::new(disk.name())) - .and_then(|path| path.file_name().map(ToOwned::to_owned)) +/// Get the name of the block device on which Path is mounted. 
+pub fn name_from_path(path: &Path) -> Result { + use std::io::{Error, ErrorKind::NotFound}; + + let (major, minor) = dev_from_path(path)?; + let path = block_path((major, minor)).join("uevent"); + read_to_string(path) + .iter() + .map(String::as_str) + .flat_map(str::lines) + .map(|line| line.split_once_infallible("=")) + .find_map(|(key, val)| (key == "DEVNAME").then_some(val)) + .ok_or_else(|| Error::new(NotFound, "DEVNAME not found.")) + .map_err(Into::into) + .map(Into::into) +} + +/// Get the (major, minor) of the block device on which Path is mounted. +#[allow(clippy::useless_conversion, clippy::unnecessary_fallible_conversions)] +pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { + #[cfg(target_family = "unix")] + use std::os::unix::fs::MetadataExt; + + let stat = fs::metadata(path)?; + let dev_id = stat.dev().try_into()?; + + // SAFETY: These functions may not need to be marked as unsafe. + // see: https://github.com/rust-lang/libc/issues/3759 + let (major, minor) = unsafe { (libc::major(dev_id), libc::minor(dev_id)) }; + + Ok((major.try_into()?, minor.try_into()?)) +} + +fn block_path((major, minor): (dev_t, dev_t)) -> PathBuf { + format!("/sys/dev/block/{major}:{minor}/").into() } diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index 9361a534..2a192a9c 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -1,9 +1,10 @@ -use std::{ffi::OsStr, sync::Arc}; +use std::{path::PathBuf, sync::Arc}; use conduwuit::{ debug, debug_info, expected, utils::{ math::usize_from_f64, + result::LogDebugErr, stream, stream::WIDTH_LIMIT, sys::{compute::is_core_available, storage}, @@ -20,8 +21,12 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) // This finds the block device and gathers all the properties we need. 
let (device_name, device_prop) = config .db_pool_affinity - .and_then(|| storage::name_from_path(&config.database_path)) - .map(|device_name| (device_name.clone(), storage::parallelism(&device_name))) + .and_then(|| { + let path: PathBuf = config.database_path.clone(); + let name = storage::name_from_path(&path).log_debug_err().ok(); + let prop = storage::parallelism(&path); + name.map(|name| (name, prop)) + }) .unzip(); // The default worker count is masked-on if we didn't find better information. @@ -104,7 +109,6 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) debug_info!( device_name = ?device_name .as_deref() - .and_then(OsStr::to_str) .unwrap_or("None"), ?worker_counts, ?queue_sizes, From c5b94be5b8d1816c3c03f4a7c15a702b7b8e5215 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Dec 2024 17:01:27 +0000 Subject: [PATCH 0463/1248] remove dev_release_log_level cfg Signed-off-by: Jason Volk --- src/admin/Cargo.toml | 1 - src/api/Cargo.toml | 1 - src/core/Cargo.toml | 1 - src/core/debug.rs | 7 +------ src/database/Cargo.toml | 1 - src/main/Cargo.toml | 8 -------- src/router/Cargo.toml | 1 - src/service/Cargo.toml | 1 - 8 files changed, 1 insertion(+), 20 deletions(-) diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml index 3f8fbf79..ca865969 100644 --- a/src/admin/Cargo.toml +++ b/src/admin/Cargo.toml @@ -17,7 +17,6 @@ crate-type = [ ] [features] -#dev_release_log_level = [] release_max_log_level = [ "tracing/max_level_trace", "tracing/release_max_level_info", diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index f3a84664..1bc73624 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -18,7 +18,6 @@ crate-type = [ [features] element_hacks = [] -#dev_release_log_level = [] release_max_log_level = [ "tracing/max_level_trace", "tracing/release_max_level_info", diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 2873a05d..49874c9c 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -17,7 +17,6 @@ crate-type = [ ] 
[features] -#dev_release_log_level = [] release_max_log_level = [ "tracing/max_level_trace", "tracing/release_max_level_info", diff --git a/src/core/debug.rs b/src/core/debug.rs index c904f23c..aebfc833 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -10,9 +10,6 @@ pub use crate::{result::DebugInspect, utils::debug::*}; /// Log event at given level in debug-mode (when debug-assertions are enabled). /// In release-mode it becomes DEBUG level, and possibly subject to elision. -/// -/// Release-mode can be simulated in debug-mode builds by enabling the feature -/// 'dev_release_log_level'. #[macro_export] macro_rules! debug_event { ( $level:expr, $($x:tt)+ ) => { @@ -98,6 +95,4 @@ pub fn type_name() -> &'static str { std::any::type_name::() } #[must_use] #[inline] -pub const fn logging() -> bool { - cfg!(debug_assertions) && cfg!(not(feature = "dev_release_log_level")) -} +pub const fn logging() -> bool { cfg!(debug_assertions) } diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index efd18a1a..09eedaf4 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -17,7 +17,6 @@ crate-type = [ ] [features] -#dev_release_log_level = [] release_max_log_level = [ "tracing/max_level_trace", "tracing/release_max_level_info", diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index eeb6f2bc..a6421b34 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -55,14 +55,6 @@ brotli_compression = [ console = [ "conduwuit-service/console", ] -#dev_release_log_level = [ -# "conduwuit-admin/dev_release_log_level", -# "conduwuit-api/dev_release_log_level", -# "conduwuit-core/dev_release_log_level", -# "conduwuit-database/dev_release_log_level", -# "conduwuit-router/dev_release_log_level", -# "conduwuit-service/dev_release_log_level", -#] direct_tls = [ "conduwuit-router/direct_tls" ] diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 1b2c248e..1623590b 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -17,7 +17,6 
@@ crate-type = [ ] [features] -#dev_release_log_level = [] release_max_log_level = [ "tracing/max_level_trace", "tracing/release_max_level_info", diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 85c4ead9..26f737ee 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -24,7 +24,6 @@ console = [ "dep:rustyline-async", "dep:termimad", ] -#dev_release_log_level = [] element_hacks = [] gzip_compression = [ "reqwest/gzip", From 3ffe2cd56e27aaa34c4df94387be1334d5c379c1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Dec 2024 21:26:38 +0000 Subject: [PATCH 0464/1248] reduce backoff warning from infolog Signed-off-by: Jason Volk --- .../rooms/event_handler/fetch_and_handle_outliers.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 5c561789..2f6940ed 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -5,7 +5,7 @@ use std::{ }; use conduwuit::{ - debug, debug_error, implement, info, pdu, trace, + debug, debug_error, debug_warn, implement, pdu, trace, utils::math::continue_exponential_backoff_secs, warn, PduEvent, }; use futures::TryFutureExt; @@ -83,7 +83,11 @@ pub(super) async fn fetch_and_handle_outliers<'a>( time.elapsed(), *tries, ) { - info!("Backing off from {next_id}"); + debug_warn!( + tried = ?*tries, + elapsed = ?time.elapsed(), + "Backing off from {next_id}", + ); continue; } } From 0e110bb48b0d49e24528395ae480f3b74bb62da3 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 30 Dec 2024 18:33:55 -0500 Subject: [PATCH 0465/1248] fix flipped room ver check on send_join Signed-off-by: June Clementine Strawberry --- src/api/server/send_join.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs 
index 6ac84907..fe0277d1 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -141,7 +141,7 @@ async fn create_join_event( if let Some(authorising_user) = content.join_authorized_via_users_server { use ruma::RoomVersionId::*; - if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { + if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { return Err!(Request(InvalidParam( "Room version {room_version_id} does not support restricted rooms but \ join_authorised_via_users_server ({authorising_user}) was found in the event." From 0b2fca5ad9caa4e0325d9653722ee14218cb5f3c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 1 Jan 2025 10:18:38 +0000 Subject: [PATCH 0466/1248] add complement diff from e2f6753a3280 --- tests/test_results/complement/test_results.jsonl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 0db37515..26ae6931 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -207,16 +207,16 @@ {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room"} +{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited"} +{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinFailOver"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room"} 
{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} +{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited"} +{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUser"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsSpacesSummaryFederation"} From 1792711d0967f34a7a07c9e7d832d4447f433987 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 30 Dec 2024 09:35:44 +0000 Subject: [PATCH 0467/1248] support core affinity mask Signed-off-by: Jason Volk --- Cargo.lock | 3 +-- Cargo.toml | 4 ++++ src/core/utils/sys/compute.rs | 43 ++++++++++++++++++++++++----------- src/database/pool.rs | 4 ++-- src/main/runtime.rs | 4 ++-- 5 files changed, 39 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0c1890c4..7f9ef547 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -937,8 +937,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_affinity" version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622892f5635ce1fc38c8f16dfc938553ed64af482edb5e150bf4caedbfcb2304" +source = 
"git+https://github.com/jevolk/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" dependencies = [ "libc", "num_cpus", diff --git a/Cargo.toml b/Cargo.toml index 022baaa3..c66dfcff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -535,6 +535,10 @@ rev = "96d7e0fc026d8f708b19bc9267a382676a50354c" git = "https://github.com/jevolk/async-channel" rev = "fefa543ca5eddf21237d75776fce98b7e09e924a" +[patch.crates-io.core_affinity] +git = "https://github.com/jevolk/core_affinity_rs" +rev = "9c8e51510c35077df888ee72a36b4b05637147da" + # # Our crates # diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs index e947b579..9e90fc90 100644 --- a/src/core/utils/sys/compute.rs +++ b/src/core/utils/sys/compute.rs @@ -1,6 +1,6 @@ //! System utilities related to compute/processing -use std::{cell::Cell, sync::LazyLock}; +use std::{cell::Cell, fmt::Debug, sync::LazyLock}; use crate::is_equal_to; @@ -22,33 +22,48 @@ thread_local! { /// Set the core affinity for this thread. The ID should be listed in /// CORES_AVAILABLE. Empty input is a no-op; prior affinity unchanged. 
-pub fn set_affinity(ids: I) +#[tracing::instrument( + level = "debug", + skip_all, + fields( + id = ?std::thread::current().id(), + name = %std::thread::current().name().unwrap_or("None"), + set = ?ids.by_ref().collect::>(), + CURRENT = %format!("[b{:b}]", CORE_AFFINITY.get()), + AVAILABLE = %format!("[b{:b}]", *CORES_AVAILABLE), + ), +)] +pub fn set_affinity(mut ids: I) where - I: Iterator, + I: Iterator + Clone + Debug, { - use core_affinity::{set_for_current, CoreId}; + use core_affinity::{set_each_for_current, set_for_current, CoreId}; - let mask: u128 = ids.fold(0, |mask, id| { + let n = ids.clone().count(); + let mask: u128 = ids.clone().fold(0, |mask, id| { debug_assert!(is_core_available(id), "setting affinity to unavailable core"); - set_for_current(CoreId { id }); mask | (1 << id) }); + if n > 1 { + set_each_for_current(ids.map(|id| CoreId { id })); + } else if n > 0 { + set_for_current(CoreId { id: ids.next().expect("n > 0") }); + } + if mask.count_ones() > 0 { CORE_AFFINITY.replace(mask); } } /// Get the core affinity for this thread. -pub fn get_affinity() -> impl Iterator { - (0..128).filter(|&i| ((CORE_AFFINITY.get() & (1 << i)) != 0)) -} +pub fn get_affinity() -> impl Iterator { iter_bits(CORE_AFFINITY.get()) } /// Gets the ID of the nth core available. This bijects our sequence of cores to /// actual ID's which may have gaps for cores which are not available. #[inline] #[must_use] -pub fn get_core_available(i: usize) -> Option { cores_available().nth(i) } +pub fn nth_core_available(i: usize) -> Option { cores_available().nth(i) } /// Determine if core (by id) is available to the process. #[inline] @@ -57,9 +72,7 @@ pub fn is_core_available(id: usize) -> bool { cores_available().any(is_equal_to! /// Get the list of cores available. The values were recorded at program start. 
#[inline] -pub fn cores_available() -> impl Iterator { - (0..128).filter(|&i| ((*CORES_AVAILABLE & (1 << i)) != 0)) -} +pub fn cores_available() -> impl Iterator { iter_bits(*CORES_AVAILABLE) } /// Get the number of threads which could execute in parallel based on the /// hardware and administrative constraints of this system. This value should be @@ -72,3 +85,7 @@ pub fn parallelism() -> usize { .expect("Unable to query for available parallelism.") .get() } + +fn iter_bits(v: u128) -> impl Iterator { + (0..128).filter(move |&i| (v & (1 << i)) != 0) +} diff --git a/src/database/pool.rs b/src/database/pool.rs index 1c55c456..8182f217 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -13,7 +13,7 @@ use conduwuit::{ debug, debug_warn, defer, err, implement, result::DebugInspect, trace, - utils::sys::compute::{get_affinity, get_core_available, set_affinity}, + utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, Result, Server, }; use futures::{channel::oneshot, TryFutureExt}; @@ -270,7 +270,7 @@ fn worker_init(&self, id: usize) { .enumerate() .filter(|_| self.queues.len() > 1) .filter_map(|(core_id, &queue_id)| (group == queue_id).then_some(core_id)) - .filter_map(get_core_available); + .filter_map(nth_core_available); // affinity is empty (no-op) if there's only one queue set_affinity(affinity.clone()); diff --git a/src/main/runtime.rs b/src/main/runtime.rs index bfd2ef74..b9dfc866 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -9,7 +9,7 @@ use std::{ }; use conduwuit::{ - utils::sys::compute::{get_core_available, set_affinity}, + utils::sys::compute::{nth_core_available, set_affinity}, Result, }; use tokio::runtime::Builder; @@ -100,7 +100,7 @@ fn set_worker_affinity() { return; } - let Some(id) = get_core_available(i) else { + let Some(id) = nth_core_available(i) else { return; }; From 2259e2c82f890415b84e5999242c3ee102973d23 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 1 Jan 2025 06:08:20 +0000 Subject: [PATCH 
0468/1248] batch queries to maximize throughput query-side streams for first level of callsites Signed-off-by: Jason Volk --- conduwuit-example.toml | 13 ++++ src/core/config/mod.rs | 17 ++++++ src/core/utils/stream/band.rs | 26 +++++++- src/core/utils/stream/mod.rs | 5 +- src/database/map/get.rs | 10 +++- src/database/map/get_batch.rs | 66 +++++++++++++------- src/database/pool.rs | 76 ++++++++++++++++++------ src/database/pool/configure.rs | 11 +++- src/main/server.rs | 1 + src/service/rooms/auth_chain/mod.rs | 2 +- src/service/rooms/short/mod.rs | 12 ++-- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/state_accessor/data.rs | 6 +- 13 files changed, 191 insertions(+), 56 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index c64b18e8..526e9fe2 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1457,6 +1457,19 @@ # #stream_width_scale = 1.0 +# Sets the initial amplification factor. This controls batch sizes of +# requests made by each pool worker, multiplying the throughput of each +# stream. This value is somewhat abstract from specific hardware +# characteristics and can be significantly larger than any thread count or +# queue size. This is because each database query may require several +# index lookups, thus many database queries in a batch may make progress +# independently while also sharing index and data blocks which may or may +# not be cached. It is worthwhile to submit huge batches to reduce +# complexity. The maximum value is 32768, though sufficient hardware is +# still advised for that. +# +#stream_amplification = 1024 + # Number of sender task workers; determines sender parallelism. Default is # '0' which means the value is determined internally, likely matching the # number of tokio worker-threads or number of cores, etc. 
Override by diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e1f578c8..b1ede844 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1653,6 +1653,21 @@ pub struct Config { #[serde(default = "default_stream_width_scale")] pub stream_width_scale: f32, + /// Sets the initial amplification factor. This controls batch sizes of + /// requests made by each pool worker, multiplying the throughput of each + /// stream. This value is somewhat abstract from specific hardware + /// characteristics and can be significantly larger than any thread count or + /// queue size. This is because each database query may require several + /// index lookups, thus many database queries in a batch may make progress + /// independently while also sharing index and data blocks which may or may + /// not be cached. It is worthwhile to submit huge batches to reduce + /// complexity. The maximum value is 32768, though sufficient hardware is + /// still advised for that. + /// + /// default: 1024 + #[serde(default = "default_stream_amplification")] + pub stream_amplification: usize, + /// Number of sender task workers; determines sender parallelism. Default is /// '0' which means the value is determined internally, likely matching the /// number of tokio worker-threads or number of cores, etc. Override by @@ -2467,3 +2482,5 @@ fn default_db_pool_queue_mult() -> usize { 4 } fn default_stream_width_default() -> usize { 32 } fn default_stream_width_scale() -> f32 { 1.0 } + +fn default_stream_amplification() -> usize { 1024 } diff --git a/src/core/utils/stream/band.rs b/src/core/utils/stream/band.rs index 76f2a85a..45ad7d94 100644 --- a/src/core/utils/stream/band.rs +++ b/src/core/utils/stream/band.rs @@ -3,9 +3,15 @@ use std::sync::atomic::{AtomicUsize, Ordering}; /// Stream concurrency factor; this is a live value. static WIDTH: AtomicUsize = AtomicUsize::new(32); -/// Practicable limits on the stream width +/// Stream throughput amplifier; this is a live value. 
+static AMPLIFICATION: AtomicUsize = AtomicUsize::new(1024); + +/// Practicable limits on the stream width. pub const WIDTH_LIMIT: (usize, usize) = (1, 1024); +/// Practicable limits on the stream amplifier. +pub const AMPLIFICATION_LIMIT: (usize, usize) = (32, 32768); + /// Sets the live concurrency factor. The first return value is the previous /// width which was replaced. The second return value is the value which was set /// after any applied limits. @@ -14,6 +20,14 @@ pub fn set_width(width: usize) -> (usize, usize) { (WIDTH.swap(width, Ordering::Relaxed), width) } +/// Sets the live concurrency amplification. The first return value is the +/// previous width which was replaced. The second return value is the value +/// which was set after any applied limits. +pub fn set_amplification(width: usize) -> (usize, usize) { + let width = width.clamp(AMPLIFICATION_LIMIT.0, AMPLIFICATION_LIMIT.1); + (AMPLIFICATION.swap(width, Ordering::Relaxed), width) +} + /// Used by stream operations where the concurrency factor hasn't been manually /// supplied by the caller (most uses). Instead we provide a default value which /// is adjusted at startup for the specific system and also dynamically. @@ -24,3 +38,13 @@ pub fn automatic_width() -> usize { debug_assert!(width <= WIDTH_LIMIT.1, "WIDTH is probably too large"); width } + +/// Used by stream operations where the amplification hasn't been manually +/// supplied by the caller. Instead we provide a computed value. 
+#[inline] +pub fn automatic_amplification() -> usize { + let amplification = AMPLIFICATION.load(Ordering::Relaxed); + debug_assert!(amplification >= AMPLIFICATION_LIMIT.0, "amplification is too low"); + debug_assert!(amplification <= AMPLIFICATION_LIMIT.1, "amplification is too high"); + amplification +} diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index a5ef17c5..61ae993d 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -10,7 +10,10 @@ mod try_broadband; mod try_ready; mod wideband; -pub use band::{automatic_width, set_width, WIDTH_LIMIT}; +pub use band::{ + automatic_amplification, automatic_width, set_amplification, set_width, AMPLIFICATION_LIMIT, + WIDTH_LIMIT, +}; pub use broadband::BroadbandExt; pub use cloned::Cloned; pub use expect::TryExpect; diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 94a6b727..e64ef2ec 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -2,7 +2,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; -use futures::{Future, FutureExt}; +use futures::{future::ready, Future, FutureExt, TryFutureExt}; use serde::Serialize; use tokio::task; @@ -79,11 +79,15 @@ where debug_assert!(matches!(cached, Ok(None)), "expected status Incomplete"); let cmd = Get { map: self.clone(), - key: key.as_ref().into(), + key: [key.as_ref().into()].into(), res: None, }; - self.db.pool.execute_get(cmd).boxed() + self.db + .pool + .execute_get(cmd) + .and_then(|mut res| ready(res.remove(0))) + .boxed() } /// Fetch a value from the database into cache, returning a reference-handle. 
diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index 631692fe..452697f1 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -2,42 +2,68 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{ err, implement, - utils::{stream::automatic_width, IterStream}, + utils::{ + stream::{automatic_amplification, automatic_width, WidebandExt}, + IterStream, + }, Result, }; -use futures::{Stream, StreamExt}; +use futures::{Stream, StreamExt, TryStreamExt}; use serde::Serialize; -use crate::{util::map_err, Handle}; +use crate::{keyval::KeyBuf, ser, util::map_err, Handle}; #[implement(super::Map)] #[tracing::instrument(skip(self, keys), level = "trace")] -pub fn aqry_batch<'b, 'a: 'b, const MAX: usize, I, K>( +pub fn qry_batch<'a, S, K>( self: &'a Arc, - keys: I, -) -> impl Stream>> + Send + 'a + keys: S, +) -> impl Stream>> + Send + 'a where - I: Iterator + Send + 'a, - K: Serialize + ?Sized + Debug + 'b, + S: Stream + Send + 'a, + K: Serialize + Debug + 'a, { - keys.stream() - .map(move |key| self.aqry::(&key)) - .buffered(automatic_width()) + use crate::pool::Get; + + keys.ready_chunks(automatic_amplification()) + .widen_then(automatic_width(), |chunk| { + let keys = chunk + .iter() + .map(ser::serialize_to::) + .map(|result| result.expect("failed to serialize query key")) + .map(Into::into) + .collect(); + + self.db + .pool + .execute_get(Get { map: self.clone(), key: keys, res: None }) + }) + .map_ok(|results| results.into_iter().stream()) + .try_flatten() } #[implement(super::Map)] #[tracing::instrument(skip(self, keys), level = "trace")] -pub fn get_batch<'a, I, K>( +pub fn get_batch<'a, S, K>( self: &'a Arc, - keys: I, + keys: S, ) -> impl Stream>> + Send + 'a where - I: Iterator + Debug + Send + 'a, - K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, + S: Stream + Send + 'a, + K: AsRef<[u8]> + Send + Sync + 'a, { - keys.stream() - .map(move |key| self.get(key)) - .buffered(automatic_width()) + use 
crate::pool::Get; + + keys.ready_chunks(automatic_amplification()) + .widen_then(automatic_width(), |chunk| { + self.db.pool.execute_get(Get { + map: self.clone(), + key: chunk.iter().map(AsRef::as_ref).map(Into::into).collect(), + res: None, + }) + }) + .map_ok(|results| results.into_iter().stream()) + .try_flatten() } #[implement(super::Map)] @@ -47,8 +73,8 @@ pub(crate) fn get_batch_blocking<'a, I, K>( keys: I, ) -> impl Iterator>> + Send where - I: Iterator + ExactSizeIterator + Debug + Send, - K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a, + I: Iterator + ExactSizeIterator + Send, + K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, { // Optimization can be `true` if key vector is pre-sorted **by the column // comparator**. diff --git a/src/database/pool.rs b/src/database/pool.rs index 8182f217..bcf20de8 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -19,6 +19,7 @@ use conduwuit::{ use futures::{channel::oneshot, TryFutureExt}; use oneshot::Sender as ResultSender; use rocksdb::Direction; +use smallvec::SmallVec; use tokio::task::JoinSet; use self::configure::configure; @@ -42,11 +43,11 @@ pub(crate) enum Cmd { Iter(Seek), } -/// Point-query +/// Multi-point-query pub(crate) struct Get { pub(crate) map: Arc, - pub(crate) key: KeyBuf, - pub(crate) res: Option>>>, + pub(crate) key: BatchQuery<'static>, + pub(crate) res: Option>>, } /// Iterator-seek. 
@@ -60,8 +61,13 @@ pub(crate) struct Seek { pub(crate) res: Option>>, } +pub(crate) type BatchQuery<'a> = SmallVec<[KeyBuf; BATCH_INLINE]>; +pub(crate) type BatchResult<'a> = SmallVec<[ResultHandle<'a>; BATCH_INLINE]>; +pub(crate) type ResultHandle<'a> = Result>; + const WORKER_LIMIT: (usize, usize) = (1, 1024); const QUEUE_LIMIT: (usize, usize) = (1, 2048); +const BATCH_INLINE: usize = 1; #[implement(Pool)] pub(crate) async fn new(server: &Arc) -> Result> { @@ -179,22 +185,24 @@ fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: &[Receiver] #[implement(Pool)] #[tracing::instrument(level = "trace", name = "get", skip(self, cmd))] -pub(crate) async fn execute_get(&self, mut cmd: Get) -> Result> { +pub(crate) async fn execute_get(self: &Arc, mut cmd: Get) -> Result> { let (send, recv) = oneshot::channel(); _ = cmd.res.insert(send); let queue = self.select_queue(); self.execute(queue, Cmd::Get(cmd)) - .and_then(|()| { - recv.map_ok(into_recv_get_result) + .and_then(move |()| { + recv.map_ok(into_recv_get) .map_err(|e| err!(error!("recv failed {e:?}"))) }) - .await? 
+ .await + .map(Into::into) + .map_err(Into::into) } #[implement(Pool)] #[tracing::instrument(level = "trace", name = "iter", skip(self, cmd))] -pub(crate) async fn execute_iter(&self, mut cmd: Seek) -> Result> { +pub(crate) async fn execute_iter(self: &Arc, mut cmd: Seek) -> Result> { let (send, recv) = oneshot::channel(); _ = cmd.res.insert(send); @@ -282,7 +290,7 @@ fn worker_init(&self, id: usize) { } #[implement(Pool)] -fn worker_loop(&self, recv: &Receiver) { +fn worker_loop(self: &Arc, recv: &Receiver) { // initial +1 needed prior to entering wait self.busy.fetch_add(1, Ordering::Relaxed); @@ -302,18 +310,19 @@ fn worker_loop(&self, recv: &Receiver) { busy = self.busy.fetch_sub(1, Ordering::Relaxed) - 1, ), )] -fn worker_wait(&self, recv: &Receiver) -> Result { +fn worker_wait(self: &Arc, recv: &Receiver) -> Result { recv.recv_blocking().debug_inspect(|_| { self.busy.fetch_add(1, Ordering::Relaxed); }) } #[implement(Pool)] -fn worker_handle(&self, cmd: Cmd) { +fn worker_handle(self: &Arc, cmd: Cmd) { match cmd { - | Cmd::Get(cmd) => self.handle_get(cmd), + | Cmd::Get(cmd) if cmd.key.len() == 1 => self.handle_get(cmd), + | Cmd::Get(cmd) => self.handle_batch(cmd), | Cmd::Iter(cmd) => self.handle_iter(cmd), - } + }; } #[implement(Pool)] @@ -331,12 +340,43 @@ fn handle_iter(&self, mut cmd: Seek) { } let from = cmd.key.as_deref().map(Into::into); + let result = match cmd.dir { | Direction::Forward => cmd.state.init_fwd(from), | Direction::Reverse => cmd.state.init_rev(from), }; let chan_result = chan.send(into_send_seek(result)); + + let _chan_sent = chan_result.is_ok(); +} + +#[implement(Pool)] +#[tracing::instrument( + name = "batch", + level = "trace", + skip_all, + fields( + %cmd.map, + keys = %cmd.key.len(), + ), +)] +fn handle_batch(self: &Arc, mut cmd: Get) { + debug_assert!(cmd.key.len() > 1, "should have more than one key"); + debug_assert!(!cmd.key.iter().any(SmallVec::is_empty), "querying for empty key"); + + let chan = cmd.res.take().expect("missing 
result channel"); + + if chan.is_canceled() { + return; + } + + let keys = cmd.key.iter().map(Into::into); + + let result: SmallVec<_> = cmd.map.get_batch_blocking(keys).collect(); + + let chan_result = chan.send(into_send_get(result)); + let _chan_sent = chan_result.is_ok(); } @@ -348,7 +388,7 @@ fn handle_iter(&self, mut cmd: Seek) { fields(%cmd.map), )] fn handle_get(&self, mut cmd: Get) { - debug_assert!(!cmd.key.is_empty(), "querying for empty key"); + debug_assert!(!cmd.key[0].is_empty(), "querying for empty key"); // Obtain the result channel. let chan = cmd.res.take().expect("missing result channel"); @@ -362,16 +402,16 @@ fn handle_get(&self, mut cmd: Get) { // Perform the actual database query. We reuse our database::Map interface but // limited to the blocking calls, rather than creating another surface directly // with rocksdb here. - let result = cmd.map.get_blocking(&cmd.key); + let result = cmd.map.get_blocking(&cmd.key[0]); // Send the result back to the submitter. - let chan_result = chan.send(into_send_get_result(result)); + let chan_result = chan.send(into_send_get([result].into())); // If the future was dropped during the query this will fail acceptably. let _chan_sent = chan_result.is_ok(); } -fn into_send_get_result(result: Result>) -> Result> { +fn into_send_get(result: BatchResult<'_>) -> BatchResult<'static> { // SAFETY: Necessary to send the Handle (rust_rocksdb::PinnableSlice) through // the channel. The lifetime on the handle is a device by rust-rocksdb to // associate a database lifetime with its assets. The Handle must be dropped @@ -379,7 +419,7 @@ fn into_send_get_result(result: Result>) -> Result> { unsafe { std::mem::transmute(result) } } -fn into_recv_get_result(result: Result>) -> Result> { +fn into_recv_get<'a>(result: BatchResult<'static>) -> BatchResult<'a> { // SAFETY: This is to receive the Handle from the channel. 
unsafe { std::mem::transmute(result) } } diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index 2a192a9c..6cac58e7 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -6,7 +6,7 @@ use conduwuit::{ math::usize_from_f64, result::LogDebugErr, stream, - stream::WIDTH_LIMIT, + stream::{AMPLIFICATION_LIMIT, WIDTH_LIMIT}, sys::{compute::is_core_available, storage}, BoolExt, }, @@ -124,19 +124,28 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) fn update_stream_width(server: &Arc, num_queues: usize, total_workers: usize) { let config = &server.config; let scale: f64 = config.stream_width_scale.min(100.0).into(); + let req_width = expected!(total_workers / num_queues).next_multiple_of(2); let req_width = req_width as f64; let req_width = usize_from_f64(req_width * scale) .expect("failed to convert f64 to usize") .clamp(WIDTH_LIMIT.0, WIDTH_LIMIT.1); + let req_amp = config.stream_amplification as f64; + let req_amp = usize_from_f64(req_amp * scale) + .expect("failed to convert f64 to usize") + .clamp(AMPLIFICATION_LIMIT.0, AMPLIFICATION_LIMIT.1); + let (old_width, new_width) = stream::set_width(req_width); + let (old_amp, new_amp) = stream::set_amplification(req_amp); debug!( scale = ?config.stream_width_scale, ?num_queues, ?req_width, ?old_width, ?new_width, + ?old_amp, + ?new_amp, "Updated global stream width" ); } diff --git a/src/main/server.rs b/src/main/server.rs index e1389f6d..a81b708d 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -52,6 +52,7 @@ impl Server { .expect("Unable to increase maximum soft and hard file descriptor limit"); let (_old_width, _new_width) = stream::set_width(config.stream_width_default); + let (_old_amp, _new_amp) = stream::set_amplification(config.stream_amplification); info!( server_name = %config.server_name, diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 67883d01..f6534825 100644 --- 
a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -71,7 +71,7 @@ impl Service { let event_ids = self .services .short - .multi_get_eventid_from_short(chain.iter()) + .multi_get_eventid_from_short(chain.into_iter().stream()) .ready_filter_map(Result::ok) .collect() .await; diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 00c1d16c..b645f9f1 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -69,7 +69,7 @@ where { self.db .eventid_shorteventid - .get_batch(event_ids.clone()) + .get_batch(event_ids.clone().stream()) .zip(event_ids.into_iter().stream()) .map(|(result, event_id)| match result { | Ok(ref short) => utils::u64_from_u8(short), @@ -162,20 +162,18 @@ where } #[implement(Service)] -pub fn multi_get_eventid_from_short<'a, Id, I>( +pub fn multi_get_eventid_from_short<'a, Id, S>( &'a self, - shorteventid: I, + shorteventid: S, ) -> impl Stream> + Send + 'a where - I: Iterator + Send + 'a, + S: Stream + Send + 'a, Id: for<'de> Deserialize<'de> + Sized + ToOwned + 'a, ::Owned: Borrow, { - const BUFSIZE: usize = size_of::(); - self.db .shorteventid_eventid - .aqry_batch::(shorteventid) + .qry_batch(shorteventid) .map(Deserialized::deserialized) } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 2769beb8..fd303667 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -467,7 +467,7 @@ impl Service { let auth_pdus = self .services .short - .multi_get_eventid_from_short(event_ids.iter()) + .multi_get_eventid_from_short(event_ids.into_iter().stream()) .zip(state_keys.into_iter().stream()) .ready_filter_map(|(event_id, tsk)| Some((tsk, event_id.ok()?))) .broad_filter_map(|(tsk, event_id): (_, OwnedEventId)| async move { diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index d60e505e..29b27a05 100644 --- a/src/service/rooms/state_accessor/data.rs +++ 
b/src/service/rooms/state_accessor/data.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, collections::HashMap, sync::Arc}; use conduwuit::{ - at, err, ref_at, + at, err, utils::stream::{BroadbandExt, IterStream, ReadyExt}, PduEvent, Result, }; @@ -69,7 +69,7 @@ impl Data { let full_pdus = self .services .short - .multi_get_eventid_from_short(short_ids.iter().map(ref_at!(1))) + .multi_get_eventid_from_short(short_ids.into_iter().map(at!(1)).stream()) .ready_filter_map(Result::ok) .broad_filter_map(|event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await.ok() @@ -93,7 +93,7 @@ impl Data { let full_ids = self .services .short - .multi_get_eventid_from_short(short_ids.iter().map(ref_at!(1))) + .multi_get_eventid_from_short(short_ids.iter().map(at!(1)).stream()) .zip(short_ids.iter().stream().map(at!(0))) .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) .collect() From 7e4453620eb3b9eb618ff195b72fc0d5df5d898b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 1 Jan 2025 20:09:41 +0000 Subject: [PATCH 0469/1248] move exponential backoff util to different submod Signed-off-by: Jason Volk --- src/core/utils/math.rs | 30 +------------------ src/core/utils/mod.rs | 5 +++- src/core/utils/time.rs | 2 ++ src/core/utils/time/exponential_backoff.rs | 29 ++++++++++++++++++ .../fetch_and_handle_outliers.rs | 2 +- .../rooms/event_handler/handle_prev_pdu.rs | 2 +- src/service/sending/sender.rs | 2 +- 7 files changed, 39 insertions(+), 33 deletions(-) create mode 100644 src/core/utils/time/exponential_backoff.rs diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index da2357d5..c5a785e2 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -1,4 +1,4 @@ -use std::{cmp, convert::TryFrom, time::Duration}; +use std::{cmp, convert::TryFrom}; pub use checked_ops::checked_ops; @@ -53,34 +53,6 @@ macro_rules! 
validated { ($($input:tt)+) => { $crate::expected!($($input)+) } } -/// Returns false if the exponential backoff has expired based on the inputs -#[inline] -#[must_use] -pub fn continue_exponential_backoff_secs( - min: u64, - max: u64, - elapsed: Duration, - tries: u32, -) -> bool { - let min = Duration::from_secs(min); - let max = Duration::from_secs(max); - continue_exponential_backoff(min, max, elapsed, tries) -} - -/// Returns false if the exponential backoff has expired based on the inputs -#[inline] -#[must_use] -pub fn continue_exponential_backoff( - min: Duration, - max: Duration, - elapsed: Duration, - tries: u32, -) -> bool { - let min = min.saturating_mul(tries).saturating_mul(tries); - let min = cmp::min(min, max); - elapsed < min -} - #[inline] #[allow(clippy::as_conversions)] pub fn usize_from_f64(val: f64) -> Result { diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 38232820..16072765 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -37,7 +37,10 @@ pub use self::{ stream::{IterStream, ReadyExt, Tools as StreamTools, TryReadyExt}, string::{str_from_bytes, string_from_bytes}, sys::compute::parallelism as available_parallelism, - time::{now_millis as millis_since_unix_epoch, timepoint_ago, timepoint_from_now}, + time::{ + exponential_backoff::{continue_exponential_backoff, continue_exponential_backoff_secs}, + now_millis as millis_since_unix_epoch, timepoint_ago, timepoint_from_now, + }, }; #[inline] diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index d65eb2d4..81fdda2a 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -1,3 +1,5 @@ +pub mod exponential_backoff; + use std::time::{Duration, SystemTime, UNIX_EPOCH}; use crate::{err, Result}; diff --git a/src/core/utils/time/exponential_backoff.rs b/src/core/utils/time/exponential_backoff.rs new file mode 100644 index 00000000..682c2592 --- /dev/null +++ b/src/core/utils/time/exponential_backoff.rs @@ -0,0 +1,29 @@ +use std::{cmp, 
time::Duration}; + +/// Returns false if the exponential backoff has expired based on the inputs +#[inline] +#[must_use] +pub fn continue_exponential_backoff_secs( + min: u64, + max: u64, + elapsed: Duration, + tries: u32, +) -> bool { + let min = Duration::from_secs(min); + let max = Duration::from_secs(max); + continue_exponential_backoff(min, max, elapsed, tries) +} + +/// Returns false if the exponential backoff has expired based on the inputs +#[inline] +#[must_use] +pub fn continue_exponential_backoff( + min: Duration, + max: Duration, + elapsed: Duration, + tries: u32, +) -> bool { + let min = min.saturating_mul(tries).saturating_mul(tries); + let min = cmp::min(min, max); + elapsed < min +} diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 2f6940ed..84d0edd0 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -6,7 +6,7 @@ use std::{ use conduwuit::{ debug, debug_error, debug_warn, implement, pdu, trace, - utils::math::continue_exponential_backoff_secs, warn, PduEvent, + utils::continue_exponential_backoff_secs, warn, PduEvent, }; use futures::TryFutureExt; use ruma::{ diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index a8893160..0a5295dc 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -5,7 +5,7 @@ use std::{ }; use conduwuit::{ - debug, implement, utils::math::continue_exponential_backoff_secs, Err, PduEvent, Result, + debug, implement, utils::continue_exponential_backoff_secs, Err, PduEvent, Result, }; use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName}; diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index bf4ebafb..482c31cf 100644 --- a/src/service/sending/sender.rs +++ 
b/src/service/sending/sender.rs @@ -13,7 +13,7 @@ use conduwuit::{ debug, err, error, result::LogErr, trace, - utils::{calculate_hash, math::continue_exponential_backoff_secs, ReadyExt}, + utils::{calculate_hash, continue_exponential_backoff_secs, ReadyExt}, warn, Error, Result, }; use futures::{ From 1a71798859786645ef8520c19ede5576b4d15bae Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 1 Jan 2025 20:20:41 +0000 Subject: [PATCH 0470/1248] add Expected trait to utils; use (already transitive) num-traits. Signed-off-by: Jason Volk --- Cargo.lock | 1 + Cargo.toml | 3 ++ src/core/Cargo.toml | 1 + src/core/utils/math.rs | 3 ++ src/core/utils/math/expected.rs | 52 +++++++++++++++++++++++++++++++++ 5 files changed, 60 insertions(+) create mode 100644 src/core/utils/math/expected.rs diff --git a/Cargo.lock b/Cargo.lock index 7f9ef547..a8a6ae0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -730,6 +730,7 @@ dependencies = [ "libloading", "log", "nix", + "num-traits", "rand", "regex", "reqwest", diff --git a/Cargo.toml b/Cargo.toml index c66dfcff..805c7d7c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -501,6 +501,9 @@ version = "0.8.1" [workspace.dependencies.libc] version = "0.2" +[workspace.dependencies.num-traits] +version = "0.2" + # # Patches # diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 49874c9c..dd8f634a 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -77,6 +77,7 @@ itertools.workspace = true libc.workspace = true libloading.workspace = true log.workspace = true +num-traits.workspace = true rand.workspace = true regex.workspace = true reqwest.workspace = true diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index c5a785e2..a08cb206 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -1,7 +1,10 @@ +mod expected; + use std::{cmp, convert::TryFrom}; pub use checked_ops::checked_ops; +pub use self::expected::Expected; use crate::{debug::type_name, err, Err, Error, Result}; /// Checked arithmetic expression. 
Returns a Result diff --git a/src/core/utils/math/expected.rs b/src/core/utils/math/expected.rs new file mode 100644 index 00000000..f0f71854 --- /dev/null +++ b/src/core/utils/math/expected.rs @@ -0,0 +1,52 @@ +use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; + +use crate::expected; + +pub trait Expected { + #[inline] + #[must_use] + fn expected_add(self, rhs: Self) -> Self + where + Self: CheckedAdd + Sized, + { + expected!(self + rhs) + } + + #[inline] + #[must_use] + fn expected_sub(self, rhs: Self) -> Self + where + Self: CheckedSub + Sized, + { + expected!(self - rhs) + } + + #[inline] + #[must_use] + fn expected_mul(self, rhs: Self) -> Self + where + Self: CheckedMul + Sized, + { + expected!(self * rhs) + } + + #[inline] + #[must_use] + fn expected_div(self, rhs: Self) -> Self + where + Self: CheckedDiv + Sized, + { + expected!(self / rhs) + } + + #[inline] + #[must_use] + fn expected_rem(self, rhs: Self) -> Self + where + Self: CheckedRem + Sized, + { + expected!(self % rhs) + } +} + +impl Expected for T {} From 33635e11d1ad1f52744d368defc12e6ec25ec59d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 1 Jan 2025 23:47:42 +0000 Subject: [PATCH 0471/1248] bump ruma tweak tracing instrument Signed-off-by: Jason Volk --- Cargo.lock | 26 +++++++++---------- Cargo.toml | 2 +- src/service/rooms/event_handler/acl_check.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8a6ae0a..cf08509f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3173,7 +3173,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "assign", "js_int", 
@@ -3195,7 +3195,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "js_int", "ruma-common", @@ -3207,7 +3207,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "as_variant", "assign", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "as_variant", "base64 0.22.1", @@ -3261,7 +3261,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3286,7 +3286,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "bytes", "http", @@ -3304,7 +3304,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "js_int", "thiserror 2.0.7", @@ -3313,7 +3313,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "js_int", "ruma-common", @@ -3323,7 +3323,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3338,7 +3338,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "js_int", "ruma-common", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "headers", "http", @@ -3363,7 +3363,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3379,7 +3379,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=30a08ff7be779df6858036c3f01f3ba64c1ee785#30a08ff7be779df6858036c3f01f3ba64c1ee785" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 805c7d7c..a59dcfcd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -336,7 +336,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "30a08ff7be779df6858036c3f01f3ba64c1ee785" +rev = "ac37db212aed236d4c415cfecb96aa42cfebc6d9" features = [ "compat", "rand", diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index ef12a25c..714b6fc1 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ b/src/service/rooms/event_handler/acl_check.rs @@ -6,7 +6,7 @@ use ruma::{ /// Returns Ok if the acl allows the server #[implement(super::Service)] -#[tracing::instrument(skip_all)] +#[tracing::instrument(skip_all, level = "debug")] pub async fn acl_check(&self, server_name: 
&ServerName, room_id: &RoomId) -> Result { let Ok(acl_event_content) = self .services diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index b47111d9..0e4b919d 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -114,7 +114,7 @@ pub async fn state_resolution( &self, room_version: &RoomVersionId, state_sets: &[StateMap], - auth_chain_sets: &Vec>, + auth_chain_sets: &[HashSet], ) -> Result> { //TODO: ??? let _lock = self.services.globals.stateres_mutex.lock(); From b8f8f686349445e69b19d203a89cb97f71ed2330 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 1 Jan 2025 23:49:08 +0000 Subject: [PATCH 0472/1248] improve memory-usage output w/ more byte-sizes Signed-off-by: Jason Volk --- src/service/globals/mod.rs | 21 ++++++++----- src/service/resolver/cache.rs | 18 +++++++++++- src/service/resolver/fed.rs | 10 +++++++ src/service/resolver/mod.rs | 35 ++++++++++++---------- src/service/rooms/state_accessor/mod.rs | 39 +++++++++++++++++++++---- 5 files changed, 92 insertions(+), 31 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 3f98babe..3a514d5c 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,7 +7,7 @@ use std::{ time::Instant, }; -use conduwuit::{error, Config, Result}; +use conduwuit::{error, utils::bytes::pretty, Config, Result}; use data::Data; use regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; @@ -93,13 +93,18 @@ impl crate::Service for Service { Ok(Arc::new(s)) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { - let bad_event_ratelimiter = self - .bad_event_ratelimiter - .read() - .expect("locked for reading") - .len(); - writeln!(out, "bad_event_ratelimiter: {bad_event_ratelimiter}")?; + fn memory_usage(&self, out: &mut dyn Write) -> Result { + let (ber_count, ber_bytes) = 
self.bad_event_ratelimiter.read()?.iter().fold( + (0_usize, 0_usize), + |(mut count, mut bytes), (event_id, _)| { + bytes = bytes.saturating_add(event_id.capacity()); + bytes = bytes.saturating_add(size_of::()); + count = count.saturating_add(1); + (count, bytes) + }, + ); + + writeln!(out, "bad_event_ratelimiter: {ber_count} ({})", pretty(ber_bytes))?; Ok(()) } diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 3e961f4c..e309a129 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -6,7 +6,10 @@ use std::{ }; use arrayvec::ArrayVec; -use conduwuit::{trace, utils::rand}; +use conduwuit::{ + trace, + utils::{math::Expected, rand}, +}; use ruma::{OwnedServerName, ServerName}; use super::fed::FedDest; @@ -113,6 +116,15 @@ impl CachedDest { pub(crate) fn default_expire() -> SystemTime { rand::timepoint_secs(60 * 60 * 18..60 * 60 * 36) } + + #[inline] + #[must_use] + pub fn size(&self) -> usize { + self.dest + .size() + .expected_add(self.host.len()) + .expected_add(size_of_val(&self.expire)) + } } impl CachedOverride { @@ -126,4 +138,8 @@ impl CachedOverride { pub(crate) fn default_expire() -> SystemTime { rand::timepoint_secs(60 * 60 * 6..60 * 60 * 12) } + + #[inline] + #[must_use] + pub fn size(&self) -> usize { size_of_val(self) } } diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index 3986db8e..76fc6894 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -5,6 +5,7 @@ use std::{ }; use arrayvec::ArrayString; +use conduwuit::utils::math::Expected; #[derive(Clone, Debug, PartialEq, Eq)] pub enum FedDest { @@ -76,6 +77,15 @@ impl FedDest { pub fn default_port() -> PortString { PortString::from(DEFAULT_PORT).expect("default port string") } + + #[inline] + #[must_use] + pub fn size(&self) -> usize { + match self { + | Self::Literal(saddr) => size_of_val(saddr), + | Self::Named(host, port) => host.len().expected_add(port.capacity()), + } + } } impl fmt::Display for 
FedDest { diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index e18cf0bd..6a6289b6 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,7 +6,7 @@ mod tests; use std::{fmt::Write, sync::Arc}; -use conduwuit::{Result, Server}; +use conduwuit::{utils, utils::math::Expected, Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; @@ -36,22 +36,25 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { - let resolver_overrides_cache = self - .cache - .overrides - .read() - .expect("locked for reading") - .len(); - writeln!(out, "resolver_overrides_cache: {resolver_overrides_cache}")?; + fn memory_usage(&self, out: &mut dyn Write) -> Result { + use utils::bytes::pretty; - let resolver_destinations_cache = self - .cache - .destinations - .read() - .expect("locked for reading") - .len(); - writeln!(out, "resolver_destinations_cache: {resolver_destinations_cache}")?; + let (oc_count, oc_bytes) = self.cache.overrides.read()?.iter().fold( + (0_usize, 0_usize), + |(count, bytes), (key, val)| { + (count.expected_add(1), bytes.expected_add(key.len()).expected_add(val.size())) + }, + ); + + let (dc_count, dc_bytes) = self.cache.destinations.read()?.iter().fold( + (0_usize, 0_usize), + |(count, bytes), (key, val)| { + (count.expected_add(1), bytes.expected_add(key.len()).expected_add(val.size())) + }, + ); + + writeln!(out, "resolver_overrides_cache: {oc_count} ({})", pretty(oc_bytes))?; + writeln!(out, "resolver_destinations_cache: {dc_count} ({})", pretty(dc_bytes))?; Ok(()) } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index fd132798..d89c8835 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -10,7 +10,11 @@ use std::{ use conduwuit::{ err, error, pdu::PduBuilder, - utils::{math::usize_from_f64, ReadyExt}, + utils, + utils::{ + math::{usize_from_f64, 
Expected}, + ReadyExt, + }, Err, Error, PduEvent, Result, }; use futures::StreamExt; @@ -84,12 +88,35 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { - let server_visibility_cache = self.server_visibility_cache.lock().expect("locked").len(); - writeln!(out, "server_visibility_cache: {server_visibility_cache}")?; + fn memory_usage(&self, out: &mut dyn Write) -> Result { + use utils::bytes::pretty; - let user_visibility_cache = self.user_visibility_cache.lock().expect("locked").len(); - writeln!(out, "user_visibility_cache: {user_visibility_cache}")?; + let (svc_count, svc_bytes) = self.server_visibility_cache.lock()?.iter().fold( + (0_usize, 0_usize), + |(count, bytes), (key, _)| { + ( + count.expected_add(1), + bytes + .expected_add(key.0.capacity()) + .expected_add(size_of_val(&key.1)), + ) + }, + ); + + let (uvc_count, uvc_bytes) = self.user_visibility_cache.lock()?.iter().fold( + (0_usize, 0_usize), + |(count, bytes), (key, _)| { + ( + count.expected_add(1), + bytes + .expected_add(key.0.capacity()) + .expected_add(size_of_val(&key.1)), + ) + }, + ); + + writeln!(out, "server_visibility_cache: {svc_count} ({})", pretty(svc_bytes))?; + writeln!(out, "user_visibility_cache: {uvc_count} ({})", pretty(uvc_bytes))?; Ok(()) } From b4ef646485de48eb660563f277cfc939d938ff17 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 2 Jan 2025 00:11:12 +0000 Subject: [PATCH 0473/1248] translate 5xx to 404 on remote media request Signed-off-by: Jason Volk --- src/service/media/remote.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index d5ad5391..ca73c3ef 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -331,7 +331,10 @@ fn handle_federation_error( // If we get these from any middleware we'll try the other endpoint rather than // giving up too early. 
- if error.status_code().is_client_error() || error.status_code().is_redirection() { + if error.status_code().is_redirection() + || error.status_code().is_client_error() + || error.status_code().is_server_error() + { return fallback(); } From 72797532b69b37d0ce532521f187f8a1396fc53c Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 1 Jan 2025 23:19:51 -0500 Subject: [PATCH 0474/1248] fix MSC4133 fields not being returned as original types Signed-off-by: strawberry --- src/api/client/unstable.rs | 8 ++++++++ src/service/users/mod.rs | 13 ++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 2c9add44..66cb31d5 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -506,6 +506,10 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("The requested profile key does not exist."))); } + if profile_key_value.is_empty() { + return Err!(Request(NotFound("The requested profile key does not exist."))); + } + return Ok(get_profile_key::unstable::Response { value: profile_key_value }); } } @@ -522,5 +526,9 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("The requested profile key does not exist."))); } + if profile_key_value.is_empty() { + return Err!(Request(NotFound("The requested profile key does not exist."))); + } + Ok(get_profile_key::unstable::Response { value: profile_key_value }) } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index fe064d9c..3c43968a 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -947,12 +947,13 @@ impl Service { user_id: &UserId, profile_key: &str, ) -> Result { - let key = (user_id, profile_key); self.db .useridprofilekey_value - .qry(&key) + .qry(&(user_id, profile_key)) .await - .deserialized() + .deserialized::>() + .map(serde_json::to_value)? 
+ .map_err(Into::into) } /// Gets all the user's profile keys and values in an iterator @@ -960,14 +961,16 @@ impl Service { &'a self, user_id: &'a UserId, ) -> impl Stream + 'a + Send { - type KeyVal = ((Ignore, String), serde_json::Value); + type KeyVal = ((Ignore, String), Raw); let prefix = (user_id, Interfix); self.db .useridprofilekey_value .stream_prefix(&prefix) .ignore_err() - .map(|((_, key), val): KeyVal| (key, val)) + .ready_filter_map(|((_, key), val): KeyVal| { + Some((key, serde_json::to_value(val).ok()?)) + }) } /// Sets a new profile key value, removes the key if value is None From 77d7e8a3ad743f4043ff652939346ae6c7be0312 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 2 Jan 2025 06:35:33 +0000 Subject: [PATCH 0475/1248] add json array to serde_json::Value db deserialization without Raw --- src/database/de.rs | 4 ++++ src/service/users/mod.rs | 13 +++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index f648b89a..4f5be6fc 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -351,6 +351,10 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { match self.record_peek_byte() { | Some(b'{') => self.deserialize_map(visitor), + | Some(b'[') => serde_json::Deserializer::from_slice(self.record_next()) + .deserialize_seq(visitor) + .map_err(Into::into), + | _ => self.deserialize_str(visitor), } } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 3c43968a..fe064d9c 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -947,13 +947,12 @@ impl Service { user_id: &UserId, profile_key: &str, ) -> Result { + let key = (user_id, profile_key); self.db .useridprofilekey_value - .qry(&(user_id, profile_key)) + .qry(&key) .await - .deserialized::>() - .map(serde_json::to_value)? 
- .map_err(Into::into) + .deserialized() } /// Gets all the user's profile keys and values in an iterator @@ -961,16 +960,14 @@ impl Service { &'a self, user_id: &'a UserId, ) -> impl Stream + 'a + Send { - type KeyVal = ((Ignore, String), Raw); + type KeyVal = ((Ignore, String), serde_json::Value); let prefix = (user_id, Interfix); self.db .useridprofilekey_value .stream_prefix(&prefix) .ignore_err() - .ready_filter_map(|((_, key), val): KeyVal| { - Some((key, serde_json::to_value(val).ok()?)) - }) + .map(|((_, key), val): KeyVal| (key, val)) } /// Sets a new profile key value, removes the key if value is None From b1182fe8a4065eb6edcfc992118b630f7baf5afc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 2 Jan 2025 07:16:46 +0000 Subject: [PATCH 0476/1248] improve some router tracing spans Signed-off-by: Jason Volk --- src/router/request.rs | 69 ++++++++++++++++++++++++++-------------- src/router/serve/unix.rs | 50 +++++++++++++++++++++-------- 2 files changed, 83 insertions(+), 36 deletions(-) diff --git a/src/router/request.rs b/src/router/request.rs index 559c7f88..ab98fe4f 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -12,6 +12,18 @@ use http::{Method, StatusCode, Uri}; parent = None, level = "trace", skip_all, + fields( + handled = %services + .server + .metrics + .requests_spawn_finished + .fetch_add(1, Ordering::Relaxed), + active = %services + .server + .metrics + .requests_spawn_active + .fetch_add(1, Ordering::Relaxed), + ) )] pub(crate) async fn spawn( State(services): State>, @@ -19,34 +31,56 @@ pub(crate) async fn spawn( next: axum::middleware::Next, ) -> Result { let server = &services.server; + + #[cfg(debug_assertions)] + defer! 
{{ + _ = server + .metrics + .requests_spawn_active + .fetch_sub(1, Ordering::Relaxed); + }}; + if !server.running() { debug_warn!("unavailable pending shutdown"); return Err(StatusCode::SERVICE_UNAVAILABLE); } - let active = server - .metrics - .requests_spawn_active - .fetch_add(1, Ordering::Relaxed); - trace!(active, "enter"); - defer! {{ - let active = server.metrics.requests_spawn_active.fetch_sub(1, Ordering::Relaxed); - let finished = server.metrics.requests_spawn_finished.fetch_add(1, Ordering::Relaxed); - trace!(active, finished, "leave"); - }}; - let fut = next.run(req); let task = server.runtime().spawn(fut); task.await.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) } -#[tracing::instrument(level = "debug", skip_all)] +#[tracing::instrument( + level = "debug", + skip_all, + fields( + handled = %services + .server + .metrics + .requests_handle_finished + .fetch_add(1, Ordering::Relaxed), + active = %services + .server + .metrics + .requests_handle_active + .fetch_add(1, Ordering::Relaxed), + ) +)] pub(crate) async fn handle( State(services): State>, req: http::Request, next: axum::middleware::Next, ) -> Result { let server = &services.server; + + #[cfg(debug_assertions)] + defer! {{ + _ = server + .metrics + .requests_handle_active + .fetch_sub(1, Ordering::Relaxed); + }}; + if !server.running() { debug_warn!( method = %req.method(), @@ -57,17 +91,6 @@ pub(crate) async fn handle( return Err(StatusCode::SERVICE_UNAVAILABLE); } - let active = server - .metrics - .requests_handle_active - .fetch_add(1, Ordering::Relaxed); - trace!(active, "enter"); - defer! 
{{ - let active = server.metrics.requests_handle_active.fetch_sub(1, Ordering::Relaxed); - let finished = server.metrics.requests_handle_finished.fetch_add(1, Ordering::Relaxed); - trace!(active, finished, "leave"); - }}; - let uri = req.uri().clone(); let method = req.method().clone(); let result = next.run(req).await; diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index f5018455..fcd361f0 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -2,6 +2,7 @@ use std::{ net::{self, IpAddr, Ipv4Addr}, + os::fd::AsRawFd, path::Path, sync::{atomic::Ordering, Arc}, }; @@ -60,31 +61,54 @@ pub(super) async fn serve( Ok(()) } +#[tracing::instrument( + level = "trace", + skip_all, + fields( + ?listener, + socket = ?conn.0, + ), +)] async fn accept( server: &Arc, listener: &UnixListener, tasks: &mut JoinSet<()>, - mut app: MakeService, + app: MakeService, builder: server::conn::auto::Builder, conn: (UnixStream, SocketAddr), ) { - let (socket, remote) = conn; - let socket = TokioIo::new(socket); - trace!(?listener, ?socket, ?remote, "accepted"); - - let called = app.call(NULL_ADDR).await.unwrap_infallible(); - - let service = move |req: Request| called.clone().oneshot(req); - let handler = service_fn(service); - let task = async move { - // bug on darwin causes all results to be errors. 
do not unwrap this - _ = builder.serve_connection(socket, handler).await; - }; + let (socket, _) = conn; + let server_ = server.clone(); + let task = async move { accepted(server_, builder, socket, app).await }; _ = tasks.spawn_on(task, server.runtime()); while tasks.try_join_next().is_some() {} } +#[tracing::instrument( + level = "trace", + skip_all, + fields( + fd = %socket.as_raw_fd(), + path = ?socket.local_addr(), + ), +)] +async fn accepted( + server: Arc, + builder: server::conn::auto::Builder, + socket: UnixStream, + mut app: MakeService, +) { + let socket = TokioIo::new(socket); + let called = app.call(NULL_ADDR).await.unwrap_infallible(); + let service = move |req: Request| called.clone().oneshot(req); + let handler = service_fn(service); + trace!(?socket, ?handler, "serving connection"); + + // bug on darwin causes all results to be errors. do not unwrap this + _ = builder.serve_connection(socket, handler).await; +} + async fn init(server: &Arc) -> Result { use std::os::unix::fs::PermissionsExt; From 50cadbee9624f1a24f2395ce28439d7140a433fa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 2 Jan 2025 07:31:16 +0000 Subject: [PATCH 0477/1248] add faster shutdown point in unix socket listener Signed-off-by: Jason Volk --- src/router/serve/unix.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index fcd361f0..6855b34c 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -106,7 +106,10 @@ async fn accepted( trace!(?socket, ?handler, "serving connection"); // bug on darwin causes all results to be errors. do not unwrap this - _ = builder.serve_connection(socket, handler).await; + tokio::select! 
{ + () = server.until_shutdown() => (), + _ = builder.serve_connection(socket, handler) => (), + }; } async fn init(server: &Arc) -> Result { From a5d70f73566044369231a43ae2f2be3bc5e9d715 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 2 Jan 2025 18:16:00 -0500 Subject: [PATCH 0478/1248] add some m.room.member checks on putting direct state events Signed-off-by: strawberry --- src/api/client/state.rs | 118 +++++++++++------- src/api/server/send_join.rs | 11 +- .../event_handler/handle_incoming_pdu.rs | 16 +-- .../rooms/event_handler/handle_outlier_pdu.rs | 2 +- src/service/rooms/timeline/mod.rs | 23 ++++ src/service/sending/sender.rs | 7 +- 6 files changed, 117 insertions(+), 60 deletions(-) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index e5a7dd28..d00ee5e5 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,15 +1,13 @@ use axum::extract::State; -use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, Error, PduEvent, Result}; +use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, PduEvent, Result}; use ruma::{ - api::client::{ - error::ErrorKind, - state::{get_state_events, get_state_events_for_key, send_state_event}, - }, + api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, events::{ room::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, }, AnyStateEventContent, StateEventType, }, @@ -23,11 +21,6 @@ use crate::{Ruma, RumaResponse}; /// # `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}/{stateKey}` /// /// Sends a state event into the room. 
-/// -/// - The only requirement for the content is that it has to be valid json -/// - Tries to send the event into the room, auth rules will determine if it is -/// allowed -/// - If event is new `canonical_alias`: Rejects if alias is incorrect pub(crate) async fn send_state_event_for_key_route( State(services): State, body: Ruma, @@ -41,7 +34,7 @@ pub(crate) async fn send_state_event_for_key_route( &body.room_id, &body.event_type, &body.body.body, - body.state_key.clone(), + &body.state_key, if body.appservice_info.is_some() { body.timestamp } else { @@ -55,11 +48,6 @@ pub(crate) async fn send_state_event_for_key_route( /// # `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}` /// /// Sends a state event into the room. -/// -/// - The only requirement for the content is that it has to be valid json -/// - Tries to send the event into the room, auth rules will determine if it is -/// allowed -/// - If event is new `canonical_alias`: Rejects if alias is incorrect pub(crate) async fn send_state_event_for_empty_key_route( State(services): State, body: Ruma, @@ -172,10 +160,10 @@ async fn send_state_event_for_key_helper( room_id: &RoomId, event_type: &StateEventType, json: &Raw, - state_key: String, + state_key: &str, timestamp: Option, ) -> Result { - allowed_to_send_state_event(services, room_id, event_type, json).await?; + allowed_to_send_state_event(services, room_id, event_type, state_key, json).await?; let state_lock = services.rooms.state.mutex.lock(room_id).await; let event_id = services .rooms @@ -184,7 +172,7 @@ async fn send_state_event_for_key_helper( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get())?, - state_key: Some(state_key), + state_key: Some(String::from(state_key)), timestamp, ..Default::default() }, @@ -201,6 +189,7 @@ async fn allowed_to_send_state_event( services: &Services, room_id: &RoomId, event_type: &StateEventType, + state_key: &str, json: &Raw, ) -> Result { match event_type { 
@@ -212,10 +201,7 @@ async fn allowed_to_send_state_event( // Forbid m.room.encryption if encryption is disabled | StateEventType::RoomEncryption => if !services.globals.allow_encryption() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Encryption has been disabled", - )); + return Err!(Request(Forbidden("Encryption is disabled on this homeserver."))); }, // admin room is a sensitive room, it should not ever be made public | StateEventType::RoomJoinRules => { @@ -225,10 +211,9 @@ async fn allowed_to_send_state_event( serde_json::from_str::(json.json().get()) { if join_rule.join_rule == JoinRule::Public { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Admin room is not allowed to be public.", - )); + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made public" + ))); } } } @@ -236,26 +221,22 @@ async fn allowed_to_send_state_event( }, // admin room is a sensitive room, it should not ever be made world readable | StateEventType::RoomHistoryVisibility => { - if let Ok(admin_room_id) = services.admin.get_admin_room().await { - if admin_room_id == room_id { - if let Ok(visibility_content) = serde_json::from_str::< - RoomHistoryVisibilityEventContent, - >(json.json().get()) - { - if visibility_content.history_visibility + if let Ok(visibility_content) = + serde_json::from_str::(json.json().get()) + { + if let Ok(admin_room_id) = services.admin.get_admin_room().await { + if admin_room_id == room_id + && visibility_content.history_visibility == HistoryVisibility::WorldReadable - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Admin room is not allowed to be made world readable (public \ - room history).", - )); - } + { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made world readable \ + (public room history)." 
+ ))); } } } }, - // TODO: allow alias if it previously existed | StateEventType::RoomCanonicalAlias => { if let Ok(canonical_alias) = serde_json::from_str::(json.json().get()) @@ -289,6 +270,59 @@ async fn allowed_to_send_state_event( } } }, + | StateEventType::RoomMember => { + let Ok(membership_content) = + serde_json::from_str::(json.json().get()) + else { + return Err!(Request(BadJson( + "Membership content must have a valid JSON body with at least a valid \ + membership state." + ))); + }; + + let Ok(state_key) = UserId::parse(state_key) else { + return Err!(Request(BadJson( + "Membership event has invalid or non-existent state key" + ))); + }; + + if let Some(authorising_user) = membership_content.join_authorized_via_users_server { + if membership_content.membership != MembershipState::Join { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } + + if services + .rooms + .state_cache + .is_joined(state_key, room_id) + .await + { + return Err!(Request(InvalidParam( + "{state_key} is already joined, an authorising user is not required." + ))); + } + + if !services.globals.user_is_local(&authorising_user) { + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} does not belong to this homeserver" + ))); + } + + if !services + .rooms + .state_cache + .is_joined(&authorising_user, room_id) + .await + { + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} is not in the room, they cannot \ + authorise the join." 
+ ))); + } + } + }, | _ => (), } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index fe0277d1..6cbe5143 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -61,12 +61,11 @@ async fn create_join_event( }; let event_room_id: OwnedRoomId = serde_json::from_value( - serde_json::to_value( - value - .get("room_id") - .ok_or_else(|| err!(Request(BadJson("Event missing room_id property."))))?, - ) - .expect("CanonicalJson is valid json value"), + value + .get("room_id") + .ok_or_else(|| err!(Request(BadJson("Event missing room_id property."))))? + .clone() + .into(), ) .map_err(|e| err!(Request(BadJson(warn!("room_id field is not a valid room ID: {e}")))))?; diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 0e0409b4..4c2fb2f7 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -4,12 +4,9 @@ use std::{ time::Instant, }; -use conduwuit::{debug, err, implement, warn, Error, Result}; +use conduwuit::{debug, err, implement, warn, Err, Result}; use futures::{FutureExt, TryFutureExt}; -use ruma::{ - api::client::error::ErrorKind, events::StateEventType, CanonicalJsonValue, EventId, RoomId, - ServerName, UserId, -}; +use ruma::{events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId}; use super::{check_room_id, get_room_version_id}; use crate::rooms::timeline::RawPduId; @@ -58,15 +55,14 @@ pub async fn handle_incoming_pdu<'a>( // 1.1 Check the server is in the room if !self.services.metadata.exists(room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); + return Err!(Request(NotFound("Room is unknown to this server"))); } // 1.2 Check if the room is disabled if self.services.metadata.is_disabled(room_id).await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Federation of this room is 
currently disabled on this server.", - )); + return Err!(Request(Forbidden( + "Federation of this room is currently disabled on this server." + ))); } // 1.3.1 Check room ACL on origin field/server diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index c3278329..3ad73295 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -68,7 +68,7 @@ pub(super) async fn handle_outlier_pdu<'a>( let incoming_pdu = serde_json::from_value::( serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), ) - .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; + .map_err(|e| err!(Request(BadJson(debug_warn!("Event is not a valid PDU: {e}")))))?; check_room_id(room_id, &incoming_pdu)?; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 81df7b35..2a272c38 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -901,6 +901,29 @@ impl Service { } }; + if pdu.kind == TimelineEventType::RoomMember { + let content: RoomMemberEventContent = pdu.get_content()?; + + if content.join_authorized_via_users_server.is_some() + && content.membership != MembershipState::Join + { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } + + if content + .join_authorized_via_users_server + .as_ref() + .is_some_and(|authorising_user| { + !self.services.globals.user_is_local(authorising_user) + }) { + return Err!(Request(InvalidParam( + "Authorising user does not belong to this homeserver" + ))); + } + } + // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. 
diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 482c31cf..a9abada4 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -739,7 +739,12 @@ impl Service { )); }; - let mut pdus = Vec::new(); + let mut pdus = Vec::with_capacity( + events + .iter() + .filter(|event| matches!(event, SendingEvent::Pdu(_))) + .count(), + ); for event in &events { match event { | SendingEvent::Pdu(pdu_id) => { From 6c8a1b5e9b3854c036e5fc74733357f223dc437f Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 2 Jan 2025 18:48:04 -0500 Subject: [PATCH 0479/1248] bump ruwuma Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf08509f..2005202d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3173,7 +3173,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "assign", "js_int", @@ -3195,7 +3195,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "js_int", "ruma-common", @@ -3207,7 +3207,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "as_variant", "assign", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "as_variant", "base64 0.22.1", @@ -3261,7 +3261,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3286,7 +3286,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "bytes", "http", @@ -3304,7 +3304,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "js_int", "thiserror 2.0.7", @@ -3313,7 +3313,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "js_int", "ruma-common", @@ -3323,7 +3323,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3338,7 +3338,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "js_int", "ruma-common", @@ -3350,7 +3350,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "headers", "http", @@ -3363,7 +3363,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3379,7 
+3379,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ac37db212aed236d4c415cfecb96aa42cfebc6d9#ac37db212aed236d4c415cfecb96aa42cfebc6d9" +source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index a59dcfcd..3d65e4cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -336,7 +336,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "ac37db212aed236d4c415cfecb96aa42cfebc6d9" +rev = "c4f55b39900b33b2d443dd12a6a2dab50961fdfb" features = [ "compat", "rand", From 7a349fdc58bb1322991a7b403178b8dccedf2e59 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 2 Jan 2025 18:55:52 -0500 Subject: [PATCH 0480/1248] use latest sccache, bump nix flake lock, cache everything in rust-cache Signed-off-by: strawberry --- .github/workflows/ci.yml | 12 +++++++----- Cargo.lock | 6 +++--- Cargo.toml | 10 +++++----- flake.lock | 24 ++++++++++++------------ src/database/pool.rs | 6 +++--- 5 files changed, 30 insertions(+), 28 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5b20a43e..453d9df8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -188,13 +188,13 @@ jobs: - name: Run sccache-cache if: (env.SCCACHE_GHA_ENABLED == 'true') uses: mozilla-actions/sccache-action@main - with: - version: "v0.8.2" # use rust-cache - uses: Swatinem/rust-cache@v2 with: cache-all-crates: "true" + cache-on-failure: "true" + cache-targets: "true" - name: Run CI tests env: @@ -366,13 +366,13 @@ jobs: - name: Run sccache-cache if: (env.SCCACHE_GHA_ENABLED == 'true') uses: mozilla-actions/sccache-action@main - with: - version: "v0.8.2" # use rust-cache - uses: Swatinem/rust-cache@v2 with: cache-all-crates: "true" + cache-on-failure: 
"true" + cache-targets: "true" - name: Build static ${{ matrix.target }}-all-features run: | @@ -660,6 +660,8 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-all-crates: "true" + cache-on-failure: "true" + cache-targets: "true" # Nix can't do portable macOS builds yet - name: Build macOS x86_64 binary @@ -921,4 +923,4 @@ jobs: if [ ! -z $GITLAB_TOKEN ]; then echo "- \`docker pull ${GLCR_TAG}\`" >> $GITHUB_STEP_SUMMARY echo "- \`docker pull ${GLCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - fi \ No newline at end of file + fi diff --git a/Cargo.lock b/Cargo.lock index 2005202d..3f900a11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,7 +95,7 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-channel" version = "2.3.1" -source = "git+https://github.com/jevolk/async-channel?rev=fefa543ca5eddf21237d75776fce98b7e09e924a#fefa543ca5eddf21237d75776fce98b7e09e924a" +source = "git+https://github.com/girlbossceo/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -938,7 +938,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_affinity" version = "0.8.1" -source = "git+https://github.com/jevolk/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" +source = "git+https://github.com/girlbossceo/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" dependencies = [ "libc", "num_cpus", @@ -1258,7 +1258,7 @@ dependencies = [ [[package]] name = "event-listener" version = "5.3.1" -source = "git+https://github.com/jevolk/event-listener?rev=96d7e0fc026d8f708b19bc9267a382676a50354c#96d7e0fc026d8f708b19bc9267a382676a50354c" +source = 
"git+https://github.com/girlbossceo/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d" dependencies = [ "concurrent-queue", "parking", diff --git a/Cargo.toml b/Cargo.toml index 3d65e4cd..76acda80 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -532,14 +532,14 @@ rev = "deaeb0694e2083f53d363b648da06e10fc13900c" # adds LIFO queue scheduling; this should be updated with PR progress. [patch.crates-io.event-listener] -git = "https://github.com/jevolk/event-listener" -rev = "96d7e0fc026d8f708b19bc9267a382676a50354c" +git = "https://github.com/girlbossceo/event-listener" +rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d" [patch.crates-io.async-channel] -git = "https://github.com/jevolk/async-channel" -rev = "fefa543ca5eddf21237d75776fce98b7e09e924a" +git = "https://github.com/girlbossceo/async-channel" +rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280" [patch.crates-io.core_affinity] -git = "https://github.com/jevolk/core_affinity_rs" +git = "https://github.com/girlbossceo/core_affinity_rs" rev = "9c8e51510c35077df888ee72a36b4b05637147da" # diff --git a/flake.lock b/flake.lock index 94cef4dd..35029076 100644 --- a/flake.lock +++ b/flake.lock @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1733688869, - "narHash": "sha256-KrhxxFj1CjESDrL5+u/zsVH0K+Ik9tvoac/oFPoxSB8=", + "lastModified": 1734808813, + "narHash": "sha256-3aH/0Y6ajIlfy7j52FGZ+s4icVX0oHhqBzRdlOeztqg=", "owner": "ipetkov", "repo": "crane", - "rev": "604637106e420ad99907cae401e13ab6b452e7d9", + "rev": "72e2d02dbac80c8c86bf6bf3e785536acf8ee926", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1733726208, - "narHash": "sha256-Z6zL4AtpZWxkvTd3l0KkPZamu2wtTKl4nNiqNSlgsb4=", + "lastModified": 1735799625, + "narHash": "sha256-lFadwWDvVIub11bwfZhsh2WUByf9LOi6yjsSUMmE0xk=", "owner": "nix-community", "repo": "fenix", - "rev": "d51a64e1d23e509f28a6955a6652cc62409dd4a8", + 
"rev": "a9d84a1545814910cb4ab0515ed6921e8b07ee95", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1733656523, - "narHash": "sha256-w0FXPfpGhOihoJDiwMsyN1EzpsXi2F8VQ+NVZQSMtys=", + "lastModified": 1735685343, + "narHash": "sha256-h1CpBzdJDNtSUb5QMyfFHKHocTTky+4McgQEBQBM+xA=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "93dc9803a1ee435e590b02cde9589038d5cc3a4e", + "rev": "81934660d6e9ea54d2f0cdee821e8533b10c221a", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1733642337, - "narHash": "sha256-I1uc97f/cNhOpCemIbBAUS+CV0R7jts0NW9lc8jrpxc=", + "lastModified": 1735742096, + "narHash": "sha256-q3a80h8Jf8wfmPURUgRR46nQCB3I5fhZ+/swulTF5HY=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "4c755e62a617eeeef3066994731ce1cdd16504ac", + "rev": "7e639ee3dda6ed9cecc79d41f6d38235121e483d", "type": "github" }, "original": { diff --git a/src/database/pool.rs b/src/database/pool.rs index bcf20de8..0295e0e9 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -8,7 +8,7 @@ use std::{ }, }; -use async_channel::{Receiver, RecvError, Sched, Sender}; +use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, defer, err, implement, result::DebugInspect, @@ -71,13 +71,13 @@ const BATCH_INLINE: usize = 1; #[implement(Pool)] pub(crate) async fn new(server: &Arc) -> Result> { - const CHAN_SCHED: (Sched, Sched) = (Sched::Fifo, Sched::Lifo); + const CHAN_SCHED: (QueueStrategy, QueueStrategy) = (QueueStrategy::Fifo, QueueStrategy::Lifo); let (total_workers, queue_sizes, topology) = configure(server); let (senders, receivers) = queue_sizes .into_iter() - .map(|cap| async_channel::bounded_with_sched(cap, CHAN_SCHED)) + .map(|cap| async_channel::bounded_with_queue_strategy(cap, CHAN_SCHED)) .unzip(); let pool = Arc::new(Self { From 3b8a90ad1381e740c5420846ccad7a418581521c Mon Sep 17 00:00:00 2001 From: 
June Clementine Strawberry Date: Thu, 2 Jan 2025 20:56:27 -0500 Subject: [PATCH 0481/1248] delete more stuff to free up runner space Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 453d9df8..4a4df488 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -103,7 +103,7 @@ jobs: sudo docker image prune --all --force || true sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true sudo apt clean - sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku + sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true set -o pipefail - name: Sync repository @@ -274,7 +274,7 @@ jobs: sudo docker image prune --all --force || true sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true sudo apt clean - sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 
/usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku + sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true set -o pipefail - name: Sync repository From f1af7ec08c9af0efbfebcd814111fb546e258552 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 3 Jan 2025 02:34:41 +0000 Subject: [PATCH 0482/1248] fix unused import in release mode Signed-off-by: Jason Volk --- src/router/request.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/router/request.rs b/src/router/request.rs index ab98fe4f..ca063338 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -4,7 +4,7 @@ use axum::{ extract::State, response::{IntoResponse, Response}, }; -use conduwuit::{debug, debug_error, debug_warn, defer, err, error, trace, Result}; +use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; use http::{Method, StatusCode, Uri}; @@ -33,7 +33,7 @@ pub(crate) async fn spawn( let server = &services.server; #[cfg(debug_assertions)] - defer! {{ + conduwuit::defer! {{ _ = server .metrics .requests_spawn_active @@ -74,7 +74,7 @@ pub(crate) async fn handle( let server = &services.server; #[cfg(debug_assertions)] - defer! {{ + conduwuit::defer! 
{{ _ = server .metrics .requests_handle_active From e5049cae4a3890dc5f61ead53281f23b36bf4c97 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 2 Jan 2025 22:02:11 -0500 Subject: [PATCH 0483/1248] update complement test results Signed-off-by: June Clementine Strawberry --- tests/test_results/complement/test_results.jsonl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 26ae6931..9b4d2838 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -191,32 +191,32 @@ {"Action":"fail","Test":"TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"} {"Action":"pass","Test":"TestRemoteTyping"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin"} +{"Action":"fail","Test":"TestRestrictedRoomsLocalJoin"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room"} +{"Action":"fail","Test":"TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room"} +{"Action":"fail","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"} 
{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} +{"Action":"fail","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoin"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room"} +{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited"} +{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinFailOver"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"} {"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} 
+{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited"} +{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUser"} {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsSpacesSummaryFederation"} From 8c74e35e7640a041c1f3496d82585e5240294352 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 4 Jan 2025 03:15:48 +0000 Subject: [PATCH 0484/1248] automatically retry returning data in syncv3 (#652) * automatically retry returning data in syncv3 * reference service * clippy fixes --- src/api/client/sync/v3.rs | 50 ++++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 406b497d..9f9ccfab 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -124,6 +124,33 @@ pub(crate) async fn sync_events_route( // Setup watchers, so if there's no response, we can wait for them let watcher = services.sync.watch(sender_user, sender_device); + let response = build_sync_events(&services, &body).await?; + if body.body.full_state + || !(response.rooms.is_empty() + && response.presence.is_empty() + && response.account_data.is_empty() + && response.device_lists.is_empty() + && response.to_device.is_empty()) + { + return Ok(response); + } + + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let default = Duration::from_secs(30); + let duration = cmp::min(body.body.timeout.unwrap_or(default), default); + _ = tokio::time::timeout(duration, watcher).await; + + // Retry returning data + build_sync_events(&services, &body).await +} + +pub(crate) async fn build_sync_events( + services: &Services, + body: &Ruma, +) -> Result> { + let (sender_user, sender_device) = body.sender(); + let next_batch 
= services.globals.current_count()?; let next_batch_string = next_batch.to_string(); @@ -163,7 +190,7 @@ pub(crate) async fn sync_events_route( .map(ToOwned::to_owned) .broad_filter_map(|room_id| { load_joined_room( - &services, + services, sender_user, sender_device, room_id.clone(), @@ -196,7 +223,7 @@ pub(crate) async fn sync_events_route( .rooms_left(sender_user) .broad_filter_map(|(room_id, _)| { handle_left_room( - &services, + services, since, room_id.clone(), sender_user, @@ -242,7 +269,7 @@ pub(crate) async fn sync_events_route( let presence_updates: OptionFuture<_> = services .globals .allow_local_presence() - .then(|| process_presence_updates(&services, since, sender_user)) + .then(|| process_presence_updates(services, since, sender_user)) .into(); let account_data = services @@ -292,7 +319,7 @@ pub(crate) async fn sync_events_route( .stream() .broad_filter_map(|user_id| async move { let no_shared_encrypted_room = - !share_encrypted_room(&services, sender_user, &user_id, None).await; + !share_encrypted_room(services, sender_user, &user_id, None).await; no_shared_encrypted_room.then_some(user_id) }) .ready_fold(HashSet::new(), |mut device_list_left, user_id| { @@ -327,21 +354,6 @@ pub(crate) async fn sync_events_route( to_device: ToDevice { events: to_device_events }, }; - // TODO: Retry the endpoint instead of returning - if !full_state - && response.rooms.is_empty() - && response.presence.is_empty() - && response.account_data.is_empty() - && response.device_lists.is_empty() - && response.to_device.is_empty() - { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let default = Duration::from_secs(30); - let duration = cmp::min(body.body.timeout.unwrap_or(default), default); - _ = tokio::time::timeout(duration, watcher).await; - } - Ok(response) } From 7526ba9d6fc4221a338056802f8456733da314d6 Mon Sep 17 00:00:00 2001 From: Neil Svedberg Date: Mon, 6 Jan 2025 14:20:52 -0500 Subject: [PATCH 0485/1248] Add header to 
console When the console is launched, it now prints this message: conduwuit VERSION admin console "help" for help, ^D to exit the console, ^\ to stop the server --- src/service/admin/console.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index 0edcd2f3..de201f4b 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -58,6 +58,7 @@ impl Console { pub async fn close(self: &Arc) { self.interrupt(); + let Some(worker_join) = self.worker_join.lock().expect("locked").take() else { return; }; @@ -92,6 +93,12 @@ impl Console { #[tracing::instrument(skip_all, name = "console", level = "trace")] async fn worker(self: Arc) { debug!("session starting"); + + self.output + .print_inline(&format!("**conduwuit {}** admin console\n", conduwuit::version())); + self.output + .print_text("\"help\" for help, ^D to exit the console, ^\\ to stop the server\n"); + while self.server.running() { match self.readline().await { | Ok(event) => match event { @@ -147,6 +154,7 @@ impl Console { self.add_history(line.clone()); let future = self.clone().process(line); + let (abort, abort_reg) = AbortHandle::new_pair(); let future = Abortable::new(future, abort_reg); _ = self.command_abort.lock().expect("locked").insert(abort); From 82168b972a175720f79e68ec6a5ffce8dced9f9e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 8 Jan 2025 01:20:42 +0000 Subject: [PATCH 0486/1248] fix heroes calculation regression Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 9f9ccfab..b7ecd6b9 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -1235,7 +1235,7 @@ async fn calculate_counts( let (joined_member_count, invited_member_count) = join(joined_member_count, invited_member_count).await; - let small_room = joined_member_count.saturating_add(invited_member_count) 
> 5; + let small_room = joined_member_count.saturating_add(invited_member_count) <= 5; let heroes: OptionFuture<_> = small_room .then(|| calculate_heroes(services, room_id, sender_user)) From a3f9432da879ff0c34fd2a94dcc6f25d6e5fb9e3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 5 Jan 2025 03:18:30 +0000 Subject: [PATCH 0487/1248] eliminate the state-res mutex hazard Signed-off-by: Jason Volk --- src/service/globals/mod.rs | 3 --- src/service/rooms/event_handler/resolve_state.rs | 3 --- 2 files changed, 6 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 3a514d5c..38d7f786 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -11,7 +11,6 @@ use conduwuit::{error, utils::bytes::pretty, Config, Result}; use data::Data; use regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; -use tokio::sync::Mutex; use crate::service; @@ -21,7 +20,6 @@ pub struct Service { pub config: Config, jwt_decoding_key: Option, pub bad_event_ratelimiter: Arc>>, - pub stateres_mutex: Arc>, pub server_user: OwnedUserId, pub admin_alias: OwnedRoomAliasId, pub turn_secret: String, @@ -70,7 +68,6 @@ impl crate::Service for Service { config: config.clone(), jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - stateres_mutex: Arc::new(Mutex::new(())), admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &config.server_name)) .expect("#admins:server_name is valid alias name"), server_user: UserId::parse_with_server_name( diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 0e4b919d..f21f7b66 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -116,9 +116,6 @@ pub async fn state_resolution( state_sets: &[StateMap], auth_chain_sets: &[HashSet], ) -> Result> { - //TODO: ??? 
- let _lock = self.services.globals.stateres_mutex.lock(); - state_res::resolve( room_version, state_sets.iter(), From 27328cbc01e4d10bdade4c92a1823873c170d582 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 31 Dec 2024 01:11:58 +0000 Subject: [PATCH 0488/1248] additional futures extension utils Signed-off-by: Jason Volk --- src/core/utils/future/ext_ext.rs | 34 +++++++++++++++++++++ src/core/utils/future/mod.rs | 2 ++ src/core/utils/future/try_ext_ext.rs | 23 ++++++++++++++- src/core/utils/stream/mod.rs | 2 ++ src/core/utils/stream/try_ready.rs | 20 ++++++++++++- src/core/utils/stream/try_tools.rs | 44 ++++++++++++++++++++++++++++ 6 files changed, 123 insertions(+), 2 deletions(-) create mode 100644 src/core/utils/future/ext_ext.rs create mode 100644 src/core/utils/stream/try_tools.rs diff --git a/src/core/utils/future/ext_ext.rs b/src/core/utils/future/ext_ext.rs new file mode 100644 index 00000000..38decaae --- /dev/null +++ b/src/core/utils/future/ext_ext.rs @@ -0,0 +1,34 @@ +//! Extended external extensions to futures::FutureExt + +use std::marker::Unpin; + +use futures::{future, future::Select, Future}; + +/// This interface is not necessarily complete; feel free to add as-needed. 
+pub trait ExtExt +where + Self: Future + Send, +{ + fn until(self, f: F) -> Select + where + Self: Sized, + F: FnOnce() -> B, + A: Future + From + Send + Unpin, + B: Future + Send + Unpin; +} + +impl ExtExt for Fut +where + Fut: Future + Send, +{ + #[inline] + fn until(self, f: F) -> Select + where + Self: Sized, + F: FnOnce() -> B, + A: Future + From + Send + Unpin, + B: Future + Send + Unpin, + { + future::select(self.into(), f()) + } +} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 3d8ec8f4..153dcfe1 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -1,5 +1,7 @@ +mod ext_ext; mod option_ext; mod try_ext_ext; +pub use ext_ext::ExtExt; pub use option_ext::OptionExt; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index 19761309..aa3d72e4 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -4,8 +4,11 @@ // caller only ever caring about result status while discarding all contents. 
#![allow(clippy::wrong_self_convention)] +use std::marker::Unpin; + use futures::{ - future::{MapOkOrElse, UnwrapOrElse}, + future, + future::{MapOkOrElse, TrySelect, UnwrapOrElse}, TryFuture, TryFutureExt, }; @@ -46,6 +49,13 @@ where where Self: Sized; + fn try_until(self, f: F) -> TrySelect + where + Self: Sized, + F: FnOnce() -> B, + A: TryFuture + From + Send + Unpin, + B: TryFuture + Send + Unpin; + fn unwrap_or( self, default: Self::Ok, @@ -110,6 +120,17 @@ where self.map_ok_or(None, Some) } + #[inline] + fn try_until(self, f: F) -> TrySelect + where + Self: Sized, + F: FnOnce() -> B, + A: TryFuture + From + Send + Unpin, + B: TryFuture + Send + Unpin, + { + future::try_select(self.into(), f()) + } + #[inline] fn unwrap_or( self, diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 61ae993d..0fee0a3a 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -8,6 +8,7 @@ mod ready; mod tools; mod try_broadband; mod try_ready; +mod try_tools; mod wideband; pub use band::{ @@ -23,4 +24,5 @@ pub use ready::ReadyExt; pub use tools::Tools; pub use try_broadband::TryBroadbandExt; pub use try_ready::TryReadyExt; +pub use try_tools::TryTools; pub use wideband::WidebandExt; diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index d8da04ec..3261acb6 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -3,7 +3,7 @@ use futures::{ future::{ready, Ready}, - stream::{AndThen, TryFilterMap, TryFold, TryForEach, TryStream, TryStreamExt}, + stream::{AndThen, TryFilterMap, TryFold, TryForEach, TryStream, TryStreamExt, TryTakeWhile}, }; use crate::Result; @@ -56,6 +56,13 @@ where ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> where F: FnMut(S::Ok) -> Result<(), E>; + + fn ready_try_take_while( + self, + f: F, + ) -> TryTakeWhile>, impl FnMut(&S::Ok) -> Ready>> + where + F: Fn(&S::Ok) -> Result; } impl TryReadyExt for S @@ -122,4 +129,15 @@ where { 
self.try_for_each(move |t| ready(f(t))) } + + #[inline] + fn ready_try_take_while( + self, + f: F, + ) -> TryTakeWhile>, impl FnMut(&S::Ok) -> Ready>> + where + F: Fn(&S::Ok) -> Result, + { + self.try_take_while(move |t| ready(f(t))) + } } diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs new file mode 100644 index 00000000..3ddce6ad --- /dev/null +++ b/src/core/utils/stream/try_tools.rs @@ -0,0 +1,44 @@ +//! TryStreamTools for futures::TryStream +#![allow(clippy::type_complexity)] + +use futures::{future, future::Ready, stream::TryTakeWhile, TryStream, TryStreamExt}; + +use crate::Result; + +/// TryStreamTools +pub trait TryTools +where + S: TryStream> + Send + ?Sized, + Self: TryStream + Send + Sized, +{ + fn try_take( + self, + n: usize, + ) -> TryTakeWhile< + Self, + Ready>, + impl FnMut(&S::Ok) -> Ready>, + >; +} + +impl TryTools for S +where + S: TryStream> + Send + ?Sized, + Self: TryStream + Send + Sized, +{ + #[inline] + fn try_take( + self, + mut n: usize, + ) -> TryTakeWhile< + Self, + Ready>, + impl FnMut(&S::Ok) -> Ready>, + > { + self.try_take_while(move |_| { + let res = future::ok(n > 0); + n = n.saturating_sub(1); + res + }) + } +} From 925061b92de1a60c6d7af9d28bdd158b7cc9901b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 4 Jan 2025 04:12:50 +0000 Subject: [PATCH 0489/1248] flatten timeline pdus iterations; increase concurrency Signed-off-by: Jason Volk --- src/admin/query/room_timeline.rs | 61 ++++++++ src/api/client/context.rs | 67 ++++----- src/api/client/membership.rs | 2 + src/api/client/message.rs | 6 +- src/api/client/room/initial_sync.rs | 15 +- src/api/client/sync/mod.rs | 20 ++- src/api/server/backfill.rs | 30 ++-- src/api/server/send.rs | 1 + .../rooms/event_handler/handle_prev_pdu.rs | 2 +- src/service/rooms/timeline/data.rs | 136 +++++++++--------- src/service/rooms/timeline/mod.rs | 135 ++++++----------- 11 files changed, 238 insertions(+), 237 deletions(-) create mode 100644 
src/admin/query/room_timeline.rs diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs new file mode 100644 index 00000000..3fe653e3 --- /dev/null +++ b/src/admin/query/room_timeline.rs @@ -0,0 +1,61 @@ +use clap::Subcommand; +use conduwuit::{utils::stream::TryTools, PduCount, Result}; +use futures::TryStreamExt; +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomOrAliasId}; + +use crate::{admin_command, admin_command_dispatch}; + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +/// Query tables from database +pub(crate) enum RoomTimelineCommand { + Pdus { + room_id: OwnedRoomOrAliasId, + + from: Option, + + #[arg(short, long)] + limit: Option, + }, + + Last { + room_id: OwnedRoomOrAliasId, + }, +} + +#[admin_command] +pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { + let room_id = self.services.rooms.alias.resolve(&room_id).await?; + + let result = self + .services + .rooms + .timeline + .last_timeline_count(None, &room_id) + .await?; + + Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}"))) +} + +#[admin_command] +pub(super) async fn pdus( + &self, + room_id: OwnedRoomOrAliasId, + from: Option, + limit: Option, +) -> Result { + let room_id = self.services.rooms.alias.resolve(&room_id).await?; + + let from: Option = from.as_deref().map(str::parse).transpose()?; + + let result: Vec<_> = self + .services + .rooms + .timeline + .pdus_rev(None, &room_id, from) + .try_take(limit.unwrap_or(3)) + .try_collect() + .await?; + + Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}"))) +} diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 30ba170d..b957561c 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,14 +1,12 @@ -use std::iter::once; - use axum::extract::State; use conduwuit::{ at, err, ref_at, utils::{ future::TryExtExt, - stream::{BroadbandExt, ReadyExt, WidebandExt}, + stream::{BroadbandExt, ReadyExt, TryIgnore, 
WidebandExt}, IterStream, }, - Err, Result, + Err, PduEvent, Result, }; use futures::{join, try_join, FutureExt, StreamExt, TryFutureExt}; use ruma::{ @@ -59,13 +57,13 @@ pub(crate) async fn get_context_route( false }; - let base_token = services + let base_id = services .rooms .timeline - .get_pdu_count(&body.event_id) + .get_pdu_id(&body.event_id) .map_err(|_| err!(Request(NotFound("Event not found.")))); - let base_event = services + let base_pdu = services .rooms .timeline .get_pdu(&body.event_id) @@ -77,48 +75,44 @@ pub(crate) async fn get_context_route( .user_can_see_event(sender_user, &body.room_id, &body.event_id) .map(Ok); - let (base_token, base_event, visible) = try_join!(base_token, base_event, visible)?; + let (base_id, base_pdu, visible) = try_join!(base_id, base_pdu, visible)?; - if base_event.room_id != body.room_id || base_event.event_id != body.event_id { + if base_pdu.room_id != body.room_id || base_pdu.event_id != body.event_id { return Err!(Request(NotFound("Base event not found."))); } - if !visible - || ignored_filter(&services, (base_token, base_event.clone()), sender_user) - .await - .is_none() - { + if !visible { return Err!(Request(Forbidden("You don't have permission to view this event."))); } - let events_before = - services - .rooms - .timeline - .pdus_rev(Some(sender_user), room_id, Some(base_token)); + let base_count = base_id.pdu_count(); + + let base_event = ignored_filter(&services, (base_count, base_pdu), sender_user); + + let events_before = services + .rooms + .timeline + .pdus_rev(Some(sender_user), room_id, Some(base_count)) + .ignore_err() + .ready_filter_map(|item| event_filter(item, filter)) + .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) + .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) + .take(limit / 2) + .collect(); let events_after = services .rooms .timeline - .pdus(Some(sender_user), room_id, Some(base_token)); - - let (events_before, events_after) = 
try_join!(events_before, events_after)?; - - let events_before = events_before + .pdus(Some(sender_user), room_id, Some(base_count)) + .ignore_err() .ready_filter_map(|item| event_filter(item, filter)) .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) .take(limit / 2) .collect(); - let events_after = events_after - .ready_filter_map(|item| event_filter(item, filter)) - .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) - .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) - .take(limit / 2) - .collect(); - - let (events_before, events_after): (Vec<_>, Vec<_>) = join!(events_before, events_after); + let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) = + join!(base_event, events_before, events_after); let state_at = events_after .last() @@ -134,7 +128,8 @@ pub(crate) async fn get_context_route( .map_err(|e| err!(Database("State not found: {e}"))) .await?; - let lazy = once(&(base_token, base_event.clone())) + let lazy = base_event + .iter() .chain(events_before.iter()) .chain(events_after.iter()) .stream() @@ -175,19 +170,19 @@ pub(crate) async fn get_context_route( .await; Ok(get_context::v3::Response { - event: Some(base_event.to_room_event()), + event: base_event.map(at!(1)).as_ref().map(PduEvent::to_room_event), start: events_before .last() .map(at!(0)) - .or(Some(base_token)) + .or(Some(base_count)) .as_ref() .map(ToString::to_string), end: events_after .last() .map(at!(0)) - .or(Some(base_token)) + .or(Some(base_count)) .as_ref() .map(ToString::to_string), diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 3eb52138..4046b493 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1314,6 +1314,7 @@ async fn join_room_by_id_helper_local( .rooms .event_handler .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true) + .boxed() .await?; } 
else { return Err(error); @@ -1491,6 +1492,7 @@ pub(crate) async fn invite_helper( .rooms .event_handler .handle_incoming_pdu(&origin, room_id, &event_id, value, true) + .boxed() .await? .ok_or_else(|| { err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 58f4f916..ec9a14d5 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -5,7 +5,7 @@ use conduwuit::{ at, is_equal_to, utils::{ result::{FlatOk, LogErr}, - stream::{BroadbandExt, WidebandExt}, + stream::{BroadbandExt, TryIgnore, WidebandExt}, IterStream, ReadyExt, }, Event, PduCount, Result, @@ -107,14 +107,14 @@ pub(crate) async fn get_message_events_route( .rooms .timeline .pdus(Some(sender_user), room_id, Some(from)) - .await? + .ignore_err() .boxed(), | Direction::Backward => services .rooms .timeline .pdus_rev(Some(sender_user), room_id, Some(from)) - .await? + .ignore_err() .boxed(), }; diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index cc3c9420..301b6e8d 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,6 +1,10 @@ use axum::extract::State; -use conduwuit::{at, utils::BoolExt, Err, Result}; -use futures::StreamExt; +use conduwuit::{ + at, + utils::{stream::TryTools, BoolExt}, + Err, Result, +}; +use futures::TryStreamExt; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; use crate::Ruma; @@ -27,10 +31,9 @@ pub(crate) async fn room_initial_sync_route( .rooms .timeline .pdus_rev(None, room_id, None) - .await? 
- .take(limit) - .collect() - .await; + .try_take(limit) + .try_collect() + .await?; let state: Vec<_> = services .rooms diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index b772fbf1..79e4b1ca 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -2,10 +2,10 @@ mod v3; mod v4; use conduwuit::{ - utils::stream::{BroadbandExt, ReadyExt}, + utils::stream::{BroadbandExt, ReadyExt, TryIgnore}, PduCount, }; -use futures::StreamExt; +use futures::{pin_mut, StreamExt}; use ruma::{RoomId, UserId}; pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route}; @@ -29,23 +29,19 @@ async fn load_timeline( return Ok((Vec::new(), false)); } - let mut non_timeline_pdus = services + let non_timeline_pdus = services .rooms .timeline .pdus_rev(Some(sender_user), room_id, None) - .await? + .ignore_err() .ready_skip_while(|&(pducount, _)| pducount > next_batch.unwrap_or_else(PduCount::max)) .ready_take_while(|&(pducount, _)| pducount > roomsincecount); // Take the last events for the timeline - let timeline_pdus: Vec<_> = non_timeline_pdus - .by_ref() - .take(limit) - .collect::>() - .await - .into_iter() - .rev() - .collect(); + pin_mut!(non_timeline_pdus); + let timeline_pdus: Vec<_> = non_timeline_pdus.by_ref().take(limit).collect().await; + + let timeline_pdus: Vec<_> = timeline_pdus.into_iter().rev().collect(); // They /sync response doesn't always return all messages, so we say the output // is limited unless there are events in non_timeline_pdus diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index fac0e540..b44db67c 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -2,10 +2,10 @@ use std::cmp; use axum::extract::State; use conduwuit::{ - utils::{IterStream, ReadyExt}, + utils::{stream::TryTools, IterStream, ReadyExt}, PduCount, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{api::federation::backfill::get_backfill, 
uint, MilliSecondsSinceUnixEpoch}; use super::AccessCheck; @@ -57,26 +57,30 @@ pub(crate) async fn get_backfill_route( .rooms .timeline .pdus_rev(None, &body.room_id, Some(from.saturating_add(1))) - .await? - .take(limit) - .filter_map(|(_, pdu)| async move { - services + .try_take(limit) + .try_filter_map(|(_, pdu)| async move { + Ok(services .rooms .state_accessor .server_can_see_event(body.origin(), &pdu.room_id, &pdu.event_id) .await - .then_some(pdu) + .then_some(pdu)) }) - .filter_map(|pdu| async move { - services + .try_filter_map(|pdu| async move { + Ok(services .rooms .timeline .get_pdu_json(&pdu.event_id) .await - .ok() + .ok()) }) - .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) - .collect() - .await, + .and_then(|pdu| { + services + .sending + .convert_to_outgoing_federation_event(pdu) + .map(Ok) + }) + .try_collect() + .await?, }) } diff --git a/src/api/server/send.rs b/src/api/server/send.rs index dbe0108f..c0c8a0c9 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -135,6 +135,7 @@ async fn handle_pdus( .rooms .event_handler .handle_incoming_pdu(origin, &room_id, &event_id, value, true) + .boxed() .await .map(|_| ()); diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 0a5295dc..9bd4450e 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -26,7 +26,7 @@ pub(super) async fn handle_prev_pdu<'a>( (Arc, BTreeMap), >, create_event: &Arc, - first_pdu_in_room: &Arc, + first_pdu_in_room: &PduEvent, prev_id: &EventId, ) -> Result { // Check for disabled again because it might have changed diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 0be8aa52..457c1e8d 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,22 +1,15 @@ -use std::{ - borrow::Borrow, - collections::{hash_map, HashMap}, - 
sync::Arc, -}; +use std::{borrow::Borrow, sync::Arc}; use conduwuit::{ at, err, result::{LogErr, NotFound}, utils, - utils::{future::TryExtExt, stream::TryIgnore, ReadyExt}, + utils::stream::TryReadyExt, Err, PduCount, PduEvent, Result, }; use database::{Database, Deserialized, Json, KeyVal, Map}; -use futures::{future::select_ok, FutureExt, Stream, StreamExt}; -use ruma::{ - api::Direction, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId, -}; -use tokio::sync::Mutex; +use futures::{future::select_ok, pin_mut, FutureExt, Stream, TryFutureExt, TryStreamExt}; +use ruma::{api::Direction, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; use super::{PduId, RawPduId}; use crate::{rooms, rooms::short::ShortRoomId, Dep}; @@ -27,7 +20,6 @@ pub(super) struct Data { pduid_pdu: Arc, userroomid_highlightcount: Arc, userroomid_notificationcount: Arc, - pub(super) lasttimelinecount_cache: LastTimelineCountCache, pub(super) db: Arc, services: Services, } @@ -37,7 +29,6 @@ struct Services { } pub type PdusIterItem = (PduCount, PduEvent); -type LastTimelineCountCache = Mutex>; impl Data { pub(super) fn new(args: &crate::Args<'_>) -> Self { @@ -48,7 +39,6 @@ impl Data { pduid_pdu: db["pduid_pdu"].clone(), userroomid_highlightcount: db["userroomid_highlightcount"].clone(), userroomid_notificationcount: db["userroomid_notificationcount"].clone(), - lasttimelinecount_cache: Mutex::new(HashMap::new()), db: args.db.clone(), services: Services { short: args.depend::("rooms::short"), @@ -56,27 +46,39 @@ impl Data { } } + #[inline] pub(super) async fn last_timeline_count( &self, sender_user: Option<&UserId>, room_id: &RoomId, ) -> Result { - match self - .lasttimelinecount_cache - .lock() - .await - .entry(room_id.into()) - { - | hash_map::Entry::Occupied(o) => Ok(*o.get()), - | hash_map::Entry::Vacant(v) => Ok(self - .pdus_rev(sender_user, room_id, PduCount::max()) - .await? 
- .next() - .await - .map(at!(0)) - .filter(|&count| matches!(count, PduCount::Normal(_))) - .map_or_else(PduCount::max, |count| *v.insert(count))), - } + let pdus_rev = self.pdus_rev(sender_user, room_id, PduCount::max()); + + pin_mut!(pdus_rev); + let last_count = pdus_rev + .try_next() + .await? + .map(at!(0)) + .filter(|&count| matches!(count, PduCount::Normal(_))) + .unwrap_or_else(PduCount::max); + + Ok(last_count) + } + + #[inline] + pub(super) async fn latest_pdu_in_room( + &self, + sender_user: Option<&UserId>, + room_id: &RoomId, + ) -> Result { + let pdus_rev = self.pdus_rev(sender_user, room_id, PduCount::max()); + + pin_mut!(pdus_rev); + pdus_rev + .try_next() + .await? + .map(at!(1)) + .ok_or_else(|| err!(Request(NotFound("no PDU's found in room")))) } /// Returns the `count` of this pdu's id. @@ -129,7 +131,7 @@ impl Data { pub(super) async fn non_outlier_pdu_exists(&self, event_id: &EventId) -> Result { let pduid = self.get_pdu_id(event_id).await?; - self.pduid_pdu.get(&pduid).await.map(|_| ()) + self.pduid_pdu.exists(&pduid).await } /// Returns the pdu. 
@@ -148,17 +150,17 @@ impl Data { /// Like get_non_outlier_pdu(), but without the expense of fetching and /// parsing the PduEvent + #[inline] pub(super) async fn outlier_pdu_exists(&self, event_id: &EventId) -> Result { - self.eventid_outlierpdu.get(event_id).await.map(|_| ()) + self.eventid_outlierpdu.exists(event_id).await } /// Like get_pdu(), but without the expense of fetching and parsing the data - pub(super) async fn pdu_exists(&self, event_id: &EventId) -> bool { - let non_outlier = self.non_outlier_pdu_exists(event_id).is_ok(); - let outlier = self.outlier_pdu_exists(event_id).is_ok(); + pub(super) async fn pdu_exists(&self, event_id: &EventId) -> Result { + let non_outlier = self.non_outlier_pdu_exists(event_id).boxed(); + let outlier = self.outlier_pdu_exists(event_id).boxed(); - //TODO: parallelize - non_outlier.await || outlier.await + select_ok([non_outlier, outlier]).await.map(at!(0)) } /// Returns the pdu. @@ -186,11 +188,6 @@ impl Data { debug_assert!(matches!(count, PduCount::Normal(_)), "PduCount not Normal"); self.pduid_pdu.raw_put(pdu_id, Json(json)); - self.lasttimelinecount_cache - .lock() - .await - .insert(pdu.room_id.clone(), count); - self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id); self.eventid_outlierpdu.remove(pdu.event_id.as_bytes()); } @@ -225,49 +222,44 @@ impl Data { /// Returns an iterator over all events and their tokens in a room that /// happened before the event with id `until` in reverse-chronological /// order. 
- pub(super) async fn pdus_rev<'a>( + pub(super) fn pdus_rev<'a>( &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, until: PduCount, - ) -> Result + Send + 'a> { - let current = self - .count_to_id(room_id, until, Direction::Backward) - .await?; - let prefix = current.shortroomid(); - let stream = self - .pduid_pdu - .rev_raw_stream_from(¤t) - .ignore_err() - .ready_take_while(move |(key, _)| key.starts_with(&prefix)) - .map(move |item| Self::each_pdu(item, user_id)); - - Ok(stream) + ) -> impl Stream> + Send + 'a { + self.count_to_id(room_id, until, Direction::Backward) + .map_ok(move |current| { + let prefix = current.shortroomid(); + self.pduid_pdu + .rev_raw_stream_from(¤t) + .ready_try_take_while(move |(key, _)| Ok(key.starts_with(&prefix))) + .ready_and_then(move |item| Self::each_pdu(item, user_id)) + }) + .try_flatten_stream() } - pub(super) async fn pdus<'a>( + pub(super) fn pdus<'a>( &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, from: PduCount, - ) -> Result + Send + Unpin + 'a> { - let current = self.count_to_id(room_id, from, Direction::Forward).await?; - let prefix = current.shortroomid(); - let stream = self - .pduid_pdu - .raw_stream_from(¤t) - .ignore_err() - .ready_take_while(move |(key, _)| key.starts_with(&prefix)) - .map(move |item| Self::each_pdu(item, user_id)); - - Ok(stream) + ) -> impl Stream> + Send + 'a { + self.count_to_id(room_id, from, Direction::Forward) + .map_ok(move |current| { + let prefix = current.shortroomid(); + self.pduid_pdu + .raw_stream_from(¤t) + .ready_try_take_while(move |(key, _)| Ok(key.starts_with(&prefix))) + .ready_and_then(move |item| Self::each_pdu(item, user_id)) + }) + .try_flatten_stream() } - fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: Option<&UserId>) -> PdusIterItem { + fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: Option<&UserId>) -> Result { let pdu_id: RawPduId = pdu_id.into(); - let mut pdu = serde_json::from_slice::(pdu) - .expect("PduEvent in pduid_pdu database column is 
invalid JSON"); + let mut pdu = serde_json::from_slice::(pdu)?; if Some(pdu.sender.borrow()) != user_id { pdu.remove_transaction_id().log_err().ok(); @@ -275,7 +267,7 @@ impl Data { pdu.add_age().log_err().ok(); - (pdu_id.pdu_count(), pdu) + Ok((pdu_id.pdu_count(), pdu)) } pub(super) fn increment_notification_counts( diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 2a272c38..fe7f885a 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -9,14 +9,16 @@ use std::{ }; use conduwuit::{ - debug, debug_warn, err, error, implement, info, + at, debug, debug_warn, err, error, implement, info, pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, - utils::{self, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt}, + utils::{ + self, future::TryExtExt, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt, + }, validated, warn, Err, Error, Result, Server, }; pub use conduwuit::{PduId, RawPduId}; use futures::{ - future, future::ready, Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, + future, future::ready, pin_mut, Future, FutureExt, Stream, StreamExt, TryStreamExt, }; use ruma::{ api::federation, @@ -34,7 +36,7 @@ use ruma::{ }, push::{Action, Ruleset, Tweak}, state_res::{self, Event, RoomVersion}, - uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; @@ -139,53 +141,34 @@ impl crate::Service for Service { } fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { - /* - let lasttimelinecount_cache = self - .db - .lasttimelinecount_cache - .lock() - .expect("locked") - .len(); - writeln!(out, "lasttimelinecount_cache: {lasttimelinecount_cache}")?; - */ - let mutex_insert = self.mutex_insert.len(); writeln!(out, 
"insert_mutex: {mutex_insert}")?; Ok(()) } - fn clear_cache(&self) { - /* - self.db - .lasttimelinecount_cache - .lock() - .expect("locked") - .clear(); - */ - } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } impl Service { #[tracing::instrument(skip(self), level = "debug")] - pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result> { - self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id) - .next() - .await - .map(|(_, p)| Arc::new(p)) + pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result { + self.first_item_in_room(room_id).await.map(at!(1)) + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, PduEvent)> { + let pdus = self.pdus(None, room_id, None); + + pin_mut!(pdus); + pdus.try_next() + .await? .ok_or_else(|| err!(Request(NotFound("No PDU found in room")))) } #[tracing::instrument(skip(self), level = "debug")] - pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result> { - self.pdus_rev(None, room_id, None) - .await? - .next() - .await - .map(|(_, p)| Arc::new(p)) - .ok_or_else(|| err!(Request(NotFound("No PDU found in room")))) + pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result { + self.db.latest_pdu_in_room(None, room_id).await } #[tracing::instrument(skip(self), level = "debug")] @@ -202,29 +185,6 @@ impl Service { self.db.get_pdu_count(event_id).await } - // TODO Is this the same as the function above? - /* - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - */ - /// Returns the json of a pdu. pub async fn get_pdu_json(&self, event_id: &EventId) -> Result { self.db.get_pdu_json(event_id).await @@ -260,16 +220,6 @@ impl Service { self.db.get_pdu(event_id).await } - /// Checks if pdu exists - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn pdu_exists<'a>( - &'a self, - event_id: &'a EventId, - ) -> impl Future + Send + 'a { - self.db.pdu_exists(event_id) - } - /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. @@ -282,6 +232,16 @@ impl Service { self.db.get_pdu_json_from_id(pdu_id).await } + /// Checks if pdu exists + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + pub fn pdu_exists<'a>( + &'a self, + event_id: &'a EventId, + ) -> impl Future + Send + 'a { + self.db.pdu_exists(event_id).is_ok() + } + /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self), level = "debug")] pub async fn replace_pdu( @@ -1027,38 +987,32 @@ impl Service { &'a self, user_id: &'a UserId, room_id: &'a RoomId, - ) -> impl Stream + Send + Unpin + 'a { - self.pdus(Some(user_id), room_id, None) - .map_ok(|stream| stream.map(Ok)) - .try_flatten_stream() - .ignore_err() - .boxed() + ) -> impl Stream + Send + 'a { + self.pdus(Some(user_id), room_id, None).ignore_err() } /// Reverse iteration starting at from. 
#[tracing::instrument(skip(self), level = "debug")] - pub async fn pdus_rev<'a>( + pub fn pdus_rev<'a>( &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, until: Option, - ) -> Result + Send + 'a> { + ) -> impl Stream> + Send + 'a { self.db .pdus_rev(user_id, room_id, until.unwrap_or_else(PduCount::max)) - .await } /// Forward iteration starting at from. #[tracing::instrument(skip(self), level = "debug")] - pub async fn pdus<'a>( + pub fn pdus<'a>( &'a self, user_id: Option<&'a UserId>, room_id: &'a RoomId, from: Option, - ) -> Result + Send + 'a> { + ) -> impl Stream> + Send + 'a { self.db .pdus(user_id, room_id, from.unwrap_or_else(PduCount::min)) - .await } /// Replace a PDU with the redacted form. @@ -1117,8 +1071,7 @@ impl Service { } let first_pdu = self - .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id) - .next() + .first_item_in_room(room_id) .await .expect("Room is not empty"); @@ -1232,20 +1185,14 @@ impl Service { self.services .event_handler .handle_incoming_pdu(origin, &room_id, &event_id, value, false) + .boxed() .await?; - let value = self - .get_pdu_json(&event_id) - .await - .expect("We just created it"); - let pdu = self.get_pdu(&event_id).await.expect("We just created it"); + let value = self.get_pdu_json(&event_id).await?; - let shortroomid = self - .services - .short - .get_shortroomid(&room_id) - .await - .expect("room exists"); + let pdu = self.get_pdu(&event_id).await?; + + let shortroomid = self.services.short.get_shortroomid(&room_id).await?; let insert_lock = self.mutex_insert.lock(&room_id).await; From d36167ab64560221ac08e848e479b285d8c4dc48 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 5 Jan 2025 23:33:27 +0000 Subject: [PATCH 0490/1248] partially revert 9a9c071e82; use std threads for db pool. 
Signed-off-by: Jason Volk --- src/database/engine.rs | 26 +++++----- src/database/pool.rs | 107 +++++++++++++++++++++++------------------ src/main/runtime.rs | 2 +- src/router/run.rs | 12 ++--- 4 files changed, 80 insertions(+), 67 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index faf5b131..670817b5 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -31,11 +31,11 @@ pub struct Engine { opts: Options, env: Env, cfs: Mutex>, + pub(crate) pool: Arc, pub(crate) db: Db, corks: AtomicU32, pub(super) read_only: bool, pub(super) secondary: bool, - pub(crate) pool: Arc, } pub(crate) type Db = DBWithThreadMode; @@ -44,6 +44,8 @@ impl Engine { #[tracing::instrument(skip_all)] pub(crate) async fn open(server: &Arc) -> Result> { let config = &server.config; + let path = &config.database_path; + let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0; #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] @@ -64,30 +66,32 @@ impl Engine { col_cache.get("primary").expect("primary cache exists"), )?; - let load_time = std::time::Instant::now(); - if config.rocksdb_repair { - repair(&db_opts, &config.database_path)?; - } - debug!("Listing column families in database"); let cfs = Db::list_cf(&db_opts, &config.database_path) .unwrap_or_default() .into_iter() .collect::>(); - debug!("Opening {} column family descriptors in database", cfs.len()); + debug!("Configuring {} column families found in database", cfs.len()); let cfopts = cfs .iter() .map(|name| cf_options(config, name, db_opts.clone(), &mut col_cache)) .collect::>>()?; + debug!("Opening {} column family descriptors in database", cfs.len()); let cfds = cfs .iter() .zip(cfopts.into_iter()) .map(|(name, opts)| ColumnFamilyDescriptor::new(name, opts)) .collect::>(); - let path = &config.database_path; + debug!("Starting frontend request pool"); + let pool = Pool::new(server)?; + + let load_time = std::time::Instant::now(); + if 
config.rocksdb_repair { + repair(&db_opts, &config.database_path)?; + } debug!("Opening database..."); let res = if config.rocksdb_read_only { @@ -113,11 +117,11 @@ impl Engine { opts: db_opts, env: db_env, cfs: Mutex::new(cfs), - db, corks: AtomicU32::new(0), read_only: config.rocksdb_read_only, secondary: config.rocksdb_secondary, - pool: Pool::new(server).await?, + pool, + db, })) } @@ -146,8 +150,6 @@ impl Engine { .expect("column was created and exists") } - pub async fn shutdown_pool(&self) { self.pool.shutdown().await; } - pub fn flush(&self) -> Result<()> { result(DBCommon::flush_wal(&self.db, false)) } pub fn sync(&self) -> Result<()> { result(DBCommon::flush_wal(&self.db, true)) } diff --git a/src/database/pool.rs b/src/database/pool.rs index 0295e0e9..1560c8b0 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -6,21 +6,22 @@ use std::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }, + thread, + thread::JoinHandle, }; use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - debug, debug_warn, defer, err, implement, + debug, debug_warn, defer, err, error, implement, result::DebugInspect, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, - Result, Server, + Error, Result, Server, }; use futures::{channel::oneshot, TryFutureExt}; use oneshot::Sender as ResultSender; use rocksdb::Direction; use smallvec::SmallVec; -use tokio::task::JoinSet; use self::configure::configure; use crate::{keyval::KeyBuf, stream, Handle, Map}; @@ -29,9 +30,9 @@ use crate::{keyval::KeyBuf, stream, Handle, Map}; /// requests which are not cached. These thread-blocking requests are offloaded /// from the tokio async workers and executed on this threadpool. 
pub(crate) struct Pool { - server: Arc, + _server: Arc, queues: Vec>, - workers: Mutex>, + workers: Mutex>>, topology: Vec, busy: AtomicUsize, queued_max: AtomicUsize, @@ -69,39 +70,42 @@ const WORKER_LIMIT: (usize, usize) = (1, 1024); const QUEUE_LIMIT: (usize, usize) = (1, 2048); const BATCH_INLINE: usize = 1; +const WORKER_STACK_SIZE: usize = 1_048_576; +const WORKER_NAME: &str = "conduwuit:db"; + #[implement(Pool)] -pub(crate) async fn new(server: &Arc) -> Result> { +pub(crate) fn new(server: &Arc) -> Result> { const CHAN_SCHED: (QueueStrategy, QueueStrategy) = (QueueStrategy::Fifo, QueueStrategy::Lifo); let (total_workers, queue_sizes, topology) = configure(server); - let (senders, receivers) = queue_sizes + let (senders, receivers): (Vec<_>, Vec<_>) = queue_sizes .into_iter() .map(|cap| async_channel::bounded_with_queue_strategy(cap, CHAN_SCHED)) .unzip(); let pool = Arc::new(Self { - server: server.clone(), - + _server: server.clone(), queues: senders, - - workers: JoinSet::new().into(), - + workers: Vec::new().into(), topology, - busy: AtomicUsize::default(), - queued_max: AtomicUsize::default(), }); - pool.spawn_until(receivers, total_workers).await?; + pool.spawn_until(&receivers, total_workers)?; Ok(pool) } impl Drop for Pool { fn drop(&mut self) { - debug_assert!(self.queues.iter().all(Sender::is_empty), "channel must be empty on drop"); + self.close(); + + debug_assert!( + self.queues.iter().all(Sender::is_empty), + "channel must should not have requests queued on drop" + ); debug_assert!( self.queues.iter().all(Sender::is_closed), "channel should be closed on drop" @@ -110,17 +114,10 @@ impl Drop for Pool { } #[implement(Pool)] -pub(crate) async fn shutdown(self: &Arc) { - self.close(); - - let workers = take(&mut *self.workers.lock().expect("locked")); - debug!(workers = workers.len(), "Waiting for workers to join..."); - - workers.join_all().await; -} - -#[implement(Pool)] +#[tracing::instrument(skip_all)] pub(crate) fn close(&self) { + let workers 
= take(&mut *self.workers.lock().expect("locked")); + let senders = self.queues.iter().map(Sender::sender_count).sum::(); let receivers = self @@ -129,27 +126,40 @@ pub(crate) fn close(&self) { .map(Sender::receiver_count) .sum::(); - debug!( - queues = self.queues.len(), - workers = self.workers.lock().expect("locked").len(), - ?senders, - ?receivers, - "Closing pool..." - ); - for queue in &self.queues { queue.close(); } - self.workers.lock().expect("locked").abort_all(); - std::thread::yield_now(); + if workers.is_empty() { + return; + } + + debug!( + senders, + receivers, + queues = self.queues.len(), + workers = workers.len(), + "Closing pool. Waiting for workers to join..." + ); + + workers + .into_iter() + .map(JoinHandle::join) + .map(|result| result.map_err(Error::from_panic)) + .enumerate() + .for_each(|(id, result)| { + match result { + | Ok(()) => trace!(?id, "worker joined"), + | Err(error) => error!(?id, "worker joined with error: {error}"), + }; + }); } #[implement(Pool)] -async fn spawn_until(self: &Arc, recv: Vec>, count: usize) -> Result { +fn spawn_until(self: &Arc, recv: &[Receiver], count: usize) -> Result { let mut workers = self.workers.lock().expect("locked"); while workers.len() < count { - self.spawn_one(&mut workers, &recv)?; + self.clone().spawn_one(&mut workers, recv)?; } Ok(()) @@ -162,23 +172,24 @@ async fn spawn_until(self: &Arc, recv: Vec>, count: usize) - skip_all, fields(id = %workers.len()) )] -fn spawn_one(self: &Arc, workers: &mut JoinSet<()>, recv: &[Receiver]) -> Result { +fn spawn_one( + self: Arc, + workers: &mut Vec>, + recv: &[Receiver], +) -> Result { debug_assert!(!self.queues.is_empty(), "Must have at least one queue"); debug_assert!(!recv.is_empty(), "Must have at least one receiver"); let id = workers.len(); let group = id.overflowing_rem(self.queues.len()).0; let recv = recv[group].clone(); - let self_ = self.clone(); - #[cfg(not(tokio_unstable))] - let _abort = workers.spawn_blocking_on(move || self_.worker(id, 
recv), self.server.runtime()); + let handle = thread::Builder::new() + .name(WORKER_NAME.into()) + .stack_size(WORKER_STACK_SIZE) + .spawn(move || self.worker(id, recv))?; - #[cfg(tokio_unstable)] - let _abort = workers - .build_task() - .name("conduwuit:dbpool") - .spawn_blocking_on(move || self_.worker(id, recv), self.server.runtime()); + workers.push(handle); Ok(()) } @@ -258,7 +269,7 @@ async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { level = "debug", skip(self, recv), fields( - tid = ?std::thread::current().id(), + tid = ?thread::current().id(), ), )] fn worker(self: Arc, id: usize, recv: Receiver) { diff --git a/src/main/runtime.rs b/src/main/runtime.rs index b9dfc866..3039ef1b 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -19,7 +19,7 @@ use crate::clap::Args; const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; -const MAX_BLOCKING_THREADS: usize = 2048; +const MAX_BLOCKING_THREADS: usize = 1024; static WORKER_AFFINITY: OnceLock = OnceLock::new(); diff --git a/src/router/run.rs b/src/router/run.rs index 248f7052..1b4d7437 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -3,7 +3,7 @@ extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; use std::{ - sync::{atomic::Ordering, Arc}, + sync::{atomic::Ordering, Arc, Weak}, time::Duration, }; @@ -92,11 +92,11 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { ); } - // The db threadpool requires async join if we use tokio/spawn_blocking to - // manage the threads. Without async-drop we have to wait here; for symmetry - // with Services construction it can't be done in services.stop(). 
- if let Some(db) = db.upgrade() { - db.db.shutdown_pool().await; + if Weak::strong_count(&db) > 0 { + debug_error!( + "{} dangling references to Database after shutdown", + Weak::strong_count(&db) + ); } #[cfg(feature = "systemd")] From 94c8683836764e7adfa8fe978692ac7154b0d8ec Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 6 Jan 2025 01:30:22 +0000 Subject: [PATCH 0491/1248] improve db pool topology configuration Signed-off-by: Jason Volk --- conduwuit-example.toml | 2 +- src/core/config/mod.rs | 2 +- src/database/pool.rs | 7 ++-- src/database/pool/configure.rs | 65 +++++++++++++++++----------------- 4 files changed, 39 insertions(+), 37 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 526e9fe2..e2ed5daa 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1452,7 +1452,7 @@ # responsiveness for many users at the cost of throughput for each. # # Setting this value to 0.0 causes the stream width to be fixed at the -# value of stream_width_default. The default is 1.0 to match the +# value of stream_width_default. The default scale is 1.0 to match the # capabilities detected for the system. # #stream_width_scale = 1.0 diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index b1ede844..bf6a4ba6 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1646,7 +1646,7 @@ pub struct Config { /// responsiveness for many users at the cost of throughput for each. /// /// Setting this value to 0.0 causes the stream width to be fixed at the - /// value of stream_width_default. The default is 1.0 to match the + /// value of stream_width_default. The default scale is 1.0 to match the /// capabilities detected for the system. /// /// default: 1.0 diff --git a/src/database/pool.rs b/src/database/pool.rs index 1560c8b0..11871ff6 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -30,7 +30,7 @@ use crate::{keyval::KeyBuf, stream, Handle, Map}; /// requests which are not cached. 
These thread-blocking requests are offloaded /// from the tokio async workers and executed on this threadpool. pub(crate) struct Pool { - _server: Arc, + server: Arc, queues: Vec>, workers: Mutex>>, topology: Vec, @@ -67,7 +67,7 @@ pub(crate) type BatchResult<'a> = SmallVec<[ResultHandle<'a>; BATCH_INLINE]>; pub(crate) type ResultHandle<'a> = Result>; const WORKER_LIMIT: (usize, usize) = (1, 1024); -const QUEUE_LIMIT: (usize, usize) = (1, 2048); +const QUEUE_LIMIT: (usize, usize) = (1, 4096); const BATCH_INLINE: usize = 1; const WORKER_STACK_SIZE: usize = 1_048_576; @@ -85,7 +85,7 @@ pub(crate) fn new(server: &Arc) -> Result> { .unzip(); let pool = Arc::new(Self { - _server: server.clone(), + server: server.clone(), queues: senders, workers: Vec::new().into(), topology, @@ -288,6 +288,7 @@ fn worker_init(&self, id: usize) { .iter() .enumerate() .filter(|_| self.queues.len() > 1) + .filter(|_| self.server.config.db_pool_affinity) .filter_map(|(core_id, &queue_id)| (group == queue_id).then_some(core_id)) .filter_map(nth_core_available); diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index 6cac58e7..ff42ef51 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -1,14 +1,13 @@ use std::{path::PathBuf, sync::Arc}; use conduwuit::{ - debug, debug_info, expected, + debug, debug_info, expected, is_equal_to, utils::{ math::usize_from_f64, result::LogDebugErr, stream, stream::{AMPLIFICATION_LIMIT, WIDTH_LIMIT}, sys::{compute::is_core_available, storage}, - BoolExt, }, Server, }; @@ -19,39 +18,32 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) let config = &server.config; // This finds the block device and gathers all the properties we need. 
- let (device_name, device_prop) = config - .db_pool_affinity - .and_then(|| { - let path: PathBuf = config.database_path.clone(); - let name = storage::name_from_path(&path).log_debug_err().ok(); - let prop = storage::parallelism(&path); - name.map(|name| (name, prop)) - }) - .unzip(); + let path: PathBuf = config.database_path.clone(); + let device_name = storage::name_from_path(&path).log_debug_err().ok(); + let device_prop = storage::parallelism(&path); // The default worker count is masked-on if we didn't find better information. - let default_worker_count = device_prop - .as_ref() - .is_none_or(|prop| prop.mq.is_empty()) - .then_some(config.db_pool_workers); + let default_worker_count = device_prop.mq.is_empty().then_some(config.db_pool_workers); // Determine the worker groupings. Each indice represents a hardware queue and // contains the number of workers which will service it. let worker_counts: Vec<_> = device_prop + .mq .iter() - .map(|dev| &dev.mq) - .flat_map(|mq| mq.iter()) .filter(|mq| mq.cpu_list.iter().copied().any(is_core_available)) .map(|mq| { - mq.nr_tags.unwrap_or_default().min( - config.db_pool_workers_limit.saturating_mul( - mq.cpu_list - .iter() - .filter(|&&id| is_core_available(id)) - .count() - .max(1), - ), - ) + let shares = mq + .cpu_list + .iter() + .filter(|&&id| is_core_available(id)) + .count() + .max(1); + + let limit = config.db_pool_workers_limit.saturating_mul(shares); + + let limit = device_prop.nr_requests.map_or(limit, |nr| nr.min(limit)); + + mq.nr_tags.unwrap_or(WORKER_LIMIT.0).min(limit) }) .chain(default_worker_count) .collect(); @@ -72,9 +64,8 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) // going on because cpu's which are not available to the process are filtered // out, similar to the worker_counts. 
let topology = device_prop + .mq .iter() - .map(|dev| &dev.mq) - .flat_map(|mq| mq.iter()) .fold(vec![0; 128], |mut topology, mq| { mq.cpu_list .iter() @@ -89,9 +80,12 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) // Regardless of the capacity of all queues we establish some limit on the total // number of workers; this is hopefully hinted by nr_requests. let max_workers = device_prop - .as_ref() - .and_then(|prop| prop.nr_requests) - .unwrap_or(WORKER_LIMIT.1); + .mq + .iter() + .filter_map(|mq| mq.nr_tags) + .chain(default_worker_count) + .fold(0_usize, usize::saturating_add) + .clamp(WORKER_LIMIT.0, WORKER_LIMIT.1); // Determine the final worker count which we'll be spawning. let total_workers = worker_counts @@ -102,7 +96,7 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) // After computing all of the above we can update the global automatic stream // width, hopefully with a better value tailored to this system. if config.stream_width_scale > 0.0 { - let num_queues = queue_sizes.len(); + let num_queues = queue_sizes.len().max(1); update_stream_width(server, num_queues, total_workers); } @@ -117,6 +111,13 @@ pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) "Frontend topology", ); + assert!(total_workers > 0, "some workers expected"); + assert!(!queue_sizes.is_empty(), "some queues expected"); + assert!( + !queue_sizes.iter().copied().any(is_equal_to!(0)), + "positive queue sizes expected" + ); + (total_workers, queue_sizes, topology) } From 44e6b1af3c24f4fdf664a138db3a9ecde9c4026c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 8 Jan 2025 20:43:02 +0000 Subject: [PATCH 0492/1248] fixes for tests to be run in release-mode Signed-off-by: Jason Volk --- src/core/pdu/mod.rs | 1 + src/core/pdu/tests.rs | 2 -- src/core/utils/mod.rs | 1 + src/core/utils/tests.rs | 1 - src/database/mod.rs | 1 + src/database/tests.rs | 7 +++---- 6 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/core/pdu/mod.rs 
b/src/core/pdu/mod.rs index ba1d2ce1..1a8f6a70 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -9,6 +9,7 @@ mod raw_id; mod redact; mod relation; mod strip; +#[cfg(test)] mod tests; mod unsigned; diff --git a/src/core/pdu/tests.rs b/src/core/pdu/tests.rs index ae3b1dd6..ed9b7caa 100644 --- a/src/core/pdu/tests.rs +++ b/src/core/pdu/tests.rs @@ -1,5 +1,3 @@ -#![cfg(test)] - use super::Count; #[test] diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 16072765..631b2820 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -16,6 +16,7 @@ pub mod set; pub mod stream; pub mod string; pub mod sys; +#[cfg(test)] mod tests; pub mod time; diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index 5c870730..1bcb92b8 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -1,4 +1,3 @@ -#![cfg(test)] #![allow(clippy::disallowed_methods)] use crate::utils; diff --git a/src/database/mod.rs b/src/database/mod.rs index 183cba8d..bdb7d3ea 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -11,6 +11,7 @@ mod opts; mod pool; mod ser; mod stream; +#[cfg(test)] mod tests; pub(crate) mod util; mod watchers; diff --git a/src/database/tests.rs b/src/database/tests.rs index 3d41a544..0c9fb41a 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -1,4 +1,3 @@ -#![cfg(test)] #![allow(clippy::needless_borrows_for_generic_args)] use std::fmt::Debug; @@ -14,7 +13,7 @@ use crate::{ }; #[test] -#[should_panic(expected = "serializing string at the top-level")] +#[cfg_attr(debug_assertions, should_panic(expected = "serializing string at the top-level"))] fn ser_str() { let user_id: &UserId = "@user:example.com".try_into().unwrap(); let s = serialize_to_vec(&user_id).expect("failed to serialize user_id"); @@ -139,7 +138,7 @@ fn ser_json_macro() { } #[test] -#[should_panic(expected = "serializing string at the top-level")] +#[cfg_attr(debug_assertions, should_panic(expected = "serializing string at the 
top-level"))] fn ser_json_raw() { use conduwuit::ruma::api::client::filter::FilterDefinition; @@ -156,7 +155,7 @@ fn ser_json_raw() { } #[test] -#[should_panic(expected = "you can skip serialization instead")] +#[cfg_attr(debug_assertions, should_panic(expected = "you can skip serialization instead"))] fn ser_json_raw_json() { use conduwuit::ruma::api::client::filter::FilterDefinition; From 5dae086197cd751e2a386cc4e8701623ef12bc8a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 8 Jan 2025 23:59:19 +0000 Subject: [PATCH 0493/1248] exclude config item from doctest Signed-off-by: Jason Volk --- src/core/config/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index bf6a4ba6..56580fda 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -621,6 +621,7 @@ pub struct Config { #[serde(default = "default_tracing_flame_output_path")] pub tracing_flame_output_path: String, + #[cfg(not(doctest))] /// Examples: /// /// - No proxy (default): From 0238f27605e01847219a5cf1061aa4b597636c61 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 10 Jan 2025 06:59:12 +0000 Subject: [PATCH 0494/1248] prevent example-config generating in test builds Signed-off-by: Jason Volk --- src/macros/config.rs | 4 ++-- src/macros/utils.rs | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/macros/config.rs b/src/macros/config.rs index eb269e1e..0fb79728 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -9,7 +9,7 @@ use syn::{ }; use crate::{ - utils::{get_simple_settings, is_cargo_build}, + utils::{get_simple_settings, is_cargo_build, is_cargo_test}, Result, }; @@ -17,7 +17,7 @@ const UNDOCUMENTED: &str = "# This item is undocumented. 
Please contribute docum #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { - if is_cargo_build() { + if is_cargo_build() && !is_cargo_test() { generate_example(&input, args)?; } diff --git a/src/macros/utils.rs b/src/macros/utils.rs index 9f799f7b..e33ee8b4 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -32,6 +32,8 @@ pub(crate) fn is_cargo_build() -> bool { .is_some() } +pub(crate) fn is_cargo_test() -> bool { std::env::args().any(|flag| flag == "--test") } + pub(crate) fn get_named_generics(args: &[Meta], name: &str) -> Result { const DEFAULT: &str = "<>"; From cc1889d135763edc44b9eb28c991bbeee8badbc0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 18 Dec 2024 21:29:30 +0000 Subject: [PATCH 0495/1248] Add default-enabled feature-gates for url_preview and media_thumbnail Signed-off-by: Jason Volk --- Cargo.lock | 1 - src/core/Cargo.toml | 1 - src/core/error/mod.rs | 2 - src/main/Cargo.toml | 8 ++ src/service/Cargo.toml | 13 ++- src/service/media/data.rs | 37 +++----- src/service/media/preview.rs | 93 ++++++++++++-------- src/service/media/thumbnail.rs | 150 ++++++++++++++++++++------------- 8 files changed, 182 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f900a11..d25197e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -723,7 +723,6 @@ dependencies = [ "hardened_malloc-rs", "http", "http-body-util", - "image", "ipaddress", "itertools 0.13.0", "libc", diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index dd8f634a..4a9cc462 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -71,7 +71,6 @@ figment.workspace = true futures.workspace = true http-body-util.workspace = true http.workspace = true -image.workspace = true ipaddress.workspace = true itertools.workspace = true libc.workspace = true diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index f1e3b924..ffa829d9 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs 
@@ -48,8 +48,6 @@ pub enum Error { Http(#[from] http::Error), #[error(transparent)] HttpHeader(#[from] http::header::InvalidHeaderValue), - #[error("Image error: {0}")] - Image(#[from] image::error::ImageError), #[error("Join error: {0}")] JoinError(#[from] tokio::task::JoinError), #[error(transparent)] diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index a6421b34..38eb7188 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -41,8 +41,10 @@ default = [ "gzip_compression", "io_uring", "jemalloc", + "media_thumbnail", "release_max_log_level", "systemd", + "url_preview", "zstd_compression", ] @@ -83,6 +85,9 @@ jemalloc_prof = [ jemalloc_stats = [ "conduwuit-core/jemalloc_stats", ] +media_thumbnail = [ + "conduwuit-service/media_thumbnail", +] perf_measurements = [ "dep:opentelemetry", "dep:tracing-flame", @@ -121,6 +126,9 @@ tokio_console = [ "dep:console-subscriber", "tokio/tracing", ] +url_preview = [ + "conduwuit-service/url_preview", +] zstd_compression = [ "conduwuit-api/zstd_compression", "conduwuit-core/zstd_compression", diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 26f737ee..4708ff4e 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -28,8 +28,8 @@ element_hacks = [] gzip_compression = [ "reqwest/gzip", ] -zstd_compression = [ - "reqwest/zstd", +media_thumbnail = [ + "dep:image", ] release_max_log_level = [ "tracing/max_level_trace", @@ -37,6 +37,13 @@ release_max_log_level = [ "log/max_level_trace", "log/release_max_level_info", ] +url_preview = [ + "dep:image", + "dep:webpage", +] +zstd_compression = [ + "reqwest/zstd", +] [dependencies] arrayvec.workspace = true @@ -51,6 +58,7 @@ futures.workspace = true hickory-resolver.workspace = true http.workspace = true image.workspace = true +image.optional = true ipaddress.workspace = true itertools.workspace = true jsonwebtoken.workspace = true @@ -73,6 +81,7 @@ tokio.workspace = true tracing.workspace = true url.workspace = true webpage.workspace = true 
+webpage.optional = true [lints] workspace = true diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 43310515..f48482ea 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration}; use conduwuit::{ debug, debug_info, err, utils::{str_from_bytes, stream::TryIgnore, string_from_bytes, ReadyExt}, - Err, Error, Result, + Err, Result, }; use database::{Database, Interfix, Map}; use futures::StreamExt; @@ -123,30 +123,21 @@ impl Data { let content_type = parts .next() - .map(|bytes| { - string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; + .map(string_from_bytes) + .transpose() + .map_err(|e| err!(Database(error!(?mxc, "Content-type is invalid: {e}"))))?; - let content_disposition_bytes = parts + let content_disposition = parts .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - string_from_bytes(content_disposition_bytes) - .map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })? - .parse()?, - ) - }; + .map(Some) + .ok_or_else(|| err!(Database(error!(?mxc, "Media ID in db is invalid."))))? + .filter(|bytes| !bytes.is_empty()) + .map(string_from_bytes) + .transpose() + .map_err(|e| err!(Database(error!(?mxc, "Content-type is invalid: {e}"))))? + .as_deref() + .map(str::parse) + .transpose()?; Ok(Metadata { content_disposition, content_type, key }) } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index b1c53305..e7f76bab 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -1,15 +1,19 @@ -use std::{io::Cursor, time::SystemTime}; +//! URL Previews +//! +//! This functionality is gated by 'url_preview', but not at the unit level for +//! historical and simplicity reasons. 
Instead the feature gates the inclusion +//! of dependencies and nulls out results through the existing interface when +//! not featured. -use conduwuit::{debug, utils, Err, Result}; +use std::time::SystemTime; + +use conduwuit::{debug, Err, Result}; use conduwuit_core::implement; -use image::ImageReader as ImgReader; use ipaddress::IPAddress; -use ruma::Mxc; use serde::Serialize; use url::Url; -use webpage::HTML; -use super::{Service, MXC_LENGTH}; +use super::Service; #[derive(Serialize, Default)] pub struct UrlPreviewData { @@ -41,34 +45,6 @@ pub async fn set_url_preview(&self, url: &str, data: &UrlPreviewData) -> Result< self.db.set_url_preview(url, data, now) } -#[implement(Service)] -pub async fn download_image(&self, url: &str) -> Result { - let client = &self.services.client.url_preview; - let image = client.get(url).send().await?.bytes().await?; - let mxc = Mxc { - server_name: self.services.globals.server_name(), - media_id: &utils::random_string(MXC_LENGTH), - }; - - self.create(&mxc, None, None, None, &image).await?; - - let (width, height) = match ImgReader::new(Cursor::new(&image)).with_guessed_format() { - | Err(_) => (None, None), - | Ok(reader) => match reader.into_dimensions() { - | Err(_) => (None, None), - | Ok((width, height)) => (Some(width), Some(height)), - }, - }; - - Ok(UrlPreviewData { - image: Some(mxc.to_string()), - image_size: Some(image.len()), - image_width: width, - image_height: height, - ..Default::default() - }) -} - #[implement(Service)] pub async fn get_url_preview(&self, url: &Url) -> Result { if let Ok(preview) = self.db.get_url_preview(url.as_str()).await { @@ -121,8 +97,51 @@ async fn request_url_preview(&self, url: &Url) -> Result { Ok(data) } +#[cfg(feature = "url_preview")] +#[implement(Service)] +pub async fn download_image(&self, url: &str) -> Result { + use conduwuit::utils::random_string; + use image::ImageReader; + use ruma::Mxc; + + let image = self.services.client.url_preview.get(url).send().await?; + let image = 
image.bytes().await?; + let mxc = Mxc { + server_name: self.services.globals.server_name(), + media_id: &random_string(super::MXC_LENGTH), + }; + + self.create(&mxc, None, None, None, &image).await?; + + let cursor = std::io::Cursor::new(&image); + let (width, height) = match ImageReader::new(cursor).with_guessed_format() { + | Err(_) => (None, None), + | Ok(reader) => match reader.into_dimensions() { + | Err(_) => (None, None), + | Ok((width, height)) => (Some(width), Some(height)), + }, + }; + + Ok(UrlPreviewData { + image: Some(mxc.to_string()), + image_size: Some(image.len()), + image_width: width, + image_height: height, + ..Default::default() + }) +} + +#[cfg(not(feature = "url_preview"))] +#[implement(Service)] +pub async fn download_image(&self, _url: &str) -> Result { + Err!(FeatureDisabled("url_preview")) +} + +#[cfg(feature = "url_preview")] #[implement(Service)] async fn download_html(&self, url: &str) -> Result { + use webpage::HTML; + let client = &self.services.client.url_preview; let mut response = client.get(url).send().await?; @@ -159,6 +178,12 @@ async fn download_html(&self, url: &str) -> Result { Ok(data) } +#[cfg(not(feature = "url_preview"))] +#[implement(Service)] +async fn download_html(&self, _url: &str) -> Result { + Err!(FeatureDisabled("url_preview")) +} + #[implement(Service)] pub fn url_preview_allowed(&self, url: &Url) -> bool { if ["http", "https"] diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 5c8063cb..7350b3a1 100644 --- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -1,7 +1,13 @@ -use std::{cmp, io::Cursor, num::Saturating as Sat}; +//! Media Thumbnails +//! +//! This functionality is gated by 'media_thumbnail', but not at the unit level +//! for historical and simplicity reasons. Instead the feature gates the +//! inclusion of dependencies and nulls out results using the existing interface +//! when not featured. 
-use conduwuit::{checked, err, Result}; -use image::{imageops::FilterType, DynamicImage}; +use std::{cmp, num::Saturating as Sat}; + +use conduwuit::{checked, err, implement, Result}; use ruma::{http_headers::ContentDisposition, media::Method, Mxc, UInt, UserId}; use tokio::{ fs, @@ -67,65 +73,89 @@ impl super::Service { Ok(None) } } - - /// Using saved thumbnail - #[tracing::instrument(skip(self), name = "saved", level = "debug")] - async fn get_thumbnail_saved(&self, data: Metadata) -> Result> { - let mut content = Vec::new(); - let path = self.get_media_file(&data.key); - fs::File::open(path) - .await? - .read_to_end(&mut content) - .await?; - - Ok(Some(into_filemeta(data, content))) - } - - /// Generate a thumbnail - #[tracing::instrument(skip(self), name = "generate", level = "debug")] - async fn get_thumbnail_generate( - &self, - mxc: &Mxc<'_>, - dim: &Dim, - data: Metadata, - ) -> Result> { - let mut content = Vec::new(); - let path = self.get_media_file(&data.key); - fs::File::open(path) - .await? 
- .read_to_end(&mut content) - .await?; - - let Ok(image) = image::load_from_memory(&content) else { - // Couldn't parse file to generate thumbnail, send original - return Ok(Some(into_filemeta(data, content))); - }; - - if dim.width > image.width() || dim.height > image.height() { - return Ok(Some(into_filemeta(data, content))); - } - - let mut thumbnail_bytes = Vec::new(); - let thumbnail = thumbnail_generate(&image, dim)?; - thumbnail.write_to(&mut Cursor::new(&mut thumbnail_bytes), image::ImageFormat::Png)?; - - // Save thumbnail in database so we don't have to generate it again next time - let thumbnail_key = self.db.create_file_metadata( - mxc, - None, - dim, - data.content_disposition.as_ref(), - data.content_type.as_deref(), - )?; - - let mut f = self.create_media_file(&thumbnail_key).await?; - f.write_all(&thumbnail_bytes).await?; - - Ok(Some(into_filemeta(data, thumbnail_bytes))) - } } -fn thumbnail_generate(image: &DynamicImage, requested: &Dim) -> Result { +/// Using saved thumbnail +#[implement(super::Service)] +#[tracing::instrument(name = "saved", level = "debug", skip(self, data))] +async fn get_thumbnail_saved(&self, data: Metadata) -> Result> { + let mut content = Vec::new(); + let path = self.get_media_file(&data.key); + fs::File::open(path) + .await? + .read_to_end(&mut content) + .await?; + + Ok(Some(into_filemeta(data, content))) +} + +/// Generate a thumbnail +#[cfg(feature = "media_thumbnail")] +#[implement(super::Service)] +#[tracing::instrument(name = "generate", level = "debug", skip(self, data))] +async fn get_thumbnail_generate( + &self, + mxc: &Mxc<'_>, + dim: &Dim, + data: Metadata, +) -> Result> { + let mut content = Vec::new(); + let path = self.get_media_file(&data.key); + fs::File::open(path) + .await? 
+ .read_to_end(&mut content) + .await?; + + let Ok(image) = image::load_from_memory(&content) else { + // Couldn't parse file to generate thumbnail, send original + return Ok(Some(into_filemeta(data, content))); + }; + + if dim.width > image.width() || dim.height > image.height() { + return Ok(Some(into_filemeta(data, content))); + } + + let mut thumbnail_bytes = Vec::new(); + let thumbnail = thumbnail_generate(&image, dim)?; + let mut cursor = std::io::Cursor::new(&mut thumbnail_bytes); + thumbnail + .write_to(&mut cursor, image::ImageFormat::Png) + .map_err(|error| err!(error!(?error, "Error writing PNG thumbnail.")))?; + + // Save thumbnail in database so we don't have to generate it again next time + let thumbnail_key = self.db.create_file_metadata( + mxc, + None, + dim, + data.content_disposition.as_ref(), + data.content_type.as_deref(), + )?; + + let mut f = self.create_media_file(&thumbnail_key).await?; + f.write_all(&thumbnail_bytes).await?; + + Ok(Some(into_filemeta(data, thumbnail_bytes))) +} + +#[cfg(not(feature = "media_thumbnail"))] +#[implement(super::Service)] +#[tracing::instrument(name = "fallback", level = "debug", skip_all)] +async fn get_thumbnail_generate( + &self, + _mxc: &Mxc<'_>, + _dim: &Dim, + data: Metadata, +) -> Result> { + self.get_thumbnail_saved(data).await +} + +#[cfg(feature = "media_thumbnail")] +fn thumbnail_generate( + image: &image::DynamicImage, + requested: &Dim, +) -> Result { + use image::imageops::FilterType; + let thumbnail = if !requested.crop() { let Dim { width, height, .. 
} = requested.scaled(&Dim { width: image.width(), From 685b127f99d3168f28f82e4a6c7171ace4e8cd6a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 6 Jan 2025 05:12:01 +0000 Subject: [PATCH 0496/1248] simplify iterator state constructor arguments Signed-off-by: Jason Volk --- src/database/map.rs | 6 +++++- src/database/map/keys.rs | 2 +- src/database/map/keys_from.rs | 2 +- src/database/map/rev_keys.rs | 2 +- src/database/map/rev_keys_from.rs | 2 +- src/database/map/rev_stream.rs | 6 +++--- src/database/map/rev_stream_from.rs | 4 ++-- src/database/map/stream.rs | 6 +++--- src/database/map/stream_from.rs | 4 ++-- src/database/stream.rs | 8 ++++---- 10 files changed, 23 insertions(+), 19 deletions(-) diff --git a/src/database/map.rs b/src/database/map.rs index af22a43c..de37b8f9 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -77,7 +77,11 @@ impl Map { #[inline] pub fn name(&self) -> &str { &self.name } - fn cf(&self) -> impl AsColumnFamilyRef + '_ { &*self.cf } + #[inline] + pub(crate) fn db(&self) -> &Arc { &self.db } + + #[inline] + pub(crate) fn cf(&self) -> impl AsColumnFamilyRef + '_ { &*self.cf } } impl Debug for Map { diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs index 7d09f3da..60742334 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -23,7 +23,7 @@ pub fn raw_keys(self: &Arc) -> impl Stream>> + Send use crate::pool::Seek; let opts = super::iter_options_default(); - let state = stream::State::new(&self.db, &self.cf, opts); + let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_fwd(None); return task::consume_budget() diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 95c6611b..b83775ac 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -54,7 +54,7 @@ where use crate::pool::Seek; let opts = super::iter_options_default(); - let state = stream::State::new(&self.db, &self.cf, opts); + let state = 
stream::State::new(self, opts); if is_cached(self, from) { return stream::Keys::<'_>::from(state.init_fwd(from.as_ref().into())).boxed(); } diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 0ca6ad0f..a559d04b 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -23,7 +23,7 @@ pub fn rev_raw_keys(self: &Arc) -> impl Stream>> + S use crate::pool::Seek; let opts = super::iter_options_default(); - let state = stream::State::new(&self.db, &self.cf, opts); + let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_rev(None); return task::consume_budget() diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index e208c505..5b159195 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -62,7 +62,7 @@ where use crate::pool::Seek; let opts = super::iter_options_default(); - let state = stream::State::new(&self.db, &self.cf, opts); + let state = stream::State::new(self, opts); if is_cached(self, from) { return stream::KeysRev::<'_>::from(state.init_rev(from.as_ref().into())).boxed(); } diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index d882dd91..1d5d3d10 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -32,7 +32,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> use crate::pool::Seek; let opts = super::read_options_default(); - let state = stream::State::new(&self.db, &self.cf, opts); + let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_rev(None); return task::consume_budget() @@ -65,9 +65,9 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> skip_all, fields(%map), )] -pub(super) fn is_cached(map: &super::Map) -> bool { +pub(super) fn is_cached(map: &Arc) -> bool { let opts = super::cache_read_options_default(); - let state = stream::State::new(&map.db, &map.cf, opts).init_rev(None); + let state = 
stream::State::new(map, opts).init_rev(None); !state.is_incomplete() } diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 72fc739c..1b66e8cc 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -81,7 +81,7 @@ where use crate::pool::Seek; let opts = super::iter_options_default(); - let state = stream::State::new(&self.db, &self.cf, opts); + let state = stream::State::new(self, opts); if is_cached(self, from) { let state = state.init_rev(from.as_ref().into()); return task::consume_budget() @@ -119,7 +119,7 @@ where P: AsRef<[u8]> + ?Sized, { let cache_opts = super::cache_read_options_default(); - let cache_status = stream::State::new(&map.db, &map.cf, cache_opts) + let cache_status = stream::State::new(map, cache_opts) .init_rev(from.as_ref().into()) .status(); diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index 11b0676c..fa3b0ad7 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -31,7 +31,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + use crate::pool::Seek; let opts = super::read_options_default(); - let state = stream::State::new(&self.db, &self.cf, opts); + let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_fwd(None); return task::consume_budget() @@ -64,9 +64,9 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + skip_all, fields(%map), )] -pub(super) fn is_cached(map: &super::Map) -> bool { +pub(super) fn is_cached(map: &Arc) -> bool { let opts = super::cache_read_options_default(); - let state = stream::State::new(&map.db, &map.cf, opts).init_fwd(None); + let state = stream::State::new(map, opts).init_fwd(None); !state.is_incomplete() } diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 79ea8f51..4296b6f6 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -78,7 +78,7 @@ where use crate::pool::Seek; let opts = 
super::read_options_default(); - let state = stream::State::new(&self.db, &self.cf, opts); + let state = stream::State::new(self, opts); if is_cached(self, from) { let state = state.init_fwd(from.as_ref().into()); return task::consume_budget() @@ -116,7 +116,7 @@ where P: AsRef<[u8]> + ?Sized, { let opts = super::cache_read_options_default(); - let state = stream::State::new(&map.db, &map.cf, opts).init_fwd(from.as_ref().into()); + let state = stream::State::new(map, opts).init_fwd(from.as_ref().into()); !state.is_incomplete() } diff --git a/src/database/stream.rs b/src/database/stream.rs index d7cb16c6..5f0fc0a1 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -6,14 +6,14 @@ mod keys_rev; use std::sync::Arc; use conduwuit::{utils::exchange, Result}; -use rocksdb::{ColumnFamily, DBRawIteratorWithThreadMode, ReadOptions}; +use rocksdb::{DBRawIteratorWithThreadMode, ReadOptions}; pub(crate) use self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; use crate::{ engine::Db, keyval::{Key, KeyVal, Val}, util::{is_incomplete, map_err}, - Engine, Slice, + Map, Slice, }; pub(crate) struct State<'a> { @@ -45,9 +45,9 @@ type Inner<'a> = DBRawIteratorWithThreadMode<'a, Db>; type From<'a> = Option>; impl<'a> State<'a> { - pub(super) fn new(db: &'a Arc, cf: &'a Arc, opts: ReadOptions) -> Self { + pub(super) fn new(map: &'a Arc, opts: ReadOptions) -> Self { Self { - inner: db.db.raw_iterator_cf_opt(&**cf, opts), + inner: map.db().db.raw_iterator_cf_opt(&map.cf(), opts), init: true, seek: false, } From 02f19cf951d5bfe90313bf7acb674f020a2e125e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 2 Jan 2025 05:30:51 +0000 Subject: [PATCH 0497/1248] tweak tracing spans; inlines db deserializer tracing instrument cover Signed-off-by: Jason Volk --- src/core/utils/mutex_map.rs | 13 +++-- src/database/de.rs | 55 +++++++++++++++++++ src/database/engine.rs | 2 + src/database/pool.rs | 2 - src/database/ser.rs | 2 + src/database/stream.rs | 13 +++++ 
src/database/stream/items.rs | 3 + src/database/stream/items_rev.rs | 3 + src/database/stream/keys.rs | 2 + src/database/stream/keys_rev.rs | 2 + src/service/rooms/event_handler/fetch_prev.rs | 6 +- .../rooms/event_handler/fetch_state.rs | 11 ++-- .../event_handler/handle_incoming_pdu.rs | 7 ++- .../rooms/event_handler/handle_prev_pdu.rs | 6 +- src/service/rooms/state_cache/mod.rs | 49 +++++++++++------ src/service/sending/sender.rs | 4 +- 16 files changed, 146 insertions(+), 34 deletions(-) diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index 152a75d1..9b9821fe 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -19,8 +19,8 @@ type Value = Arc>; impl MutexMap where - Key: Send + Hash + Eq + Clone, - Val: Send + Default, + Key: Clone + Eq + Hash + Send, + Val: Default + Send, { #[must_use] pub fn new() -> Self { @@ -29,10 +29,10 @@ where } } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(level = "trace", skip(self))] pub async fn lock(&self, k: &K) -> Guard where - K: ?Sized + Send + Sync + Debug, + K: Debug + Send + ?Sized + Sync, Key: for<'a> From<&'a K>, { let val = self @@ -61,13 +61,14 @@ where impl Default for MutexMap where - Key: Send + Hash + Eq + Clone, - Val: Send + Default, + Key: Clone + Eq + Hash + Send, + Val: Default + Send, { fn default() -> Self { Self::new() } } impl Drop for Guard { + #[tracing::instrument(name = "unlock", level = "trace", skip_all)] fn drop(&mut self) { if Arc::strong_count(Omg::mutex(&self.val)) <= 2 { self.map.lock().expect("locked").retain(|_, val| { diff --git a/src/database/de.rs b/src/database/de.rs index 4f5be6fc..48bc9f64 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -9,6 +9,15 @@ use serde::{ use crate::util::unhandled; /// Deserialize into T from buffer. 
+#[cfg_attr( + unabridged, + tracing::instrument( + name = "deserialize", + level = "trace", + skip_all, + fields(len = %buf.len()), + ) +)] pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result where T: Deserialize<'a>, @@ -132,6 +141,17 @@ impl<'de> Deserializer<'de> { /// Increment the position pointer. #[inline] + #[cfg_attr( + unabridged, + tracing::instrument( + level = "trace", + skip(self), + fields( + len = self.buf.len(), + rem = self.remaining().unwrap_or_default().saturating_sub(n), + ), + ) + )] fn inc_pos(&mut self, n: usize) { self.pos = self.pos.saturating_add(n); debug_assert!(self.pos <= self.buf.len(), "pos out of range"); @@ -149,6 +169,7 @@ impl<'de> Deserializer<'de> { impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { type Error = Error; + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_seq(self, visitor: V) -> Result where V: Visitor<'de>, @@ -157,6 +178,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_seq(self) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] fn deserialize_tuple(self, _len: usize, visitor: V) -> Result where V: Visitor<'de>, @@ -165,6 +187,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_seq(self) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] fn deserialize_tuple_struct( self, _name: &'static str, @@ -178,6 +201,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_seq(self) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_map(self, visitor: V) -> Result where V: Visitor<'de>, @@ -187,6 +211,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { d.deserialize_map(visitor).map_err(Into::into) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] fn deserialize_struct( 
self, name: &'static str, @@ -202,6 +227,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { .map_err(Into::into) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] fn deserialize_unit_struct(self, name: &'static str, visitor: V) -> Result where V: Visitor<'de>, @@ -215,6 +241,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_unit() } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] fn deserialize_newtype_struct(self, name: &'static str, visitor: V) -> Result where V: Visitor<'de>, @@ -225,6 +252,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, _visitor)))] fn deserialize_enum( self, _name: &'static str, @@ -237,26 +265,32 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { unhandled!("deserialize Enum not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_option>(self, _visitor: V) -> Result { unhandled!("deserialize Option not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_bool>(self, _visitor: V) -> Result { unhandled!("deserialize bool not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_i8>(self, _visitor: V) -> Result { unhandled!("deserialize i8 not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_i16>(self, _visitor: V) -> Result { unhandled!("deserialize i16 not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_i32>(self, _visitor: V) -> Result { unhandled!("deserialize i32 not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_i64>(self, visitor: V) -> Result { const 
BYTES: usize = size_of::(); @@ -268,6 +302,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_i64(i64::from_be_bytes(bytes)) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_u8>(self, _visitor: V) -> Result { unhandled!( "deserialize u8 not implemented; try dereferencing the Handle for [u8] access \ @@ -275,14 +310,17 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { ) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_u16>(self, _visitor: V) -> Result { unhandled!("deserialize u16 not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_u32>(self, _visitor: V) -> Result { unhandled!("deserialize u32 not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_u64>(self, visitor: V) -> Result { const BYTES: usize = size_of::(); @@ -294,53 +332,67 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { visitor.visit_u64(u64::from_be_bytes(bytes)) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_f32>(self, _visitor: V) -> Result { unhandled!("deserialize f32 not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_f64>(self, _visitor: V) -> Result { unhandled!("deserialize f64 not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_char>(self, _visitor: V) -> Result { unhandled!("deserialize char not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_str>(self, visitor: V) -> Result { let input = self.record_next(); let out = deserialize_str(input)?; visitor.visit_borrowed_str(out) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_string>(self, visitor: V) -> Result { 
let input = self.record_next(); let out = string::string_from_bytes(input)?; visitor.visit_string(out) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_bytes>(self, visitor: V) -> Result { let input = self.record_trail(); visitor.visit_borrowed_bytes(input) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_byte_buf>(self, _visitor: V) -> Result { unhandled!("deserialize Byte Buf not implemented") } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_unit>(self, _visitor: V) -> Result { unhandled!("deserialize Unit not implemented") } // this only used for $serde_json::private::RawValue at this time; see MapAccess + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_identifier>(self, visitor: V) -> Result { let input = "$serde_json::private::RawValue"; visitor.visit_borrowed_str(input) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] fn deserialize_ignored_any>(self, _visitor: V) -> Result { unhandled!("deserialize Ignored Any not implemented") } + #[cfg_attr( + unabridged, + tracing::instrument(level = "trace", skip_all, fields(?self.buf)) + )] fn deserialize_any>(self, visitor: V) -> Result { debug_assert_eq!( conduwuit::debug::type_name::(), @@ -363,6 +415,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { impl<'a, 'de: 'a> de::SeqAccess<'de> for &'a mut Deserializer<'de> { type Error = Error; + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, seed)))] fn next_element_seed(&mut self, seed: T) -> Result> where T: DeserializeSeed<'de>, @@ -381,6 +434,7 @@ impl<'a, 'de: 'a> de::SeqAccess<'de> for &'a mut Deserializer<'de> { impl<'a, 'de: 'a> de::MapAccess<'de> for &'a mut Deserializer<'de> { type Error = Error; + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, seed)))] fn next_key_seed(&mut self, seed: K) -> Result> 
where K: DeserializeSeed<'de>, @@ -388,6 +442,7 @@ impl<'a, 'de: 'a> de::MapAccess<'de> for &'a mut Deserializer<'de> { seed.deserialize(&mut **self).map(Some) } + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, seed)))] fn next_value_seed(&mut self, seed: V) -> Result where V: DeserializeSeed<'de>, diff --git a/src/database/engine.rs b/src/database/engine.rs index 670817b5..3d554eac 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -157,11 +157,13 @@ impl Engine { #[inline] pub fn corked(&self) -> bool { self.corks.load(std::sync::atomic::Ordering::Relaxed) > 0 } + #[inline] pub(crate) fn cork(&self) { self.corks .fetch_add(1, std::sync::atomic::Ordering::Relaxed); } + #[inline] pub(crate) fn uncork(&self) { self.corks .fetch_sub(1, std::sync::atomic::Ordering::Relaxed); diff --git a/src/database/pool.rs b/src/database/pool.rs index 11871ff6..b972e763 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -207,8 +207,6 @@ pub(crate) async fn execute_get(self: &Arc, mut cmd: Get) -> Result(out: &'a mut W, val: T) -> Result<&'a [u8]> where W: Write + AsRef<[u8]> + 'a, diff --git a/src/database/stream.rs b/src/database/stream.rs index 5f0fc0a1..f3063bb3 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -29,12 +29,14 @@ pub(crate) trait Cursor<'a, T> { fn seek(&mut self); + #[inline] fn get(&self) -> Option> { self.fetch() .map(Ok) .or_else(|| self.state().status().map(map_err).map(Err)) } + #[inline] fn seek_and_get(&mut self) -> Option> { self.seek(); self.get() @@ -45,6 +47,7 @@ type Inner<'a> = DBRawIteratorWithThreadMode<'a, Db>; type From<'a> = Option>; impl<'a> State<'a> { + #[inline] pub(super) fn new(map: &'a Arc, opts: ReadOptions) -> Self { Self { inner: map.db().db.raw_iterator_cf_opt(&map.cf(), opts), @@ -53,6 +56,8 @@ impl<'a> State<'a> { } } + #[inline] + #[tracing::instrument(level = "trace", skip_all)] pub(super) fn init_fwd(mut self, from: From<'_>) -> Self { debug_assert!(self.init, 
"init must be set to make this call"); debug_assert!(!self.seek, "seek must not be set to make this call"); @@ -67,6 +72,8 @@ impl<'a> State<'a> { self } + #[inline] + #[tracing::instrument(level = "trace", skip_all)] pub(super) fn init_rev(mut self, from: From<'_>) -> Self { debug_assert!(self.init, "init must be set to make this call"); debug_assert!(!self.seek, "seek must not be set to make this call"); @@ -82,6 +89,7 @@ impl<'a> State<'a> { } #[inline] + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] pub(super) fn seek_fwd(&mut self) { if !exchange(&mut self.init, false) { self.inner.next(); @@ -91,6 +99,7 @@ impl<'a> State<'a> { } #[inline] + #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] pub(super) fn seek_rev(&mut self) { if !exchange(&mut self.init, false) { self.inner.prev(); @@ -103,12 +112,16 @@ impl<'a> State<'a> { matches!(self.status(), Some(e) if is_incomplete(&e)) } + #[inline] fn fetch_key(&self) -> Option> { self.inner.key().map(Key::from) } + #[inline] fn _fetch_val(&self) -> Option> { self.inner.value().map(Val::from) } + #[inline] fn fetch(&self) -> Option> { self.inner.item().map(KeyVal::from) } + #[inline] pub(super) fn status(&self) -> Option { self.inner.status().err() } #[inline] diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index cd81b4a0..8814419e 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -15,12 +15,15 @@ pub(crate) struct Items<'a> { } impl<'a> From> for Items<'a> { + #[inline] fn from(state: State<'a>) -> Self { Self { state } } } impl<'a> Cursor<'a, KeyVal<'a>> for Items<'a> { + #[inline] fn state(&self) -> &State<'a> { &self.state } + #[inline] fn fetch(&self) -> Option> { self.state.fetch().map(keyval_longevity) } #[inline] diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index c6cf9b53..f6fcb0e5 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ 
-15,12 +15,15 @@ pub(crate) struct ItemsRev<'a> { } impl<'a> From> for ItemsRev<'a> { + #[inline] fn from(state: State<'a>) -> Self { Self { state } } } impl<'a> Cursor<'a, KeyVal<'a>> for ItemsRev<'a> { + #[inline] fn state(&self) -> &State<'a> { &self.state } + #[inline] fn fetch(&self) -> Option> { self.state.fetch().map(keyval_longevity) } #[inline] diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index 9bf27507..b953f51c 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -15,10 +15,12 @@ pub(crate) struct Keys<'a> { } impl<'a> From> for Keys<'a> { + #[inline] fn from(state: State<'a>) -> Self { Self { state } } } impl<'a> Cursor<'a, Key<'a>> for Keys<'a> { + #[inline] fn state(&self) -> &State<'a> { &self.state } #[inline] diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index 8657df0f..acf78d88 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -15,10 +15,12 @@ pub(crate) struct KeysRev<'a> { } impl<'a> From> for KeysRev<'a> { + #[inline] fn from(state: State<'a>) -> Self { Self { state } } } impl<'a> Cursor<'a, Key<'a>> for KeysRev<'a> { + #[inline] fn state(&self) -> &State<'a> { &self.state } #[inline] diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 3f121f69..0d64e98e 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -15,8 +15,12 @@ use ruma::{ use super::check_room_id; #[implement(super::Service)] +#[tracing::instrument( + level = "warn", + skip_all, + fields(%origin), +)] #[allow(clippy::type_complexity)] -#[tracing::instrument(skip_all)] pub(super) async fn fetch_prev( &self, origin: &ServerName, diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index edc47194..cc4a3e46 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ 
b/src/service/rooms/event_handler/fetch_state.rs @@ -1,6 +1,6 @@ use std::collections::{hash_map, HashMap}; -use conduwuit::{debug, implement, warn, Err, Error, PduEvent, Result}; +use conduwuit::{debug, debug_warn, implement, Err, Error, PduEvent, Result}; use futures::FutureExt; use ruma::{ api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId, @@ -13,7 +13,11 @@ use crate::rooms::short::ShortStateKey; /// server's response to some extend (sic), but we still do a lot of checks /// on the events #[implement(super::Service)] -#[tracing::instrument(skip(self, create_event, room_version_id))] +#[tracing::instrument( + level = "warn", + skip_all, + fields(%origin), +)] pub(super) async fn fetch_state( &self, origin: &ServerName, @@ -22,7 +26,6 @@ pub(super) async fn fetch_state( room_version_id: &RoomVersionId, event_id: &EventId, ) -> Result>> { - debug!("Fetching state ids"); let res = self .services .sending @@ -31,7 +34,7 @@ pub(super) async fn fetch_state( event_id: event_id.to_owned(), }) .await - .inspect_err(|e| warn!("Fetching state for event failed: {e}"))?; + .inspect_err(|e| debug_warn!("Fetching state for event failed: {e}"))?; debug!("Fetching state events"); let state_vec = self diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 4c2fb2f7..c2e6ccc9 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -39,7 +39,12 @@ use crate::rooms::timeline::RawPduId; /// 14. 
Check if the event passes auth based on the "current state" of the room, /// if not soft fail it #[implement(super::Service)] -#[tracing::instrument(skip(self, origin, value, is_timeline_event), name = "pdu")] +#[tracing::instrument( + name = "pdu", + level = "warn", + skip_all, + fields(%room_id, %event_id), +)] pub async fn handle_incoming_pdu<'a>( &self, origin: &'a ServerName, diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 9bd4450e..ad71c173 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -13,8 +13,10 @@ use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName}; #[allow(clippy::type_complexity)] #[allow(clippy::too_many_arguments)] #[tracing::instrument( - skip(self, origin, event_id, room_id, eventid_info, create_event, first_pdu_in_room), - name = "prev" + name = "prev", + level = "warn", + skip_all, + fields(%prev_id), )] pub(super) async fn handle_prev_pdu<'a>( &self, diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index c2de8b62..89421dfd 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -95,7 +95,16 @@ impl crate::Service for Service { impl Service { /// Update current membership data. 
- #[tracing::instrument(skip(self, last_state))] + #[tracing::instrument( + level = "debug", + skip_all, + fields( + %room_id, + %user_id, + %sender, + ?membership_event, + ), + )] #[allow(clippy::too_many_arguments)] pub async fn update_membership( &self, @@ -265,7 +274,7 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(self, room_id, appservice), level = "debug")] + #[tracing::instrument(level = "trace", skip_all)] pub async fn appservice_in_room( &self, room_id: &RoomId, @@ -383,7 +392,7 @@ impl Service { .map(|(_, server): (Ignore, &ServerName)| server) } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn server_in_room<'a>( &'a self, server: &'a ServerName, @@ -409,7 +418,7 @@ impl Service { } /// Returns true if server can see user by sharing at least one room. - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn server_sees_user(&self, server: &ServerName, user_id: &UserId) -> bool { self.server_rooms(server) .any(|room_id| self.is_joined(user_id, room_id)) @@ -417,7 +426,7 @@ impl Service { } /// Returns true if user_a and user_b share at least one room. 
- #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> bool { let get_shared_rooms = self.get_shared_rooms(user_a, user_b); @@ -426,6 +435,7 @@ impl Service { } /// List the rooms common between two users + #[tracing::instrument(skip(self), level = "debug")] pub fn get_shared_rooms<'a>( &'a self, user_a: &'a UserId, @@ -453,7 +463,7 @@ impl Service { } /// Returns the number of users which are currently in a room - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn room_joined_count(&self, room_id: &RoomId) -> Result { self.db.roomid_joinedcount.get(room_id).await.deserialized() } @@ -469,9 +479,9 @@ impl Service { .ready_filter(|user| self.services.globals.user_is_local(user)) } - #[tracing::instrument(skip(self), level = "debug")] /// Returns an iterator of all our local joined users in a room who are /// active (not deactivated, not guest) + #[tracing::instrument(skip(self), level = "trace")] pub fn active_local_users_in_room<'a>( &'a self, room_id: &'a RoomId, @@ -481,7 +491,7 @@ impl Service { } /// Returns the number of users which are currently invited to a room - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn room_invited_count(&self, room_id: &RoomId) -> Result { self.db .roomid_invitedcount @@ -518,7 +528,7 @@ impl Service { .map(|(_, user_id): (Ignore, &UserId)| user_id) } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { let key = (room_id, user_id); self.db @@ -528,7 +538,7 @@ impl Service { .deserialized() } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn get_left_count(&self, 
room_id: &RoomId, user_id: &UserId) -> Result { let key = (room_id, user_id); self.db.roomuserid_leftcount.qry(&key).await.deserialized() @@ -566,7 +576,7 @@ impl Service { .ignore_err() } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn invite_state( &self, user_id: &UserId, @@ -583,7 +593,7 @@ impl Service { }) } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn left_state( &self, user_id: &UserId, @@ -625,24 +635,25 @@ impl Service { self.db.roomuseroncejoinedids.qry(&key).await.is_ok() } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn is_joined<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { let key = (user_id, room_id); self.db.userroomid_joined.qry(&key).await.is_ok() } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> bool { let key = (user_id, room_id); self.db.userroomid_invitestate.qry(&key).await.is_ok() } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> bool { let key = (user_id, room_id); self.db.userroomid_leftstate.qry(&key).await.is_ok() } + #[tracing::instrument(skip(self), level = "trace")] pub async fn user_membership( &self, user_id: &UserId, @@ -683,7 +694,7 @@ impl Service { /// distant future. 
/// /// See - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "trace")] pub async fn servers_route_via(&self, room_id: &RoomId) -> Result> { let most_powerful_user_server = self .services @@ -724,6 +735,7 @@ impl Service { (cache.len(), cache.capacity()) } + #[tracing::instrument(level = "debug", skip_all)] pub fn clear_appservice_in_room_cache(&self) { self.appservice_in_room_cache .write() @@ -731,6 +743,7 @@ impl Service { .clear(); } + #[tracing::instrument(level = "debug", skip(self))] pub async fn update_joined_count(&self, room_id: &RoomId) { let mut joinedcount = 0_u64; let mut invitedcount = 0_u64; @@ -784,11 +797,13 @@ impl Service { .remove(room_id); } + #[tracing::instrument(level = "debug", skip(self))] fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) { let key = (user_id, room_id); self.db.roomuseroncejoinedids.put_raw(key, []); } + #[tracing::instrument(level = "debug", skip(self, last_state, invite_via))] pub async fn mark_as_invited( &self, user_id: &UserId, @@ -821,7 +836,7 @@ impl Service { } } - #[tracing::instrument(skip(self, servers), level = "debug")] + #[tracing::instrument(level = "debug", skip(self, servers))] pub async fn add_servers_invite_via(&self, room_id: &RoomId, servers: Vec) { let mut servers: Vec<_> = self .servers_invite_via(room_id) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index a9abada4..5fd4cf91 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -80,7 +80,9 @@ impl Service { self.work_loop(id, &mut futures, &mut statuses).await; - self.finish_responses(&mut futures).boxed().await; + if !futures.is_empty() { + self.finish_responses(&mut futures).boxed().await; + } Ok(()) } From 6a0f9add0c1f882d4cbae1b595fcdaa1577183be Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 6 Jan 2025 14:07:26 +0000 Subject: [PATCH 0498/1248] refactor database engine/options; add column descriptors Signed-off-by: Jason 
Volk --- src/admin/debug/commands.rs | 4 +- src/database/database.rs | 55 ---- src/database/engine.rs | 336 +++----------------- src/database/engine/backup.rs | 73 +++++ src/database/engine/cf_opts.rs | 214 +++++++++++++ src/database/engine/context.rs | 73 +++++ src/database/engine/db_opts.rs | 133 ++++++++ src/database/engine/descriptor.rs | 89 ++++++ src/database/engine/files.rs | 32 ++ src/database/engine/logger.rs | 22 ++ src/database/engine/memory_usage.rs | 30 ++ src/database/engine/open.rs | 121 ++++++++ src/database/engine/repair.rs | 16 + src/database/map.rs | 74 +---- src/database/map/open.rs | 37 +++ src/database/map/options.rs | 26 ++ src/database/maps.rs | 466 ++++++++++++++++++++++------ src/database/mod.rs | 77 ++++- src/database/opts.rs | 433 -------------------------- src/service/migrations.rs | 8 +- src/service/services.rs | 4 +- 21 files changed, 1358 insertions(+), 965 deletions(-) delete mode 100644 src/database/database.rs create mode 100644 src/database/engine/backup.rs create mode 100644 src/database/engine/cf_opts.rs create mode 100644 src/database/engine/context.rs create mode 100644 src/database/engine/db_opts.rs create mode 100644 src/database/engine/descriptor.rs create mode 100644 src/database/engine/files.rs create mode 100644 src/database/engine/logger.rs create mode 100644 src/database/engine/memory_usage.rs create mode 100644 src/database/engine/open.rs create mode 100644 src/database/engine/repair.rs create mode 100644 src/database/map/open.rs create mode 100644 src/database/map/options.rs delete mode 100644 src/database/opts.rs diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 5bc65d9b..07daaf0a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -911,8 +911,8 @@ pub(super) async fn database_stats( let map_name = map.as_ref().map_or(EMPTY, String::as_str); let mut out = String::new(); - for (name, map) in self.services.db.iter() { - if !map_name.is_empty() && *map_name != *name 
{ + for (&name, map) in self.services.db.iter() { + if !map_name.is_empty() && map_name != name { continue; } diff --git a/src/database/database.rs b/src/database/database.rs deleted file mode 100644 index 83d2c201..00000000 --- a/src/database/database.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::{ops::Index, sync::Arc}; - -use conduwuit::{err, Result, Server}; - -use crate::{ - maps, - maps::{Maps, MapsKey, MapsVal}, - Engine, Map, -}; - -pub struct Database { - pub db: Arc, - maps: Maps, -} - -impl Database { - /// Load an existing database or create a new one. - pub async fn open(server: &Arc) -> Result> { - let db = Engine::open(server).await?; - Ok(Arc::new(Self { db: db.clone(), maps: maps::open(&db)? })) - } - - #[inline] - pub fn get(&self, name: &str) -> Result<&Arc> { - self.maps - .get(name) - .ok_or_else(|| err!(Request(NotFound("column not found")))) - } - - #[inline] - pub fn iter(&self) -> impl Iterator + Send + '_ { - self.maps.iter() - } - - #[inline] - pub fn keys(&self) -> impl Iterator + Send + '_ { self.maps.keys() } - - #[inline] - #[must_use] - pub fn is_read_only(&self) -> bool { self.db.is_read_only() } - - #[inline] - #[must_use] - pub fn is_secondary(&self) -> bool { self.db.is_secondary() } -} - -impl Index<&str> for Database { - type Output = Arc; - - fn index(&self, name: &str) -> &Self::Output { - self.maps - .get(name) - .expect("column in database does not exist") - } -} diff --git a/src/database/engine.rs b/src/database/engine.rs index 3d554eac..2958f73f 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -1,288 +1,64 @@ +mod backup; +mod cf_opts; +pub(crate) mod context; +mod db_opts; +pub(crate) mod descriptor; +mod files; +mod logger; +mod memory_usage; +mod open; +mod repair; + use std::{ - collections::{BTreeSet, HashMap}, ffi::CStr, - fmt::Write, - path::PathBuf, - sync::{atomic::AtomicU32, Arc, Mutex, RwLock}, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, }; -use conduwuit::{ - debug, error, info, 
utils::time::rfc2822_from_seconds, warn, Err, Result, Server, -}; -use rocksdb::{ - backup::{BackupEngine, BackupEngineOptions}, - perf::get_memory_usage_stats, - AsColumnFamilyRef, BoundColumnFamily, Cache, ColumnFamilyDescriptor, DBCommon, - DBWithThreadMode, Env, LogLevel, MultiThreaded, Options, -}; +use conduwuit::{debug, info, warn, Err, Result}; +use rocksdb::{AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded}; -use crate::{ - opts::{cf_options, db_options}, - or_else, - pool::Pool, - result, - util::map_err, -}; +use crate::{pool::Pool, result, Context}; pub struct Engine { - pub(crate) server: Arc, - row_cache: Cache, - col_cache: RwLock>, - opts: Options, - env: Env, - cfs: Mutex>, - pub(crate) pool: Arc, - pub(crate) db: Db, - corks: AtomicU32, pub(super) read_only: bool, pub(super) secondary: bool, + corks: AtomicU32, + pub(crate) db: Db, + pub(crate) pool: Arc, + pub(crate) ctx: Arc, } pub(crate) type Db = DBWithThreadMode; impl Engine { - #[tracing::instrument(skip_all)] - pub(crate) async fn open(server: &Arc) -> Result> { - let config = &server.config; - let path = &config.database_path; - - let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0; - - #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] - let row_cache_capacity_bytes = (cache_capacity_bytes * 0.50) as usize; - - #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] - let col_cache_capacity_bytes = (cache_capacity_bytes * 0.50) as usize; - - let mut col_cache = HashMap::new(); - col_cache.insert("primary".to_owned(), Cache::new_lru_cache(col_cache_capacity_bytes)); - - let mut db_env = Env::new().or_else(or_else)?; - let row_cache = Cache::new_lru_cache(row_cache_capacity_bytes); - let db_opts = db_options( - config, - &mut db_env, - &row_cache, - col_cache.get("primary").expect("primary cache exists"), - )?; - - debug!("Listing column families in database"); - 
let cfs = Db::list_cf(&db_opts, &config.database_path) - .unwrap_or_default() - .into_iter() - .collect::>(); - - debug!("Configuring {} column families found in database", cfs.len()); - let cfopts = cfs - .iter() - .map(|name| cf_options(config, name, db_opts.clone(), &mut col_cache)) - .collect::>>()?; - - debug!("Opening {} column family descriptors in database", cfs.len()); - let cfds = cfs - .iter() - .zip(cfopts.into_iter()) - .map(|(name, opts)| ColumnFamilyDescriptor::new(name, opts)) - .collect::>(); - - debug!("Starting frontend request pool"); - let pool = Pool::new(server)?; - - let load_time = std::time::Instant::now(); - if config.rocksdb_repair { - repair(&db_opts, &config.database_path)?; - } - - debug!("Opening database..."); - let res = if config.rocksdb_read_only { - Db::open_cf_descriptors_read_only(&db_opts, path, cfds, false) - } else if config.rocksdb_secondary { - Db::open_cf_descriptors_as_secondary(&db_opts, path, path, cfds) - } else { - Db::open_cf_descriptors(&db_opts, path, cfds) - }; - - let db = res.or_else(or_else)?; - info!( - columns = cfs.len(), - sequence = %db.latest_sequence_number(), - time = ?load_time.elapsed(), - "Opened database." 
- ); - - Ok(Arc::new(Self { - server: server.clone(), - row_cache, - col_cache: RwLock::new(col_cache), - opts: db_opts, - env: db_env, - cfs: Mutex::new(cfs), - corks: AtomicU32::new(0), - read_only: config.rocksdb_read_only, - secondary: config.rocksdb_secondary, - pool, - db, - })) - } - - #[tracing::instrument(skip(self), level = "trace")] - pub(crate) fn open_cf(&self, name: &str) -> Result>> { - let mut cfs = self.cfs.lock().expect("locked"); - if !cfs.contains(name) { - debug!("Creating new column family in database: {name}"); - - let mut col_cache = self.col_cache.write().expect("locked"); - let opts = cf_options(&self.server.config, name, self.opts.clone(), &mut col_cache)?; - if let Err(e) = self.db.create_cf(name, &opts) { - error!(?name, "Failed to create new column family: {e}"); - return or_else(e); - } - - cfs.insert(name.to_owned()); - } - - Ok(self.cf(name)) - } - pub(crate) fn cf(&self, name: &str) -> Arc> { self.db .cf_handle(name) - .expect("column was created and exists") - } - - pub fn flush(&self) -> Result<()> { result(DBCommon::flush_wal(&self.db, false)) } - - pub fn sync(&self) -> Result<()> { result(DBCommon::flush_wal(&self.db, true)) } - - #[inline] - pub fn corked(&self) -> bool { self.corks.load(std::sync::atomic::Ordering::Relaxed) > 0 } - - #[inline] - pub(crate) fn cork(&self) { - self.corks - .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + .expect("column must be described prior to database open") } #[inline] - pub(crate) fn uncork(&self) { - self.corks - .fetch_sub(1, std::sync::atomic::Ordering::Relaxed); - } + pub(crate) fn cork(&self) { self.corks.fetch_add(1, Ordering::Relaxed); } - pub fn memory_usage(&self) -> Result { - let mut res = String::new(); - let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&self.row_cache])) - .or_else(or_else)?; - let mibs = |input| f64::from(u32::try_from(input / 1024).unwrap_or(0)) / 1024.0; - writeln!( - res, - "Memory buffers: {:.2} MiB\nPending write: {:.2} MiB\nTable 
readers: {:.2} MiB\nRow \ - cache: {:.2} MiB", - mibs(stats.mem_table_total), - mibs(stats.mem_table_unflushed), - mibs(stats.mem_table_readers_total), - mibs(u64::try_from(self.row_cache.get_usage())?), - )?; + #[inline] + pub(crate) fn uncork(&self) { self.corks.fetch_sub(1, Ordering::Relaxed); } - for (name, cache) in &*self.col_cache.read().expect("locked") { - writeln!(res, "{name} cache: {:.2} MiB", mibs(u64::try_from(cache.get_usage())?))?; - } - - Ok(res) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn cleanup(&self) -> Result<()> { - debug!("Running flush_opt"); - let flushoptions = rocksdb::FlushOptions::default(); - result(DBCommon::flush_opt(&self.db, &flushoptions)) - } + #[inline] + pub fn corked(&self) -> bool { self.corks.load(Ordering::Relaxed) > 0 } #[tracing::instrument(skip(self))] - pub fn backup(&self) -> Result { - let config = &self.server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok(()); - } + pub fn sync(&self) -> Result { result(DBCommon::flush_wal(&self.db, true)) } - let options = BackupEngineOptions::new(path.expect("valid database backup path")) - .map_err(map_err)?; - let mut engine = BackupEngine::open(&options, &self.env).map_err(map_err)?; - if config.database_backups_to_keep > 0 { - let flush = !self.is_read_only(); - engine - .create_new_backup_flush(&self.db, flush) - .map_err(map_err)?; + #[tracing::instrument(skip(self), level = "debug")] + pub fn flush(&self) -> Result { result(DBCommon::flush_wal(&self.db, false)) } - let engine_info = engine.get_backup_info(); - let info = &engine_info.last().expect("backup engine info is not empty"); - info!( - "Created database backup #{} using {} bytes in {} files", - info.backup_id, info.size, info.num_files, - ); - } - - if config.database_backups_to_keep >= 0 { - let keep = u32::try_from(config.database_backups_to_keep)?; - if let Err(e) = 
engine.purge_old_backups(keep.try_into()?) { - error!("Failed to purge old backup: {e:?}"); - } - } - - Ok(()) - } - - pub fn backup_list(&self) -> Result { - let config = &self.server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok("Configure database_backup_path to enable backups, or the path \ - specified is not valid" - .to_owned()); - } - - let mut res = String::new(); - let options = BackupEngineOptions::new(path.expect("valid database backup path")) - .or_else(or_else)?; - let engine = BackupEngine::open(&options, &self.env).or_else(or_else)?; - for info in engine.get_backup_info() { - writeln!( - res, - "#{} {}: {} bytes, {} files", - info.backup_id, - rfc2822_from_seconds(info.timestamp), - info.size, - info.num_files, - )?; - } - - Ok(res) - } - - pub fn file_list(&self) -> Result { - match self.db.live_files() { - | Err(e) => Ok(String::from(e)), - | Ok(files) => { - let mut res = String::new(); - writeln!(res, "| lev | sst | keys | dels | size | column |")?; - writeln!(res, "| ---: | :--- | ---: | ---: | ---: | :--- |")?; - for file in files { - writeln!( - res, - "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", - file.level, - file.name, - file.num_entries, - file.num_deletions, - file.size, - file.column_family_name, - )?; - } - - Ok(res) - }, - } + #[tracing::instrument(skip(self), level = "debug")] + pub fn sort(&self) -> Result { + let flushoptions = rocksdb::FlushOptions::default(); + result(DBCommon::flush_opt(&self.db, &flushoptions)) } /// Query for database property by null-terminated name which is expected to @@ -312,56 +88,14 @@ impl Engine { pub fn is_secondary(&self) -> bool { self.secondary } } -pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result<()> { - warn!("Starting database repair. 
This may take a long time..."); - match Db::repair(db_opts, path) { - | Ok(()) => info!("Database repair successful."), - | Err(e) => return Err!("Repair failed: {e:?}"), - } - - Ok(()) -} - -#[tracing::instrument( - parent = None, - name = "rocksdb", - level = "trace" - skip(msg), -)] -pub(crate) fn handle_log(level: LogLevel, msg: &str) { - let msg = msg.trim(); - if msg.starts_with("Options") { - return; - } - - match level { - | LogLevel::Header | LogLevel::Debug => debug!("{msg}"), - | LogLevel::Error | LogLevel::Fatal => error!("{msg}"), - | LogLevel::Info => debug!("{msg}"), - | LogLevel::Warn => warn!("{msg}"), - }; -} - impl Drop for Engine { #[cold] fn drop(&mut self) { const BLOCKING: bool = true; - debug!("Closing frontend pool"); - self.pool.close(); - debug!("Waiting for background tasks to finish..."); self.db.cancel_all_background_work(BLOCKING); - debug!("Shutting down background threads"); - self.env.set_high_priority_background_threads(0); - self.env.set_low_priority_background_threads(0); - self.env.set_bottom_priority_background_threads(0); - self.env.set_background_threads(0); - - debug!("Joining background threads..."); - self.env.join_all_threads(); - info!( sequence = %self.db.latest_sequence_number(), "Closing database..." 
diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs new file mode 100644 index 00000000..db718c2c --- /dev/null +++ b/src/database/engine/backup.rs @@ -0,0 +1,73 @@ +use std::fmt::Write; + +use conduwuit::{error, implement, info, utils::time::rfc2822_from_seconds, warn, Result}; +use rocksdb::backup::{BackupEngine, BackupEngineOptions}; + +use super::Engine; +use crate::{or_else, util::map_err}; + +#[implement(Engine)] +#[tracing::instrument(skip(self))] +pub fn backup(&self) -> Result { + let server = &self.ctx.server; + let config = &server.config; + let path = config.database_backup_path.as_ref(); + if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { + return Ok(()); + } + + let options = + BackupEngineOptions::new(path.expect("valid database backup path")).map_err(map_err)?; + let mut engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err)?; + if config.database_backups_to_keep > 0 { + let flush = !self.is_read_only(); + engine + .create_new_backup_flush(&self.db, flush) + .map_err(map_err)?; + + let engine_info = engine.get_backup_info(); + let info = &engine_info.last().expect("backup engine info is not empty"); + info!( + "Created database backup #{} using {} bytes in {} files", + info.backup_id, info.size, info.num_files, + ); + } + + if config.database_backups_to_keep >= 0 { + let keep = u32::try_from(config.database_backups_to_keep)?; + if let Err(e) = engine.purge_old_backups(keep.try_into()?) 
{ + error!("Failed to purge old backup: {e:?}"); + } + } + + Ok(()) +} + +#[implement(Engine)] +pub fn backup_list(&self) -> Result { + let server = &self.ctx.server; + let config = &server.config; + let path = config.database_backup_path.as_ref(); + if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { + return Ok("Configure database_backup_path to enable backups, or the path specified is \ + not valid" + .to_owned()); + } + + let mut res = String::new(); + let options = + BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?; + let engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).or_else(or_else)?; + for info in engine.get_backup_info() { + writeln!( + res, + "#{} {}: {} bytes, {} files", + info.backup_id, + rfc2822_from_seconds(info.timestamp), + info.size, + info.num_files, + )?; + } + + Ok(res) +} diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs new file mode 100644 index 00000000..98d74044 --- /dev/null +++ b/src/database/engine/cf_opts.rs @@ -0,0 +1,214 @@ +use conduwuit::{ + err, + utils::{math::Expected, BoolExt}, + Config, Result, +}; +use rocksdb::{ + BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, + DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, + UniversalCompactOptions, UniversalCompactionStopStyle, +}; + +use super::descriptor::{CacheDisp, Descriptor}; +use crate::Context; + +/// Adjust options for the specific column by name. Provide the result of +/// db_options() as the argument to this function and use the return value in +/// the arguments to open the specific column. 
+pub(crate) fn cf_options(ctx: &Context, opts: Options, desc: &Descriptor) -> Result { + let cache = get_cache(ctx, desc); + let config = &ctx.server.config; + descriptor_cf_options(opts, desc.clone(), config, cache.as_ref()) +} + +fn descriptor_cf_options( + mut opts: Options, + mut desc: Descriptor, + config: &Config, + cache: Option<&Cache>, +) -> Result { + set_compression(&mut desc, config); + set_table_options(&mut opts, &desc, cache); + + opts.set_min_write_buffer_number(1); + opts.set_max_write_buffer_number(2); + if let Some(write_size) = desc.write_size { + opts.set_write_buffer_size(write_size); + } + + opts.set_target_file_size_base(desc.file_size); + opts.set_target_file_size_multiplier(desc.file_shape[0]); + + opts.set_level_zero_file_num_compaction_trigger(desc.level0_width); + opts.set_level_compaction_dynamic_level_bytes(false); + opts.set_ttl(desc.ttl); + + opts.set_max_bytes_for_level_base(desc.level_size); + opts.set_max_bytes_for_level_multiplier(1.0); + opts.set_max_bytes_for_level_multiplier_additional(&desc.level_shape); + + opts.set_compaction_style(desc.compaction); + opts.set_compaction_pri(desc.compaction_pri); + opts.set_universal_compaction_options(&uc_options(&desc)); + + opts.set_compression_type(desc.compression); + opts.set_compression_options(-14, desc.compression_level, 0, 0); // -14 w_bits used by zlib. + if let Some(&bottommost_level) = desc.bottommost_level.as_ref() { + opts.set_bottommost_compression_type(desc.compression); + opts.set_bottommost_zstd_max_train_bytes(0, true); + opts.set_bottommost_compression_options( + -14, // -14 w_bits is only read by zlib. 
+ bottommost_level, + 0, + 0, + true, + ); + } + + Ok(opts) +} + +fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache>) { + let mut table = table_options(desc); + if let Some(cache) = cache { + table.set_block_cache(cache); + } else { + table.disable_cache(); + } + + opts.set_block_based_table_factory(&table); +} + +fn set_compression(desc: &mut Descriptor, config: &Config) { + desc.compression = match config.rocksdb_compression_algo.as_ref() { + | "snappy" => CompressionType::Snappy, + | "zlib" => CompressionType::Zlib, + | "bz2" => CompressionType::Bz2, + | "lz4" => CompressionType::Lz4, + | "lz4hc" => CompressionType::Lz4hc, + | "none" => CompressionType::None, + | _ => CompressionType::Zstd, + }; + + desc.compression_level = config.rocksdb_compression_level; + desc.bottommost_level = config + .rocksdb_bottommost_compression + .then_some(config.rocksdb_bottommost_compression_level); +} + +fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { + let mut opts = UniversalCompactOptions::default(); + opts.set_stop_style(UniversalCompactionStopStyle::Total); + opts.set_min_merge_width(desc.merge_width.0); + opts.set_max_merge_width(desc.merge_width.1); + opts.set_max_size_amplification_percent(10000); + opts.set_compression_size_percent(-1); + opts.set_size_ratio(1); + + opts +} + +fn table_options(desc: &Descriptor) -> BlockBasedOptions { + let mut opts = BlockBasedOptions::default(); + + opts.set_block_size(desc.block_size); + opts.set_metadata_block_size(desc.index_size); + + opts.set_cache_index_and_filter_blocks(true); + opts.set_pin_top_level_index_and_filter(false); + opts.set_pin_l0_filter_and_index_blocks_in_cache(false); + opts.set_partition_pinning_tier(BlockBasedPinningTier::None); + opts.set_unpartitioned_pinning_tier(BlockBasedPinningTier::None); + opts.set_top_level_index_pinning_tier(BlockBasedPinningTier::None); + + opts.set_use_delta_encoding(false); + opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch); 
+ opts.set_data_block_index_type( + desc.block_index_hashing + .map_or(DataBlockIndexType::BinarySearch, || DataBlockIndexType::BinaryAndHash), + ); + + opts +} + +fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { + let config = &ctx.server.config; + + // Some cache capacities are overriden by server config in a strange but + // legacy-compat way + let cap = match desc.name { + | "eventid_pduid" => Some(config.eventid_pdu_cache_capacity), + | "eventid_shorteventid" => Some(config.eventidshort_cache_capacity), + | "shorteventid_eventid" => Some(config.shorteventid_cache_capacity), + | "shorteventid_authchain" => Some(config.auth_chain_cache_capacity), + | "shortstatekey_statekey" => Some(config.shortstatekey_cache_capacity), + | "statekey_shortstatekey" => Some(config.statekeyshort_cache_capacity), + | "servernameevent_data" => Some(config.servernameevent_data_cache_capacity), + | "pduid_pdu" | "eventid_outlierpdu" => Some(config.pdu_cache_capacity), + | _ => None, + } + .map(TryInto::try_into) + .transpose() + .expect("u32 to usize"); + + let ent_size: usize = desc + .key_size_hint + .unwrap_or_default() + .expected_add(desc.val_size_hint.unwrap_or_default()); + + let size = match cap { + | Some(cap) => cache_size(config, cap, ent_size), + | _ => desc.cache_size, + }; + + let shard_bits: i32 = desc + .cache_shards + .ilog2() + .try_into() + .expect("u32 to i32 conversion"); + + debug_assert!(shard_bits <= 6, "cache shards limited to 64"); + let mut cache_opts = LruCacheOptions::default(); + cache_opts.set_num_shard_bits(shard_bits); + cache_opts.set_capacity(size); + + let mut caches = ctx.col_cache.lock().expect("locked"); + match desc.cache_disp { + | CacheDisp::Unique if desc.cache_size == 0 => None, + | CacheDisp::Unique => { + let cache = Cache::new_lru_cache_opts(&cache_opts); + caches.insert(desc.name.into(), cache.clone()); + Some(cache) + }, + + | CacheDisp::SharedWith(other) if !caches.contains_key(other) => { + let cache = 
Cache::new_lru_cache_opts(&cache_opts); + caches.insert(desc.name.into(), cache.clone()); + Some(cache) + }, + + | CacheDisp::SharedWith(other) => Some( + caches + .get(other) + .cloned() + .expect("caches.contains_key(other) must be true"), + ), + + | CacheDisp::Shared => Some( + caches + .get("Shared") + .cloned() + .expect("shared cache must already exist"), + ), + } +} + +#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] +pub(crate) fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> usize { + let ents = f64::from(base_size) * config.cache_capacity_modifier; + + (ents as usize) + .checked_mul(entity_size) + .ok_or_else(|| err!(Config("cache_capacity_modifier", "Cache size is too large."))) + .expect("invalid cache size") +} diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs new file mode 100644 index 00000000..76238f7d --- /dev/null +++ b/src/database/engine/context.rs @@ -0,0 +1,73 @@ +use std::{ + collections::BTreeMap, + sync::{Arc, Mutex}, +}; + +use conduwuit::{debug, utils::math::usize_from_f64, Result, Server}; +use rocksdb::{Cache, Env}; + +use crate::{or_else, pool::Pool}; + +/// Some components are constructed prior to opening the database and must +/// outlive the database. These can also be shared between database instances +/// though at the time of this comment we only open one database per process. +/// These assets are housed in the shared Context. 
+pub(crate) struct Context { + pub(crate) pool: Arc, + pub(crate) col_cache: Mutex>, + pub(crate) row_cache: Mutex, + pub(crate) env: Mutex, + pub(crate) server: Arc, +} + +impl Context { + pub(crate) fn new(server: &Arc) -> Result> { + let config = &server.config; + let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0; + + let row_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; + let row_cache = Cache::new_lru_cache(row_cache_capacity_bytes); + + let col_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; + let col_cache = Cache::new_lru_cache(col_cache_capacity_bytes); + + let col_cache: BTreeMap<_, _> = [("Shared".to_owned(), col_cache)].into(); + + let mut env = Env::new().or_else(or_else)?; + + if config.rocksdb_compaction_prio_idle { + env.lower_thread_pool_cpu_priority(); + } + + if config.rocksdb_compaction_ioprio_idle { + env.lower_thread_pool_io_priority(); + } + + Ok(Arc::new(Self { + pool: Pool::new(server)?, + col_cache: col_cache.into(), + row_cache: row_cache.into(), + env: env.into(), + server: server.clone(), + })) + } +} + +impl Drop for Context { + #[cold] + fn drop(&mut self) { + debug!("Closing frontend pool"); + self.pool.close(); + + let mut env = self.env.lock().expect("locked"); + + debug!("Shutting down background threads"); + env.set_high_priority_background_threads(0); + env.set_low_priority_background_threads(0); + env.set_bottom_priority_background_threads(0); + env.set_background_threads(0); + + debug!("Joining background threads..."); + env.join_all_threads(); + } +} diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs new file mode 100644 index 00000000..211265de --- /dev/null +++ b/src/database/engine/db_opts.rs @@ -0,0 +1,133 @@ +use std::{cmp, convert::TryFrom}; + +use conduwuit::{utils, Config, Result}; +use rocksdb::{statistics::StatsLevel, Cache, DBRecoveryMode, Env, LogLevel, Options}; + +use super::{cf_opts::cache_size, logger::handle as 
handle_log}; + +/// Create database-wide options suitable for opening the database. This also +/// sets our default column options in case of opening a column with the same +/// resulting value. Note that we require special per-column options on some +/// columns, therefor columns should only be opened after passing this result +/// through cf_options(). +pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Result { + const DEFAULT_STATS_LEVEL: StatsLevel = if cfg!(debug_assertions) { + StatsLevel::ExceptDetailedTimers + } else { + StatsLevel::DisableAll + }; + + let mut opts = Options::default(); + + // Logging + set_logging_defaults(&mut opts, config); + + // Processing + opts.set_max_background_jobs(num_threads::(config)?); + opts.set_max_subcompactions(num_threads::(config)?); + opts.set_avoid_unnecessary_blocking_io(true); + opts.set_max_file_opening_threads(0); + + // IO + opts.set_atomic_flush(true); + opts.set_manual_wal_flush(true); + opts.set_enable_pipelined_write(false); + if config.rocksdb_direct_io { + opts.set_use_direct_reads(true); + opts.set_use_direct_io_for_flush_and_compaction(true); + } + if config.rocksdb_optimize_for_spinning_disks { + // speeds up opening DB on hard drives + opts.set_skip_checking_sst_file_sizes_on_db_open(true); + opts.set_skip_stats_update_on_db_open(true); + //opts.set_max_file_opening_threads(threads.try_into().unwrap()); + } + + // Blocks + opts.set_row_cache(row_cache); + + // Files + opts.set_table_cache_num_shard_bits(7); + opts.set_wal_size_limit_mb(1024 * 1024 * 1024); + opts.set_max_total_wal_size(1024 * 1024 * 512); + opts.set_db_write_buffer_size(cache_size(config, 1024 * 1024 * 32, 1)); + + // Misc + opts.set_disable_auto_compactions(!config.rocksdb_compaction); + opts.create_missing_column_families(true); + opts.create_if_missing(true); + + opts.set_statistics_level(match config.rocksdb_stats_level { + | 0 => StatsLevel::DisableAll, + | 1 => DEFAULT_STATS_LEVEL, + | 2 => 
StatsLevel::ExceptHistogramOrTimers, + | 3 => StatsLevel::ExceptTimers, + | 4 => StatsLevel::ExceptDetailedTimers, + | 5 => StatsLevel::ExceptTimeForMutex, + | 6_u8..=u8::MAX => StatsLevel::All, + }); + + opts.set_report_bg_io_stats(match config.rocksdb_stats_level { + | 0..=1 => false, + | 2_u8..=u8::MAX => true, + }); + + // Default: https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords + // + // Unclean shutdowns of a Matrix homeserver are likely to be fine when + // recovered in this manner as it's likely any lost information will be + // restored via federation. + opts.set_wal_recovery_mode(match config.rocksdb_recovery_mode { + | 0 => DBRecoveryMode::AbsoluteConsistency, + | 1 => DBRecoveryMode::TolerateCorruptedTailRecords, + | 2 => DBRecoveryMode::PointInTime, + | 3 => DBRecoveryMode::SkipAnyCorruptedRecord, + | 4_u8..=u8::MAX => unimplemented!(), + }); + + // + // "We recommend to set track_and_verify_wals_in_manifest to true for + // production, it has been enabled in production for the entire database cluster + // serving the social graph for all Meta apps." 
+ opts.set_track_and_verify_wals_in_manifest(true); + + opts.set_paranoid_checks(config.rocksdb_paranoid_file_checks); + + opts.set_env(env); + + Ok(opts) +} + +fn set_logging_defaults(opts: &mut Options, config: &Config) { + let rocksdb_log_level = match config.rocksdb_log_level.as_ref() { + | "debug" => LogLevel::Debug, + | "info" => LogLevel::Info, + | "warn" => LogLevel::Warn, + | "fatal" => LogLevel::Fatal, + | _ => LogLevel::Error, + }; + + opts.set_log_level(rocksdb_log_level); + opts.set_max_log_file_size(config.rocksdb_max_log_file_size); + opts.set_log_file_time_to_roll(config.rocksdb_log_time_to_roll); + opts.set_keep_log_file_num(config.rocksdb_max_log_files); + opts.set_stats_dump_period_sec(0); + + if config.rocksdb_log_stderr { + opts.set_stderr_logger(rocksdb_log_level, "rocksdb"); + } else { + opts.set_callback_logger(rocksdb_log_level, &handle_log); + } +} + +fn num_threads>(config: &Config) -> Result { + const MIN_PARALLELISM: usize = 2; + + let requested = if config.rocksdb_parallelism_threads != 0 { + config.rocksdb_parallelism_threads + } else { + utils::available_parallelism() + }; + + utils::math::try_into::(cmp::max(MIN_PARALLELISM, requested)) +} diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs new file mode 100644 index 00000000..f0fd83f1 --- /dev/null +++ b/src/database/engine/descriptor.rs @@ -0,0 +1,89 @@ +use conduwuit::utils::string::EMPTY; +use rocksdb::{ + DBCompactionPri as CompactionPri, DBCompactionStyle as CompactionStyle, + DBCompressionType as CompressionType, +}; + +#[derive(Debug, Clone, Copy)] +pub(crate) enum CacheDisp { + Unique, + Shared, + SharedWith(&'static str), +} + +#[derive(Debug, Clone)] +pub(crate) struct Descriptor { + pub(crate) name: &'static str, + pub(crate) cache_disp: CacheDisp, + pub(crate) key_size_hint: Option, + pub(crate) val_size_hint: Option, + pub(crate) block_size: usize, + pub(crate) index_size: usize, + pub(crate) write_size: Option, + pub(crate) cache_size: 
usize, + pub(crate) level_size: u64, + pub(crate) level_shape: [i32; 7], + pub(crate) file_size: u64, + pub(crate) file_shape: [i32; 1], + pub(crate) level0_width: i32, + pub(crate) merge_width: (i32, i32), + pub(crate) ttl: u64, + pub(crate) compaction: CompactionStyle, + pub(crate) compaction_pri: CompactionPri, + pub(crate) compression: CompressionType, + pub(crate) compression_level: i32, + pub(crate) bottommost_level: Option, + pub(crate) block_index_hashing: bool, + pub(crate) cache_shards: u32, +} + +pub(crate) static BASE: Descriptor = Descriptor { + name: EMPTY, + cache_disp: CacheDisp::Shared, + key_size_hint: None, + val_size_hint: None, + block_size: 1024 * 4, + index_size: 1024 * 4, + write_size: None, + cache_size: 1024 * 1024 * 4, + level_size: 1024 * 1024 * 8, + level_shape: [1, 1, 1, 3, 7, 15, 31], + file_size: 1024 * 1024, + file_shape: [2], + level0_width: 2, + merge_width: (2, 16), + ttl: 60 * 60 * 24 * 21, + compaction: CompactionStyle::Level, + compaction_pri: CompactionPri::MinOverlappingRatio, + compression: CompressionType::Zstd, + compression_level: 32767, + bottommost_level: Some(32767), + block_index_hashing: false, + cache_shards: 64, +}; + +pub(crate) static RANDOM: Descriptor = Descriptor { + compaction_pri: CompactionPri::OldestSmallestSeqFirst, + ..BASE +}; + +pub(crate) static SEQUENTIAL: Descriptor = Descriptor { + compaction_pri: CompactionPri::OldestLargestSeqFirst, + level_size: 1024 * 1024 * 32, + file_size: 1024 * 1024 * 2, + ..BASE +}; + +pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { + compaction: CompactionStyle::Universal, + level_size: 1024 * 512, + file_size: 1024 * 128, + ..RANDOM +}; + +pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { + compaction: CompactionStyle::Universal, + level_size: 1024 * 1024, + file_size: 1024 * 512, + ..SEQUENTIAL +}; diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs new file mode 100644 index 00000000..f603c57b --- /dev/null +++ 
b/src/database/engine/files.rs @@ -0,0 +1,32 @@ +use std::fmt::Write; + +use conduwuit::{implement, Result}; + +use super::Engine; + +#[implement(Engine)] +pub fn file_list(&self) -> Result { + match self.db.live_files() { + | Err(e) => Ok(String::from(e)), + | Ok(mut files) => { + files.sort_by_key(|f| f.name.clone()); + let mut res = String::new(); + writeln!(res, "| lev | sst | keys | dels | size | column |")?; + writeln!(res, "| ---: | :--- | ---: | ---: | ---: | :--- |")?; + for file in files { + writeln!( + res, + "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", + file.level, + file.name, + file.num_entries, + file.num_deletions, + file.size, + file.column_family_name, + )?; + } + + Ok(res) + }, + } +} diff --git a/src/database/engine/logger.rs b/src/database/engine/logger.rs new file mode 100644 index 00000000..a1898e30 --- /dev/null +++ b/src/database/engine/logger.rs @@ -0,0 +1,22 @@ +use conduwuit::{debug, error, warn}; +use rocksdb::LogLevel; + +#[tracing::instrument( + parent = None, + name = "rocksdb", + level = "trace" + skip(msg), +)] +pub(crate) fn handle(level: LogLevel, msg: &str) { + let msg = msg.trim(); + if msg.starts_with("Options") { + return; + } + + match level { + | LogLevel::Header | LogLevel::Debug => debug!("{msg}"), + | LogLevel::Error | LogLevel::Fatal => error!("{msg}"), + | LogLevel::Info => debug!("{msg}"), + | LogLevel::Warn => warn!("{msg}"), + }; +} diff --git a/src/database/engine/memory_usage.rs b/src/database/engine/memory_usage.rs new file mode 100644 index 00000000..01859815 --- /dev/null +++ b/src/database/engine/memory_usage.rs @@ -0,0 +1,30 @@ +use std::fmt::Write; + +use conduwuit::{implement, Result}; +use rocksdb::perf::get_memory_usage_stats; + +use super::Engine; +use crate::or_else; + +#[implement(Engine)] +pub fn memory_usage(&self) -> Result { + let mut res = String::new(); + let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&*self.ctx.row_cache.lock()?])) + .or_else(or_else)?; + let mibs = |input| 
f64::from(u32::try_from(input / 1024).unwrap_or(0)) / 1024.0; + writeln!( + res, + "Memory buffers: {:.2} MiB\nPending write: {:.2} MiB\nTable readers: {:.2} MiB\nRow \ + cache: {:.2} MiB", + mibs(stats.mem_table_total), + mibs(stats.mem_table_unflushed), + mibs(stats.mem_table_readers_total), + mibs(u64::try_from(self.ctx.row_cache.lock()?.get_usage())?), + )?; + + for (name, cache) in &*self.ctx.col_cache.lock()? { + writeln!(res, "{name} cache: {:.2} MiB", mibs(u64::try_from(cache.get_usage())?))?; + } + + Ok(res) +} diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs new file mode 100644 index 00000000..9999296b --- /dev/null +++ b/src/database/engine/open.rs @@ -0,0 +1,121 @@ +use std::{ + collections::BTreeSet, + path::Path, + sync::{atomic::AtomicU32, Arc}, +}; + +use conduwuit::{debug, debug_warn, implement, info, warn, Result}; +use rocksdb::{ColumnFamilyDescriptor, Options}; + +use super::{ + cf_opts::cf_options, db_opts::db_options, descriptor::Descriptor, repair::repair, Db, Engine, +}; +use crate::{or_else, Context}; + +#[implement(Engine)] +#[tracing::instrument(skip_all)] +pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result> { + let server = &ctx.server; + let config = &server.config; + let path = &config.database_path; + + let db_opts = db_options( + config, + &ctx.env.lock().expect("environment locked"), + &ctx.row_cache.lock().expect("row cache locked"), + )?; + + let cfds = Self::configure_cfds(&ctx, &db_opts, desc)?; + let num_cfds = cfds.len(); + debug!("Configured {num_cfds} column descriptors..."); + + let load_time = std::time::Instant::now(); + if config.rocksdb_repair { + repair(&db_opts, &config.database_path)?; + } + + debug!("Opening database..."); + let db = if config.rocksdb_read_only { + Db::open_cf_descriptors_read_only(&db_opts, path, cfds, false) + } else if config.rocksdb_secondary { + Db::open_cf_descriptors_as_secondary(&db_opts, path, path, cfds) + } else { + Db::open_cf_descriptors(&db_opts, 
path, cfds) + } + .or_else(or_else)?; + + info!( + columns = num_cfds, + sequence = %db.latest_sequence_number(), + time = ?load_time.elapsed(), + "Opened database." + ); + + Ok(Arc::new(Self { + read_only: config.rocksdb_read_only, + secondary: config.rocksdb_secondary, + corks: AtomicU32::new(0), + pool: ctx.pool.clone(), + db, + ctx, + })) +} + +#[implement(Engine)] +#[tracing::instrument(name = "configure", skip_all)] +fn configure_cfds( + ctx: &Arc, + db_opts: &Options, + desc: &[Descriptor], +) -> Result> { + let server = &ctx.server; + let config = &server.config; + let path = &config.database_path; + let existing = Self::discover_cfs(path, db_opts); + debug!( + "Found {} existing columns; have {} described columns", + existing.len(), + desc.len() + ); + + existing + .iter() + .filter(|&name| name != "default") + .filter(|&name| !desc.iter().any(|desc| desc.name == name)) + .for_each(|name| { + debug_warn!("Found unknown column {name:?} in database which will not be opened."); + }); + + desc.iter() + .filter(|desc| !existing.contains(desc.name)) + .for_each(|desc| { + debug!( + "Creating new column {:?} which was not found in the existing database.", + desc.name, + ); + }); + + let cfopts: Vec<_> = desc + .iter() + .map(|desc| cf_options(ctx, db_opts.clone(), desc)) + .collect::>()?; + + let cfds: Vec<_> = desc + .iter() + .map(|desc| desc.name) + .map(ToOwned::to_owned) + .zip(cfopts.into_iter()) + .map(|(name, opts)| ColumnFamilyDescriptor::new(name, opts)) + .collect(); + + Ok(cfds) +} + +#[implement(Engine)] +#[tracing::instrument(name = "discover", skip_all)] +fn discover_cfs(path: &Path, opts: &Options) -> BTreeSet { + Db::list_cf(opts, path) + .unwrap_or_default() + .into_iter() + .collect::>() +} diff --git a/src/database/engine/repair.rs b/src/database/engine/repair.rs new file mode 100644 index 00000000..61283904 --- /dev/null +++ b/src/database/engine/repair.rs @@ -0,0 +1,16 @@ +use std::path::PathBuf; + +use conduwuit::{info, warn, Err, Result}; 
+use rocksdb::Options; + +use super::Db; + +pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result { + warn!("Starting database repair. This may take a long time..."); + match Db::repair(db_opts, path) { + | Ok(()) => info!("Database repair successful."), + | Err(e) => return Err!("Repair failed: {e:?}"), + } + + Ok(()) +} diff --git a/src/database/map.rs b/src/database/map.rs index de37b8f9..60d66585 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -6,6 +6,8 @@ mod insert; mod keys; mod keys_from; mod keys_prefix; +mod open; +mod options; mod remove; mod rev_keys; mod rev_keys_from; @@ -28,12 +30,15 @@ use std::{ }; use conduwuit::Result; -use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, ReadTier, WriteOptions}; +use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteOptions}; +pub(crate) use self::options::{ + cache_read_options_default, iter_options_default, read_options_default, write_options_default, +}; use crate::{watchers::Watchers, Engine}; pub struct Map { - name: String, + name: &'static str, db: Arc, cf: Arc, watchers: Watchers, @@ -43,11 +48,11 @@ pub struct Map { } impl Map { - pub(crate) fn open(db: &Arc, name: &str) -> Result> { + pub(crate) fn open(db: &Arc, name: &'static str) -> Result> { Ok(Arc::new(Self { - name: name.to_owned(), + name, db: db.clone(), - cf: open(db, name)?, + cf: open::open(db, name), watchers: Watchers::default(), write_options: write_options_default(), read_options: read_options_default(), @@ -75,7 +80,7 @@ impl Map { pub fn property(&self, name: &str) -> Result { self.db.property(&self.cf(), name) } #[inline] - pub fn name(&self) -> &str { &self.name } + pub fn name(&self) -> &str { self.name } #[inline] pub(crate) fn db(&self) -> &Arc { &self.db } @@ -93,60 +98,3 @@ impl Debug for Map { impl Display for Map { fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result { write!(out, "{0}", self.name) } } - -fn open(db: &Arc, name: &str) -> Result> { - let bounded_arc = 
db.open_cf(name)?; - let bounded_ptr = Arc::into_raw(bounded_arc); - let cf_ptr = bounded_ptr.cast::(); - - // SAFETY: Column family handles out of RocksDB are basic pointers and can - // be invalidated: 1. when the database closes. 2. when the column is dropped or - // closed. rust_rocksdb wraps this for us by storing handles in their own - // `RwLock` map and returning an Arc>` to - // provide expected safety. Similarly in "single-threaded mode" we would - // receive `&'_ ColumnFamily`. - // - // PROBLEM: We need to hold these handles in a field, otherwise we have to take - // a lock and get them by name from this map for every query, which is what - // conduit was doing, but we're not going to make a query for every query so we - // need to be holding it right. The lifetime parameter on these references makes - // that complicated. If this can be done without polluting the userspace - // with lifetimes on every instance of `Map` then this `unsafe` might not be - // necessary. - // - // SOLUTION: After investigating the underlying types it appears valid to - // Arc-swap `BoundColumnFamily<'_>` for `ColumnFamily`. They have the - // same inner data, the same Drop behavior, Deref, etc. We're just losing the - // lifetime parameter. We should not hold this handle, even in its Arc, after - // closing the database (dropping `Engine`). Since `Arc` is a sibling - // member along with this handle in `Map`, that is prevented. 
- Ok(unsafe { - Arc::increment_strong_count(cf_ptr); - Arc::from_raw(cf_ptr) - }) -} - -#[inline] -pub(crate) fn iter_options_default() -> ReadOptions { - let mut read_options = read_options_default(); - read_options.set_background_purge_on_iterator_cleanup(true); - //read_options.set_pin_data(true); - read_options -} - -#[inline] -pub(crate) fn cache_read_options_default() -> ReadOptions { - let mut read_options = read_options_default(); - read_options.set_read_tier(ReadTier::BlockCache); - read_options -} - -#[inline] -pub(crate) fn read_options_default() -> ReadOptions { - let mut read_options = ReadOptions::default(); - read_options.set_total_order_seek(true); - read_options -} - -#[inline] -pub(crate) fn write_options_default() -> WriteOptions { WriteOptions::default() } diff --git a/src/database/map/open.rs b/src/database/map/open.rs new file mode 100644 index 00000000..6ecec044 --- /dev/null +++ b/src/database/map/open.rs @@ -0,0 +1,37 @@ +use std::sync::Arc; + +use rocksdb::ColumnFamily; + +use crate::Engine; + +pub(super) fn open(db: &Arc, name: &str) -> Arc { + let bounded_arc = db.cf(name); + let bounded_ptr = Arc::into_raw(bounded_arc); + let cf_ptr = bounded_ptr.cast::(); + + // SAFETY: Column family handles out of RocksDB are basic pointers and can + // be invalidated: 1. when the database closes. 2. when the column is dropped or + // closed. rust_rocksdb wraps this for us by storing handles in their own + // `RwLock` map and returning an Arc>` to + // provide expected safety. Similarly in "single-threaded mode" we would + // receive `&'_ ColumnFamily`. + // + // PROBLEM: We need to hold these handles in a field, otherwise we have to take + // a lock and get them by name from this map for every query, which is what + // conduit was doing, but we're not going to make a query for every query so we + // need to be holding it right. The lifetime parameter on these references makes + // that complicated. 
If this can be done without polluting the userspace + // with lifetimes on every instance of `Map` then this `unsafe` might not be + // necessary. + // + // SOLUTION: After investigating the underlying types it appears valid to + // Arc-swap `BoundColumnFamily<'_>` for `ColumnFamily`. They have the + // same inner data, the same Drop behavior, Deref, etc. We're just losing the + // lifetime parameter. We should not hold this handle, even in its Arc, after + // closing the database (dropping `Engine`). Since `Arc` is a sibling + // member along with this handle in `Map`, that is prevented. + unsafe { + Arc::increment_strong_count(cf_ptr); + Arc::from_raw(cf_ptr) + } +} diff --git a/src/database/map/options.rs b/src/database/map/options.rs new file mode 100644 index 00000000..90dc0261 --- /dev/null +++ b/src/database/map/options.rs @@ -0,0 +1,26 @@ +use rocksdb::{ReadOptions, ReadTier, WriteOptions}; + +#[inline] +pub(crate) fn iter_options_default() -> ReadOptions { + let mut read_options = read_options_default(); + read_options.set_background_purge_on_iterator_cleanup(true); + //read_options.set_pin_data(true); + read_options +} + +#[inline] +pub(crate) fn cache_read_options_default() -> ReadOptions { + let mut read_options = read_options_default(); + read_options.set_read_tier(ReadTier::BlockCache); + read_options +} + +#[inline] +pub(crate) fn read_options_default() -> ReadOptions { + let mut read_options = ReadOptions::default(); + read_options.set_total_order_seek(true); + read_options +} + +#[inline] +pub(crate) fn write_options_default() -> WriteOptions { WriteOptions::default() } diff --git a/src/database/maps.rs b/src/database/maps.rs index d69cc7fd..e9b26818 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -2,103 +2,383 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::Result; -use crate::{Engine, Map}; +use crate::{ + engine::descriptor::{self, CacheDisp, Descriptor}, + Engine, Map, +}; -pub type Maps = BTreeMap; -pub(crate) 
type MapsVal = Arc; -pub(crate) type MapsKey = String; +pub(super) type Maps = BTreeMap; +pub(super) type MapsKey = &'static str; +pub(super) type MapsVal = Arc; -pub(crate) fn open(db: &Arc) -> Result { open_list(db, MAPS) } +pub(super) fn open(db: &Arc) -> Result { open_list(db, MAPS) } #[tracing::instrument(name = "maps", level = "debug", skip_all)] -pub(crate) fn open_list(db: &Arc, maps: &[&str]) -> Result { - Ok(maps - .iter() - .map(|&name| (name.to_owned(), Map::open(db, name).expect("valid column opened"))) - .collect::()) +pub(super) fn open_list(db: &Arc, maps: &[Descriptor]) -> Result { + maps.iter() + .map(|desc| Ok((desc.name, Map::open(db, desc.name)?))) + .collect() } -pub const MAPS: &[&str] = &[ - "alias_roomid", - "alias_userid", - "aliasid_alias", - "backupid_algorithm", - "backupid_etag", - "backupkeyid_backup", - "bannedroomids", - "disabledroomids", - "eventid_outlierpdu", - "eventid_pduid", - "eventid_shorteventid", - "global", - "id_appserviceregistrations", - "keychangeid_userid", - "keyid_key", - "lazyloadedids", - "mediaid_file", - "mediaid_user", - "onetimekeyid_onetimekeys", - "pduid_pdu", - "presenceid_presence", - "publicroomids", - "readreceiptid_readreceipt", - "referencedevents", - "roomid_invitedcount", - "roomid_inviteviaservers", - "roomid_joinedcount", - "roomid_pduleaves", - "roomid_shortroomid", - "roomid_shortstatehash", - "roomserverids", - "roomsynctoken_shortstatehash", - "roomuserdataid_accountdata", - "roomuserid_invitecount", - "roomuserid_joined", - "roomuserid_lastprivatereadupdate", - "roomuserid_leftcount", - "roomuserid_privateread", - "roomuseroncejoinedids", - "roomusertype_roomuserdataid", - "senderkey_pusher", - "server_signingkeys", - "servercurrentevent_data", - "servername_educount", - "servernameevent_data", - "serverroomids", - "shorteventid_authchain", - "shorteventid_eventid", - "shorteventid_shortstatehash", - "shortstatehash_statediff", - "shortstatekey_statekey", - "softfailedeventids", - 
"statehash_shortstatehash", - "statekey_shortstatekey", - "threadid_userids", - "todeviceid_events", - "tofrom_relation", - "token_userdeviceid", - "tokenids", - "url_previews", - "userdeviceid_metadata", - "userdeviceid_token", - "userdevicesessionid_uiaainfo", - "userdevicetxnid_response", - "userfilterid_filter", - "userid_avatarurl", - "userid_blurhash", - "userid_devicelistversion", - "userid_displayname", - "userid_lastonetimekeyupdate", - "userid_masterkeyid", - "userid_password", - "userid_presenceid", - "userid_selfsigningkeyid", - "userid_usersigningkeyid", - "useridprofilekey_value", - "openidtoken_expiresatuserid", - "userroomid_highlightcount", - "userroomid_invitestate", - "userroomid_joined", - "userroomid_leftstate", - "userroomid_notificationcount", +pub(super) static MAPS: &[Descriptor] = &[ + Descriptor { + name: "alias_roomid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "alias_userid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "aliasid_alias", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "backupid_algorithm", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "backupid_etag", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "backupkeyid_backup", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "bannedroomids", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "disabledroomids", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "eventid_outlierpdu", + cache_disp: CacheDisp::SharedWith("pduid_pdu"), + key_size_hint: Some(48), + val_size_hint: Some(1488), + ..descriptor::RANDOM + }, + Descriptor { + name: "eventid_pduid", + cache_disp: CacheDisp::Unique, + key_size_hint: Some(48), + val_size_hint: Some(16), + ..descriptor::RANDOM + }, + Descriptor { + name: "eventid_shorteventid", + cache_disp: CacheDisp::Unique, + key_size_hint: Some(48), + val_size_hint: Some(8), + ..descriptor::RANDOM + }, + Descriptor { + name: "global", + ..descriptor::RANDOM_SMALL + }, + Descriptor { 
+ name: "id_appserviceregistrations", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "keychangeid_userid", + ..descriptor::RANDOM + }, + Descriptor { + name: "keyid_key", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "lazyloadedids", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "mediaid_file", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "mediaid_user", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "onetimekeyid_onetimekeys", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "pduid_pdu", + cache_disp: CacheDisp::SharedWith("eventid_outlierpdu"), + key_size_hint: Some(16), + val_size_hint: Some(1520), + ..descriptor::SEQUENTIAL + }, + Descriptor { + name: "presenceid_presence", + ..descriptor::SEQUENTIAL_SMALL + }, + Descriptor { + name: "publicroomids", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "readreceiptid_readreceipt", + ..descriptor::RANDOM + }, + Descriptor { + name: "referencedevents", + ..descriptor::RANDOM + }, + Descriptor { + name: "roomid_invitedcount", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomid_inviteviaservers", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomid_joinedcount", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomid_pduleaves", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomid_shortroomid", + val_size_hint: Some(8), + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomid_shortstatehash", + val_size_hint: Some(8), + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomserverids", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomsynctoken_shortstatehash", + val_size_hint: Some(8), + ..descriptor::SEQUENTIAL + }, + Descriptor { + name: "roomuserdataid_accountdata", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomuserid_invitecount", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomuserid_joined", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + 
name: "roomuserid_lastprivatereadupdate", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomuserid_leftcount", + ..descriptor::RANDOM + }, + Descriptor { + name: "roomuserid_privateread", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "roomuseroncejoinedids", + ..descriptor::RANDOM + }, + Descriptor { + name: "roomusertype_roomuserdataid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "senderkey_pusher", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "server_signingkeys", + ..descriptor::RANDOM + }, + Descriptor { + name: "servercurrentevent_data", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "servername_educount", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "servernameevent_data", + cache_disp: CacheDisp::Unique, + val_size_hint: Some(128), + ..descriptor::RANDOM + }, + Descriptor { + name: "serverroomids", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "shorteventid_authchain", + cache_disp: CacheDisp::Unique, + key_size_hint: Some(8), + ..descriptor::SEQUENTIAL + }, + Descriptor { + name: "shorteventid_eventid", + cache_disp: CacheDisp::Unique, + key_size_hint: Some(8), + val_size_hint: Some(48), + ..descriptor::SEQUENTIAL_SMALL + }, + Descriptor { + name: "shorteventid_shortstatehash", + key_size_hint: Some(8), + val_size_hint: Some(8), + ..descriptor::SEQUENTIAL + }, + Descriptor { + name: "shortstatehash_statediff", + key_size_hint: Some(8), + ..descriptor::SEQUENTIAL_SMALL + }, + Descriptor { + name: "shortstatekey_statekey", + cache_disp: CacheDisp::Unique, + key_size_hint: Some(8), + val_size_hint: Some(1016), + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "softfailedeventids", + key_size_hint: Some(48), + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "statehash_shortstatehash", + val_size_hint: Some(8), + ..descriptor::RANDOM + }, + Descriptor { + name: "statekey_shortstatekey", + cache_disp: CacheDisp::Unique, + key_size_hint: Some(1016), + val_size_hint: 
Some(8), + ..descriptor::RANDOM + }, + Descriptor { + name: "threadid_userids", + ..descriptor::SEQUENTIAL_SMALL + }, + Descriptor { + name: "todeviceid_events", + ..descriptor::RANDOM + }, + Descriptor { + name: "tofrom_relation", + key_size_hint: Some(8), + val_size_hint: Some(8), + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "token_userdeviceid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { name: "tokenids", ..descriptor::RANDOM }, + Descriptor { + name: "url_previews", + ..descriptor::RANDOM + }, + Descriptor { + name: "userdeviceid_metadata", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userdeviceid_token", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userdevicesessionid_uiaainfo", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userdevicetxnid_response", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userfilterid_filter", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_avatarurl", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_blurhash", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_devicelistversion", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_displayname", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_lastonetimekeyupdate", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_masterkeyid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_password", + ..descriptor::RANDOM + }, + Descriptor { + name: "userid_presenceid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_selfsigningkeyid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userid_usersigningkeyid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "useridprofilekey_value", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "openidtoken_expiresatuserid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userroomid_highlightcount", + ..descriptor::RANDOM + }, + Descriptor 
{ + name: "userroomid_invitestate", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "userroomid_joined", + ..descriptor::RANDOM + }, + Descriptor { + name: "userroomid_leftstate", + ..descriptor::RANDOM + }, + Descriptor { + name: "userroomid_notificationcount", + ..descriptor::RANDOM + }, ]; diff --git a/src/database/mod.rs b/src/database/mod.rs index bdb7d3ea..6e3f8c96 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,5 +1,11 @@ +extern crate conduwuit_core as conduwuit; +extern crate rust_rocksdb as rocksdb; + +conduwuit::mod_ctor! {} +conduwuit::mod_dtor! {} +conduwuit::rustc_flags_capture! {} + mod cork; -mod database; mod de; mod deserialized; mod engine; @@ -7,7 +13,6 @@ mod handle; pub mod keyval; mod map; pub mod maps; -mod opts; mod pool; mod ser; mod stream; @@ -16,16 +21,11 @@ mod tests; pub(crate) mod util; mod watchers; -pub(crate) use self::{ - engine::Engine, - util::{or_else, result}, -}; +use std::{ops::Index, sync::Arc}; -extern crate conduwuit_core as conduwuit; -extern crate rust_rocksdb as rocksdb; +use conduwuit::{err, Result, Server}; pub use self::{ - database::Database, de::{Ignore, IgnoreAll}, deserialized::Deserialized, handle::Handle, @@ -33,7 +33,60 @@ pub use self::{ map::Map, ser::{serialize, serialize_to, serialize_to_vec, Interfix, Json, Separator, SEP}, }; +pub(crate) use self::{ + engine::{context::Context, Engine}, + util::{or_else, result}, +}; +use crate::maps::{Maps, MapsKey, MapsVal}; -conduwuit::mod_ctor! {} -conduwuit::mod_dtor! {} -conduwuit::rustc_flags_capture! {} +pub struct Database { + maps: Maps, + pub db: Arc, + pub(crate) _ctx: Arc, +} + +impl Database { + /// Load an existing database or create a new one. 
+ pub async fn open(server: &Arc) -> Result> { + let ctx = Context::new(server)?; + let db = Engine::open(ctx.clone(), maps::MAPS).await?; + Ok(Arc::new(Self { + maps: maps::open(&db)?, + db: db.clone(), + _ctx: ctx, + })) + } + + #[inline] + pub fn get(&self, name: &str) -> Result<&Arc> { + self.maps + .get(name) + .ok_or_else(|| err!(Request(NotFound("column not found")))) + } + + #[inline] + pub fn iter(&self) -> impl Iterator + Send + '_ { + self.maps.iter() + } + + #[inline] + pub fn keys(&self) -> impl Iterator + Send + '_ { self.maps.keys() } + + #[inline] + #[must_use] + pub fn is_read_only(&self) -> bool { self.db.is_read_only() } + + #[inline] + #[must_use] + pub fn is_secondary(&self) -> bool { self.db.is_secondary() } +} + +impl Index<&str> for Database { + type Output = Arc; + + fn index(&self, name: &str) -> &Self::Output { + self.maps + .get(name) + .expect("column in database does not exist") + } +} diff --git a/src/database/opts.rs b/src/database/opts.rs deleted file mode 100644 index 24128f14..00000000 --- a/src/database/opts.rs +++ /dev/null @@ -1,433 +0,0 @@ -use std::{cmp, collections::HashMap, convert::TryFrom}; - -use conduwuit::{err, utils, Config, Result}; -use rocksdb::{ - statistics::StatsLevel, BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, - DBRecoveryMode, Env, LogLevel, LruCacheOptions, Options, UniversalCompactOptions, - UniversalCompactionStopStyle, -}; - -/// Create database-wide options suitable for opening the database. This also -/// sets our default column options in case of opening a column with the same -/// resulting value. Note that we require special per-column options on some -/// columns, therefor columns should only be opened after passing this result -/// through cf_options(). 
-pub(crate) fn db_options( - config: &Config, - env: &mut Env, - row_cache: &Cache, - col_cache: &Cache, -) -> Result { - const DEFAULT_STATS_LEVEL: StatsLevel = if cfg!(debug_assertions) { - StatsLevel::ExceptDetailedTimers - } else { - StatsLevel::DisableAll - }; - - let mut opts = Options::default(); - - // Logging - set_logging_defaults(&mut opts, config); - - // Processing - opts.set_max_background_jobs(num_threads::(config)?); - opts.set_max_subcompactions(num_threads::(config)?); - opts.set_avoid_unnecessary_blocking_io(true); - opts.set_max_file_opening_threads(0); - if config.rocksdb_compaction_prio_idle { - env.lower_thread_pool_cpu_priority(); - } - - // IO - opts.set_atomic_flush(true); - opts.set_manual_wal_flush(true); - opts.set_enable_pipelined_write(false); - if config.rocksdb_direct_io { - opts.set_use_direct_reads(true); - opts.set_use_direct_io_for_flush_and_compaction(true); - } - if config.rocksdb_optimize_for_spinning_disks { - // speeds up opening DB on hard drives - opts.set_skip_checking_sst_file_sizes_on_db_open(true); - opts.set_skip_stats_update_on_db_open(true); - //opts.set_max_file_opening_threads(threads.try_into().unwrap()); - } - if config.rocksdb_compaction_ioprio_idle { - env.lower_thread_pool_io_priority(); - } - - // Blocks - let mut table_opts = table_options(config); - table_opts.set_block_cache(col_cache); - opts.set_block_based_table_factory(&table_opts); - opts.set_row_cache(row_cache); - - // Buffers - opts.set_write_buffer_size(2 * 1024 * 1024); - opts.set_max_write_buffer_number(2); - opts.set_min_write_buffer_number(1); - - // Files - opts.set_table_cache_num_shard_bits(7); - opts.set_max_total_wal_size(96 * 1024 * 1024); - set_level_defaults(&mut opts, config); - - // Compression - set_compression_defaults(&mut opts, config); - - // Misc - opts.create_if_missing(true); - opts.set_disable_auto_compactions(!config.rocksdb_compaction); - - opts.set_statistics_level(match config.rocksdb_stats_level { - | 0 => 
StatsLevel::DisableAll, - | 1 => DEFAULT_STATS_LEVEL, - | 2 => StatsLevel::ExceptHistogramOrTimers, - | 3 => StatsLevel::ExceptTimers, - | 4 => StatsLevel::ExceptDetailedTimers, - | 5 => StatsLevel::ExceptTimeForMutex, - | 6_u8..=u8::MAX => StatsLevel::All, - }); - - // Default: https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords - // - // Unclean shutdowns of a Matrix homeserver are likely to be fine when - // recovered in this manner as it's likely any lost information will be - // restored via federation. - opts.set_wal_recovery_mode(match config.rocksdb_recovery_mode { - | 0 => DBRecoveryMode::AbsoluteConsistency, - | 1 => DBRecoveryMode::TolerateCorruptedTailRecords, - | 2 => DBRecoveryMode::PointInTime, - | 3 => DBRecoveryMode::SkipAnyCorruptedRecord, - | 4_u8..=u8::MAX => unimplemented!(), - }); - - // - // "We recommend to set track_and_verify_wals_in_manifest to true for - // production, it has been enabled in production for the entire database cluster - // serving the social graph for all Meta apps." - opts.set_track_and_verify_wals_in_manifest(true); - - opts.set_paranoid_checks(config.rocksdb_paranoid_file_checks); - - opts.set_env(env); - Ok(opts) -} - -/// Adjust options for the specific column by name. Provide the result of -/// db_options() as the argument to this function and use the return value in -/// the arguments to open the specific column. 
-pub(crate) fn cf_options( - cfg: &Config, - name: &str, - mut opts: Options, - cache: &mut HashMap, -) -> Result { - // Columns with non-default compaction options - match name { - | "backupid_algorithm" - | "backupid_etag" - | "backupkeyid_backup" - | "roomid_shortroomid" - | "shorteventid_shortstatehash" - | "shorteventid_eventid" - | "shortstatekey_statekey" - | "shortstatehash_statediff" - | "userdevicetxnid_response" - | "userfilterid_filter" => set_for_sequential_small_uc(&mut opts, cfg), - | &_ => {}, - } - - // Columns with non-default table/cache configs - match name { - | "shorteventid_eventid" => set_table_with_new_cache( - &mut opts, - cfg, - cache, - name, - cache_size(cfg, cfg.shorteventid_cache_capacity, 64)?, - ), - - | "eventid_shorteventid" => set_table_with_new_cache( - &mut opts, - cfg, - cache, - name, - cache_size(cfg, cfg.eventidshort_cache_capacity, 64)?, - ), - - | "eventid_pduid" => set_table_with_new_cache( - &mut opts, - cfg, - cache, - name, - cache_size(cfg, cfg.eventid_pdu_cache_capacity, 64)?, - ), - - | "shorteventid_authchain" => { - set_table_with_new_cache( - &mut opts, - cfg, - cache, - name, - cache_size(cfg, cfg.auth_chain_cache_capacity, 192)?, - ); - }, - - | "shortstatekey_statekey" => set_table_with_new_cache( - &mut opts, - cfg, - cache, - name, - cache_size(cfg, cfg.shortstatekey_cache_capacity, 1024)?, - ), - - | "statekey_shortstatekey" => set_table_with_new_cache( - &mut opts, - cfg, - cache, - name, - cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024)?, - ), - - | "servernameevent_data" => set_table_with_new_cache( - &mut opts, - cfg, - cache, - name, - cache_size(cfg, cfg.servernameevent_data_cache_capacity, 128)?, /* Raw average - * value size = - * 102, key - * size = 34 */ - ), - - | "eventid_outlierpdu" => { - set_table_with_new_cache( - &mut opts, - cfg, - cache, - name, - cache_size(cfg, cfg.pdu_cache_capacity, 1536)?, - ); - }, - - | "pduid_pdu" => { - set_table_with_shared_cache(&mut opts, cfg, cache, 
name, "eventid_outlierpdu"); - }, - - | &_ => {}, - } - - Ok(opts) -} - -fn set_logging_defaults(opts: &mut Options, config: &Config) { - let rocksdb_log_level = match config.rocksdb_log_level.as_ref() { - | "debug" => LogLevel::Debug, - | "info" => LogLevel::Info, - | "warn" => LogLevel::Warn, - | "fatal" => LogLevel::Fatal, - | _ => LogLevel::Error, - }; - - opts.set_log_level(rocksdb_log_level); - opts.set_max_log_file_size(config.rocksdb_max_log_file_size); - opts.set_log_file_time_to_roll(config.rocksdb_log_time_to_roll); - opts.set_keep_log_file_num(config.rocksdb_max_log_files); - opts.set_stats_dump_period_sec(0); - - if config.rocksdb_log_stderr { - opts.set_stderr_logger(rocksdb_log_level, "rocksdb"); - } else { - opts.set_callback_logger(rocksdb_log_level, &super::engine::handle_log); - } -} - -fn set_compression_defaults(opts: &mut Options, config: &Config) { - let rocksdb_compression_algo = match config.rocksdb_compression_algo.as_ref() { - | "snappy" => DBCompressionType::Snappy, - | "zlib" => DBCompressionType::Zlib, - | "bz2" => DBCompressionType::Bz2, - | "lz4" => DBCompressionType::Lz4, - | "lz4hc" => DBCompressionType::Lz4hc, - | "none" => DBCompressionType::None, - | _ => DBCompressionType::Zstd, - }; - - if config.rocksdb_bottommost_compression { - opts.set_bottommost_compression_type(rocksdb_compression_algo); - opts.set_bottommost_zstd_max_train_bytes(0, true); - - // -14 w_bits is only read by zlib. - opts.set_bottommost_compression_options( - -14, - config.rocksdb_bottommost_compression_level, - 0, - 0, - true, - ); - } - - // -14 w_bits is only read by zlib. 
- opts.set_compression_options(-14, config.rocksdb_compression_level, 0, 0); - opts.set_compression_type(rocksdb_compression_algo); -} - -#[allow(dead_code)] -fn set_for_random_small_uc(opts: &mut Options, config: &Config) { - let uco = uc_options(config); - set_for_random_small(opts, config); - opts.set_universal_compaction_options(&uco); - opts.set_compaction_style(DBCompactionStyle::Universal); -} - -fn set_for_sequential_small_uc(opts: &mut Options, config: &Config) { - let uco = uc_options(config); - set_for_sequential_small(opts, config); - opts.set_universal_compaction_options(&uco); - opts.set_compaction_style(DBCompactionStyle::Universal); -} - -#[allow(dead_code)] -fn set_for_random_small(opts: &mut Options, config: &Config) { - set_for_random(opts, config); - - opts.set_write_buffer_size(1024 * 128); - opts.set_target_file_size_base(1024 * 128); - opts.set_target_file_size_multiplier(2); - opts.set_max_bytes_for_level_base(1024 * 512); - opts.set_max_bytes_for_level_multiplier(2.0); -} - -fn set_for_sequential_small(opts: &mut Options, config: &Config) { - set_for_sequential(opts, config); - - opts.set_write_buffer_size(1024 * 512); - opts.set_target_file_size_base(1024 * 512); - opts.set_target_file_size_multiplier(2); - opts.set_max_bytes_for_level_base(1024 * 1024); - opts.set_max_bytes_for_level_multiplier(2.0); -} - -fn set_for_random(opts: &mut Options, config: &Config) { - set_level_defaults(opts, config); - - let pri = "compaction_pri=kOldestSmallestSeqFirst"; - opts.set_options_from_string(pri) - .expect("set compaction priority string"); - - opts.set_max_bytes_for_level_base(8 * 1024 * 1024); - opts.set_max_bytes_for_level_multiplier(1.0); - opts.set_max_bytes_for_level_multiplier_additional(&[0, 1, 1, 3, 7, 15, 31]); -} - -fn set_for_sequential(opts: &mut Options, config: &Config) { - set_level_defaults(opts, config); - - let pri = "compaction_pri=kOldestLargestSeqFirst"; - opts.set_options_from_string(pri) - .expect("set compaction priority 
string"); - - opts.set_target_file_size_base(2 * 1024 * 1024); - opts.set_target_file_size_multiplier(2); - - opts.set_max_bytes_for_level_base(32 * 1024 * 1024); - opts.set_max_bytes_for_level_multiplier(1.0); - opts.set_max_bytes_for_level_multiplier_additional(&[0, 1, 1, 3, 7, 15, 31]); -} - -fn set_level_defaults(opts: &mut Options, _config: &Config) { - opts.set_level_zero_file_num_compaction_trigger(2); - - opts.set_target_file_size_base(1024 * 1024); - opts.set_target_file_size_multiplier(2); - - opts.set_level_compaction_dynamic_level_bytes(false); - opts.set_max_bytes_for_level_base(16 * 1024 * 1024); - opts.set_max_bytes_for_level_multiplier(2.0); - - opts.set_ttl(21 * 24 * 60 * 60); -} - -fn uc_options(_config: &Config) -> UniversalCompactOptions { - let mut opts = UniversalCompactOptions::default(); - - opts.set_stop_style(UniversalCompactionStopStyle::Total); - opts.set_max_size_amplification_percent(10000); - opts.set_compression_size_percent(-1); - opts.set_size_ratio(1); - - opts.set_min_merge_width(2); - opts.set_max_merge_width(16); - - opts -} - -fn set_table_with_new_cache( - opts: &mut Options, - config: &Config, - caches: &mut HashMap, - name: &str, - size: usize, -) { - let mut cache_opts = LruCacheOptions::default(); - cache_opts.set_capacity(size); - cache_opts.set_num_shard_bits(7); - - let cache = Cache::new_lru_cache_opts(&cache_opts); - caches.insert(name.into(), cache); - - set_table_with_shared_cache(opts, config, caches, name, name); -} - -fn set_table_with_shared_cache( - opts: &mut Options, - config: &Config, - cache: &HashMap, - _name: &str, - cache_name: &str, -) { - let mut table = table_options(config); - table.set_block_cache( - cache - .get(cache_name) - .expect("existing cache to share with this column"), - ); - - opts.set_block_based_table_factory(&table); -} - -fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> Result { - let ents = f64::from(base_size) * config.cache_capacity_modifier; - - 
#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] - (ents as usize) - .checked_mul(entity_size) - .ok_or_else(|| err!(Config("cache_capacity_modifier", "Cache size is too large."))) -} - -fn table_options(_config: &Config) -> BlockBasedOptions { - let mut opts = BlockBasedOptions::default(); - - opts.set_block_size(4 * 1024); - opts.set_metadata_block_size(4 * 1024); - - opts.set_use_delta_encoding(false); - opts.set_optimize_filters_for_memory(true); - opts.set_cache_index_and_filter_blocks(true); - opts.set_pin_top_level_index_and_filter(true); - - opts -} - -fn num_threads>(config: &Config) -> Result { - const MIN_PARALLELISM: usize = 2; - - let requested = if config.rocksdb_parallelism_threads != 0 { - config.rocksdb_parallelism_threads - } else { - utils::available_parallelism() - }; - - utils::math::try_into::(cmp::max(MIN_PARALLELISM, requested)) -} diff --git a/src/service/migrations.rs b/src/service/migrations.rs index adf75c0b..c42c0324 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -379,7 +379,7 @@ async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result< }) .await; - db.db.cleanup()?; + db.db.sort()?; db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); info!("Finished fixing"); @@ -465,7 +465,7 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) .await; } - db.db.cleanup()?; + db.db.sort()?; db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); info!("Finished fixing"); @@ -511,7 +511,7 @@ async fn fix_referencedevents_missing_sep(services: &Services) -> Result { info!(?total, ?fixed, "Fixed missing record separators in 'referencedevents'."); db["global"].insert(b"fix_referencedevents_missing_sep", []); - db.db.cleanup() + db.db.sort() } async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result { @@ -561,5 +561,5 @@ async fn 
fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result info!(?total, ?fixed, "Fixed undeleted entries in readreceiptid_readreceipt."); db["global"].insert(b"fix_readreceiptid_readreceipt_duplicates", []); - db.db.cleanup() + db.db.sort() } diff --git a/src/service/services.rs b/src/service/services.rs index 9f9d10f5..c955834e 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -124,7 +124,7 @@ impl Services { .await?; // set the server user as online - if self.server.config.allow_local_presence { + if self.server.config.allow_local_presence && !self.db.is_read_only() { _ = self .presence .ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Online) @@ -139,7 +139,7 @@ impl Services { info!("Shutting down services..."); // set the server user as offline - if self.server.config.allow_local_presence { + if self.server.config.allow_local_presence && !self.db.is_read_only() { _ = self .presence .ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Offline) From 16fa2eca87ef2b3b006a8467dd620ef3cce240a0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 9 Jan 2025 19:55:25 +0000 Subject: [PATCH 0499/1248] add conf item for write buffer size Signed-off-by: Jason Volk --- conduwuit-example.toml | 42 ++++++++++++++++++---------- src/core/config/mod.rs | 50 +++++++++++++++++++++++----------- src/database/engine/cf_opts.rs | 26 ++++++++++++++---- src/database/engine/db_opts.rs | 11 ++++++-- 4 files changed, 92 insertions(+), 37 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index e2ed5daa..9eefedbb 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -100,20 +100,6 @@ # #database_backups_to_keep = 1 -# Set this to any float value in megabytes for conduwuit to tell the -# database engine that this much memory is available for database-related -# caches. -# -# May be useful if you have significant memory to spare to increase -# performance. 
-# -# Similar to the individual LRU caches, this is scaled up with your CPU -# core count. -# -# This defaults to 128.0 + (64.0 * CPU core count). -# -#db_cache_capacity_mb = varies by system - # Text which will be added to the end of the user's displayname upon # registration with a space before the text. In Conduit, this was the # lightning bolt emoji. @@ -149,6 +135,34 @@ # #cache_capacity_modifier = 1.0 +# Set this to any float value in megabytes for conduwuit to tell the +# database engine that this much memory is available for database read +# caches. +# +# May be useful if you have significant memory to spare to increase +# performance. +# +# Similar to the individual LRU caches, this is scaled up with your CPU +# core count. +# +# This defaults to 128.0 + (64.0 * CPU core count). +# +#db_cache_capacity_mb = varies by system + +# Set this to any float value in megabytes for conduwuit to tell the +# database engine that this much memory is available for database write +# caches. +# +# May be useful if you have significant memory to spare to increase +# performance. +# +# Similar to the individual LRU caches, this is scaled up with your CPU +# core count. +# +# This defaults to 48.0 + (4.0 * CPU core count). +# +#db_write_buffer_capacity_mb = varies by system + # This item is undocumented. Please contribute documentation for it. # #pdu_cache_capacity = varies by system diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 56580fda..97ecbeaf 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -147,22 +147,6 @@ pub struct Config { #[serde(default = "default_database_backups_to_keep")] pub database_backups_to_keep: i16, - /// Set this to any float value in megabytes for conduwuit to tell the - /// database engine that this much memory is available for database-related - /// caches. - /// - /// May be useful if you have significant memory to spare to increase - /// performance. 
- /// - /// Similar to the individual LRU caches, this is scaled up with your CPU - /// core count. - /// - /// This defaults to 128.0 + (64.0 * CPU core count). - /// - /// default: varies by system - #[serde(default = "default_db_cache_capacity_mb")] - pub db_cache_capacity_mb: f64, - /// Text which will be added to the end of the user's displayname upon /// registration with a space before the text. In Conduit, this was the /// lightning bolt emoji. @@ -205,6 +189,38 @@ pub struct Config { )] pub cache_capacity_modifier: f64, + /// Set this to any float value in megabytes for conduwuit to tell the + /// database engine that this much memory is available for database read + /// caches. + /// + /// May be useful if you have significant memory to spare to increase + /// performance. + /// + /// Similar to the individual LRU caches, this is scaled up with your CPU + /// core count. + /// + /// This defaults to 128.0 + (64.0 * CPU core count). + /// + /// default: varies by system + #[serde(default = "default_db_cache_capacity_mb")] + pub db_cache_capacity_mb: f64, + + /// Set this to any float value in megabytes for conduwuit to tell the + /// database engine that this much memory is available for database write + /// caches. + /// + /// May be useful if you have significant memory to spare to increase + /// performance. + /// + /// Similar to the individual LRU caches, this is scaled up with your CPU + /// core count. + /// + /// This defaults to 48.0 + (4.0 * CPU core count). 
+ /// + /// default: varies by system + #[serde(default = "default_db_write_buffer_capacity_mb")] + pub db_write_buffer_capacity_mb: f64, + /// default: varies by system #[serde(default = "default_pdu_cache_capacity")] pub pdu_cache_capacity: u32, @@ -2233,6 +2249,8 @@ fn default_unix_socket_perms() -> u32 { 660 } fn default_database_backups_to_keep() -> i16 { 1 } +fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) } + fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) } fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 98d74044..006d36fe 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -10,7 +10,7 @@ use rocksdb::{ }; use super::descriptor::{CacheDisp, Descriptor}; -use crate::Context; +use crate::{util::map_err, Context}; /// Adjust options for the specific column by name. 
Provide the result of /// db_options() as the argument to this function and use the return value in @@ -28,7 +28,7 @@ fn descriptor_cf_options( cache: Option<&Cache>, ) -> Result { set_compression(&mut desc, config); - set_table_options(&mut opts, &desc, cache); + set_table_options(&mut opts, &desc, cache)?; opts.set_min_write_buffer_number(1); opts.set_max_write_buffer_number(2); @@ -65,10 +65,13 @@ fn descriptor_cf_options( ); } + opts.set_options_from_string("{{arena_block_size=2097152;}}") + .map_err(map_err)?; + Ok(opts) } -fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache>) { +fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache>) -> Result { let mut table = table_options(desc); if let Some(cache) = cache { table.set_block_cache(cache); @@ -76,7 +79,15 @@ fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache table.disable_cache(); } + opts.set_options_from_string( + "{{block_based_table_factory={num_file_reads_for_auto_readahead=0;\ + max_auto_readahead_size=524288;initial_auto_readahead_size=16384}}}", + ) + .map_err(map_err)?; + opts.set_block_based_table_factory(&table); + + Ok(()) } fn set_compression(desc: &mut Descriptor, config: &Config) { @@ -121,6 +132,7 @@ fn table_options(desc: &Descriptor) -> BlockBasedOptions { opts.set_unpartitioned_pinning_tier(BlockBasedPinningTier::None); opts.set_top_level_index_pinning_tier(BlockBasedPinningTier::None); + opts.set_partition_filters(true); opts.set_use_delta_encoding(false); opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch); opts.set_data_block_index_type( @@ -203,9 +215,13 @@ fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { } } -#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] pub(crate) fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> usize { - let ents = f64::from(base_size) * config.cache_capacity_modifier; + cache_size_f64(config, 
f64::from(base_size), entity_size) +} + +#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] +pub(crate) fn cache_size_f64(config: &Config, base_size: f64, entity_size: usize) -> usize { + let ents = base_size * config.cache_capacity_modifier; (ents as usize) .checked_mul(entity_size) diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 211265de..26f53825 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -3,7 +3,7 @@ use std::{cmp, convert::TryFrom}; use conduwuit::{utils, Config, Result}; use rocksdb::{statistics::StatsLevel, Cache, DBRecoveryMode, Env, LogLevel, Options}; -use super::{cf_opts::cache_size, logger::handle as handle_log}; +use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; /// Create database-wide options suitable for opening the database. This also /// sets our default column options in case of opening a column with the same @@ -41,16 +41,23 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul opts.set_skip_checking_sst_file_sizes_on_db_open(true); opts.set_skip_stats_update_on_db_open(true); //opts.set_max_file_opening_threads(threads.try_into().unwrap()); + } else { + opts.set_compaction_readahead_size(1024 * 512); } // Blocks opts.set_row_cache(row_cache); + opts.set_db_write_buffer_size(cache_size_f64( + config, + config.db_write_buffer_capacity_mb, + 1_048_576, + )); // Files opts.set_table_cache_num_shard_bits(7); opts.set_wal_size_limit_mb(1024 * 1024 * 1024); opts.set_max_total_wal_size(1024 * 1024 * 512); - opts.set_db_write_buffer_size(cache_size(config, 1024 * 1024 * 32, 1)); + opts.set_writable_file_max_buffer_size(1024 * 1024 * 2); // Misc opts.set_disable_auto_compactions(!config.rocksdb_compaction); From 66231676f15eb8400bac310f837cc23df0ef735a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 10 Jan 2025 22:23:02 +0000 Subject: [PATCH 0500/1248] gracefully ignore unknown columns; 
add dropped flag in descriptor Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 11 ++++--- src/database/engine/descriptor.rs | 2 ++ src/database/engine/open.rs | 54 +++++++++++++++++++------------ 3 files changed, 42 insertions(+), 25 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 006d36fe..a68eb8b6 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -72,7 +72,7 @@ fn descriptor_cf_options( } fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache>) -> Result { - let mut table = table_options(desc); + let mut table = table_options(desc, cache.is_some()); if let Some(cache) = cache { table.set_block_cache(cache); } else { @@ -119,13 +119,13 @@ fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { opts } -fn table_options(desc: &Descriptor) -> BlockBasedOptions { +fn table_options(desc: &Descriptor, has_cache: bool) -> BlockBasedOptions { let mut opts = BlockBasedOptions::default(); opts.set_block_size(desc.block_size); opts.set_metadata_block_size(desc.index_size); - opts.set_cache_index_and_filter_blocks(true); + opts.set_cache_index_and_filter_blocks(has_cache); opts.set_pin_top_level_index_and_filter(false); opts.set_pin_l0_filter_and_index_blocks_in_cache(false); opts.set_partition_pinning_tier(BlockBasedPinningTier::None); @@ -144,10 +144,13 @@ fn table_options(desc: &Descriptor) -> BlockBasedOptions { } fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { - let config = &ctx.server.config; + if desc.dropped { + return None; + } // Some cache capacities are overriden by server config in a strange but // legacy-compat way + let config = &ctx.server.config; let cap = match desc.name { | "eventid_pduid" => Some(config.eventid_pdu_cache_capacity), | "eventid_shorteventid" => Some(config.eventidshort_cache_capacity), diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index f0fd83f1..9cf57d8f 100644 --- 
a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -14,6 +14,7 @@ pub(crate) enum CacheDisp { #[derive(Debug, Clone)] pub(crate) struct Descriptor { pub(crate) name: &'static str, + pub(crate) dropped: bool, pub(crate) cache_disp: CacheDisp, pub(crate) key_size_hint: Option, pub(crate) val_size_hint: Option, @@ -39,6 +40,7 @@ pub(crate) struct Descriptor { pub(crate) static BASE: Descriptor = Descriptor { name: EMPTY, + dropped: false, cache_disp: CacheDisp::Shared, key_size_hint: None, val_size_hint: None, diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 9999296b..6a801878 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -4,11 +4,15 @@ use std::{ sync::{atomic::AtomicU32, Arc}, }; -use conduwuit::{debug, debug_warn, implement, info, warn, Result}; +use conduwuit::{debug, implement, info, warn, Result}; use rocksdb::{ColumnFamilyDescriptor, Options}; use super::{ - cf_opts::cf_options, db_opts::db_options, descriptor::Descriptor, repair::repair, Db, Engine, + cf_opts::cf_options, + db_opts::db_options, + descriptor::{self, Descriptor}, + repair::repair, + Db, Engine, }; use crate::{or_else, Context}; @@ -72,38 +76,46 @@ fn configure_cfds( let config = &server.config; let path = &config.database_path; let existing = Self::discover_cfs(path, db_opts); - debug!( - "Found {} existing columns; have {} described columns", - existing.len(), - desc.len() - ); - existing + let creating = desc.iter().filter(|desc| !existing.contains(desc.name)); + + let missing = existing .iter() .filter(|&name| name != "default") - .filter(|&name| !desc.iter().any(|desc| desc.name == name)) - .for_each(|name| { - debug_warn!("Found unknown column {name:?} in database which will not be opened."); - }); + .filter(|&name| !desc.iter().any(|desc| desc.name == name)); - desc.iter() - .filter(|desc| !existing.contains(desc.name)) - .for_each(|desc| { - debug!( - "Creating new column {:?} which was not found in 
the existing database.", - desc.name, - ); - }); + debug!( + existing = existing.len(), + described = desc.len(), + missing = missing.clone().count(), + creating = creating.clone().count(), + "Discovered database columns" + ); + + missing.clone().for_each(|name| { + debug!("Found unrecognized column {name:?} in existing database."); + }); + + creating.map(|desc| desc.name).for_each(|name| { + debug!("Creating new column {name:?} not previously found in existing database."); + }); + + let missing_descriptors = missing + .clone() + .map(|_| Descriptor { dropped: true, ..descriptor::BASE }); let cfopts: Vec<_> = desc .iter() - .map(|desc| cf_options(ctx, db_opts.clone(), desc)) + .cloned() + .chain(missing_descriptors) + .map(|ref desc| cf_options(ctx, db_opts.clone(), desc)) .collect::>()?; let cfds: Vec<_> = desc .iter() .map(|desc| desc.name) .map(ToOwned::to_owned) + .chain(missing.cloned()) .zip(cfopts.into_iter()) .map(|(name, opts)| ColumnFamilyDescriptor::new(name, opts)) .collect(); From 53d03bbb1f5001695d69f83047ead96d2079a36a Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 10:25:07 -0500 Subject: [PATCH 0501/1248] gate sd_notify to linux target_os only Signed-off-by: strawberry --- src/router/Cargo.toml | 2 +- src/router/run.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 1623590b..51e15aed 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -80,7 +80,7 @@ tower.workspace = true tower-http.workspace = true tracing.workspace = true -[target.'cfg(unix)'.dependencies] +[target.'cfg(all(unix, target_os = "linux"))'.dependencies] sd-notify.workspace = true sd-notify.optional = true diff --git a/src/router/run.rs b/src/router/run.rs index 1b4d7437..95d12559 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -63,7 +63,7 @@ pub(crate) async fn start(server: Arc) -> Result> { let services = Services::build(server).await?.start().await?; - #[cfg(feature 
= "systemd")] + #[cfg(all(feature = "systemd", target_os = "linux"))] sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) .expect("failed to notify systemd of ready state"); @@ -99,7 +99,7 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { ); } - #[cfg(feature = "systemd")] + #[cfg(all(feature = "systemd", target_os = "linux"))] sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) .expect("failed to notify systemd of stopping state"); From 4c2999ccd15506a4acbc948d62c7ec0c03d46167 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 10:46:32 -0500 Subject: [PATCH 0502/1248] gate libloading to conduwuit_mods feature and cfg only Signed-off-by: strawberry --- flake.nix | 22 ++++++++++++++++++++++ src/core/Cargo.toml | 4 ++++ src/core/mod.rs | 2 +- src/core/mods/mod.rs | 2 +- src/core/server.rs | 2 +- src/main/Cargo.toml | 3 +++ src/main/main.rs | 4 ++-- src/main/mods.rs | 2 +- src/main/server.rs | 4 ++-- src/main/signal.rs | 2 +- 10 files changed, 38 insertions(+), 9 deletions(-) diff --git a/flake.nix b/flake.nix index d8ad47a8..e3497d85 100644 --- a/flake.nix +++ b/flake.nix @@ -212,6 +212,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; all-features-debug = scopeHost.main.override { @@ -224,6 +226,8 @@ "hardened_malloc" # dont include experimental features "experimental" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; }; @@ -241,6 +245,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -255,6 +261,8 @@ "hardened_malloc" # dont include experimental features "experimental" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -330,6 +338,8 @@ # be expected on non-debug builds. 
"jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; } @@ -349,6 +359,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); }; @@ -367,6 +379,8 @@ "hardened_malloc" # dont include experimental features "experimental" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; } @@ -423,6 +437,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -444,6 +460,8 @@ # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); }; @@ -464,6 +482,8 @@ "hardened_malloc" # dont include experimental features "experimental" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -510,6 +530,8 @@ # be expected on non-debug builds. 
"jemalloc_prof" "jemalloc_stats" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; })); diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 4a9cc462..c716e9c2 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -50,6 +50,9 @@ zstd_compression = [ ] perf_measurements = [] sentry_telemetry = [] +conduwuit_mods = [ + "dep:libloading" +] [dependencies] argon2.workspace = true @@ -75,6 +78,7 @@ ipaddress.workspace = true itertools.workspace = true libc.workspace = true libloading.workspace = true +libloading.optional = true log.workspace = true num-traits.workspace = true rand.workspace = true diff --git a/src/core/mod.rs b/src/core/mod.rs index 87cb58ae..1416ed9e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -25,7 +25,7 @@ pub use crate as conduwuit_core; rustc_flags_capture! {} -#[cfg(not(conduwuit_mods))] +#[cfg(any(not(conduwuit_mods), not(feature = "conduwuit_mods")))] pub mod mods { #[macro_export] macro_rules! mod_ctor { diff --git a/src/core/mods/mod.rs b/src/core/mods/mod.rs index ac0c333b..b8f06f29 100644 --- a/src/core/mods/mod.rs +++ b/src/core/mods/mod.rs @@ -1,4 +1,4 @@ -#![cfg(conduwuit_mods)] +#![cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] pub(crate) use libloading::os::unix::{Library, Symbol}; diff --git a/src/core/server.rs b/src/core/server.rs index 8a4d9f66..948eea36 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -59,7 +59,7 @@ impl Server { } pub fn reload(&self) -> Result<()> { - if cfg!(not(conduwuit_mods)) { + if cfg!(any(not(conduwuit_mods), not(feature = "conduwuit_mods"))) { return Err!("Reloading not enabled"); } diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 38eb7188..baf5336f 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -135,6 +135,9 @@ zstd_compression = [ "conduwuit-database/zstd_compression", "conduwuit-router/zstd_compression", ] +conduwuit_mods = [ + "conduwuit-core/conduwuit_mods", +] [dependencies] 
conduwuit-admin.workspace = true diff --git a/src/main/main.rs b/src/main/main.rs index e7aaf3fc..dacc2a2e 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -37,7 +37,7 @@ fn main() -> Result<(), Error> { /// Operate the server normally in release-mode static builds. This will start, /// run and stop the server within the asynchronous runtime. -#[cfg(not(conduwuit_mods))] +#[cfg(any(not(conduwuit_mods), not(feature = "conduwuit_mods")))] #[tracing::instrument( name = "main", parent = None, @@ -89,7 +89,7 @@ async fn async_main(server: &Arc) -> Result<(), Error> { /// Operate the server in developer-mode dynamic builds. This will start, run, /// and hot-reload portions of the server as-needed before returning for an /// actual shutdown. This is not available in release-mode or static builds. -#[cfg(conduwuit_mods)] +#[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] async fn async_main(server: &Arc) -> Result<(), Error> { let mut starts = true; let mut reloads = true; diff --git a/src/main/mods.rs b/src/main/mods.rs index ca799b90..9ab36e6c 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -1,4 +1,4 @@ -#![cfg(conduwuit_mods)] +#![cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] #[unsafe(no_link)] extern crate conduwuit_service; diff --git a/src/main/server.rs b/src/main/server.rs index a81b708d..359a029c 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -23,7 +23,7 @@ pub(crate) struct Server { #[cfg(feature = "sentry_telemetry")] _sentry_guard: Option<::sentry::ClientInitGuard>, - #[cfg(conduwuit_mods)] + #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] // Module instances; TODO: move to mods::loaded mgmt vector pub(crate) mods: tokio::sync::RwLock>, } @@ -75,7 +75,7 @@ impl Server { #[cfg(feature = "sentry_telemetry")] _sentry_guard: sentry_guard, - #[cfg(conduwuit_mods)] + #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] mods: tokio::sync::RwLock::new(Vec::new()), })) } diff --git a/src/main/signal.rs 
b/src/main/signal.rs index 0f541099..cecb718b 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -12,7 +12,7 @@ pub(super) async fn signal(server: Arc) { use unix::SignalKind; const CONSOLE: bool = cfg!(feature = "console"); - const RELOADING: bool = cfg!(all(conduwuit_mods, not(CONSOLE))); + const RELOADING: bool = cfg!(all(conduwuit_mods, feature = "conduwuit_mods", not(CONSOLE))); let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler"); let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler"); From 5b6279b1c514ea65e00399c26bb7cdd007061e6d Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 10:51:39 -0500 Subject: [PATCH 0503/1248] ci: require docker publishing to pass tests Signed-off-by: strawberry --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4a4df488..3ccbf5d9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -733,7 +733,7 @@ jobs: docker: name: Docker publish runs-on: ubuntu-24.04 - needs: [build, variables] + needs: [build, variables, tests] permissions: packages: write contents: read From 1852eeebf22d2bedfeff63b9c205640f5832ca49 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 22:45:48 -0500 Subject: [PATCH 0504/1248] disable more unnecessary features in various build outputs Signed-off-by: strawberry --- flake.nix | 146 ++++++++++++++++++++------------ nix/pkgs/complement/default.nix | 10 +++ nix/pkgs/main/default.nix | 14 ++- 3 files changed, 115 insertions(+), 55 deletions(-) diff --git a/flake.nix b/flake.nix index e3497d85..fb40cae7 100644 --- a/flake.nix +++ b/flake.nix @@ -191,27 +191,57 @@ in { packages = { - default = scopeHost.main; - default-debug = scopeHost.main.override { - profile = "dev"; - # debug build users expect full logs - disable_release_max_log_level = true; - }; - default-test = scopeHost.main.override { - profile = "test"; - 
disable_release_max_log_level = true; - }; - all-features = scopeHost.main.override { - all_features = true; + default = scopeHost.main.override { disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" + ]; + }; + default-debug = scopeHost.main.override { + profile = "dev"; + # debug build users expect full logs + disable_release_max_log_level = true; + disable_features = [ + # dont include experimental features + "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" + ]; + }; + # just a test profile used for things like CI and complement + default-test = scopeHost.main.override { + profile = "test"; + disable_release_max_log_level = true; + disable_features = [ + # dont include experimental features + "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" + ]; + }; + all-features = scopeHost.main.override { + all_features = true; + disable_features = [ + # dont include experimental features + "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. 
+ "jemalloc_prof" + "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -222,10 +252,10 @@ # debug build users expect full logs disable_release_max_log_level = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -237,14 +267,14 @@ main = scopeHost.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -257,10 +287,10 @@ # debug build users expect full logs disable_release_max_log_level = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -321,6 +351,14 @@ value = scopeCrossStatic.main.override { profile = "test"; disable_release_max_log_level = true; + disable_features = [ + # dont include experimental features + "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" + ]; }; } @@ -330,14 +368,14 @@ value = scopeCrossStatic.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont 
include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -351,14 +389,14 @@ value = scopeCrossStatic.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. "jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -375,10 +413,10 @@ # debug build users expect full logs disable_release_max_log_level = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; @@ -429,16 +467,16 @@ main = scopeCrossStatic.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. - "jemalloc_prof" - "jemalloc_stats" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" + # dont include experimental features + "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. 
+ "jemalloc_prof" + "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -452,16 +490,16 @@ main = scopeCrossStatic.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. - "jemalloc_prof" - "jemalloc_stats" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" + # dont include experimental features + "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); }; @@ -478,12 +516,12 @@ # debug build users expect full logs disable_release_max_log_level = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" - # dont include experimental features - "experimental" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" + # dont include experimental features + "experimental" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" ]; }; }; @@ -522,14 +560,14 @@ main = prev.main.override { all_features = true; disable_features = [ - # this is non-functional on nix for some reason - "hardened_malloc" # dont include experimental features "experimental" # jemalloc profiling/stats features are expensive and shouldn't # be expected on non-debug builds. 
"jemalloc_prof" "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" # conduwuit_mods is a development-only hot reload feature "conduwuit_mods" ]; diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index 36f12400..e35cbf04 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -20,6 +20,8 @@ let disable_features = [ # no reason to use jemalloc for complement, just has compatibility/build issues "jemalloc" + "jemalloc_stats" + "jemalloc_prof" # console/CLI stuff isn't used or relevant for complement "console" "tokio_console" @@ -32,6 +34,14 @@ let "hardened_malloc" # dont include experimental features "experimental" + # compression isn't needed for complement + "brotli_compression" + "gzip_compression" + "zstd_compression" + # complement doesn't need hot reloading + "conduwuit_mods" + # complement doesn't have URL preview media tests + "url_preview" ]; }; diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index a785e7f2..d7424d11 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -15,7 +15,19 @@ # Options (keep sorted) , all_features ? false , default_features ? true -, disable_features ? [] +# default list of disabled features +, disable_features ? [ + # dont include experimental features + "experimental" + # jemalloc profiling/stats features are expensive and shouldn't + # be expected on non-debug builds. + "jemalloc_prof" + "jemalloc_stats" + # this is non-functional on nix for some reason + "hardened_malloc" + # conduwuit_mods is a development-only hot reload feature + "conduwuit_mods" +] , disable_release_max_log_level ? false , features ? [] , profile ? 
"release" From 0074f903d8a0574b63b588438efba996ef467c26 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 11 Jan 2025 21:43:46 -0500 Subject: [PATCH 0505/1248] delete lix binary cache because it has terrible reliability Signed-off-by: strawberry --- .gitea/workflows/ci.yml | 8 ++++---- .github/workflows/ci.yml | 8 ++++---- .github/workflows/documentation.yml | 4 ++-- .gitlab-ci.yml | 8 ++------ 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml index 9ce7c993..ef436734 100644 --- a/.gitea/workflows/ci.yml +++ b/.gitea/workflows/ci.yml @@ -87,8 +87,8 @@ jobs: - name: Apply Nix binary cache configuration run: | sudo tee -a /etc/nix/nix.conf > /dev/null < /dev/null < /dev/null < /dev/null < /dev/null < /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi - # Add Lix binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://cache.lix.systems" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o=" >> /etc/nix/nix.conf; fi - # Add crane binary cache - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi From 6f15c9b3f47143715280e485db2a5bcab35d77fd Mon Sep 17 00:00:00 2001 From: Holger Huo <50446405+HolgerHuo@users.noreply.github.com> Date: Wed, 8 Jan 2025 17:57:12 +0800 Subject: [PATCH 0506/1248] fix: presence timer not working --- src/api/client/presence.rs | 13 +++++++++---- src/api/client/sync/v3.rs | 17 ++++++++++++++--- 
src/service/presence/presence.rs | 6 +----- 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index 1a3ad26e..d19e6ae1 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -82,14 +82,19 @@ pub(crate) async fn get_presence_route( presence.content.status_msg }; + let last_active_ago = match presence.content.currently_active { + | Some(true) => None, + | _ => presence + .content + .last_active_ago + .map(|millis| Duration::from_millis(millis.into())), + }; + Ok(get_presence::v3::Response { // TODO: Should ruma just use the presenceeventcontent type here? status_msg, currently_active: presence.content.currently_active, - last_active_ago: presence - .content - .last_active_ago - .map(|millis| Duration::from_millis(millis.into())), + last_active_ago, presence: presence.content.presence, }) } else { diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index b7ecd6b9..910a15d4 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -382,7 +382,16 @@ async fn process_presence_updates( .ready_fold(PresenceUpdates::new(), |mut updates, (user_id, event)| { match updates.entry(user_id.into()) { | Entry::Vacant(slot) => { - slot.insert(event); + let mut new_event = event; + new_event.content.last_active_ago = match new_event.content.currently_active { + | Some(true) => None, + | _ => new_event + .content + .last_active_ago + .or(new_event.content.last_active_ago), + }; + + slot.insert(new_event); }, | Entry::Occupied(mut slot) => { let curr_event = slot.get_mut(); @@ -394,8 +403,6 @@ async fn process_presence_updates( curr_content.status_msg = new_content .status_msg .or_else(|| curr_content.status_msg.take()); - curr_content.last_active_ago = - new_content.last_active_ago.or(curr_content.last_active_ago); curr_content.displayname = new_content .displayname .or_else(|| curr_content.displayname.take()); @@ -405,6 +412,10 @@ async fn 
process_presence_updates( curr_content.currently_active = new_content .currently_active .or(curr_content.currently_active); + curr_content.last_active_ago = match curr_content.currently_active { + | Some(true) => None, + | _ => new_content.last_active_ago.or(curr_content.last_active_ago), + }; }, }; diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index b88a004b..b322dfb4 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -46,11 +46,7 @@ impl Presence { users: &users::Service, ) -> PresenceEvent { let now = utils::millis_since_unix_epoch(); - let last_active_ago = if self.currently_active { - None - } else { - Some(UInt::new_saturating(now.saturating_sub(self.last_active_ts))) - }; + let last_active_ago = Some(UInt::new_saturating(now.saturating_sub(self.last_active_ts))); PresenceEvent { sender: user_id.to_owned(), From 8451ea3bc32748fc8ff64d817685cc7f344e2e64 Mon Sep 17 00:00:00 2001 From: Holger Huo <50446405+HolgerHuo@users.noreply.github.com> Date: Wed, 8 Jan 2025 18:24:27 +0800 Subject: [PATCH 0507/1248] update: refresh timeout greater than idle timeout --- src/service/presence/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index bf5258e1..3b0bdd15 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -95,7 +95,7 @@ impl Service { /// Pings the presence of the given user in the given room, setting the /// specified state. 
pub async fn ping_presence(&self, user_id: &UserId, new_state: &PresenceState) -> Result<()> { - const REFRESH_TIMEOUT: u64 = 60 * 25 * 1000; + const REFRESH_TIMEOUT: u64 = 60 * 1000; let last_presence = self.db.get_presence(user_id).await; let state_changed = match last_presence { From b71201cf19cddd72c689ada532dbe1400f6a997d Mon Sep 17 00:00:00 2001 From: Holger Huo <50446405+HolgerHuo@users.noreply.github.com> Date: Wed, 8 Jan 2025 18:42:46 +0800 Subject: [PATCH 0508/1248] add: clear online status on server boot --- src/service/presence/mod.rs | 38 +++++++++++++++++++++++++++++++++++++ src/service/services.rs | 5 +++++ 2 files changed, 43 insertions(+) diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index 3b0bdd15..a6eb1bcd 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -170,6 +170,44 @@ impl Service { self.db.remove_presence(user_id).await; } + // Unset online/unavailable presence to offline on startup + pub async fn unset_all_presence(&self) -> Result<()> { + for user_id in &self + .services + .users + .list_local_users() + .map(UserId::to_owned) + .collect::>() + .await + { + let presence = self.db.get_presence(user_id).await; + + let presence = match presence { + | Ok((_, ref presence)) => &presence.content, + | _ => return Ok(()), + }; + + let need_reset = match presence.presence { + | PresenceState::Unavailable | PresenceState::Online => true, + | _ => false, + }; + + if !need_reset { + return Ok(()); + } + + self.set_presence( + user_id, + &PresenceState::Offline, + Some(false), + presence.last_active_ago, + presence.status_msg.clone(), + ) + .await?; + } + Ok(()) + } + /// Returns the most recent presence updates that happened after the event /// with id `since`. 
pub fn presence_since( diff --git a/src/service/services.rs b/src/service/services.rs index c955834e..9e099759 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -123,6 +123,11 @@ impl Services { .start() .await?; + // clear online statuses + if self.server.config.allow_local_presence { + _ = self.presence.unset_all_presence().await; + } + // set the server user as online if self.server.config.allow_local_presence && !self.db.is_read_only() { _ = self From fde1b94e26f22f21c3c6e012331f23164c27d776 Mon Sep 17 00:00:00 2001 From: Holger Huo <50446405+HolgerHuo@users.noreply.github.com> Date: Fri, 10 Jan 2025 23:51:51 +0800 Subject: [PATCH 0509/1248] fix: logic mistake --- src/api/client/sync/v3.rs | 5 +---- src/service/presence/mod.rs | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 910a15d4..95c8c2d4 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -385,10 +385,7 @@ async fn process_presence_updates( let mut new_event = event; new_event.content.last_active_ago = match new_event.content.currently_active { | Some(true) => None, - | _ => new_event - .content - .last_active_ago - .or(new_event.content.last_active_ago), + | _ => new_event.content.last_active_ago, }; slot.insert(new_event); diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index a6eb1bcd..ab7c76a1 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -193,7 +193,7 @@ impl Service { }; if !need_reset { - return Ok(()); + continue; } self.set_presence( From 8c18481d1dab411bd9270ed56e2b6b5c1f465d3f Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 10 Jan 2025 23:51:08 -0500 Subject: [PATCH 0510/1248] optimise resetting all user presences Signed-off-by: strawberry --- src/admin/query/presence.rs | 2 +- src/service/presence/data.rs | 4 +-- src/service/presence/mod.rs | 53 +++++++++++++++++++++++------------- src/service/services.rs | 9 ++---- 
4 files changed, 40 insertions(+), 28 deletions(-) diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 45bb6bd9..0de6b696 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -32,7 +32,7 @@ pub(super) async fn process( match subcommand { | PresenceCommand::GetPresence { user_id } => { let timer = tokio::time::Instant::now(); - let results = services.presence.db.get_presence(&user_id).await; + let results = services.presence.get_presence(&user_id).await; let query_time = timer.elapsed(); Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 3d614333..4ec0a7ee 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -12,7 +12,7 @@ use ruma::{events::presence::PresenceEvent, presence::PresenceState, UInt, UserI use super::Presence; use crate::{globals, users, Dep}; -pub struct Data { +pub(crate) struct Data { presenceid_presence: Arc, userid_presenceid: Arc, services: Services, @@ -36,7 +36,7 @@ impl Data { } } - pub async fn get_presence(&self, user_id: &UserId) -> Result<(u64, PresenceEvent)> { + pub(super) async fn get_presence(&self, user_id: &UserId) -> Result<(u64, PresenceEvent)> { let count = self .userid_presenceid .get(user_id) diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index ab7c76a1..eb4105e5 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -4,7 +4,10 @@ mod presence; use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduwuit::{checked, debug, error, result::LogErr, Error, Result, Server}; +use conduwuit::{ + checked, debug, debug_warn, error, result::LogErr, trace, Error, Result, Server, +}; +use database::Database; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; @@ -18,12 
+21,13 @@ pub struct Service { timeout_remote_users: bool, idle_timeout: u64, offline_timeout: u64, - pub db: Data, + db: Data, services: Services, } struct Services { server: Arc, + db: Arc, globals: Dep, users: Dep, } @@ -44,6 +48,7 @@ impl crate::Service for Service { db: Data::new(&args), services: Services { server: args.server.clone(), + db: args.db.clone(), globals: args.depend::("globals"), users: args.depend::("users"), }, @@ -171,7 +176,9 @@ impl Service { } // Unset online/unavailable presence to offline on startup - pub async fn unset_all_presence(&self) -> Result<()> { + pub async fn unset_all_presence(&self) { + let _cork = self.services.db.cork(); + for user_id in &self .services .users @@ -184,28 +191,36 @@ impl Service { let presence = match presence { | Ok((_, ref presence)) => &presence.content, - | _ => return Ok(()), + | _ => continue, }; - let need_reset = match presence.presence { - | PresenceState::Unavailable | PresenceState::Online => true, - | _ => false, - }; - - if !need_reset { + if !matches!( + presence.presence, + PresenceState::Unavailable | PresenceState::Online | PresenceState::Busy + ) { + trace!(?user_id, ?presence, "Skipping user"); continue; } - self.set_presence( - user_id, - &PresenceState::Offline, - Some(false), - presence.last_active_ago, - presence.status_msg.clone(), - ) - .await?; + trace!(?user_id, ?presence, "Resetting presence to offline"); + + _ = self + .set_presence( + user_id, + &PresenceState::Offline, + Some(false), + presence.last_active_ago, + presence.status_msg.clone(), + ) + .await + .inspect_err(|e| { + debug_warn!( + ?presence, + "{user_id} has invalid presence in database and failed to reset it to \ + offline: {e}" + ); + }); } - Ok(()) } /// Returns the most recent presence updates that happened after the event diff --git a/src/service/services.rs b/src/service/services.rs index 9e099759..1aa87f58 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -123,13 +123,10 @@ impl Services { 
.start() .await?; - // clear online statuses - if self.server.config.allow_local_presence { - _ = self.presence.unset_all_presence().await; - } - - // set the server user as online + // reset dormant online/away statuses to offline, and set the server user as + // online if self.server.config.allow_local_presence && !self.db.is_read_only() { + self.presence.unset_all_presence().await; _ = self .presence .ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Online) From 9bda5a43e5ac742fb6b30ae14c1d1e89c0a68c36 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 11 Jan 2025 00:24:51 -0500 Subject: [PATCH 0511/1248] fix /kick endpoint unbanning banned users Signed-off-by: strawberry --- src/api/client/membership.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 4046b493..0c493a37 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -439,6 +439,16 @@ pub(crate) async fn kick_user_route( return Ok(kick_user::v3::Response::new()); }; + if !matches!( + event.membership, + MembershipState::Invite | MembershipState::Knock | MembershipState::Join, + ) { + return Err!(Request(Forbidden( + "Cannot kick a user who is not apart of the room (current membership: {})", + event.membership + ))); + } + services .rooms .timeline @@ -527,7 +537,7 @@ pub(crate) async fn unban_user_route( if current_member_content.membership != MembershipState::Ban { return Err!(Request(Forbidden( - "Cannot ban a user who is not banned (current membership: {})", + "Cannot unban a user who is not banned (current membership: {})", current_member_content.membership ))); } From 5e21b43f2505fd8369f4ce09ef36950983d12182 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 11 Jan 2025 00:25:10 -0500 Subject: [PATCH 0512/1248] run direnv exec in engage default steps Signed-off-by: strawberry --- .github/workflows/ci.yml | 2 -- engage.toml | 18 +++++++++++------- 2 
files changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d73af24f..d06de5e3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,8 +50,6 @@ env: experimental-features = nix-command flakes extra-experimental-features = nix-command flakes accept-flake-config = true - # complement uses libolm - NIXPKGS_ALLOW_INSECURE: 1 WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} GH_SHA: ${{ github.sha }} GH_REF_NAME: ${{ github.ref_name }} diff --git a/engage.toml b/engage.toml index 9a6ef8ca..1d6a5475 100644 --- a/engage.toml +++ b/engage.toml @@ -97,6 +97,7 @@ env DIRENV_DEVSHELL=all-features \ name = "clippy/default" group = "lints" script = """ +direnv exec . \ cargo clippy \ --workspace \ --profile test \ @@ -126,6 +127,7 @@ env DIRENV_DEVSHELL=all-features \ name = "clippy/jemalloc" group = "lints" script = """ +direnv exec . \ cargo clippy \ --workspace \ --profile test \ @@ -179,13 +181,15 @@ env DIRENV_DEVSHELL=all-features \ name = "cargo/default" group = "tests" script = """ -cargo test \ - --workspace \ - --profile test \ - --all-targets \ - --color=always \ - -- \ - --color=always +env DIRENV_DEVSHELL=default \ + direnv exec . \ + cargo test \ + --workspace \ + --profile test \ + --all-targets \ + --color=always \ + -- \ + --color=always """ # Checks if the generated example config differs from the checked in repo's From fabd3cf567c9c676d8d546ced79e81f69bb70ae4 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 12 Jan 2025 19:18:07 -0500 Subject: [PATCH 0513/1248] ci: set binary as executable before uploading to webserver Signed-off-by: strawberry --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d06de5e3..007adace 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -490,6 +490,7 @@ jobs: if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | if [ ! 
-z $WEB_UPLOAD_SSH_USERNAME ]; then + chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-x86_64-linux-musl-x86_64-haswell-optimised fi @@ -497,6 +498,7 @@ jobs: if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + chmod +x static-${{ matrix.target }} scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }} fi @@ -692,6 +694,7 @@ jobs: if: ${{ matrix.os == 'macos-13' }} run: | if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + chmod +x conduwuit-macos-x86_64 scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-x86_64 fi @@ -699,6 +702,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} run: | if [ ! 
-z $WEB_UPLOAD_SSH_USERNAME ]; then + chmod +x conduwuit-macos-arm64 scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-arm64 fi From 5a1c41e66b4fec8ab76fd268fc9c9e282fd19428 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 11 Jan 2025 18:43:54 -0500 Subject: [PATCH 0514/1248] knocking implementation Signed-off-by: strawberry add sync bit of knocking Signed-off-by: strawberry --- src/api/client/membership.rs | 716 +++++++++++++++++++++++++-- src/api/client/sync/v3.rs | 39 +- src/api/client/sync/v4.rs | 9 + src/api/router.rs | 3 + src/api/server/invite.rs | 14 +- src/api/server/make_knock.rs | 38 +- src/api/server/make_leave.rs | 6 +- src/api/server/mod.rs | 4 + src/api/server/send_join.rs | 13 +- src/api/server/send_knock.rs | 75 ++- src/api/server/utils.rs | 17 +- src/database/maps.rs | 8 + src/service/rooms/state_cache/mod.rs | 142 +++++- src/service/rooms/timeline/mod.rs | 11 +- 14 files changed, 978 insertions(+), 117 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 0c493a37..d94fc3c7 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,4 +1,5 @@ use std::{ + borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, net::IpAddr, sync::Arc, @@ -8,7 +9,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ debug, debug_info, debug_warn, err, info, - pdu::{self, gen_event_id_canonical_json, PduBuilder}, + pdu::{gen_event_id_canonical_json, PduBuilder}, result::FlatOk, trace, utils::{self, shuffle, IterStream, ReadyExt}, @@ -19,6 +20,7 @@ use ruma::{ api::{ client::{ error::ErrorKind, + knock::knock_room, membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, @@ -37,11 +39,12 @@ use ruma::{ }, StateEventType, }, - state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, 
ServerName, UserId, + state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, + OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ appservice::RegistrationInfo, + pdu::gen_event_id, rooms::{state::RoomMutexGuard, state_compressor::HashSetCompressStateEvent}, Services, }; @@ -348,6 +351,116 @@ pub(crate) async fn join_room_by_id_or_alias_route( Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id }) } +/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}` +/// +/// Tries to knock the room to ask permission to join for the sender user. +#[tracing::instrument(skip_all, fields(%client), name = "knock")] +pub(crate) async fn knock_room_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; + + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + | Ok(room_id) => { + banned_room_check( + &services, + sender_user, + Some(&room_id), + room_id.server_name(), + client, + ) + .await?; + + let mut servers = body.via.clone(); + servers.extend( + services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + servers.extend( + services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default() + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + if let Some(server) = room_id.server_name() { + servers.push(server.to_owned()); + } + + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + + (servers, room_id) + }, + | Err(room_alias) => { + let (room_id, mut servers) = services + .rooms + .alias + .resolve_alias(&room_alias, Some(body.via.clone())) + .await?; + + 
banned_room_check( + &services, + sender_user, + Some(&room_id), + Some(room_alias.server_name()), + client, + ) + .await?; + + let addl_via_servers = services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned); + + let addl_state_servers = services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default(); + + let mut addl_servers: Vec<_> = addl_state_servers + .iter() + .map(|event| event.get_field("sender")) + .filter_map(FlatOk::flat_ok) + .map(|user: &UserId| user.server_name().to_owned()) + .stream() + .chain(addl_via_servers) + .collect() + .await; + + addl_servers.sort_unstable(); + addl_servers.dedup(); + shuffle(&mut addl_servers); + servers.append(&mut addl_servers); + + (servers, room_id) + }, + }; + + knock_room_by_id_helper(&services, sender_user, &room_id, body.reason.clone(), &servers) + .boxed() + .await +} + /// # `POST /_matrix/client/v3/rooms/{roomId}/leave` /// /// Tries to leave the sender user from a room. 
@@ -403,6 +516,17 @@ pub(crate) async fn invite_user_route( ))); } + if let Ok(target_user_membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, user_id) + .await + { + if target_user_membership.membership == MembershipState::Ban { + return Err!(Request(Forbidden("User is banned from this room."))); + } + } + if recipient_ignored_by_sender { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked @@ -862,7 +986,7 @@ async fn join_room_by_id_helper_remote( .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; // Generate event id - let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; + let event_id = gen_event_id(&join_event_stub, &room_version_id)?; // Add event_id back join_event_stub @@ -1030,7 +1154,7 @@ async fn join_room_by_id_helper_remote( }; let auth_check = state_res::event_auth::auth_check( - &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), + &state_res::RoomVersion::new(&room_version_id)?, &parsed_join_pdu, None, // TODO: third party invite |k, s| state_fetch(k, s.to_owned()), @@ -1043,10 +1167,10 @@ async fn join_room_by_id_helper_remote( } info!("Compressing state from send_join"); - let compressed = state - .iter() - .stream() - .then(|(&k, id)| services.rooms.state_compressor.compress_state_event(k, id)) + let compressed: HashSet<_> = services + .rooms + .state_compressor + .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) .collect() .await; @@ -1282,7 +1406,7 @@ async fn join_room_by_id_helper_local( .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; // Generate event id - let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?; + let event_id = gen_event_id(&join_event_stub, &room_version_id)?; // Add event_id back join_event_stub @@ -1392,6 +1516,7 @@ async fn make_join_request( ); make_join_response_and_server = Err!(BadServerResponse("No server available to 
assist in joining.")); + return make_join_response_and_server; } } @@ -1569,7 +1694,7 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { for room_id in all_rooms { // ignore errors if let Err(e) = leave_room(services, user_id, &room_id, None).await { - warn!(%room_id, %user_id, %e, "Failed to leave room"); + warn!(%user_id, "Failed to leave {room_id} remotely: {e}"); } services.rooms.state_cache.forget(&room_id, user_id); @@ -1585,11 +1710,15 @@ pub async fn leave_room( //use conduwuit::utils::stream::OptionStream; use futures::TryFutureExt; - // Ask a remote server if we don't have this room + // Ask a remote server if we don't have this room and are not knocking on it if !services .rooms .state_cache .server_in_room(services.globals.server_name(), room_id) + .await && !services + .rooms + .state_cache + .is_knocked(user_id, room_id) .await { if let Err(e) = remote_leave_room(services, user_id, room_id).await { @@ -1601,7 +1730,8 @@ pub async fn leave_room( .rooms .state_cache .invite_state(user_id, room_id) - .map_err(|_| services.rooms.state_cache.left_state(user_id, room_id)) + .or_else(|_| services.rooms.state_cache.knock_state(user_id, room_id)) + .or_else(|_| services.rooms.state_cache.left_state(user_id, room_id)) .await .ok(); @@ -1683,13 +1813,6 @@ async fn remote_leave_room( let mut make_leave_response_and_server = Err!(BadServerResponse("No server available to assist in leaving.")); - let invite_state = services - .rooms - .state_cache - .invite_state(user_id, room_id) - .await - .map_err(|_| err!(Request(BadState("User is not invited."))))?; - let mut servers: HashSet = services .rooms .state_cache @@ -1698,13 +1821,39 @@ async fn remote_leave_room( .collect() .await; - servers.extend( - invite_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); + if let Ok(invite_state) = services + .rooms + 
.state_cache + .invite_state(user_id, room_id) + .await + { + servers.extend( + invite_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + } else if let Ok(knock_state) = services + .rooms + .state_cache + .knock_state(user_id, room_id) + .await + { + servers.extend( + knock_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .filter_map(|sender| { + if !services.globals.user_is_local(sender) { + Some(sender.server_name().to_owned()) + } else { + None + } + }), + ); + } if let Some(room_id_server_name) = room_id.server_name() { servers.insert(room_id_server_name.to_owned()); @@ -1779,7 +1928,7 @@ async fn remote_leave_room( .hash_and_sign_event(&mut leave_event_stub, &room_version_id)?; // Generate event id - let event_id = pdu::gen_event_id(&leave_event_stub, &room_version_id)?; + let event_id = gen_event_id(&leave_event_stub, &room_version_id)?; // Add event_id back leave_event_stub @@ -1805,3 +1954,514 @@ async fn remote_leave_room( Ok(()) } + +async fn knock_room_by_id_helper( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], +) -> Result { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + if services + .rooms + .state_cache + .is_invited(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already invited in {room_id} but attempted to knock"); + return Err!(Request(Forbidden( + "You cannot knock on a room you are already invited/accepted to." 
+ ))); + } + + if services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already joined in {room_id} but attempted to knock"); + return Err!(Request(Forbidden("You cannot knock on a room you are already joined in."))); + } + + if services + .rooms + .state_cache + .is_knocked(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already knocked in {room_id}"); + return Ok(knock_room::v3::Response { room_id: room_id.into() }); + } + + if let Ok(membership) = services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + { + if membership.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to knock"); + return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); + } + } + + let server_in_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await; + + let local_knock = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_knock { + knock_room_helper_local(services, sender_user, room_id, reason, servers, state_lock) + .boxed() + .await?; + } else { + knock_room_helper_remote(services, sender_user, room_id, reason, servers, state_lock) + .boxed() + .await?; + } + + Ok(knock_room::v3::Response::new(room_id.to_owned())) +} + +async fn knock_room_helper_local( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + state_lock: RoomMutexGuard, +) -> Result { + debug_info!("We can knock locally"); + + let room_version_id = services.rooms.state.get_room_version(room_id).await?; + + if matches!( + room_version_id, + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + ) { + return Err!(Request(Forbidden("This room does not support knocking."))); + } + + 
let content = RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason: reason.clone(), + ..RoomMemberEventContent::new(MembershipState::Knock) + }; + + // Try normal knock first + let Err(error) = services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(sender_user.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await + else { + return Ok(()); + }; + + if servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])) + { + return Err(error); + } + + warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock"); + + let (make_knock_response, remote_server) = + make_knock_request(services, sender_user, room_id, servers).await?; + + info!("make_knock finished"); + + let room_version_id = make_knock_response.room_version; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut knock_event_stub = serde_json::from_str::( + make_knock_response.event.get(), + ) + .map_err(|e| { + err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) + })?; + + knock_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + knock_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + knock_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: 
services.users.blurhash(sender_user).await.ok(), + reason, + ..RoomMemberEventContent::new(MembershipState::Knock) + }) + .expect("event is valid, we just created it"), + ); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; + + // Add event_id + knock_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let knock_event = knock_event_stub; + + info!("Asking {remote_server} for send_knock in room {room_id}"); + let send_knock_request = federation::knock::send_knock::v1::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + pdu: services + .sending + .convert_to_outgoing_federation_event(knock_event.clone()) + .await, + }; + + let send_knock_response = services + .sending + .send_federation_request(&remote_server, send_knock_request) + .await?; + + info!("send_knock finished"); + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing knock event"); + + let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; + + info!("Updating membership locally to knock state with provided stripped state events"); + services + .rooms + .state_cache + .update_membership( + room_id, + sender_user, + parsed_knock_pdu + .get_content::() + .expect("we just created this"), + sender_user, + Some(send_knock_response.knock_room_state), + None, + false, + ) + .await?; + + info!("Appending room knock event locally"); + services + .rooms + .timeline + .append_pdu( + &parsed_knock_pdu, + knock_event, + vec![(*parsed_knock_pdu.event_id).to_owned()], + &state_lock, + ) + .await?; + + Ok(()) +} + +async fn 
knock_room_helper_remote( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + state_lock: RoomMutexGuard, +) -> Result { + info!("Knocking {room_id} over federation."); + + let (make_knock_response, remote_server) = + make_knock_request(services, sender_user, room_id, servers).await?; + + info!("make_knock finished"); + + let room_version_id = make_knock_response.room_version; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut knock_event_stub: CanonicalJsonObject = + serde_json::from_str(make_knock_response.event.get()).map_err(|e| { + err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) + })?; + + knock_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + knock_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + knock_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + ..RoomMemberEventContent::new(MembershipState::Knock) + }) + .expect("event is valid, we just created it"), + ); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; + + // Add event_id + knock_event_stub + .insert("event_id".to_owned(), 
CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let knock_event = knock_event_stub; + + info!("Asking {remote_server} for send_knock in room {room_id}"); + let send_knock_request = federation::knock::send_knock::v1::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + pdu: services + .sending + .convert_to_outgoing_federation_event(knock_event.clone()) + .await, + }; + + let send_knock_response = services + .sending + .send_federation_request(&remote_server, send_knock_request) + .await?; + + info!("send_knock finished"); + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing knock event"); + let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; + + info!("Going through send_knock response knock state events"); + let state = send_knock_response + .knock_room_state + .iter() + .map(|event| serde_json::from_str::(event.clone().into_json().get())) + .filter_map(Result::ok); + + let mut state_map: HashMap = HashMap::new(); + + for event in state { + let Some(state_key) = event.get("state_key") else { + debug_warn!("send_knock stripped state event missing state_key: {event:?}"); + continue; + }; + let Some(event_type) = event.get("type") else { + debug_warn!("send_knock stripped state event missing event type: {event:?}"); + continue; + }; + + let Ok(state_key) = serde_json::from_value::(state_key.clone().into()) else { + debug_warn!("send_knock stripped state event has invalid state_key: {event:?}"); + continue; + }; + let Ok(event_type) = serde_json::from_value::(event_type.clone().into()) + else { + debug_warn!("send_knock stripped state event has invalid event type: {event:?}"); + continue; + }; + + let event_id = gen_event_id(&event, &room_version_id)?; + let shortstatekey = services + .rooms + .short + 
.get_or_create_shortstatekey(&event_type, &state_key) + .await; + + services.rooms.outlier.add_pdu_outlier(&event_id, &event); + state_map.insert(shortstatekey, event_id.clone()); + } + + info!("Compressing state from send_knock"); + let compressed: HashSet<_> = services + .rooms + .state_compressor + .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) + .collect() + .await; + + debug!("Saving compressed state"); + let HashSetCompressStateEvent { + shortstatehash: statehash_before_knock, + added, + removed, + } = services + .rooms + .state_compressor + .save_state(room_id, Arc::new(compressed)) + .await?; + + debug!("Forcing state for new room"); + services + .rooms + .state + .force_state(room_id, statehash_before_knock, added, removed, &state_lock) + .await?; + + let statehash_after_knock = services + .rooms + .state + .append_to_state(&parsed_knock_pdu) + .await?; + + info!("Updating membership locally to knock state with provided stripped state events"); + services + .rooms + .state_cache + .update_membership( + room_id, + sender_user, + parsed_knock_pdu + .get_content::() + .expect("we just created this"), + sender_user, + Some(send_knock_response.knock_room_state), + None, + false, + ) + .await?; + + info!("Appending room knock event locally"); + services + .rooms + .timeline + .append_pdu( + &parsed_knock_pdu, + knock_event, + vec![(*parsed_knock_pdu.event_id).to_owned()], + &state_lock, + ) + .await?; + + info!("Setting final room state for new room"); + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + services + .rooms + .state + .set_room_state(room_id, statehash_after_knock, &state_lock); + + Ok(()) +} + +async fn make_knock_request( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + servers: &[OwnedServerName], +) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> { + let 
mut make_knock_response_and_server =
+		Err!(BadServerResponse("No server available to assist in knocking."));
+
+	let mut make_knock_counter: usize = 0;
+
+	for remote_server in servers {
+		if services.globals.server_is_ours(remote_server) {
+			continue;
+		}
+
+		info!("Asking {remote_server} for make_knock ({make_knock_counter})");
+
+		let make_knock_response = services
+			.sending
+			.send_federation_request(
+				remote_server,
+				federation::knock::create_knock_event_template::v1::Request {
+					room_id: room_id.to_owned(),
+					user_id: sender_user.to_owned(),
+					ver: services.server.supported_room_versions().collect(),
+				},
+			)
+			.await;
+
+		trace!("make_knock response: {make_knock_response:?}");
+		make_knock_counter = make_knock_counter.saturating_add(1);
+
+		make_knock_response_and_server = make_knock_response.map(|r| (r, remote_server.clone()));
+
+		if make_knock_response_and_server.is_ok() {
+			break;
+		}
+
+		if make_knock_counter > 40 {
+			warn!(
+				"More than 40 servers failed to provide valid make_knock response, assuming no \
+				 server can assist in knocking."
+ ); + make_knock_response_and_server = + Err!(BadServerResponse("No server available to assist in knocking.")); + + return make_knock_response_and_server; + } + } + + make_knock_response_and_server +} diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 95c8c2d4..a4dc0205 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -33,8 +33,8 @@ use ruma::{ self, v3::{ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, - LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, - Timeline, ToDevice, + KnockState, KnockedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, + State as RoomState, Timeline, ToDevice, }, DeviceLists, UnreadNotificationsCount, }, @@ -266,6 +266,35 @@ pub(crate) async fn build_sync_events( invited_rooms }); + let knocked_rooms = services + .rooms + .state_cache + .rooms_knocked(sender_user) + .fold_default(|mut knocked_rooms: BTreeMap<_, _>, (room_id, knock_state)| async move { + // Get and drop the lock to wait for remaining operations to finish + let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; + drop(insert_lock); + + let knock_count = services + .rooms + .state_cache + .get_knock_count(&room_id, sender_user) + .await + .ok(); + + // Knocked before last sync + if Some(since) >= knock_count { + return knocked_rooms; + } + + let knocked_room = KnockedRoom { + knock_state: KnockState { events: knock_state }, + }; + + knocked_rooms.insert(room_id, knocked_room); + knocked_rooms + }); + let presence_updates: OptionFuture<_> = services .globals .allow_local_presence() @@ -300,7 +329,7 @@ pub(crate) async fn build_sync_events( .users .remove_to_device_events(sender_user, sender_device, since); - let rooms = join3(joined_rooms, left_rooms, invited_rooms); + let rooms = join4(joined_rooms, left_rooms, invited_rooms, knocked_rooms); let ephemeral = join3(remove_to_device_events, to_device_events, presence_updates); let top = 
join5(account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) .boxed() @@ -308,7 +337,7 @@ pub(crate) async fn build_sync_events( let (account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) = top; let ((), to_device_events, presence_updates) = ephemeral; - let (joined_rooms, left_rooms, invited_rooms) = rooms; + let (joined_rooms, left_rooms, invited_rooms, knocked_rooms) = rooms; let (joined_rooms, mut device_list_updates, left_encrypted_users) = joined_rooms; device_list_updates.extend(keys_changed); @@ -349,7 +378,7 @@ pub(crate) async fn build_sync_events( leave: left_rooms, join: joined_rooms, invite: invited_rooms, - knock: BTreeMap::new(), // TODO + knock: knocked_rooms, }, to_device: ToDevice { events: to_device_events }, }; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 9915752e..24c7e286 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -113,9 +113,18 @@ pub(crate) async fn sync_events_v4_route( .collect() .await; + let all_knocked_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_knocked(sender_user) + .map(|r| r.0) + .collect() + .await; + let all_rooms = all_joined_rooms .iter() .chain(all_invited_rooms.iter()) + .chain(all_knocked_rooms.iter()) .map(Clone::clone) .collect(); diff --git a/src/api/router.rs b/src/api/router.rs index 1b38670d..1d42fc5e 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -99,6 +99,7 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&client::join_room_by_id_route) .ruma_route(&client::join_room_by_id_or_alias_route) .ruma_route(&client::joined_members_route) + .ruma_route(&client::knock_room_route) .ruma_route(&client::leave_room_route) .ruma_route(&client::forget_room_route) .ruma_route(&client::joined_rooms_route) @@ -204,8 +205,10 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&server::get_room_state_route) .ruma_route(&server::get_room_state_ids_route) 
.ruma_route(&server::create_leave_event_template_route) + .ruma_route(&server::create_knock_event_template_route) .ruma_route(&server::create_leave_event_v1_route) .ruma_route(&server::create_leave_event_v2_route) + .ruma_route(&server::create_knock_event_v1_route) .ruma_route(&server::create_join_event_template_route) .ruma_route(&server::create_join_event_v1_route) .ruma_route(&server::create_join_event_v2_route) diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 6d3be04c..1fea268b 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -6,8 +6,9 @@ use ruma::{ api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, - CanonicalJsonValue, OwnedEventId, OwnedUserId, UserId, + CanonicalJsonValue, OwnedUserId, UserId, }; +use service::pdu::gen_event_id; use crate::Ruma; @@ -86,12 +87,7 @@ pub(crate) async fn create_invite_route( .map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?; // Generate event id - let event_id = OwnedEventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&signed_event, &body.room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + let event_id = gen_event_id(&signed_event, &body.room_version)?; // Add event_id back signed_event.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.to_string())); @@ -115,12 +111,12 @@ pub(crate) async fn create_invite_route( let mut invite_state = body.invite_room_state.clone(); let mut event: JsonObject = serde_json::from_str(body.event.get()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + .map_err(|e| err!(Request(BadJson("Invalid invite event PDU: {e}"))))?; event.insert("event_id".to_owned(), "$placeholder".into()); let pdu: PduEvent = serde_json::from_value(event.into()) - .map_err(|_| 
Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event."))?; + .map_err(|e| err!(Request(BadJson("Invalid invite event PDU: {e}"))))?; invite_state.push(pdu.to_stripped_state_event()); diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 6d9d6d55..90b9b629 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::Err; +use conduwuit::{debug_warn, Err}; use ruma::{ api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, @@ -15,7 +15,8 @@ use crate::{service::pdu::PduBuilder, Error, Result, Ruma}; /// /// Creates a knock template. pub(crate) async fn create_knock_event_template_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { return Err!(Request(NotFound("Room is unknown to this server."))); @@ -39,8 +40,8 @@ pub(crate) async fn create_knock_event_template_route( .contains(body.origin()) { warn!( - "Server {} for remote user {} tried knocking room ID {} which has a server name that is globally \ - forbidden. Rejecting.", + "Server {} for remote user {} tried knocking room ID {} which has a server name \ + that is globally forbidden. 
Rejecting.", body.origin(), &body.user_id, &body.room_id, @@ -63,29 +64,44 @@ pub(crate) async fn create_knock_event_template_route( if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6) { return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, + ErrorKind::IncompatibleRoomVersion { room_version: room_version_id }, "Room version does not support knocking.", )); } if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, + ErrorKind::IncompatibleRoomVersion { room_version: room_version_id }, "Your homeserver does not support the features required to knock on this room.", )); } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + if let Ok(membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, &body.user_id) + .await + { + if membership.membership == MembershipState::Ban { + debug_warn!( + "Remote user {} is banned from {} but attempted to knock", + &body.user_id, + &body.room_id + ); + return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); + } + } + let (_pdu, mut pdu_json) = services .rooms .timeline .create_hash_and_sign_event( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent::new(MembershipState::Knock)), + PduBuilder::state( + body.user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Knock), + ), &body.user_id, &body.room_id, &state_lock, diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 746a4858..936e0fbb 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -9,7 +9,7 @@ use serde_json::value::to_raw_value; use super::make_join::maybe_strip_event_id; use crate::{service::pdu::PduBuilder, Ruma}; -/// # `PUT /_matrix/federation/v1/make_leave/{roomId}/{eventId}` +/// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// /// Creates a leave 
template. pub(crate) async fn create_leave_event_template_route( @@ -21,7 +21,9 @@ pub(crate) async fn create_leave_event_template_route( } if body.user_id.server_name() != body.origin() { - return Err!(Request(BadJson("Not allowed to leave on behalf of another server/user."))); + return Err!(Request(Forbidden( + "Not allowed to leave on behalf of another server/user." + ))); } // ACL check origin diff --git a/src/api/server/mod.rs b/src/api/server/mod.rs index 9b7d91cb..5c1ff3f7 100644 --- a/src/api/server/mod.rs +++ b/src/api/server/mod.rs @@ -6,6 +6,7 @@ pub(super) mod hierarchy; pub(super) mod invite; pub(super) mod key; pub(super) mod make_join; +pub(super) mod make_knock; pub(super) mod make_leave; pub(super) mod media; pub(super) mod openid; @@ -13,6 +14,7 @@ pub(super) mod publicrooms; pub(super) mod query; pub(super) mod send; pub(super) mod send_join; +pub(super) mod send_knock; pub(super) mod send_leave; pub(super) mod state; pub(super) mod state_ids; @@ -28,6 +30,7 @@ pub(super) use hierarchy::*; pub(super) use invite::*; pub(super) use key::*; pub(super) use make_join::*; +pub(super) use make_knock::*; pub(super) use make_leave::*; pub(super) use media::*; pub(super) use openid::*; @@ -35,6 +38,7 @@ pub(super) use publicrooms::*; pub(super) use query::*; pub(super) use send::*; pub(super) use send_join::*; +pub(super) use send_knock::*; pub(super) use send_leave::*; pub(super) use state::*; pub(super) use state_ids::*; diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 6cbe5143..97a65bf8 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -186,14 +186,13 @@ async fn create_join_event( .map_err(|e| err!(Request(InvalidParam(warn!("Failed to sign send_join event: {e}")))))?; let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event missing origin property."))))?, - ) - .expect("CanonicalJson is valid json value"), 
+ value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event does not have an origin server name."))))? + .clone() + .into(), ) - .map_err(|e| err!(Request(BadJson(warn!("origin field is not a valid server name: {e}")))))?; + .map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?; let mutex_lock = services .rooms diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index 49ec4bf8..95478081 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,7 +1,8 @@ use axum::extract::State; -use conduwuit::{err, pdu::gen_event_id_canonical_json, warn, Err, Error, PduEvent, Result}; +use conduwuit::{err, pdu::gen_event_id_canonical_json, warn, Err, PduEvent, Result}; +use futures::FutureExt; use ruma::{ - api::{client::error::ErrorKind, federation::knock::send_knock}, + api::federation::knock::send_knock, events::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, @@ -17,7 +18,8 @@ use crate::Ruma; /// /// Submits a signed knock event. pub(crate) async fn create_knock_event_v1_route( - State(services): State, body: Ruma, + State(services): State, + body: Ruma, ) -> Result { if services .globals @@ -26,7 +28,8 @@ pub(crate) async fn create_knock_event_v1_route( .contains(body.origin()) { warn!( - "Server {} tried knocking room ID {} who has a server name that is globally forbidden. Rejecting.", + "Server {} tried knocking room ID {} who has a server name that is globally \ + forbidden. Rejecting.", body.origin(), &body.room_id, ); @@ -41,7 +44,8 @@ pub(crate) async fn create_knock_event_v1_route( .contains(&server.to_owned()) { warn!( - "Server {} tried knocking room ID {} which has a server name that is globally forbidden. Rejecting.", + "Server {} tried knocking room ID {} which has a server name that is globally \ + forbidden. 
Rejecting.", body.origin(), &body.room_id, ); @@ -50,7 +54,7 @@ pub(crate) async fn create_knock_event_v1_route( } if !services.rooms.metadata.exists(&body.room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + return Err!(Request(NotFound("Room is unknown to this server."))); } // ACL check origin server @@ -74,44 +78,42 @@ pub(crate) async fn create_knock_event_v1_route( let event_type: StateEventType = serde_json::from_value( value .get("type") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing type property."))? + .ok_or_else(|| err!(Request(InvalidParam("Event has no event type."))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event has invalid event type."))?; + .map_err(|e| err!(Request(InvalidParam("Event has invalid event type: {e}"))))?; if event_type != StateEventType::RoomMember { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, + return Err!(Request(InvalidParam( "Not allowed to send non-membership state event to knock endpoint.", - )); + ))); } let content: RoomMemberEventContent = serde_json::from_value( value .get("content") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing content property"))? + .ok_or_else(|| err!(Request(InvalidParam("Membership event has no content"))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event content is empty or invalid"))?; + .map_err(|e| err!(Request(InvalidParam("Event has invalid membership content: {e}"))))?; if content.membership != MembershipState::Knock { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to send a non-knock membership event to knock endpoint.", - )); + return Err!(Request(InvalidParam( + "Not allowed to send a non-knock membership event to knock endpoint." 
+ ))); } // ACL check sender server name let sender: OwnedUserId = serde_json::from_value( value .get("sender") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing sender property."))? + .ok_or_else(|| err!(Request(InvalidParam("Event has no sender user ID."))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "sender is not a valid user ID."))?; + .map_err(|e| err!(Request(BadJson("Event sender is not a valid user ID: {e}"))))?; services .rooms @@ -127,36 +129,32 @@ pub(crate) async fn create_knock_event_v1_route( let state_key: OwnedUserId = serde_json::from_value( value .get("state_key") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing state_key property."))? + .ok_or_else(|| err!(Request(InvalidParam("Event does not have a state_key"))))? .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "state_key is invalid or not a user ID."))?; + .map_err(|e| err!(Request(BadJson("Event does not have a valid state_key: {e}"))))?; if state_key != sender { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "State key does not match sender user", - )); + return Err!(Request(InvalidParam("state_key does not match sender user of event."))); }; let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event missing origin property."))?, - ) - .expect("CanonicalJson is valid json value"), + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event does not have an origin server name."))))? 
+ .clone() + .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?; + .map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?; let mut event: JsonObject = serde_json::from_str(body.pdu.get()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid knock event PDU."))?; + .map_err(|e| err!(Request(InvalidParam("Invalid knock event PDU: {e}"))))?; event.insert("event_id".to_owned(), "$placeholder".into()); let pdu: PduEvent = serde_json::from_value(event.into()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid knock event PDU."))?; + .map_err(|e| err!(Request(InvalidParam("Invalid knock event PDU: {e}"))))?; let mutex_lock = services .rooms @@ -169,19 +167,18 @@ pub(crate) async fn create_knock_event_v1_route( .rooms .event_handler .handle_incoming_pdu(&origin, &body.room_id, &event_id, value.clone(), true) + .boxed() .await? .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; drop(mutex_lock); - let knock_room_state = services.rooms.state.summary_stripped(&pdu).await; - services .sending .send_pdu_room(&body.room_id, &pdu_id) .await?; - Ok(send_knock::v1::Response { - knock_room_state, - }) + let knock_room_state = services.rooms.state.summary_stripped(&pdu).await; + + Ok(send_knock::v1::Response { knock_room_state }) } diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs index 112cf858..4f3fa245 100644 --- a/src/api/server/utils.rs +++ b/src/api/server/utils.rs @@ -1,6 +1,6 @@ use conduwuit::{implement, is_false, Err, Result}; use conduwuit_service::Services; -use futures::{future::OptionFuture, join, FutureExt}; +use futures::{future::OptionFuture, join, FutureExt, StreamExt}; use ruma::{EventId, RoomId, ServerName}; pub(super) struct AccessCheck<'a> { @@ -31,6 +31,15 @@ pub(super) async fn check(&self) -> Result { .state_cache .server_in_room(self.origin, self.room_id); + // if any user on our homeserver is trying 
to knock this room, we'll need to + // acknowledge bans or leaves + let user_is_knocking = self + .services + .rooms + .state_cache + .room_members_knocked(self.room_id) + .count(); + let server_can_see: OptionFuture<_> = self .event_id .map(|event_id| { @@ -42,14 +51,14 @@ pub(super) async fn check(&self) -> Result { }) .into(); - let (world_readable, server_in_room, server_can_see, acl_check) = - join!(world_readable, server_in_room, server_can_see, acl_check); + let (world_readable, server_in_room, server_can_see, acl_check, user_is_knocking) = + join!(world_readable, server_in_room, server_can_see, acl_check, user_is_knocking); if !acl_check { return Err!(Request(Forbidden("Server access denied."))); } - if !world_readable && !server_in_room { + if !world_readable && !server_in_room && user_is_knocking == 0 { return Err!(Request(Forbidden("Server is not in room."))); } diff --git a/src/database/maps.rs b/src/database/maps.rs index e9b26818..bc409919 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -184,6 +184,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "roomuserid_leftcount", ..descriptor::RANDOM }, + Descriptor { + name: "roomuserid_knockedcount", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "roomuserid_privateread", ..descriptor::RANDOM_SMALL @@ -377,6 +381,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "userroomid_leftstate", ..descriptor::RANDOM }, + Descriptor { + name: "userroomid_knockedstate", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "userroomid_notificationcount", ..descriptor::RANDOM diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 89421dfd..0d25142d 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -10,7 +10,7 @@ use conduwuit::{ warn, Result, }; use database::{serialize_key, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{future::join4, pin_mut, stream::iter, Stream, StreamExt}; +use 
futures::{future::join5, pin_mut, stream::iter, Stream, StreamExt}; use itertools::Itertools; use ruma::{ events::{ @@ -51,11 +51,13 @@ struct Data { roomuserid_invitecount: Arc, roomuserid_joined: Arc, roomuserid_leftcount: Arc, + roomuserid_knockedcount: Arc, roomuseroncejoinedids: Arc, serverroomids: Arc, userroomid_invitestate: Arc, userroomid_joined: Arc, userroomid_leftstate: Arc, + userroomid_knockedstate: Arc, } type AppServiceInRoomCache = RwLock>>; @@ -81,11 +83,13 @@ impl crate::Service for Service { roomuserid_invitecount: args.db["roomuserid_invitecount"].clone(), roomuserid_joined: args.db["roomuserid_joined"].clone(), roomuserid_leftcount: args.db["roomuserid_leftcount"].clone(), + roomuserid_knockedcount: args.db["roomuserid_knockedcount"].clone(), roomuseroncejoinedids: args.db["roomuseroncejoinedids"].clone(), serverroomids: args.db["serverroomids"].clone(), userroomid_invitestate: args.db["userroomid_invitestate"].clone(), userroomid_joined: args.db["userroomid_joined"].clone(), userroomid_leftstate: args.db["userroomid_leftstate"].clone(), + userroomid_knockedstate: args.db["userroomid_knockedstate"].clone(), }, })) } @@ -336,6 +340,9 @@ impl Service { self.db.userroomid_leftstate.remove(&userroom_id); self.db.roomuserid_leftcount.remove(&roomuser_id); + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + self.db.roomid_inviteviaservers.remove(room_id); } @@ -352,12 +359,13 @@ impl Service { // (timo) TODO let leftstate = Vec::>::new(); - let count = self.services.globals.next_count().unwrap(); self.db .userroomid_leftstate .raw_put(&userroom_id, Json(leftstate)); - self.db.roomuserid_leftcount.raw_put(&roomuser_id, count); + self.db + .roomuserid_leftcount + .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); self.db.userroomid_joined.remove(&userroom_id); self.db.roomuserid_joined.remove(&roomuser_id); @@ -365,6 +373,44 @@ impl Service { 
self.db.userroomid_invitestate.remove(&userroom_id); self.db.roomuserid_invitecount.remove(&roomuser_id); + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + + self.db.roomid_inviteviaservers.remove(room_id); + } + + /// Direct DB function to directly mark a user as knocked. It is not + /// recommended to use this directly. You most likely should use + /// `update_membership` instead + #[tracing::instrument(skip(self), level = "debug")] + pub fn mark_as_knocked( + &self, + user_id: &UserId, + room_id: &RoomId, + knocked_state: Option>>, + ) { + let userroom_id = (user_id, room_id); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); + + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); + + self.db + .userroomid_knockedstate + .raw_put(&userroom_id, Json(knocked_state.unwrap_or_default())); + self.db + .roomuserid_knockedcount + .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); + + self.db.userroomid_joined.remove(&userroom_id); + self.db.roomuserid_joined.remove(&roomuser_id); + + self.db.userroomid_invitestate.remove(&userroom_id); + self.db.roomuserid_invitecount.remove(&roomuser_id); + + self.db.userroomid_leftstate.remove(&userroom_id); + self.db.roomuserid_leftcount.remove(&roomuser_id); + self.db.roomid_inviteviaservers.remove(room_id); } @@ -528,6 +574,20 @@ impl Service { .map(|(_, user_id): (Ignore, &UserId)| user_id) } + /// Returns an iterator over all knocked members of a room. 
+ #[tracing::instrument(skip(self), level = "debug")] + pub fn room_members_knocked<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuserid_knockedcount + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { let key = (room_id, user_id); @@ -538,6 +598,16 @@ impl Service { .deserialized() } + #[tracing::instrument(skip(self), level = "trace")] + pub async fn get_knock_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let key = (room_id, user_id); + self.db + .roomuserid_knockedcount + .qry(&key) + .await + .deserialized() + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { let key = (room_id, user_id); @@ -576,6 +646,25 @@ impl Service { .ignore_err() } + /// Returns an iterator over all rooms a user is currently knocking. 
+ #[tracing::instrument(skip(self), level = "trace")] + pub fn rooms_knocked<'a>( + &'a self, + user_id: &'a UserId, + ) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Key<'a>, Raw>); + type Key<'a> = (&'a UserId, &'a RoomId); + + let prefix = (user_id, Interfix); + self.db + .userroomid_knockedstate + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn invite_state( &self, @@ -593,6 +682,23 @@ impl Service { }) } + #[tracing::instrument(skip(self), level = "trace")] + pub async fn knock_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>> { + let key = (user_id, room_id); + self.db + .userroomid_knockedstate + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| { + val.deserialize_as().map_err(Into::into) + }) + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn left_state( &self, @@ -641,6 +747,12 @@ impl Service { self.db.userroomid_joined.qry(&key).await.is_ok() } + #[tracing::instrument(skip(self), level = "trace")] + pub async fn is_knocked<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { + let key = (user_id, room_id); + self.db.userroomid_knockedstate.qry(&key).await.is_ok() + } + #[tracing::instrument(skip(self), level = "trace")] pub async fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> bool { let key = (user_id, room_id); @@ -659,9 +771,10 @@ impl Service { user_id: &UserId, room_id: &RoomId, ) -> Option { - let states = join4( + let states = join5( self.is_joined(user_id, room_id), self.is_left(user_id, room_id), + self.is_knocked(user_id, room_id), self.is_invited(user_id, room_id), self.once_joined(user_id, room_id), ) @@ -670,8 +783,9 @@ impl Service { match states { | (true, ..) => Some(MembershipState::Join), | (_, true, ..) 
=> Some(MembershipState::Leave), - | (_, _, true, ..) => Some(MembershipState::Invite), - | (false, false, false, true) => Some(MembershipState::Ban), + | (_, _, true, ..) => Some(MembershipState::Knock), + | (_, _, _, true, ..) => Some(MembershipState::Invite), + | (false, false, false, false, true) => Some(MembershipState::Ban), | _ => None, } } @@ -747,6 +861,7 @@ impl Service { pub async fn update_joined_count(&self, room_id: &RoomId) { let mut joinedcount = 0_u64; let mut invitedcount = 0_u64; + let mut knockedcount = 0_u64; let mut joined_servers = HashSet::new(); self.room_members(room_id) @@ -764,8 +879,19 @@ impl Service { .unwrap_or(0), ); + knockedcount = knockedcount.saturating_add( + self.room_members_knocked(room_id) + .count() + .await + .try_into() + .unwrap_or(0), + ); + self.db.roomid_joinedcount.raw_put(room_id, joinedcount); self.db.roomid_invitedcount.raw_put(room_id, invitedcount); + self.db + .roomuserid_knockedcount + .raw_put(room_id, knockedcount); self.room_servers(room_id) .ready_for_each(|old_joined_server| { @@ -820,7 +946,6 @@ impl Service { self.db .userroomid_invitestate .raw_put(&userroom_id, Json(last_state.unwrap_or_default())); - self.db .roomuserid_invitecount .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); @@ -831,6 +956,9 @@ impl Service { self.db.userroomid_leftstate.remove(&userroom_id); self.db.roomuserid_leftcount.remove(&roomuser_id); + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + if let Some(servers) = invite_via.filter(is_not_empty!()) { self.add_servers_invite_via(room_id, servers).await; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index fe7f885a..3ebc432f 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -498,14 +498,15 @@ impl Service { .expect("This state_key was previously validated"); let content: RoomMemberEventContent = 
pdu.get_content()?; - let invite_state = match content.membership { - | MembershipState::Invite => + let stripped_state = match content.membership { + | MembershipState::Invite | MembershipState::Knock => self.services.state.summary_stripped(pdu).await.into(), | _ => None, }; - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth + // Update our membership info, we do this here incase a user is invited or + // knocked and immediately leaves we need the DB to record the invite or + // knock event for auth self.services .state_cache .update_membership( @@ -513,7 +514,7 @@ impl Service { target_user_id, content, &pdu.sender, - invite_state, + stripped_state, None, true, ) From 9dd058de60cc2a07a61a0c783b8967a779d7881c Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 12 Jan 2025 21:02:03 -0500 Subject: [PATCH 0515/1248] update complement results Signed-off-by: strawberry --- .../complement/test_results.jsonl | 68 +++++++++---------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 9b4d2838..11339049 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -1,12 +1,12 @@ {"Action":"pass","Test":"TestACLs"} {"Action":"pass","Test":"TestBannedUserCannotSendJoin"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/join_event"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event"} -{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event"} 
-{"Action":"fail","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/join_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event"} +{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/invite_event"} @@ -21,13 +21,13 @@ {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/leave_event"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event"} {"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/regular_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/invite_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/join_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/leave_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/non-state_membership_event"} -{"Action":"fail","Test":"TestCannotSendNonKnockViaSendKnock/regular_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/invite_event"} 
+{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/join_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/leave_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/non-state_membership_event"} +{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/regular_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/invite_event"} @@ -90,10 +90,10 @@ {"Action":"fail","Test":"TestKnocking"} {"Action":"fail","Test":"TestKnocking/A_user_can_knock_on_a_room_without_a_reason"} {"Action":"fail","Test":"TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01"} -{"Action":"fail","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in"} -{"Action":"fail","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01"} -{"Action":"fail","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} -{"Action":"fail","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} +{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in"} +{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01"} +{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} +{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} {"Action":"pass","Test":"TestKnocking/A_user_in_the_room_can_accept_a_knock"} {"Action":"pass","Test":"TestKnocking/A_user_in_the_room_can_accept_a_knock#01"} {"Action":"fail","Test":"TestKnocking/A_user_in_the_room_can_reject_a_knock"} @@ -101,25 +101,25 @@ {"Action":"fail","Test":"TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"} 
{"Action":"fail","Test":"TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"} {"Action":"fail","Test":"TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"} -{"Action":"fail","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} -{"Action":"fail","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} +{"Action":"pass","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} +{"Action":"pass","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} {"Action":"pass","Test":"TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"} {"Action":"pass","Test":"TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"} {"Action":"pass","Test":"TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"} {"Action":"pass","Test":"TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"} -{"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} -{"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} +{"Action":"pass","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} +{"Action":"pass","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} {"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"} {"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} -{"Action":"fail","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} -{"Action":"fail","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} 
+{"Action":"pass","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} +{"Action":"pass","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} {"Action":"fail","Test":"TestKnockingInMSC3787Room"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock"} @@ -127,18 +127,18 @@ {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"} 
{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} 
-{"Action":"fail","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} +{"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} From be16f84410c09db478eaa7998f451952bbe0fabe Mon Sep 17 00:00:00 2001 From: morguldir Date: Wed, 6 Nov 2024 03:17:50 +0100 Subject: [PATCH 0516/1248] syncv3: use a function for repeated pattern of fetching sticky params --- src/service/sync/mod.rs | 124 +++++++++++++++++----------------------- 1 file changed, 52 insertions(+), 72 deletions(-) diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index 97f4ce9c..02658a70 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -85,6 +85,17 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +fn list_or_sticky(target: &mut Vec, cached: &Vec) { + if target.is_empty() { + target.clone_from(cached); + } +} +fn some_or_sticky(target: &mut Option, cached: Option) { + if target.is_none() { + *target = cached; + } +} + impl Service { pub fn remembered( &self, @@ -136,57 +147,27 @@ impl Service { for (list_id, list) in &mut request.lists { if let Some(cached_list) = cached.lists.get(list_id) { - if list.sort.is_empty() { - list.sort.clone_from(&cached_list.sort); - }; - if list.room_details.required_state.is_empty() { - list.room_details - .required_state - .clone_from(&cached_list.room_details.required_state); - }; - list.room_details.timeline_limit = list - .room_details - .timeline_limit - .or(cached_list.room_details.timeline_limit); - list.include_old_rooms = list - 
.include_old_rooms - .clone() - .or_else(|| cached_list.include_old_rooms.clone()); + list_or_sticky(&mut list.sort, &cached_list.sort); + list_or_sticky(&mut list.room_details.required_state, &cached_list.room_details.required_state); + some_or_sticky(&mut list.room_details.timeline_limit, cached_list.room_details.timeline_limit); + some_or_sticky(&mut list.include_old_rooms, cached_list.include_old_rooms.clone()); match (&mut list.filters, cached_list.filters.clone()) { - | (Some(list_filters), Some(cached_filters)) => { - list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); - if list_filters.spaces.is_empty() { - list_filters.spaces = cached_filters.spaces; - } - list_filters.is_encrypted = - list_filters.is_encrypted.or(cached_filters.is_encrypted); - list_filters.is_invite = - list_filters.is_invite.or(cached_filters.is_invite); - if list_filters.room_types.is_empty() { - list_filters.room_types = cached_filters.room_types; - } - if list_filters.not_room_types.is_empty() { - list_filters.not_room_types = cached_filters.not_room_types; - } - list_filters.room_name_like = list_filters - .room_name_like - .clone() - .or(cached_filters.room_name_like); - if list_filters.tags.is_empty() { - list_filters.tags = cached_filters.tags; - } - if list_filters.not_tags.is_empty() { - list_filters.not_tags = cached_filters.not_tags; - } + (Some(list_filters), Some(cached_filters)) => { + some_or_sticky(&mut list_filters.is_dm, cached_filters.is_dm); + list_or_sticky(&mut list_filters.spaces, &cached_filters.spaces); + some_or_sticky(&mut list_filters.is_encrypted, cached_filters.is_encrypted); + some_or_sticky(&mut list_filters.is_invite, cached_filters.is_invite); + list_or_sticky(&mut list_filters.room_types, &cached_filters.room_types); + list_or_sticky(&mut list_filters.not_room_types, &cached_filters.not_room_types); + some_or_sticky(&mut list_filters.room_name_like, cached_filters.room_name_like); + list_or_sticky(&mut list_filters.tags, 
&cached_filters.tags); + list_or_sticky(&mut list_filters.not_tags, &cached_filters.not_tags); }, | (_, Some(cached_filters)) => list.filters = Some(cached_filters), | (Some(list_filters), _) => list.filters = Some(list_filters.clone()), | (..) => {}, } - if list.bump_event_types.is_empty() { - list.bump_event_types - .clone_from(&cached_list.bump_event_types); - }; + list_or_sticky(&mut list.bump_event_types, &cached_list.bump_event_types); } cached.lists.insert(list_id.clone(), list.clone()); } @@ -241,16 +222,18 @@ impl Service { subscriptions: BTreeMap, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -258,25 +241,22 @@ impl Service { } pub fn update_sync_known_rooms( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - list_id: String, - new_cached_rooms: BTreeSet, - globalsince: u64, + &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, list_id: String, + new_cached_rooms: BTreeSet, globalsince: u64, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone( + 
cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); let cached = &mut cached.lock().expect("locked"); drop(cache); From 6cb3275be0828d7d64da30bacb759b8c796c3c99 Mon Sep 17 00:00:00 2001 From: morguldir Date: Fri, 3 Jan 2025 08:32:54 +0100 Subject: [PATCH 0517/1248] Add initial MSC4186 (Simplified Sliding Sync) implementation Signed-off-by: morguldir Signed-off-by: strawberry --- src/api/client/sync/mod.rs | 51 +- src/api/client/sync/v4.rs | 94 +-- src/api/client/sync/v5.rs | 886 ++++++++++++++++++++++++++ src/api/client/unversioned.rs | 1 + src/api/router.rs | 1 + src/service/rooms/read_receipt/mod.rs | 1 + src/service/sync/mod.rs | 273 +++++++- 7 files changed, 1214 insertions(+), 93 deletions(-) create mode 100644 src/api/client/sync/v5.rs diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 79e4b1ca..1967f4a2 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -1,16 +1,31 @@ mod v3; mod v4; +mod v5; use conduwuit::{ - utils::stream::{BroadbandExt, ReadyExt, TryIgnore}, + utils::{ + stream::{BroadbandExt, ReadyExt, TryIgnore}, + IterStream, + }, PduCount, }; use futures::{pin_mut, StreamExt}; -use ruma::{RoomId, UserId}; +use ruma::{ + directory::RoomTypeFilter, + events::TimelineEventType::{ + self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, + }, + RoomId, UserId, +}; -pub(crate) use self::{v3::sync_events_route, v4::sync_events_v4_route}; +pub(crate) use self::{ + v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, +}; use crate::{service::Services, Error, PduEvent, Result}; +pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = + &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; + async fn load_timeline( services: &Services, 
sender_user: &UserId, @@ -69,3 +84,33 @@ async fn share_encrypted_room( }) .await } + +pub(crate) async fn filter_rooms<'a>( + services: &Services, + rooms: &[&'a RoomId], + filter: &[RoomTypeFilter], + negate: bool, +) -> Vec<&'a RoomId> { + rooms + .iter() + .stream() + .filter_map(|r| async move { + let room_type = services.rooms.state_accessor.get_room_type(r).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(r) + }) + .collect() + .await +} diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 24c7e286..a82e9309 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -23,24 +23,23 @@ use ruma::{ DeviceLists, UnreadNotificationsCount, }, }, - directory::RoomTypeFilter, events::{ room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, - TimelineEventType::{self, *}, + TimelineEventType::*, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, UInt, + uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, }; -use service::{rooms::read_receipt::pack_receipts, Services}; +use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma}; +use crate::{ + client::{filter_rooms, ignored_filter, sync::v5::TodoRooms, DEFAULT_BUMP_TYPES}, + Ruma, +}; -const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; - -const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = - &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; +pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; /// POST 
`/_matrix/client/unstable/org.matrix.msc3575/sync` /// @@ -121,13 +120,19 @@ pub(crate) async fn sync_events_v4_route( .collect() .await; - let all_rooms = all_joined_rooms + let all_invited_rooms: Vec<&RoomId> = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + let all_knocked_rooms: Vec<&RoomId> = all_knocked_rooms.iter().map(AsRef::as_ref).collect(); + + let all_rooms: Vec<&RoomId> = all_joined_rooms .iter() - .chain(all_invited_rooms.iter()) - .chain(all_knocked_rooms.iter()) - .map(Clone::clone) + .map(AsRef::as_ref) + .chain(all_invited_rooms.iter().map(AsRef::as_ref)) + .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) .collect(); + let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); + let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + if body.extensions.to_device.enabled.unwrap_or(false) { services .users @@ -180,6 +185,7 @@ pub(crate) async fn sync_events_v4_route( ); for room_id in &all_joined_rooms { + let room_id: &&RoomId = room_id; let Ok(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id).await else { @@ -332,7 +338,7 @@ pub(crate) async fn sync_events_v4_route( } let mut lists = BTreeMap::new(); - let mut todo_rooms = BTreeMap::new(); // and required state + let mut todo_rooms: TodoRooms = BTreeMap::new(); // and required state for (list_id, list) in &body.lists { let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { @@ -353,7 +359,7 @@ pub(crate) async fn sync_events_v4_route( | None => active_rooms, }; - let mut new_known_rooms = BTreeSet::new(); + let mut new_known_rooms: BTreeSet = BTreeSet::new(); let ranges = list.ranges.clone(); lists.insert(list_id.clone(), sync_events::v4::SyncList { @@ -375,9 +381,9 @@ pub(crate) async fn sync_events_v4_route( Vec::new() }; - new_known_rooms.extend(room_ids.iter().cloned()); + new_known_rooms.extend(room_ids.clone().into_iter().map(ToOwned::to_owned)); for room_id in &room_ids { - let todo_room = 
todo_rooms.entry(room_id.clone()).or_insert(( + let todo_room = todo_rooms.entry((*room_id).to_owned()).or_insert(( BTreeSet::new(), 0_usize, u64::MAX, @@ -399,7 +405,7 @@ pub(crate) async fn sync_events_v4_route( todo_room.2 = todo_room.2.min( known_rooms .get(list_id.as_str()) - .and_then(|k| k.get(room_id)) + .and_then(|k| k.get(*room_id)) .copied() .unwrap_or(0), ); @@ -408,7 +414,7 @@ pub(crate) async fn sync_events_v4_route( op: SlidingOp::Sync, range: Some(r), index: None, - room_ids, + room_ids: room_ids.into_iter().map(ToOwned::to_owned).collect(), room_id: None, } }) @@ -418,8 +424,8 @@ pub(crate) async fn sync_events_v4_route( if let Some(conn_id) = &body.conn_id { services.sync.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), + sender_user, + &sender_device, conn_id.clone(), list_id.clone(), new_known_rooms, @@ -464,8 +470,8 @@ pub(crate) async fn sync_events_v4_route( if let Some(conn_id) = &body.conn_id { services.sync.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), + sender_user, + &sender_device, conn_id.clone(), "subscriptions".to_owned(), known_subscription_rooms, @@ -489,7 +495,8 @@ pub(crate) async fn sync_events_v4_route( let mut timestamp: Option<_> = None; let mut invite_state = None; let (timeline_pdus, limited); - if all_invited_rooms.contains(room_id) { + let new_room_id: &RoomId = (*room_id).as_ref(); + if all_invited_rooms.contains(&new_room_id) { // TODO: figure out a timestamp we can use for remote invites invite_state = services .rooms @@ -519,7 +526,7 @@ pub(crate) async fn sync_events_v4_route( } account_data.rooms.insert( - room_id.clone(), + room_id.to_owned(), services .account_data .changes_since(Some(room_id), sender_user, *roomsince) @@ -749,10 +756,9 @@ pub(crate) async fn sync_events_v4_route( }); } - if rooms - .iter() - .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) - { + if rooms.iter().all(|(id, r)| { + r.timeline.is_empty() && 
r.required_state.is_empty() && !receipts.rooms.contains_key(id) + }) { // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives let default = Duration::from_secs(30); @@ -798,33 +804,3 @@ pub(crate) async fn sync_events_v4_route( delta_token: None, }) } - -async fn filter_rooms( - services: &Services, - rooms: &[OwnedRoomId], - filter: &[RoomTypeFilter], - negate: bool, -) -> Vec { - rooms - .iter() - .stream() - .filter_map(|r| async move { - let room_type = services.rooms.state_accessor.get_room_type(r).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; - - include.then_some(r.to_owned()) - }) - .collect() - .await -} diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs new file mode 100644 index 00000000..1c4f3504 --- /dev/null +++ b/src/api/client/sync/v5.rs @@ -0,0 +1,886 @@ +use std::{ + cmp::{self, Ordering}, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + time::Duration, +}; + +use axum::extract::State; +use conduwuit::{ + debug, error, extract_variant, trace, + utils::{ + math::{ruma_from_usize, usize_from_ruma}, + BoolExt, IterStream, ReadyExt, TryFutureExtExt, + }, + warn, Error, Result, +}; +use futures::{FutureExt, StreamExt, TryFutureExt}; +use ruma::{ + api::client::{ + error::ErrorKind, + sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + }, + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, + }, + serde::Raw, + state_res::TypeStateKey, + uint, DeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, +}; +use service::{rooms::read_receipt::pack_receipts, PduCount}; + +use super::{filter_rooms, share_encrypted_room}; +use 
crate::{ + client::{ignored_filter, sync::load_timeline, DEFAULT_BUMP_TYPES}, + Ruma, +}; + +type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); + +/// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync` +/// ([MSC4186]) +/// +/// A simplified version of sliding sync ([MSC3575]). +/// +/// Get all new events in a sliding window of rooms since the last sync or a +/// given point in time. +/// +/// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 +/// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186 +pub(crate) async fn sync_events_v5_route( + State(services): State, + body: Ruma, +) -> Result { + debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let mut body = body.body; + + // Setup watchers, so if there's no response, we can wait for them + let watcher = services.sync.watch(sender_user, sender_device); + + let next_batch = services.globals.next_count()?; + + let conn_id = body.conn_id.clone(); + + let globalsince = body + .pos + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + + if globalsince != 0 + && !services.sync.snake_connection_cached( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ) { + debug!("Restarting sync stream because it was gone from the database"); + return Err(Error::Request( + ErrorKind::UnknownPos, + "Connection data lost since last time".into(), + http::StatusCode::BAD_REQUEST, + )); + } + + // Client / User requested an initial sync + if globalsince == 0 { + services.sync.forget_snake_sync_connection( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ); + } + + // Get sticky parameters from cache + let known_rooms = services.sync.update_snake_sync_request_with_cache( + sender_user.clone(), + 
sender_device.clone(), + &mut body, + ); + + let all_joined_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_joined(sender_user) + .map(ToOwned::to_owned) + .collect() + .await; + + let all_invited_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_invited(sender_user) + .map(|r| r.0) + .collect() + .await; + + let all_knocked_rooms: Vec<_> = services + .rooms + .state_cache + .rooms_knocked(sender_user) + .map(|r| r.0) + .collect() + .await; + + let all_rooms: Vec<&RoomId> = all_joined_rooms + .iter() + .map(AsRef::as_ref) + .chain(all_invited_rooms.iter().map(AsRef::as_ref)) + .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) + .collect(); + + let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); + let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + + let pos = next_batch.clone().to_string(); + + let mut todo_rooms: TodoRooms = BTreeMap::new(); + + let sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body); + let mut response = sync_events::v5::Response { + txn_id: body.txn_id.clone(), + pos, + lists: BTreeMap::new(), + rooms: BTreeMap::new(), + extensions: sync_events::v5::response::Extensions { + account_data: collect_account_data(services, sync_info).await, + e2ee: collect_e2ee(services, sync_info, &all_joined_rooms).await?, + to_device: collect_to_device(services, sync_info, next_batch).await, + receipts: collect_receipts(services).await, + typing: sync_events::v5::response::Typing::default(), + }, + }; + + handle_lists( + services, + sync_info, + &all_invited_rooms, + &all_joined_rooms, + &all_rooms, + &mut todo_rooms, + &known_rooms, + &mut response, + ) + .await; + + fetch_subscriptions(services, sync_info, &known_rooms, &mut todo_rooms).await; + + response.rooms = process_rooms( + services, + sender_user, + next_batch, + &all_invited_rooms, + &todo_rooms, + &mut response, + &body, + ) + .await?; + + if response.rooms.iter().all(|(id, r)| { + r.timeline.is_empty() + 
&& r.required_state.is_empty() + && !response.extensions.receipts.rooms.contains_key(id) + }) && response + .extensions + .to_device + .clone() + .is_none_or(|to| to.events.is_empty()) + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let default = Duration::from_secs(30); + let duration = cmp::min(body.timeout.unwrap_or(default), default); + _ = tokio::time::timeout(duration, watcher).await; + } + + trace!( + rooms=?response.rooms.len(), + account_data=?response.extensions.account_data.rooms.len(), + receipts=?response.extensions.receipts.rooms.len(), + "responding to request with" + ); + Ok(response) +} + +type KnownRooms = BTreeMap>; +pub(crate) type TodoRooms = BTreeMap, usize, u64)>; + +async fn fetch_subscriptions( + services: crate::State, + (sender_user, sender_device, globalsince, body): SyncInfo<'_>, + known_rooms: &KnownRooms, + todo_rooms: &mut TodoRooms, +) { + let mut known_subscription_rooms = BTreeSet::new(); + for (room_id, room) in &body.room_subscriptions { + if !services.rooms.metadata.exists(room_id).await { + continue; + } + let todo_room = + todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); + + let limit: UInt = room.timeline_limit; + + todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.1 = todo_room.1.max(usize_from_ruma(limit)); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get("subscriptions") + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + known_subscription_rooms.insert(room_id.clone()); + } + // where this went (protomsc says it was removed) + //for r in body.unsubscribe_rooms { + // known_subscription_rooms.remove(&r); + // body.room_subscriptions.remove(&r); + //} + + if let Some(conn_id) = &body.conn_id { + services.sync.update_snake_sync_known_rooms( + sender_user, + sender_device, + conn_id.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + 
globalsince, + ); + } +} + +#[allow(clippy::too_many_arguments)] +async fn handle_lists<'a>( + services: crate::State, + (sender_user, sender_device, globalsince, body): SyncInfo<'_>, + all_invited_rooms: &Vec<&'a RoomId>, + all_joined_rooms: &Vec<&'a RoomId>, + all_rooms: &Vec<&'a RoomId>, + todo_rooms: &'a mut TodoRooms, + known_rooms: &'a KnownRooms, + response: &'_ mut sync_events::v5::Response, +) -> KnownRooms { + for (list_id, list) in &body.lists { + let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { + | Some(true) => all_invited_rooms, + | Some(false) => all_joined_rooms, + | None => all_rooms, + }; + + let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { + | Some(filter) if filter.is_empty() => active_rooms, + | Some(value) => &filter_rooms(&services, active_rooms, &value, true).await, + | None => active_rooms, + }; + + let mut new_known_rooms: BTreeSet = BTreeSet::new(); + + let ranges = list.ranges.clone(); + + for mut range in ranges { + range.0 = uint!(0); + range.1 = range + .1 + .clamp(range.0, UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX)); + + let room_ids = + active_rooms[usize_from_ruma(range.0)..usize_from_ruma(range.1)].to_vec(); + + let new_rooms: BTreeSet = + room_ids.clone().into_iter().map(From::from).collect(); + new_known_rooms.extend(new_rooms); + //new_known_rooms.extend(room_ids..cloned()); + for room_id in room_ids { + let todo_room = todo_rooms.entry(room_id.to_owned()).or_insert(( + BTreeSet::new(), + 0_usize, + u64::MAX, + )); + + let limit: usize = usize_from_ruma(list.room_details.timeline_limit).min(100); + + todo_room + .0 + .extend(list.room_details.required_state.iter().cloned()); + + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get(list_id.as_str()) + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + } + } + response + .lists + .insert(list_id.clone(), 
sync_events::v5::response::List { + count: ruma_from_usize(active_rooms.len()), + }); + + if let Some(conn_id) = &body.conn_id { + services.sync.update_snake_sync_known_rooms( + sender_user, + sender_device, + conn_id.clone(), + list_id.clone(), + new_known_rooms, + globalsince, + ); + } + } + BTreeMap::default() +} + +async fn process_rooms( + services: crate::State, + sender_user: &UserId, + next_batch: u64, + all_invited_rooms: &[&RoomId], + todo_rooms: &TodoRooms, + response: &mut sync_events::v5::Response, + body: &sync_events::v5::Request, +) -> Result> { + let mut rooms = BTreeMap::new(); + for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms { + let roomsincecount = PduCount::Normal(*roomsince); + + let mut timestamp: Option<_> = None; + let mut invite_state = None; + let (timeline_pdus, limited); + let new_room_id: &RoomId = (*room_id).as_ref(); + if all_invited_rooms.contains(&new_room_id) { + // TODO: figure out a timestamp we can use for remote invites + invite_state = services + .rooms + .state_cache + .invite_state(sender_user, room_id) + .await + .ok(); + + (timeline_pdus, limited) = (Vec::new(), true); + } else { + (timeline_pdus, limited) = match load_timeline( + &services, + sender_user, + room_id, + roomsincecount, + Some(PduCount::from(next_batch)), + *timeline_limit, + ) + .await + { + | Ok(value) => value, + | Err(err) => { + warn!("Encountered missing timeline in {}, error {}", room_id, err); + continue; + }, + }; + } + + if body.extensions.account_data.enabled == Some(true) { + response.extensions.account_data.rooms.insert( + room_id.to_owned(), + services + .account_data + .changes_since(Some(room_id), sender_user, *roomsince) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect() + .await, + ); + } + + let last_privateread_update = services + .rooms + .read_receipt + .last_privateread_update(sender_user, room_id) + .await > *roomsince; + + let private_read_event = if 
last_privateread_update { + services + .rooms + .read_receipt + .private_read_get(room_id, sender_user) + .await + .ok() + } else { + None + }; + + let mut receipts: Vec> = services + .rooms + .read_receipt + .readreceipts_since(room_id, *roomsince) + .filter_map(|(read_user, _ts, v)| async move { + services + .users + .user_is_ignored(read_user, sender_user) + .await + .or_some(v) + }) + .collect() + .await; + + if let Some(private_read_event) = private_read_event { + receipts.push(private_read_event); + } + + let receipt_size = receipts.len(); + + if receipt_size > 0 { + response + .extensions + .receipts + .rooms + .insert(room_id.clone(), pack_receipts(Box::new(receipts.into_iter()))); + } + + if roomsince != &0 + && timeline_pdus.is_empty() + && response + .extensions + .account_data + .rooms + .get(room_id) + .is_none_or(Vec::is_empty) + && receipt_size == 0 + { + continue; + } + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + | PduCount::Backfilled(_) => { + error!("timeline in backfill state?!"); + "0".to_owned() + }, + | PduCount::Normal(c) => c.to_string(), + })) + })? 
+ .or_else(|| { + if roomsince != &0 { + Some(roomsince.to_string()) + } else { + None + } + }); + + let room_events: Vec<_> = timeline_pdus + .iter() + .stream() + .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect() + .await; + + for (_, pdu) in timeline_pdus { + let ts = pdu.origin_server_ts; + if DEFAULT_BUMP_TYPES.binary_search(&pdu.kind).is_ok() + && timestamp.is_none_or(|time| time <= ts) + { + timestamp = Some(ts); + } + } + + let required_state = required_state_request + .iter() + .stream() + .filter_map(|state| async move { + services + .rooms + .state_accessor + .room_state_get(room_id, &state.0, &state.1) + .await + .map(|s| s.to_sync_state_event()) + .ok() + }) + .collect() + .await; + + // Heroes + let heroes: Vec<_> = services + .rooms + .state_cache + .room_members(room_id) + .ready_filter(|member| *member != sender_user) + .filter_map(|user_id| { + services + .rooms + .state_accessor + .get_member(room_id, user_id) + .map_ok(|memberevent| sync_events::v5::response::Hero { + user_id: user_id.into(), + name: memberevent.displayname, + avatar: memberevent.avatar_url, + }) + .ok() + }) + .take(5) + .collect() + .await; + + let name = match heroes.len().cmp(&(1_usize)) { + | Ordering::Greater => { + let firsts = heroes[1..] 
+ .iter() + .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) + .collect::>() + .join(", "); + + let last = heroes[0] + .name + .clone() + .unwrap_or_else(|| heroes[0].user_id.to_string()); + + Some(format!("{firsts} and {last}")) + }, + | Ordering::Equal => Some( + heroes[0] + .name + .clone() + .unwrap_or_else(|| heroes[0].user_id.to_string()), + ), + | Ordering::Less => None, + }; + + let heroes_avatar = if heroes.len() == 1 { + heroes[0].avatar.clone() + } else { + None + }; + + rooms.insert(room_id.clone(), sync_events::v5::response::Room { + name: services + .rooms + .state_accessor + .get_name(room_id) + .await + .ok() + .or(name), + avatar: if let Some(heroes_avatar) = heroes_avatar { + ruma::JsOption::Some(heroes_avatar) + } else { + match services.rooms.state_accessor.get_avatar(room_id).await { + | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), + | ruma::JsOption::Null => ruma::JsOption::Null, + | ruma::JsOption::Undefined => ruma::JsOption::Undefined, + } + }, + initial: Some(roomsince == &0), + is_dm: None, + invite_state, + unread_notifications: UnreadNotificationsCount { + highlight_count: Some( + services + .rooms + .user + .highlight_count(sender_user, room_id) + .await + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services + .rooms + .user + .notification_count(sender_user, room_id) + .await + .try_into() + .expect("notification count can't go that high"), + ), + }, + timeline: room_events, + required_state, + prev_batch, + limited, + joined_count: Some( + services + .rooms + .state_cache + .room_joined_count(room_id) + .await + .unwrap_or(0) + .try_into() + .unwrap_or_else(|_| uint!(0)), + ), + invited_count: Some( + services + .rooms + .state_cache + .room_invited_count(room_id) + .await + .unwrap_or(0) + .try_into() + .unwrap_or_else(|_| uint!(0)), + ), + num_live: None, // Count events in timeline greater than global sync counter + bump_stamp: 
timestamp, + heroes: Some(heroes), + }); + } + Ok(rooms) +} +async fn collect_account_data( + services: crate::State, + (sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request), +) -> sync_events::v5::response::AccountData { + let mut account_data = sync_events::v5::response::AccountData { + global: Vec::new(), + rooms: BTreeMap::new(), + }; + + if !body.extensions.account_data.enabled.unwrap_or(false) { + return sync_events::v5::response::AccountData::default(); + } + + account_data.global = services + .account_data + .changes_since(None, sender_user, globalsince) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) + .collect() + .await; + + if let Some(rooms) = &body.extensions.account_data.rooms { + for room in rooms { + account_data.rooms.insert( + room.clone(), + services + .account_data + .changes_since(Some(room), sender_user, globalsince) + .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) + .collect() + .await, + ); + } + } + + account_data +} + +async fn collect_e2ee<'a>( + services: crate::State, + (sender_user, sender_device, globalsince, body): ( + &UserId, + &DeviceId, + u64, + &sync_events::v5::Request, + ), + all_joined_rooms: &'a Vec<&'a RoomId>, +) -> Result { + if !body.extensions.e2ee.enabled.unwrap_or(false) { + return Ok(sync_events::v5::response::E2EE::default()); + } + let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in + let mut device_list_changes = HashSet::new(); + let mut device_list_left = HashSet::new(); + // Look for device list updates of this account + device_list_changes.extend( + services + .users + .keys_changed(sender_user, globalsince, None) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + for room_id in all_joined_rooms { + let Ok(current_shortstatehash) = + services.rooms.state.get_room_shortstatehash(room_id).await + else { + error!("Room {room_id} has no state"); + continue; 
+ }; + + let since_shortstatehash = services + .rooms + .user + .get_token_shortstatehash(room_id, globalsince) + .await + .ok(); + + let encrypted_room = services + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .await + .is_ok(); + + if let Some(since_shortstatehash) = since_shortstatehash { + // Skip if there are only timeline changes + if since_shortstatehash == current_shortstatehash { + continue; + } + + let since_encryption = services + .rooms + .state_accessor + .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") + .await; + + let since_sender_member: Option = services + .rooms + .state_accessor + .state_get_content( + since_shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .ok() + .await; + + let joined_since_last_sync = since_sender_member + .as_ref() + .is_none_or(|member| member.membership != MembershipState::Join); + + let new_encrypted_room = encrypted_room && since_encryption.is_err(); + + if encrypted_room { + let current_state_ids: HashMap<_, OwnedEventId> = services + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let since_state_ids = services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if since_state_ids.get(&key) != Some(&id) { + let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { + error!("Pdu in state not found: {id}"); + continue; + }; + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key) = &pdu.state_key { + let user_id = + OwnedUserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + if user_id == *sender_user { + continue; + } + + let content: RoomMemberEventContent = pdu.get_content()?; + match content.membership { + | MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room( + &services, + sender_user, + 
&user_id, + Some(room_id), + ) + .await + { + device_list_changes.insert(user_id); + } + }, + | MembershipState::Leave => { + // Write down users that have left encrypted rooms we + // are in + left_encrypted_users.insert(user_id); + }, + | _ => {}, + } + } + } + } + } + if joined_since_last_sync || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_changes.extend( + services + .rooms + .state_cache + .room_members(room_id) + // Don't send key updates from the sender to the sender + .ready_filter(|user_id| sender_user != *user_id) + // Only send keys if the sender doesn't share an encrypted room with the target + // already + .filter_map(|user_id| { + share_encrypted_room(&services, sender_user, user_id, Some(room_id)) + .map(|res| res.or_some(user_id.to_owned())) + }) + .collect::>() + .await, + ); + } + } + } + // Look for device list updates in this room + device_list_changes.extend( + services + .users + .room_keys_changed(room_id, globalsince, None) + .map(|(user_id, _)| user_id) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + } + + for user_id in left_encrypted_users { + let dont_share_encrypted_room = + !share_encrypted_room(&services, sender_user, &user_id, None).await; + + // If the user doesn't share an encrypted room with the target anymore, we need + // to tell them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + + Ok(sync_events::v5::response::E2EE { + device_lists: DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count: services + .users + .count_one_time_keys(sender_user, sender_device) + .await, + device_unused_fallback_key_types: None, + }) +} + +async fn collect_to_device( + services: crate::State, + (sender_user, sender_device, globalsince, body): SyncInfo<'_>, + next_batch: u64, +) -> Option { + if !body.extensions.to_device.enabled.unwrap_or(false) { + 
return None; + } + + services + .users + .remove_to_device_events(sender_user, sender_device, globalsince) + .await; + + Some(sync_events::v5::response::ToDevice { + next_batch: next_batch.to_string(), + events: services + .users + .get_to_device_events(sender_user, sender_device) + .collect() + .await, + }) +} + +async fn collect_receipts(_services: crate::State) -> sync_events::v5::response::Receipts { + sync_events::v5::response::Receipts { rooms: BTreeMap::new() } + // TODO: get explicitly requested read receipts +} diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index b4856d72..904f1d2f 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -52,6 +52,7 @@ pub(crate) async fn get_supported_versions_route( ("org.matrix.msc4180".to_owned(), true), /* stable flag for 3916 (https://github.com/matrix-org/matrix-spec-proposals/pull/4180) */ ("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */ ("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */ + ("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */ ]), }; diff --git a/src/api/router.rs b/src/api/router.rs index 1d42fc5e..e7cd368d 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -145,6 +145,7 @@ pub fn build(router: Router, server: &Server) -> Router { ) .ruma_route(&client::sync_events_route) .ruma_route(&client::sync_events_v4_route) + .ruma_route(&client::sync_events_v5_route) .ruma_route(&client::get_context_route) .ruma_route(&client::get_message_events_route) .ruma_route(&client::search_events_route) diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 9777faeb..2bc21355 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ 
b/src/service/rooms/read_receipt/mod.rs @@ -155,6 +155,7 @@ where } let content = ReceiptEventContent::from_iter(json); + conduwuit::trace!(?content); Raw::from_json( serde_json::value::to_raw_value(&SyncEphemeralRoomEvent { content }) .expect("received valid json"), diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index 02658a70..0b86377a 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -11,8 +11,9 @@ use ruma::{ api::client::sync::sync_events::{ self, v4::{ExtensionsConfig, SyncRequestList}, + v5, }, - OwnedDeviceId, OwnedRoomId, OwnedUserId, + DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, }; use crate::{rooms, Dep}; @@ -20,7 +21,8 @@ use crate::{rooms, Dep}; pub struct Service { db: Data, services: Services, - connections: DbConnections, + connections: DbConnections, + snake_connections: DbConnections, } pub struct Data { @@ -52,9 +54,19 @@ struct SlidingSyncCache { extensions: ExtensionsConfig, } -type DbConnections = Mutex>; +#[derive(Default)] +struct SnakeSyncCache { + lists: BTreeMap, + subscriptions: BTreeMap, + known_rooms: BTreeMap>, + extensions: v5::request::Extensions, +} + +type DbConnections = Mutex>; type DbConnectionsKey = (OwnedUserId, OwnedDeviceId, String); type DbConnectionsVal = Arc>; +type SnakeConnectionsKey = (OwnedUserId, OwnedDeviceId, Option); +type SnakeConnectionsVal = Arc>; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -79,12 +91,15 @@ impl crate::Service for Service { typing: args.depend::("rooms::typing"), }, connections: StdMutex::new(BTreeMap::new()), + snake_connections: StdMutex::new(BTreeMap::new()), })) } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +/// load params from cache if body doesn't contain it, as long as it's allowed +/// in some cases we may need to allow an empty list as an actual value fn list_or_sticky(target: &mut Vec, cached: &Vec) { if target.is_empty() { target.clone_from(cached); @@ -97,6 
+112,30 @@ fn some_or_sticky(target: &mut Option, cached: Option) { } impl Service { + pub fn snake_connection_cached( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: Option, + ) -> bool { + self.snake_connections + .lock() + .unwrap() + .contains_key(&(user_id, device_id, conn_id)) + } + + pub fn forget_snake_sync_connection( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: Option, + ) { + self.snake_connections + .lock() + .expect("locked") + .remove(&(user_id, device_id, conn_id)); + } + pub fn remembered( &self, user_id: OwnedUserId, @@ -121,6 +160,112 @@ impl Service { .remove(&(user_id, device_id, conn_id)); } + pub fn update_snake_sync_request_with_cache( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + request: &mut v5::Request, + ) -> BTreeMap> { + let conn_id = request.conn_id.clone(); + let mut cache = self.snake_connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + //v5::Request::try_from_http_request(req, path_args); + for (list_id, list) in &mut request.lists { + if let Some(cached_list) = cached.lists.get(list_id) { + list_or_sticky( + &mut list.room_details.required_state, + &cached_list.room_details.required_state, + ); + some_or_sticky(&mut list.include_heroes, cached_list.include_heroes); + + match (&mut list.filters, cached_list.filters.clone()) { + | (Some(filters), Some(cached_filters)) => { + some_or_sticky(&mut filters.is_invite, cached_filters.is_invite); + // TODO (morguldir): Find out how a client can unset this, probably need + // to change into an option inside ruma + list_or_sticky( + &mut filters.not_room_types, + &cached_filters.not_room_types, + ); + }, + | (_, Some(cached_filters)) => list.filters = Some(cached_filters), + | (Some(list_filters), _) => list.filters = 
Some(list_filters.clone()), + | (..) => {}, + } + } + cached.lists.insert(list_id.clone(), list.clone()); + } + + cached + .subscriptions + .extend(request.room_subscriptions.clone()); + request + .room_subscriptions + .extend(cached.subscriptions.clone()); + + request.extensions.e2ee.enabled = request + .extensions + .e2ee + .enabled + .or(cached.extensions.e2ee.enabled); + + request.extensions.to_device.enabled = request + .extensions + .to_device + .enabled + .or(cached.extensions.to_device.enabled); + + request.extensions.account_data.enabled = request + .extensions + .account_data + .enabled + .or(cached.extensions.account_data.enabled); + request.extensions.account_data.lists = request + .extensions + .account_data + .lists + .clone() + .or_else(|| cached.extensions.account_data.lists.clone()); + request.extensions.account_data.rooms = request + .extensions + .account_data + .rooms + .clone() + .or_else(|| cached.extensions.account_data.rooms.clone()); + + some_or_sticky(&mut request.extensions.typing.enabled, cached.extensions.typing.enabled); + some_or_sticky( + &mut request.extensions.typing.rooms, + cached.extensions.typing.rooms.clone(), + ); + some_or_sticky( + &mut request.extensions.typing.lists, + cached.extensions.typing.lists.clone(), + ); + some_or_sticky( + &mut request.extensions.receipts.enabled, + cached.extensions.receipts.enabled, + ); + some_or_sticky( + &mut request.extensions.receipts.rooms, + cached.extensions.receipts.rooms.clone(), + ); + some_or_sticky( + &mut request.extensions.receipts.lists, + cached.extensions.receipts.lists.clone(), + ); + + cached.extensions = request.extensions.clone(); + cached.known_rooms.clone() + } + pub fn update_sync_request_with_cache( &self, user_id: OwnedUserId, @@ -148,20 +293,30 @@ impl Service { for (list_id, list) in &mut request.lists { if let Some(cached_list) = cached.lists.get(list_id) { list_or_sticky(&mut list.sort, &cached_list.sort); - list_or_sticky(&mut list.room_details.required_state, 
&cached_list.room_details.required_state); - some_or_sticky(&mut list.room_details.timeline_limit, cached_list.room_details.timeline_limit); - some_or_sticky(&mut list.include_old_rooms, cached_list.include_old_rooms.clone()); + list_or_sticky( + &mut list.room_details.required_state, + &cached_list.room_details.required_state, + ); + some_or_sticky( + &mut list.room_details.timeline_limit, + cached_list.room_details.timeline_limit, + ); + some_or_sticky( + &mut list.include_old_rooms, + cached_list.include_old_rooms.clone(), + ); match (&mut list.filters, cached_list.filters.clone()) { - (Some(list_filters), Some(cached_filters)) => { - some_or_sticky(&mut list_filters.is_dm, cached_filters.is_dm); - list_or_sticky(&mut list_filters.spaces, &cached_filters.spaces); - some_or_sticky(&mut list_filters.is_encrypted, cached_filters.is_encrypted); - some_or_sticky(&mut list_filters.is_invite, cached_filters.is_invite); - list_or_sticky(&mut list_filters.room_types, &cached_filters.room_types); - list_or_sticky(&mut list_filters.not_room_types, &cached_filters.not_room_types); - some_or_sticky(&mut list_filters.room_name_like, cached_filters.room_name_like); - list_or_sticky(&mut list_filters.tags, &cached_filters.tags); - list_or_sticky(&mut list_filters.not_tags, &cached_filters.not_tags); + | (Some(filter), Some(cached_filter)) => { + some_or_sticky(&mut filter.is_dm, cached_filter.is_dm); + list_or_sticky(&mut filter.spaces, &cached_filter.spaces); + some_or_sticky(&mut filter.is_encrypted, cached_filter.is_encrypted); + some_or_sticky(&mut filter.is_invite, cached_filter.is_invite); + list_or_sticky(&mut filter.room_types, &cached_filter.room_types); + // Should be made possible to change + list_or_sticky(&mut filter.not_room_types, &cached_filter.not_room_types); + some_or_sticky(&mut filter.room_name_like, cached_filter.room_name_like); + list_or_sticky(&mut filter.tags, &cached_filter.tags); + list_or_sticky(&mut filter.not_tags, &cached_filter.not_tags); }, | 
(_, Some(cached_filters)) => list.filters = Some(cached_filters), | (Some(list_filters), _) => list.filters = Some(list_filters.clone()), @@ -222,18 +377,16 @@ impl Service { subscriptions: BTreeMap, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); + let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( + || { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }, + )); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -241,13 +394,18 @@ impl Service { } pub fn update_sync_known_rooms( - &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, list_id: String, - new_cached_rooms: BTreeSet, globalsince: u64, + &self, + user_id: &UserId, + device_id: &DeviceId, + conn_id: String, + list_id: String, + new_cached_rooms: BTreeSet, + globalsince: u64, ) { let mut cache = self.connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id, device_id, conn_id)) + .entry((user_id.to_owned(), device_id.to_owned(), conn_id)) .or_insert_with(|| { Arc::new(Mutex::new(SlidingSyncCache { lists: BTreeMap::new(), @@ -275,4 +433,57 @@ impl Service { list.insert(roomid, globalsince); } } + + pub fn update_snake_sync_known_rooms( + &self, + user_id: &UserId, + device_id: &DeviceId, + conn_id: String, + list_id: String, + new_cached_rooms: BTreeSet, + globalsince: u64, + ) { + let mut cache = self.snake_connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id.to_owned(), device_id.to_owned(), Some(conn_id))) + .or_insert_with(|| 
Arc::new(Mutex::new(SnakeSyncCache::default()))), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + for (roomid, lastsince) in cached + .known_rooms + .entry(list_id.clone()) + .or_default() + .iter_mut() + { + if !new_cached_rooms.contains(roomid) { + *lastsince = 0; + } + } + let list = cached.known_rooms.entry(list_id).or_default(); + for roomid in new_cached_rooms { + list.insert(roomid, globalsince); + } + } + + pub fn update_snake_sync_subscriptions( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: Option, + subscriptions: BTreeMap, + ) { + let mut cache = self.snake_connections.lock().expect("locked"); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), + ); + let cached = &mut cached.lock().expect("locked"); + drop(cache); + + cached.subscriptions = subscriptions; + } } From f59e3d8850bfa93244f855e757fa69d610d16e2b Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 14 Jan 2025 13:05:25 -0500 Subject: [PATCH 0518/1248] bump nix lockfile, cargo.lock, rust to 1.84, and fix tracing fork Signed-off-by: June Clementine Strawberry --- Cargo.lock | 457 ++++++++++++++++++++++---------------------- Cargo.toml | 10 +- flake.lock | 30 +-- flake.nix | 2 +- rust-toolchain.toml | 2 +- 5 files changed, 252 insertions(+), 249 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d25197e0..f777a50c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -49,9 +49,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arc-swap" @@ -138,18 +138,18 @@ checksum = 
"c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -175,9 +175,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f47bb8cc16b669d267eeccf585aea077d0882f4777b1c1f740217885d6e6e5a3" +checksum = "f409eb70b561706bf8abba8ca9c112729c481595893fd06a2dd9af8ed8441148" dependencies = [ "aws-lc-sys", "paste", @@ -186,16 +186,15 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.23.1" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2101df3813227bbaaaa0b04cd61c534c7954b22bd68d399b440be937dc63ff7" +checksum = "923ded50f602b3007e5e63e3f094c479d9c8a9b42d7f4034e4afe456aa48bfd2" dependencies = [ "bindgen", "cc", "cmake", "dunce", "fs_extra", - "libc", "paste", ] @@ -225,9 +224,9 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -259,7 +258,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", @@ -282,7 +281,7 @@ dependencies = [ "mime", "pin-project-lite", "serde", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", ] @@ -369,7 +368,7 @@ version = "0.69.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -382,7 +381,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.90", + "syn 2.0.96", "which", ] @@ -394,9 +393,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "1be3f42a67d6d345ecd59f675f3f012d6974981560836e938c22b424b85ce1be" [[package]] name = "blake2" @@ -445,9 +444,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" [[package]] name = "byteorder" @@ -496,9 +495,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.3" +version = "1.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" +checksum = "c8293772165d9345bdaaa39b45b2109591e63fe5e6fbc23c6ff930a048aa310b" dependencies = [ "jobserver", "libc", @@ -557,9 +556,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" dependencies = [ "clap_builder", "clap_derive", @@ -567,9 +566,9 @@ dependencies = [ [[package]] name = "clap_builder" 
-version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" dependencies = [ "anstyle", "clap_lex", @@ -577,14 +576,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -740,7 +739,7 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - "thiserror 2.0.7", + "thiserror 2.0.11", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -778,7 +777,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -809,7 +808,7 @@ dependencies = [ "sentry-tracing", "serde_json", "tokio", - "tower 0.5.1", + "tower 0.5.2", "tower-http", "tracing", ] @@ -905,9 +904,9 @@ checksum = "3618cccc083bb987a415d85c02ca6c9994ea5b44731ec28b9ecf09658655fba9" [[package]] name = "const_panic" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "013b6c2c3a14d678f38cd23994b02da3a1a1b6a5d1eedddfe63a5a5f11b13a81" +checksum = "2459fc9262a1aa204eb4b5764ad4f189caec88aea9634389c0a25f8be7f6265e" [[package]] name = "coolor" @@ -1003,18 +1002,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] 
name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1031,18 +1030,18 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crossterm" @@ -1050,7 +1049,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "crossterm_winapi", "futures-core", "mio", @@ -1087,7 +1086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1114,7 +1113,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1183,7 +1182,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1235,7 +1234,7 @@ dependencies = [ "heck", 
"proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1421,7 +1420,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1495,9 +1494,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "h2" @@ -1647,11 +1646,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1687,7 +1686,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1753,9 +1752,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", @@ -1774,9 +1773,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.3" +version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" dependencies = [ 
"futures-util", "http", @@ -1939,7 +1938,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -1982,9 +1981,9 @@ dependencies = [ [[package]] name = "image-webp" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e031e8e3d94711a9ccb5d6ea357439ef3dcbed361798bd4071dc4d9793fbe22f" +checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" dependencies = [ "byteorder-lite", "quick-error 2.0.1", @@ -2090,9 +2089,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.76" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ "once_cell", "wasm-bindgen", @@ -2131,9 +2130,9 @@ dependencies = [ [[package]] name = "konst" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298ddf99f06a97c1ecd0e910932662b7842855046234b0d0376d35d93add087f" +checksum = "4381b9b00c55f251f2ebe9473aef7c117e96828def1a7cb3bd3f0f903c6894e9" dependencies = [ "const_panic", "konst_kernel", @@ -2151,9 +2150,9 @@ dependencies = [ [[package]] name = "lazy-regex" -version = "3.3.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d8e41c97e6bc7ecb552016274b99fbb5d035e8de288c582d9b933af6677bfda" +checksum = "60c7310b93682b36b98fa7ea4de998d3463ccbebd94d935d6b48ba5b6ffa7126" dependencies = [ "lazy-regex-proc_macros", "once_cell", @@ -2162,14 +2161,14 @@ dependencies = [ [[package]] name = "lazy-regex-proc_macros" -version = "3.3.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"76e1d8b05d672c53cb9c7b920bbba8783845ae4f0b076e02a3db1d02c81b4163" +checksum = "4ba01db5ef81e17eb10a5e0f2109d1b3a3e29bac3070fdbd7d156bf7dbd206a1" dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2186,9 +2185,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libloading" @@ -2202,9 +2201,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "pkg-config", @@ -2219,9 +2218,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -2241,9 +2240,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" [[package]] name = "loole" @@ -2362,9 +2361,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" dependencies = [ "adler2", "simd-adler32", @@ -2394,7 +2393,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "cfg-if", "cfg_aliases", "libc", @@ -2517,9 +2516,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -2591,7 +2590,7 @@ dependencies = [ "glob", "once_cell", "opentelemetry", - "ordered-float 4.5.0", + "ordered-float 4.6.0", "percent-encoding", "rand", "thiserror 1.0.69", @@ -2610,18 +2609,18 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.5.0" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c65ee1f9701bf938026630b455d5315f490640234259037edb259798b3bcf85e" +checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" dependencies = [ "num-traits", ] [[package]] name = "os_info" -version = "3.9.0" +version = "3.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ca711d8b83edbb00b44d504503cd247c9c0bd8b0fa2694f2a1a3d8165379ce" +checksum = "6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" dependencies = [ "log", "serde", @@ -2700,7 +2699,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2711,21 +2710,21 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" 
[[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_shared 0.11.2", + "phf_shared 0.11.3", ] [[package]] name = "phf_codegen" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" +checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" dependencies = [ - "phf_generator 0.11.2", - "phf_shared 0.11.2", + "phf_generator 0.11.3", + "phf_shared 0.11.3", ] [[package]] @@ -2740,11 +2739,11 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.2", + "phf_shared 0.11.3", "rand", ] @@ -2754,43 +2753,43 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" dependencies = [ - "siphasher", + "siphasher 0.3.11", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher", + "siphasher 1.0.1", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" 
+checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2816,9 +2815,9 @@ checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "png" -version = "0.17.15" +version = "0.17.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67582bd5b65bdff614270e2ea89a1cf15bef71245cc1e5f7ea126977144211d" +checksum = "82151a2fc869e011c153adc57cf2789ccb8d9906ce52c0b39a6b5697749d7526" dependencies = [ "bitflags 1.3.2", "crc32fast", @@ -2850,12 +2849,12 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2869,9 +2868,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -2884,7 +2883,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "version_check", "yansi", ] @@ -2909,7 +2908,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -2927,7 +2926,7 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "memchr", "pulldown-cmark-escape", "unicase", @@ -2964,7 +2963,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.7", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -2983,7 +2982,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.7", + "thiserror 2.0.11", "tinyvec", "tracing", "web-time 1.1.0", @@ -2991,9 +2990,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -3005,9 +3004,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -3044,11 +3043,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", ] [[package]] @@ -3130,7 +3129,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tokio-rustls", "tokio-socks", @@ -3221,7 +3220,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.7", + "thiserror 2.0.11", "url", "web-time 1.1.0", ] @@ -3248,7 +3247,7 @@ dependencies = [ "serde_html_form", "serde_json", "smallvec", - "thiserror 2.0.7", + "thiserror 2.0.11", "time", "tracing", "url", @@ -3275,7 +3274,7 @@ dependencies = [ "serde", "serde_json", "smallvec", - "thiserror 2.0.7", + "thiserror 2.0.11", "tracing", "url", "web-time 1.1.0", @@ -3306,7 +3305,7 @@ version = "0.9.5" source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" dependencies = [ "js_int", - "thiserror 2.0.7", + "thiserror 2.0.11", ] [[package]] @@ -3330,7 +3329,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.90", + "syn 2.0.96", "toml", ] @@ -3355,7 +3354,7 @@ dependencies = [ "http", "http-auth", "ruma-common", - "thiserror 2.0.7", + "thiserror 2.0.11", "tracing", ] @@ -3372,7 +3371,7 @@ dependencies = [ "serde_json", "sha2", "subslice", - "thiserror 2.0.7", + "thiserror 2.0.11", ] [[package]] @@ -3386,7 +3385,7 @@ dependencies = [ "ruma-events", "serde", "serde_json", - "thiserror 2.0.7", + "thiserror 2.0.11", "tracing", ] @@ -3453,11 +3452,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = 
"a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "errno", "libc", "linux-raw-sys", @@ -3466,9 +3465,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" dependencies = [ "aws-lc-rs", "log", @@ -3503,9 +3502,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" dependencies = [ "web-time 1.1.0", ] @@ -3524,9 +3523,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rustyline-async" @@ -3538,7 +3537,7 @@ dependencies = [ "futures-util", "pin-project", "thingbuf", - "thiserror 2.0.7", + "thiserror 2.0.11", "unicode-segmentation", "unicode-width 0.2.0", ] @@ -3581,11 +3580,11 @@ checksum = "1be20c5f7f393ee700f8b2f28ea35812e4e212f40774b550cd2a93ea91684451" [[package]] name = "security-framework" -version = "3.0.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "core-foundation", "core-foundation-sys", "libc", @@ -3594,9 +3593,9 @@ dependencies = [ 
[[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -3604,9 +3603,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "sentry" @@ -3745,29 +3744,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "serde_html_form" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" +checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", "indexmap 2.7.0", @@ -3778,9 +3777,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "itoa", "memchr", @@ -3930,6 +3929,12 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "slab" version = "0.4.9" @@ -4040,21 +4045,15 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "sync_wrapper" version = "1.0.2" @@ -4072,7 +4071,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4123,11 +4122,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.7" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93605438cbd668185516ab499d589afb7ee1859ea3d5fc8f6b0755e1c7443767" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.7", + "thiserror-impl 2.0.11", ] [[package]] @@ -4138,18 +4137,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "thiserror-impl" -version = "2.0.7" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d8749b4531af2117677a5fcd12b1348a3fe2b81e36e61ffeac5c4aa3273e36" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4255,9 +4254,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -4270,9 +4269,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -4288,13 +4287,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -4441,14 +4440,14 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", 
"futures-util", "pin-project-lite", - "sync_wrapper 0.1.2", + "sync_wrapper", "tokio", "tower-layer", "tower-service", @@ -4461,7 +4460,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.6.0", + "bitflags 2.7.0", "bytes", "futures-core", "futures-util", @@ -4471,7 +4470,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -4492,7 +4491,7 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "log", "pin-project-lite", @@ -4503,17 +4502,17 @@ dependencies = [ [[package]] name = "tracing-attributes" version = "0.1.28" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] name = "tracing-core" version = "0.1.33" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "once_cell", "valuable", @@ -4533,7 +4532,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = 
"git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "log", "once_cell", @@ -4561,7 +4560,7 @@ dependencies = [ [[package]] name = "tracing-subscriber" version = "0.3.18" -source = "git+https://github.com/girlbossceo/tracing?rev=ccc4fbd8238c2d5ba354e61ec17ac610af11401d#ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" dependencies = [ "matchers", "nu-ansi-term", @@ -4622,9 +4621,9 @@ dependencies = [ [[package]] name = "unicase" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" @@ -4715,9 +4714,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" dependencies = [ "getrandom", "serde", @@ -4758,34 +4757,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", + "rustversion", 
"wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.49" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", @@ -4796,9 +4796,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4806,28 +4806,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + 
"unicode-ident", +] [[package]] name = "web-sys" -version = "0.3.76" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -5125,9 +5128,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" dependencies = [ "memchr", ] @@ -5191,7 +5194,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "synstructure", ] @@ -5213,7 +5216,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -5233,7 +5236,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", "synstructure", ] @@ -5262,7 +5265,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.96", ] [[package]] @@ -5301,9 +5304,9 @@ checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" [[package]] name = "zune-jpeg" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16099418600b4d8f028622f73ff6e3deaabdff330fb9a2a131dea781ee8b0768" +checksum = "99a5bab8d7dedf81405c4bb1f2b83ea057643d9cb28778cea9eecddeedd2e028" dependencies = [ "zune-core", ] diff --git a/Cargo.toml b/Cargo.toml index 
76acda80..855b8dda 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.83.0" +rust-version = "1.84.0" version = "0.5.0" [workspace.metadata.crane] @@ -513,16 +513,16 @@ version = "0.2" # https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] git = "https://github.com/girlbossceo/tracing" -rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" [patch.crates-io.tracing] git = "https://github.com/girlbossceo/tracing" -rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" [patch.crates-io.tracing-core] git = "https://github.com/girlbossceo/tracing" -rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" [patch.crates-io.tracing-log] git = "https://github.com/girlbossceo/tracing" -rev = "ccc4fbd8238c2d5ba354e61ec17ac610af11401d" +rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" # adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b diff --git a/flake.lock b/flake.lock index 35029076..210e8e08 100644 --- a/flake.lock +++ b/flake.lock @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1734808813, - "narHash": "sha256-3aH/0Y6ajIlfy7j52FGZ+s4icVX0oHhqBzRdlOeztqg=", + "lastModified": 1736566337, + "narHash": "sha256-SC0eDcZPqISVt6R0UfGPyQLrI0+BppjjtQ3wcSlk0oI=", "owner": "ipetkov", "repo": "crane", - "rev": "72e2d02dbac80c8c86bf6bf3e785536acf8ee926", + "rev": "9172acc1ee6c7e1cbafc3044ff850c568c75a5a3", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - 
"lastModified": 1735799625, - "narHash": "sha256-lFadwWDvVIub11bwfZhsh2WUByf9LOi6yjsSUMmE0xk=", + "lastModified": 1736836313, + "narHash": "sha256-zdZ7/T6yG0/hzoVOiNpDiR/sW3zR6oSMrfIFJK2BrrE=", "owner": "nix-community", "repo": "fenix", - "rev": "a9d84a1545814910cb4ab0515ed6921e8b07ee95", + "rev": "056c9393c821a4df356df6ce7f14c722dc8717ec", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1733603756, - "narHash": "sha256-eTKnZDZ1Ex++v+BI0DBcUBmCXAO/tE8hxK9MiyztZkU=", + "lastModified": 1736719310, + "narHash": "sha256-Turvx60THwzTiUHb49WV3upUgsPuktr7tVy2Lwu2xJg=", "owner": "axboe", "repo": "liburing", - "rev": "c3d5d6270cd5ed48d817fc1e8e95f7c8b222f2ff", + "rev": "3124a4619e4daf26b06d48ccf0186a947070c415", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1735685343, - "narHash": "sha256-h1CpBzdJDNtSUb5QMyfFHKHocTTky+4McgQEBQBM+xA=", + "lastModified": 1736817698, + "narHash": "sha256-1m+JP9RUsbeLVv/tF1DX3Ew9Vl/fatXnlh/g5k3jcSk=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "81934660d6e9ea54d2f0cdee821e8533b10c221a", + "rev": "2b1fca3296ddd1602d2c4f104a4050e006f4b0cb", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1735742096, - "narHash": "sha256-q3a80h8Jf8wfmPURUgRR46nQCB3I5fhZ+/swulTF5HY=", + "lastModified": 1736690231, + "narHash": "sha256-g9gyxX+F6CrkT5gRIMKPnCPom0o9ZDzYnzzeNF86D6Q=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "7e639ee3dda6ed9cecc79d41f6d38235121e483d", + "rev": "8364ef299790cb6ec22b9e09e873c97dbe9f2cb5", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index fb40cae7..920d3d14 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-s1RPtyvDGJaX/BisLT+ifVfuhDT1nZkZ1NcK8sbwELM="; + sha256 = 
"sha256-lMLAupxng4Fd9F1oDw8gx+qA0RuF7ou7xhNU8wgs0PU="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index ddd952a2..97e33c91 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. [toolchain] -channel = "1.83.0" +channel = "1.84.0" profile = "minimal" components = [ # For rust-analyzer From 9ebb39ca4f35789e54b73cd33805943b362819ae Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 15 Jan 2025 14:34:21 -0500 Subject: [PATCH 0519/1248] add missing cfg_attr on deserialisation db test Signed-off-by: strawberry --- src/database/tests.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 0c9fb41a..0a42ad60 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -219,7 +219,10 @@ fn de_tuple_incomplete_with_sep() { } #[test] -#[should_panic(expected = "deserialization failed to consume trailing bytes")] +#[cfg_attr( + debug_assertions, + should_panic(expected = "deserialization failed to consume trailing bytes") +)] fn de_tuple_unfinished() { let user_id: &UserId = "@user:example.com".try_into().unwrap(); let room_id: &RoomId = "!room:example.com".try_into().unwrap(); From afe9e5536bc8afded76c30304dc782deeda3c9c4 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 11 Jan 2025 16:04:19 +0000 Subject: [PATCH 0520/1248] remove undocumented jwt token login --- Cargo.lock | 15 --------------- Cargo.toml | 4 ---- conduwuit-example.toml | 4 ---- src/api/Cargo.toml | 1 - src/api/client/session.rs | 38 ++++---------------------------------- src/core/config/mod.rs | 6 ------ src/service/Cargo.toml | 1 - src/service/globals/mod.rs | 11 ----------- 8 files changed, 4 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f777a50c..18bd7aab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -685,7 +685,6 @@ dependencies = [ 
"http-body-util", "hyper", "ipaddress", - "jsonwebtoken", "log", "rand", "reqwest", @@ -831,7 +830,6 @@ dependencies = [ "image", "ipaddress", "itertools 0.13.0", - "jsonwebtoken", "log", "loole", "lru-cache", @@ -2115,19 +2113,6 @@ dependencies = [ "serde", ] -[[package]] -name = "jsonwebtoken" -version = "9.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" -dependencies = [ - "base64 0.21.7", - "js-sys", - "ring", - "serde", - "serde_json", -] - [[package]] name = "konst" version = "0.3.16" diff --git a/Cargo.toml b/Cargo.toml index 855b8dda..c0b31a69 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,10 +58,6 @@ features = ["parse"] [workspace.dependencies.sanitize-filename] version = "0.6.0" -[workspace.dependencies.jsonwebtoken] -version = "9.3.0" -default-features = false - [workspace.dependencies.base64] version = "0.22.1" default-features = false diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 9eefedbb..28e7012b 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -563,10 +563,6 @@ # #proxy = "none" -# This item is undocumented. Please contribute documentation for it. -# -#jwt_secret = - # Servers listed here will be used to gather public keys of other servers # (notary trusted key servers). 
# diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 1bc73624..1b463fbc 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -50,7 +50,6 @@ http.workspace = true http-body-util.workspace = true hyper.workspace = true ipaddress.workspace = true -jsonwebtoken.workspace = true log.workspace = true rand.workspace = true reqwest.workspace = true diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 26377c55..21b8786c 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -20,17 +20,10 @@ use ruma::{ }, OwnedUserId, UserId, }; -use serde::Deserialize; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{utils, utils::hash, Error, Result, Ruma}; -#[derive(Debug, Deserialize)] -struct Claims { - sub: String, - //exp: usize, -} - /// # `GET /_matrix/client/v3/login` /// /// Get the supported login types of this server. One of these should be used as @@ -106,34 +99,11 @@ pub(crate) async fn login_route( user_id }, - | login::v3::LoginInfo::Token(login::v3::Token { token }) => { + | login::v3::LoginInfo::Token(login::v3::Token { token: _ }) => { debug!("Got token login type"); - if let Some(jwt_decoding_key) = services.globals.jwt_decoding_key() { - let token = jsonwebtoken::decode::( - token, - jwt_decoding_key, - &jsonwebtoken::Validation::default(), - ) - .map_err(|e| { - warn!("Failed to parse JWT token from user logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid.") - })?; - - let username = token.claims.sub.to_lowercase(); - - UserId::parse_with_server_name(username, services.globals.server_name()).map_err( - |e| { - err!(Request(InvalidUsername(debug_error!( - ?e, - "Failed to parse login username" - )))) - }, - )? - } else { - return Err!(Request(Unknown( - "Token login is not supported (server has no jwt decoding key)." - ))); - } + return Err!(Request(Unknown( + "Token login is not supported." 
+ ))); }, #[allow(deprecated)] | login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 97ecbeaf..d65d3812 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -671,8 +671,6 @@ pub struct Config { #[serde(default)] pub proxy: ProxyConfig, - pub jwt_secret: Option, - /// Servers listed here will be used to gather public keys of other servers /// (notary trusted key servers). /// @@ -2005,10 +2003,6 @@ impl fmt::Display for Config { "Lockdown public room directory (only allow admins to publish)", &self.lockdown_public_room_directory.to_string(), ); - line("JWT secret", match self.jwt_secret { - | Some(_) => "set", - | None => "not set", - }); line( "Trusted key servers", &self diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 4708ff4e..21fbb417 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -61,7 +61,6 @@ image.workspace = true image.optional = true ipaddress.workspace = true itertools.workspace = true -jsonwebtoken.workspace = true log.workspace = true loole.workspace = true lru-cache.workspace = true diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 38d7f786..f6ff2b09 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -18,7 +18,6 @@ pub struct Service { pub db: Data, pub config: Config, - jwt_decoding_key: Option, pub bad_event_ratelimiter: Arc>>, pub server_user: OwnedUserId, pub admin_alias: OwnedRoomAliasId, @@ -33,11 +32,6 @@ impl crate::Service for Service { let db = Data::new(&args); let config = &args.server.config; - let jwt_decoding_key = config - .jwt_secret - .as_ref() - .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); - let turn_secret = config .turn_secret_file @@ -66,7 +60,6 @@ impl crate::Service for Service { let mut s = Self { db, config: config.clone(), - jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), 
admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &config.server_name)) .expect("#admins:server_name is valid alias name"), @@ -158,10 +151,6 @@ impl Service { pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { - self.jwt_decoding_key.as_ref() - } - pub fn turn_password(&self) -> &String { &self.config.turn_password } pub fn turn_ttl(&self) -> u64 { self.config.turn_ttl } From 2cc6ad8df32610531eb56a5e3bf06320afafde97 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 11 Jan 2025 18:49:21 +0000 Subject: [PATCH 0521/1248] implement `/login/get_token` (MSC3882) --- conduwuit-example.toml | 16 +++++ src/api/client/capabilities.rs | 5 +- src/api/client/session.rs | 124 +++++++++++++++++++++++++++------ src/api/router.rs | 1 + src/core/config/mod.rs | 20 ++++++ src/database/maps.rs | 4 ++ src/service/users/mod.rs | 50 +++++++++++++ 7 files changed, 196 insertions(+), 24 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 28e7012b..96578da3 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -645,6 +645,22 @@ # #openid_token_ttl = 3600 +# Allow an existing session to mint a login token for another client. +# This requires interactive authentication, but has security ramifications +# as a malicious client could use the mechanism to spawn more than one +# session. +# Enabled by default. +# +#login_via_existing_session = true + +# Login token expiration/TTL in milliseconds. +# +# These are short-lived tokens for the m.login.token endpoint. +# This is used to allow existing sessions to create new sessions. +# see login_via_existing_session. +# +#login_token_ttl = 120000 + # Static TURN username to provide the client if not using a shared secret # ("turn_secret"), It is recommended to use a shared secret over static # credentials. 
diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index e122611f..87cdb43d 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -32,8 +32,9 @@ pub(crate) async fn get_capabilities_route( // we do not implement 3PID stuff capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability { enabled: false }; - // we dont support generating tokens yet - capabilities.get_login_token = GetLoginTokenCapability { enabled: false }; + capabilities.get_login_token = GetLoginTokenCapability { + enabled: services.server.config.login_via_existing_session, + }; // MSC4133 capability capabilities diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 21b8786c..4881ade7 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{debug, err, info, utils::ReadyExt, warn, Err}; @@ -6,9 +8,10 @@ use ruma::{ api::client::{ error::ErrorKind, session::{ + get_login_token, get_login_types::{ self, - v3::{ApplicationServiceLoginType, PasswordLoginType}, + v3::{ApplicationServiceLoginType, PasswordLoginType, TokenLoginType}, }, login::{ self, @@ -16,10 +19,11 @@ use ruma::{ }, logout, logout_all, }, - uiaa::UserIdentifier, + uiaa, }, OwnedUserId, UserId, }; +use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; use crate::{utils, utils::hash, Error, Result, Ruma}; @@ -30,12 +34,16 @@ use crate::{utils, utils::hash, Error, Result, Ruma}; /// the `type` field when logging in. 
#[tracing::instrument(skip_all, fields(%client), name = "login")] pub(crate) async fn get_login_types_route( + State(services): State, InsecureClientIp(client): InsecureClientIp, _body: Ruma, ) -> Result { Ok(get_login_types::v3::Response::new(vec![ get_login_types::v3::LoginType::Password(PasswordLoginType::default()), get_login_types::v3::LoginType::ApplicationService(ApplicationServiceLoginType::default()), + get_login_types::v3::LoginType::Token(TokenLoginType { + get_login_token: services.server.config.login_via_existing_session, + }), ])) } @@ -70,7 +78,9 @@ pub(crate) async fn login_route( .. }) => { debug!("Got password login type"); - let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = + identifier + { UserId::parse_with_server_name( user_id.to_lowercase(), services.globals.server_name(), @@ -99,11 +109,12 @@ pub(crate) async fn login_route( user_id }, - | login::v3::LoginInfo::Token(login::v3::Token { token: _ }) => { + | login::v3::LoginInfo::Token(login::v3::Token { token }) => { debug!("Got token login type"); - return Err!(Request(Unknown( - "Token login is not supported." - ))); + if !services.server.config.login_via_existing_session { + return Err!(Request(Unknown("Token login is not enabled."))); + } + services.users.find_from_login_token(token).await? 
}, #[allow(deprecated)] | login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { @@ -111,21 +122,22 @@ pub(crate) async fn login_route( user, }) => { debug!("Got appservice login type"); - let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) - } else if let Some(user) = user { - OwnedUserId::parse(user) - } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); - } - .map_err(|e| { - warn!("Failed to parse username from appservice logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + let user_id = + if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name( + user_id.to_lowercase(), + services.globals.server_name(), + ) + } else if let Some(user) = user { + OwnedUserId::parse(user) + } else { + warn!("Bad login type: {:?}", &body.login_info); + return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + } + .map_err(|e| { + warn!("Failed to parse username from appservice logging in: {e}"); + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; if let Some(ref info) = body.appservice_info { if !info.is_user_match(&user_id) { @@ -217,6 +229,74 @@ pub(crate) async fn login_route( }) } +/// # `POST /_matrix/client/v1/login/get_token` +/// +/// Allows a logged-in user to get a short-lived token which can be used +/// to log in with the m.login.token flow. 
+/// +/// +#[tracing::instrument(skip_all, fields(%client), name = "login_token")] +pub(crate) async fn login_token_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + if !services.server.config.login_via_existing_session { + return Err!(Request(Unknown("Login via an existing session is not enabled"))); + } + // Authentication for this endpoint was made optional, but we need + // authentication. + let sender_user = body + .sender_user + .as_ref() + .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + // This route SHOULD have UIA + // TODO: How do we make only UIA sessions that have not been used before valid? + + let mut uiaainfo = uiaa::UiaaInfo { + flows: vec![uiaa::AuthFlow { stages: vec![uiaa::AuthType::Password] }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; + + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + + // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); + + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + let login_token = utils::random_string(TOKEN_LENGTH); + + let expires_in = services + .users + .create_login_token(sender_user, &login_token)?; + + Ok(get_login_token::v1::Response { + expires_in: Duration::from_millis(expires_in), + login_token, + }) +} + /// # `POST /_matrix/client/v3/logout` /// /// Log out the current device. 
diff --git a/src/api/router.rs b/src/api/router.rs index e7cd368d..7855ddfa 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -34,6 +34,7 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&client::register_route) .ruma_route(&client::get_login_types_route) .ruma_route(&client::login_route) + .ruma_route(&client::login_token_route) .ruma_route(&client::whoami_route) .ruma_route(&client::logout_route) .ruma_route(&client::logout_all_route) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index d65d3812..84b88c7c 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -767,6 +767,24 @@ pub struct Config { #[serde(default = "default_openid_token_ttl")] pub openid_token_ttl: u64, + /// Allow an existing session to mint a login token for another client. + /// This requires interactive authentication, but has security ramifications + /// as a malicious client could use the mechanism to spawn more than one + /// session. + /// Enabled by default. + #[serde(default = "true_fn")] + pub login_via_existing_session: bool, + + /// Login token expiration/TTL in milliseconds. + /// + /// These are short-lived tokens for the m.login.token endpoint. + /// This is used to allow existing sessions to create new sessions. + /// see login_via_existing_session. + /// + /// default: 120000 + #[serde(default = "default_login_token_ttl")] + pub login_token_ttl: u64, + /// Static TURN username to provide the client if not using a shared secret /// ("turn_secret"), It is recommended to use a shared secret over static /// credentials. 
@@ -2373,6 +2391,8 @@ fn default_notification_push_path() -> String { "/_matrix/push/v1/notify".to_own fn default_openid_token_ttl() -> u64 { 60 * 60 } +fn default_login_token_ttl() -> u64 { 2 * 60 * 1000 } + fn default_turn_ttl() -> u64 { 60 * 60 * 24 } fn default_presence_idle_timeout_s() -> u64 { 5 * 60 } diff --git a/src/database/maps.rs b/src/database/maps.rs index bc409919..19e19955 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -365,6 +365,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "openidtoken_expiresatuserid", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "logintoken_expiresatuserid", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "userroomid_highlightcount", ..descriptor::RANDOM diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index fe064d9c..971cea7c 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -41,6 +41,7 @@ struct Data { keyid_key: Arc, onetimekeyid_onetimekeys: Arc, openidtoken_expiresatuserid: Arc, + logintoken_expiresatuserid: Arc, todeviceid_events: Arc, token_userdeviceid: Arc, userdeviceid_metadata: Arc, @@ -76,6 +77,7 @@ impl crate::Service for Service { keyid_key: args.db["keyid_key"].clone(), onetimekeyid_onetimekeys: args.db["onetimekeyid_onetimekeys"].clone(), openidtoken_expiresatuserid: args.db["openidtoken_expiresatuserid"].clone(), + logintoken_expiresatuserid: args.db["logintoken_expiresatuserid"].clone(), todeviceid_events: args.db["todeviceid_events"].clone(), token_userdeviceid: args.db["token_userdeviceid"].clone(), userdeviceid_metadata: args.db["userdeviceid_metadata"].clone(), @@ -941,6 +943,54 @@ impl Service { .map_err(|e| err!(Database("User ID in openid_userid is invalid. {e}"))) } + /// Creates a short-lived login token, which can be used to log in using the + /// `m.login.token` mechanism. 
+ pub fn create_login_token(&self, user_id: &UserId, token: &str) -> Result { + use std::num::Saturating as Sat; + + let expires_in = self.services.server.config.login_token_ttl; + let expires_at = Sat(utils::millis_since_unix_epoch()) + Sat(expires_in); + + let mut value = expires_at.0.to_be_bytes().to_vec(); + value.extend_from_slice(user_id.as_bytes()); + + self.db + .logintoken_expiresatuserid + .insert(token.as_bytes(), value.as_slice()); + + Ok(expires_in) + } + + /// Find out which user a login token belongs to. + /// Removes the token to prevent double-use attacks. + pub async fn find_from_login_token(&self, token: &str) -> Result { + let Ok(value) = self.db.logintoken_expiresatuserid.get(token).await else { + return Err!(Request(Unauthorized("Login token is unrecognised"))); + }; + + let (expires_at_bytes, user_bytes) = value.split_at(0_u64.to_be_bytes().len()); + let expires_at = u64::from_be_bytes( + expires_at_bytes + .try_into() + .map_err(|e| err!(Database("expires_at in login_userid is invalid u64. {e}")))?, + ); + + if expires_at < utils::millis_since_unix_epoch() { + debug_warn!("Login token is expired, removing"); + self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + + return Err!(Request(Unauthorized("Login token is expired"))); + } + + self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + + let user_string = utils::string_from_bytes(user_bytes) + .map_err(|e| err!(Database("User ID in login_userid is invalid unicode. {e}")))?; + + OwnedUserId::try_from(user_string) + .map_err(|e| err!(Database("User ID in login_userid is invalid. 
{e}"))) + } + /// Gets a specific user profile key pub async fn profile_key( &self, From 5b8464252c2c03edf65e43153be026dbb768a12a Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 17 Jan 2025 00:01:47 -0500 Subject: [PATCH 0522/1248] cleanup+fix login get_token code, use db ser/deser instead Signed-off-by: strawberry --- src/api/client/session.rs | 23 ++++++++-------------- src/service/users/mod.rs | 41 +++++++++++++-------------------------- 2 files changed, 22 insertions(+), 42 deletions(-) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 4881ade7..7155351c 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -242,15 +242,11 @@ pub(crate) async fn login_token_route( body: Ruma, ) -> Result { if !services.server.config.login_via_existing_session { - return Err!(Request(Unknown("Login via an existing session is not enabled"))); + return Err!(Request(Forbidden("Login via an existing session is not enabled"))); } - // Authentication for this endpoint was made optional, but we need - // authentication. - let sender_user = body - .sender_user - .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let sender_user = body.sender_user(); + let sender_device = body.sender_device(); // This route SHOULD have UIA // TODO: How do we make only UIA sessions that have not been used before valid? @@ -274,22 +270,19 @@ pub(crate) async fn login_token_route( } // Success! 
- } else if let Some(json) = body.json_body { + } else if let Some(json) = body.json_body.as_ref() { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("No JSON body was sent when required."))); } let login_token = utils::random_string(TOKEN_LENGTH); - - let expires_in = services - .users - .create_login_token(sender_user, &login_token)?; + let expires_in = services.users.create_login_token(sender_user, &login_token); Ok(get_login_token::v1::Response { expires_in: Duration::from_millis(expires_in), diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 971cea7c..b2d3a94a 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,8 +1,8 @@ use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; use conduwuit::{ - debug_warn, err, utils, - utils::{stream::TryIgnore, string::Unquoted, ReadyExt}, + debug_warn, err, trace, + utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; use database::{Database, Deserialized, Ignore, Interfix, Json, Map}; @@ -945,50 +945,37 @@ impl Service { /// Creates a short-lived login token, which can be used to log in using the /// `m.login.token` mechanism. 
- pub fn create_login_token(&self, user_id: &UserId, token: &str) -> Result { + pub fn create_login_token(&self, user_id: &UserId, token: &str) -> u64 { use std::num::Saturating as Sat; let expires_in = self.services.server.config.login_token_ttl; let expires_at = Sat(utils::millis_since_unix_epoch()) + Sat(expires_in); - let mut value = expires_at.0.to_be_bytes().to_vec(); - value.extend_from_slice(user_id.as_bytes()); + let value = (expires_at.0, user_id); + self.db.logintoken_expiresatuserid.raw_put(token, value); - self.db - .logintoken_expiresatuserid - .insert(token.as_bytes(), value.as_slice()); - - Ok(expires_in) + expires_in } /// Find out which user a login token belongs to. /// Removes the token to prevent double-use attacks. pub async fn find_from_login_token(&self, token: &str) -> Result { let Ok(value) = self.db.logintoken_expiresatuserid.get(token).await else { - return Err!(Request(Unauthorized("Login token is unrecognised"))); + return Err!(Request(Forbidden("Login token is unrecognised"))); }; - - let (expires_at_bytes, user_bytes) = value.split_at(0_u64.to_be_bytes().len()); - let expires_at = u64::from_be_bytes( - expires_at_bytes - .try_into() - .map_err(|e| err!(Database("expires_at in login_userid is invalid u64. {e}")))?, - ); + let (expires_at, user_id): (u64, OwnedUserId) = value.deserialized()?; if expires_at < utils::millis_since_unix_epoch() { - debug_warn!("Login token is expired, removing"); - self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + trace!(?user_id, ?token, "Removing expired login token"); - return Err!(Request(Unauthorized("Login token is expired"))); + self.db.logintoken_expiresatuserid.remove(token); + + return Err!(Request(Forbidden("Login token is expired"))); } - self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); + self.db.logintoken_expiresatuserid.remove(token); - let user_string = utils::string_from_bytes(user_bytes) - .map_err(|e| err!(Database("User ID in login_userid is invalid unicode. 
{e}")))?; - - OwnedUserId::try_from(user_string) - .map_err(|e| err!(Database("User ID in login_userid is invalid. {e}"))) + Ok(user_id) } /// Gets a specific user profile key From afcd0bfeef8e68232dd92aeddfd20397493a409e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 05:55:49 +0000 Subject: [PATCH 0523/1248] add deref_at macro util Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 631b2820..2bbadb50 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -90,6 +90,13 @@ macro_rules! ref_at { }; } +#[macro_export] +macro_rules! deref_at { + ($idx:tt) => { + |t| *t.$idx + }; +} + /// Functor for equality i.e. .is_some_and(is_equal!(2)) #[macro_export] macro_rules! is_equal_to { From e56d3c6cb3939bfe9b10c5a18a62104f85a02fef Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 05:56:06 +0000 Subject: [PATCH 0524/1248] add multi_get_statekey_from_short Signed-off-by: Jason Volk --- src/service/rooms/short/mod.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index b645f9f1..4a591592 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -196,6 +196,20 @@ pub async fn get_statekey_from_short( }) } +#[implement(Service)] +pub fn multi_get_statekey_from_short<'a, S>( + &'a self, + shortstatekey: S, +) -> impl Stream> + Send + 'a +where + S: Stream + Send + 'a, +{ + self.db + .shortstatekey_statekey + .qry_batch(shortstatekey) + .map(Deserialized::deserialized) +} + /// Returns (shortstatehash, already_existed) #[implement(Service)] pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (ShortStateHash, bool) { From 5167e1f06dce7bbf2cb521dbfb5ca28c15b2a547 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 9 Jan 2025 21:01:58 +0000 Subject: [PATCH 0525/1248] add option to disable listeners 
Signed-off-by: Jason Volk --- conduwuit-example.toml | 5 +++++ src/core/config/mod.rs | 5 +++++ src/router/serve/mod.rs | 14 ++++++++++---- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 96578da3..54143ced 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1503,6 +1503,11 @@ # #sender_workers = 0 +# Enables listener sockets; can be set to false to disable listening. This +# option is intended for developer/diagnostic purposes only. +# +#listening = true + [global.tls] # Path to a valid TLS certificate file. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 84b88c7c..cb42940b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1710,6 +1710,11 @@ pub struct Config { #[serde(default)] pub sender_workers: usize, + /// Enables listener sockets; can be set to false to disable listening. This + /// option is intended for developer/diagnostic purposes only. + #[serde(default = "true_fn")] + pub listening: bool, + #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs index f6262202..5c822f2b 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -6,7 +6,7 @@ mod unix; use std::sync::Arc; use axum_server::Handle as ServerHandle; -use conduwuit::Result; +use conduwuit::{err, Result}; use conduwuit_service::Services; use tokio::sync::broadcast; @@ -16,13 +16,19 @@ use super::layers; pub(super) async fn serve( services: Arc, handle: ServerHandle, - shutdown: broadcast::Receiver<()>, -) -> Result<()> { + mut shutdown: broadcast::Receiver<()>, +) -> Result { let server = &services.server; let config = &server.config; + if !config.listening { + return shutdown + .recv() + .await + .map_err(|e| err!(error!("channel error: {e}"))); + } + let addrs = config.get_bind_addrs(); let (app, _guard) = layers::build(&services)?; - 
if cfg!(unix) && config.unix_socket_path.is_some() { unix::serve(server, app, shutdown).await } else if config.tls.certs.is_some() { From 98d8e5c63cc7019d6ecbe235f380a3c954b9e6b5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 08:00:01 +0000 Subject: [PATCH 0526/1248] add standard error trait and thread access error conversions Signed-off-by: Jason Volk --- src/core/error/mod.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index ffa829d9..2468811e 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -4,7 +4,7 @@ mod panic; mod response; mod serde; -use std::{any::Any, borrow::Cow, convert::Infallible, fmt, sync::PoisonError}; +use std::{any::Any, borrow::Cow, convert::Infallible, sync::PoisonError}; pub use self::{err::visit, log::*}; @@ -17,7 +17,7 @@ pub enum Error { // std #[error(transparent)] - Fmt(#[from] fmt::Error), + Fmt(#[from] std::fmt::Error), #[error(transparent)] FromUtf8(#[from] std::string::FromUtf8Error), #[error("I/O error: {0}")] @@ -27,6 +27,10 @@ pub enum Error { #[error(transparent)] ParseInt(#[from] std::num::ParseIntError), #[error(transparent)] + Std(#[from] Box), + #[error(transparent)] + ThreadAccessError(#[from] std::thread::AccessError), + #[error(transparent)] TryFromInt(#[from] std::num::TryFromIntError), #[error(transparent)] TryFromSlice(#[from] std::array::TryFromSliceError), @@ -189,8 +193,10 @@ impl Error { pub fn is_not_found(&self) -> bool { self.status_code() == http::StatusCode::NOT_FOUND } } -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.message()) } +impl std::fmt::Debug for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.message()) + } } impl From> for Error { From 80832cb0bb2fc725164370bb808e192ee3480172 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 12:04:31 +0000 Subject: 
[PATCH 0527/1248] add checked math wrapper Signed-off-by: Jason Volk --- src/core/utils/math.rs | 3 ++- src/core/utils/math/tried.rs | 47 ++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 src/core/utils/math/tried.rs diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index a08cb206..ed157daf 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -1,10 +1,11 @@ mod expected; +mod tried; use std::{cmp, convert::TryFrom}; pub use checked_ops::checked_ops; -pub use self::expected::Expected; +pub use self::{expected::Expected, tried::Tried}; use crate::{debug::type_name, err, Err, Error, Result}; /// Checked arithmetic expression. Returns a Result diff --git a/src/core/utils/math/tried.rs b/src/core/utils/math/tried.rs new file mode 100644 index 00000000..2006d2d5 --- /dev/null +++ b/src/core/utils/math/tried.rs @@ -0,0 +1,47 @@ +use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; + +use crate::{checked, Result}; + +pub trait Tried { + #[inline] + fn try_add(self, rhs: Self) -> Result + where + Self: CheckedAdd + Sized, + { + checked!(self + rhs) + } + + #[inline] + fn try_sub(self, rhs: Self) -> Result + where + Self: CheckedSub + Sized, + { + checked!(self - rhs) + } + + #[inline] + fn try_mul(self, rhs: Self) -> Result + where + Self: CheckedMul + Sized, + { + checked!(self * rhs) + } + + #[inline] + fn try_div(self, rhs: Self) -> Result + where + Self: CheckedDiv + Sized, + { + checked!(self / rhs) + } + + #[inline] + fn try_rem(self, rhs: Self) -> Result + where + Self: CheckedRem + Sized, + { + checked!(self % rhs) + } +} + +impl Tried for T {} From 7a8ca8842af65435239ae9358587fdabb18ee6c0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 08:58:40 +0000 Subject: [PATCH 0528/1248] add jemallctl base; add trim to interface w/ console cmd Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 9 +++ src/admin/debug/mod.rs | 3 + 
src/core/alloc/default.rs | 3 + src/core/alloc/hardened.rs | 2 + src/core/alloc/je.rs | 127 +++++++++++++++++++++++++++++++++++- src/core/alloc/mod.rs | 6 +- 6 files changed, 144 insertions(+), 6 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 07daaf0a..d027fa73 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -923,3 +923,12 @@ pub(super) async fn database_stats( Ok(RoomMessageEventContent::notice_markdown(out)) } + +#[admin_command] +pub(super) async fn trim_memory(&self) -> Result { + conduwuit::alloc::trim()?; + + writeln!(self, "done").await?; + + Ok(RoomMessageEventContent::notice_plain("")) +} diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index c87dbb0a..cc2a8ddd 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -207,6 +207,9 @@ pub(super) enum DebugCommand { map: Option, }, + /// - Trim memory usage + TrimMemory, + /// - Developer test stubs #[command(subcommand)] #[allow(non_snake_case)] diff --git a/src/core/alloc/default.rs b/src/core/alloc/default.rs index 83bfca7d..5db02884 100644 --- a/src/core/alloc/default.rs +++ b/src/core/alloc/default.rs @@ -1,5 +1,8 @@ //! Default allocator with no special features +/// Always returns Ok +pub fn trim() -> crate::Result { Ok(()) } + /// Always returns None #[must_use] pub fn memory_stats() -> Option { None } diff --git a/src/core/alloc/hardened.rs b/src/core/alloc/hardened.rs index 335a3307..e2d9b28e 100644 --- a/src/core/alloc/hardened.rs +++ b/src/core/alloc/hardened.rs @@ -3,6 +3,8 @@ #[global_allocator] static HMALLOC: hardened_malloc_rs::HardenedMalloc = hardened_malloc_rs::HardenedMalloc; +pub fn trim() -> crate::Result { Ok(()) } + #[must_use] //TODO: get usage pub fn memory_usage() -> Option { None } diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 423f5408..b2c1fe85 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -1,18 +1,45 @@ //! 
jemalloc allocator -use std::ffi::{c_char, c_void}; +use std::{ + cell::OnceCell, + ffi::{c_char, c_void}, + fmt::{Debug, Write}, +}; +use arrayvec::ArrayVec; +use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; +use crate::{err, is_equal_to, utils::math::Tried, Result}; + +#[cfg(feature = "jemalloc_conf")] +#[no_mangle] +pub static malloc_conf: &[u8] = b"\ +metadata_thp:always\ +,percpu_arena:percpu\ +,background_thread:true\ +,max_background_threads:-1\ +,lg_extent_max_active_fit:4\ +,oversize_threshold:33554432\ +,tcache_max:2097152\ +,dirty_decay_ms:16000\ +,muzzy_decay_ms:144000\ +\0"; + #[global_allocator] static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; +type Key = ArrayVec; +type Name = ArrayVec; + +const KEY_SEGS: usize = 8; +const NAME_MAX: usize = 128; + #[must_use] #[cfg(feature = "jemalloc_stats")] pub fn memory_usage() -> Option { use mallctl::stats; - use tikv_jemalloc_ctl as mallctl; let mibs = |input: Result| { let input = input.unwrap_or_default(); @@ -62,7 +89,12 @@ pub fn memory_stats() -> Option { unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { // SAFETY: we have to trust the opaque points to our String - let res: &mut String = unsafe { opaque.cast::().as_mut().unwrap() }; + let res: &mut String = unsafe { + opaque + .cast::() + .as_mut() + .expect("failed to cast void* to &mut String") + }; // SAFETY: we have to trust the string is null terminated. let msg = unsafe { std::ffi::CStr::from_ptr(msg) }; @@ -70,3 +102,92 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { let msg = String::from_utf8_lossy(msg.to_bytes()); res.push_str(msg.as_ref()); } + +macro_rules! mallctl { + ($name:literal) => {{ + thread_local! 
{ + static KEY: OnceCell = OnceCell::default(); + }; + + KEY.with(|once| { + once.get_or_init(move || key($name).expect("failed to translate name into mib key")) + .clone() + }) + }}; +} + +pub fn trim() -> Result { set(&mallctl!("arena.4096.purge"), ()) } + +pub fn decay() -> Result { set(&mallctl!("arena.4096.purge"), ()) } + +pub fn set_by_name(name: &str, val: T) -> Result { set(&key(name)?, val) } + +pub fn get_by_name(name: &str) -> Result { get(&key(name)?) } + +pub mod this_thread { + use super::{get, key, set, Key, OnceCell, Result}; + + pub fn trim() -> Result { + let mut key = mallctl!("arena.0.purge"); + key[1] = arena_id()?.try_into()?; + set(&key, ()) + } + + pub fn decay() -> Result { + let mut key = mallctl!("arena.0.decay"); + key[1] = arena_id()?.try_into()?; + set(&key, ()) + } + + pub fn cache(enable: bool) -> Result { + set(&mallctl!("thread.tcache.enabled"), u8::from(enable)) + } + + pub fn flush() -> Result { set(&mallctl!("thread.tcache.flush"), ()) } + + pub fn allocated() -> Result { get::(&mallctl!("thread.allocated")) } + + pub fn deallocated() -> Result { get::(&mallctl!("thread.deallocated")) } + + pub fn arena_id() -> Result { get::(&mallctl!("thread.arena")) } +} + +fn set(key: &Key, val: T) -> Result +where + T: Copy + Debug, +{ + // SAFETY: T must be the exact expected type. + unsafe { mallctl::raw::write_mib(key.as_slice(), val) }.map_err(map_err) +} + +fn get(key: &Key) -> Result +where + T: Copy + Debug, +{ + // SAFETY: T must be perfectly valid to receive value. + unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) +} + +fn key(name: &str) -> Result { + // tikv asserts the output buffer length is tight to the number of required mibs + // so we slice that down here. 
+ let segs = name.chars().filter(is_equal_to!(&'.')).count().try_add(1)?; + + let name = self::name(name)?; + let mut buf = [0_usize; KEY_SEGS]; + mallctl::raw::name_to_mib(name.as_slice(), &mut buf[0..segs]) + .map_err(map_err) + .map(move |()| buf.into_iter().take(segs).collect()) +} + +fn name(name: &str) -> Result { + let mut buf = Name::new(); + buf.try_extend_from_slice(name.as_bytes())?; + buf.try_extend_from_slice(b"\0")?; + + Ok(buf) +} + +fn map_err(error: tikv_jemalloc_ctl::Error) -> crate::Error { + err!("mallctl: {}", error.to_string()) +} diff --git a/src/core/alloc/mod.rs b/src/core/alloc/mod.rs index 31eb033c..0ed1b1a6 100644 --- a/src/core/alloc/mod.rs +++ b/src/core/alloc/mod.rs @@ -4,7 +4,7 @@ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] pub mod je; #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -pub use je::{memory_stats, memory_usage}; +pub use je::{memory_stats, memory_usage, trim}; #[cfg(all(not(target_env = "msvc"), feature = "hardened_malloc", not(feature = "jemalloc")))] pub mod hardened; @@ -13,7 +13,7 @@ pub mod hardened; feature = "hardened_malloc", not(feature = "jemalloc") ))] -pub use hardened::{memory_stats, memory_usage}; +pub use hardened::{memory_stats, memory_usage, trim}; #[cfg(any( target_env = "msvc", @@ -24,4 +24,4 @@ pub mod default; target_env = "msvc", all(not(feature = "hardened_malloc"), not(feature = "jemalloc")) ))] -pub use default::{memory_stats, memory_usage}; +pub use default::{memory_stats, memory_usage, trim}; From 77d8e26efe3b46e73325386837a0f95107213842 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 19:08:54 +0000 Subject: [PATCH 0529/1248] integrate trim-on-park memory reclamation to runtime Signed-off-by: Jason Volk --- src/main/clap.rs | 12 ++++++++++++ src/main/runtime.rs | 22 +++++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/main/clap.rs b/src/main/clap.rs index ad5c815a..d3d40491 100644 --- a/src/main/clap.rs +++ 
b/src/main/clap.rs @@ -80,6 +80,18 @@ pub(crate) struct Args { default_missing_value = "true", )] pub(crate) worker_affinity: bool, + + /// Toggles feature to promote memory reclamation by the operating system + /// when tokio worker runs out of work. + #[arg( + long, + hide(true), + env = "CONDUWUIT_RUNTIME_GC_ON_PARK", + action = ArgAction::Set, + num_args = 0..=1, + require_equals(false), + )] + pub(crate) gc_on_park: Option, } /// Parse commandline arguments into structured data diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 3039ef1b..315336b0 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -9,6 +9,7 @@ use std::{ }; use conduwuit::{ + result::LogErr, utils::sys::compute::{nth_core_available, set_affinity}, Result, }; @@ -22,12 +23,17 @@ const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; static WORKER_AFFINITY: OnceLock = OnceLock::new(); +static GC_ON_PARK: OnceLock> = OnceLock::new(); pub(super) fn new(args: &Args) -> Result { WORKER_AFFINITY .set(args.worker_affinity) .expect("set WORKER_AFFINITY from program argument"); + GC_ON_PARK + .set(args.gc_on_park) + .expect("set GC_ON_PARK from program argument"); + let mut builder = Builder::new_multi_thread(); builder .enable_io() @@ -138,7 +144,21 @@ fn thread_unpark() {} name = %thread::current().name().unwrap_or("None"), ), )] -fn thread_park() {} +fn thread_park() { + match GC_ON_PARK + .get() + .as_ref() + .expect("GC_ON_PARK initialized by runtime::new()") + { + | Some(true) | None if cfg!(feature = "jemalloc_conf") => gc_on_park(), + | _ => (), + } +} + +fn gc_on_park() { + #[cfg(feature = "jemalloc")] + conduwuit::alloc::je::this_thread::decay().log_err().ok(); +} #[cfg(tokio_unstable)] #[tracing::instrument( From 3759d1be6ca01a687b29df9e68c24c7723a55427 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 11 Jan 2025 00:08:35 +0000 Subject: [PATCH 0530/1248] tweak per-column write_buffer down from default Signed-off-by: Jason Volk --- 
src/database/engine/cf_opts.rs | 4 +--- src/database/engine/descriptor.rs | 8 ++++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index a68eb8b6..8cb659ac 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -32,9 +32,7 @@ fn descriptor_cf_options( opts.set_min_write_buffer_number(1); opts.set_max_write_buffer_number(2); - if let Some(write_size) = desc.write_size { - opts.set_write_buffer_size(write_size); - } + opts.set_write_buffer_size(desc.write_size); opts.set_target_file_size_base(desc.file_size); opts.set_target_file_size_multiplier(desc.file_shape[0]); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 9cf57d8f..ef08945e 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -20,7 +20,7 @@ pub(crate) struct Descriptor { pub(crate) val_size_hint: Option, pub(crate) block_size: usize, pub(crate) index_size: usize, - pub(crate) write_size: Option, + pub(crate) write_size: usize, pub(crate) cache_size: usize, pub(crate) level_size: u64, pub(crate) level_shape: [i32; 7], @@ -46,7 +46,7 @@ pub(crate) static BASE: Descriptor = Descriptor { val_size_hint: None, block_size: 1024 * 4, index_size: 1024 * 4, - write_size: None, + write_size: 1024 * 1024 * 2, cache_size: 1024 * 1024 * 4, level_size: 1024 * 1024 * 8, level_shape: [1, 1, 1, 3, 7, 15, 31], @@ -66,11 +66,13 @@ pub(crate) static BASE: Descriptor = Descriptor { pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, + write_size: 1024 * 1024 * 32, ..BASE }; pub(crate) static SEQUENTIAL: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestLargestSeqFirst, + write_size: 1024 * 1024 * 64, level_size: 1024 * 1024 * 32, file_size: 1024 * 1024 * 2, ..BASE @@ -78,6 +80,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { pub(crate) static RANDOM_SMALL: Descriptor 
= Descriptor { compaction: CompactionStyle::Universal, + write_size: 1024 * 1024 * 16, level_size: 1024 * 512, file_size: 1024 * 128, ..RANDOM @@ -85,6 +88,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, + write_size: 1024 * 1024 * 16, level_size: 1024 * 1024, file_size: 1024 * 512, ..SEQUENTIAL From aad42bdaa0734d76ae390b7c865673a1bf68c4ca Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 16 Jan 2025 20:47:00 +0000 Subject: [PATCH 0531/1248] reduce block size on small tables Signed-off-by: Jason Volk --- src/database/engine/descriptor.rs | 5 +++++ src/database/maps.rs | 17 ++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index ef08945e..06e1a29b 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -83,6 +83,9 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { write_size: 1024 * 1024 * 16, level_size: 1024 * 512, file_size: 1024 * 128, + index_size: 512, + block_size: 512, + cache_shards: 64, ..RANDOM }; @@ -91,5 +94,7 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { write_size: 1024 * 1024 * 16, level_size: 1024 * 1024, file_size: 1024 * 512, + block_size: 512, + cache_shards: 64, ..SEQUENTIAL }; diff --git a/src/database/maps.rs b/src/database/maps.rs index 19e19955..82c5a4a8 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -58,6 +58,8 @@ pub(super) static MAPS: &[Descriptor] = &[ cache_disp: CacheDisp::SharedWith("pduid_pdu"), key_size_hint: Some(48), val_size_hint: Some(1488), + block_size: 1024, + index_size: 512, ..descriptor::RANDOM }, Descriptor { @@ -65,6 +67,8 @@ pub(super) static MAPS: &[Descriptor] = &[ cache_disp: CacheDisp::Unique, key_size_hint: Some(48), val_size_hint: Some(16), + block_size: 512, + index_size: 512, ..descriptor::RANDOM }, Descriptor { @@ 
-72,6 +76,8 @@ pub(super) static MAPS: &[Descriptor] = &[ cache_disp: CacheDisp::Unique, key_size_hint: Some(48), val_size_hint: Some(8), + block_size: 512, + index_size: 512, ..descriptor::RANDOM }, Descriptor { @@ -111,6 +117,8 @@ pub(super) static MAPS: &[Descriptor] = &[ cache_disp: CacheDisp::SharedWith("eventid_outlierpdu"), key_size_hint: Some(16), val_size_hint: Some(1520), + block_size: 2048, + index_size: 512, ..descriptor::SEQUENTIAL }, Descriptor { @@ -162,6 +170,7 @@ pub(super) static MAPS: &[Descriptor] = &[ Descriptor { name: "roomsynctoken_shortstatehash", val_size_hint: Some(8), + block_size: 512, ..descriptor::SEQUENTIAL }, Descriptor { @@ -243,6 +252,8 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "shorteventid_shortstatehash", key_size_hint: Some(8), val_size_hint: Some(8), + block_size: 512, + index_size: 512, ..descriptor::SEQUENTIAL }, Descriptor { @@ -292,7 +303,11 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "token_userdeviceid", ..descriptor::RANDOM_SMALL }, - Descriptor { name: "tokenids", ..descriptor::RANDOM }, + Descriptor { + name: "tokenids", + block_size: 512, + ..descriptor::RANDOM + }, Descriptor { name: "url_previews", ..descriptor::RANDOM From bab40a374707f9a50e2b440ab830d1a4456dd678 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 17 Jan 2025 20:34:56 +0000 Subject: [PATCH 0532/1248] enable hashing on large-block indexes Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 16 +++++++--------- src/database/engine/descriptor.rs | 5 +++-- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 8cb659ac..158fb3c8 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -1,8 +1,4 @@ -use conduwuit::{ - err, - utils::{math::Expected, BoolExt}, - Config, Result, -}; +use conduwuit::{err, utils::math::Expected, Config, Result}; use rocksdb::{ BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, 
Cache, DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, @@ -133,10 +129,12 @@ fn table_options(desc: &Descriptor, has_cache: bool) -> BlockBasedOptions { opts.set_partition_filters(true); opts.set_use_delta_encoding(false); opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch); - opts.set_data_block_index_type( - desc.block_index_hashing - .map_or(DataBlockIndexType::BinarySearch, || DataBlockIndexType::BinaryAndHash), - ); + + opts.set_data_block_index_type(match desc.block_index_hashing { + | None if desc.index_size > 512 => DataBlockIndexType::BinaryAndHash, + | Some(enable) if enable => DataBlockIndexType::BinaryAndHash, + | Some(_) | None => DataBlockIndexType::BinarySearch, + }); opts } diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 06e1a29b..d668862b 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -34,7 +34,7 @@ pub(crate) struct Descriptor { pub(crate) compression: CompressionType, pub(crate) compression_level: i32, pub(crate) bottommost_level: Option, - pub(crate) block_index_hashing: bool, + pub(crate) block_index_hashing: Option, pub(crate) cache_shards: u32, } @@ -60,7 +60,7 @@ pub(crate) static BASE: Descriptor = Descriptor { compression: CompressionType::Zstd, compression_level: 32767, bottommost_level: Some(32767), - block_index_hashing: false, + block_index_hashing: None, cache_shards: 64, }; @@ -96,5 +96,6 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { file_size: 1024 * 512, block_size: 512, cache_shards: 64, + block_index_hashing: Some(false), ..SEQUENTIAL }; From 819e35f81fdfb6d7045b24f7fdf7d647e75ffb22 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 08:01:12 +0000 Subject: [PATCH 0533/1248] remove mutex lock/unlock during sync iteration. 
Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a4dc0205..d6b9f15c 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -242,10 +242,6 @@ pub(crate) async fn build_sync_events( .state_cache .rooms_invited(sender_user) .fold_default(|mut invited_rooms: BTreeMap<_, _>, (room_id, invite_state)| async move { - // Get and drop the lock to wait for remaining operations to finish - let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; - drop(insert_lock); - let invite_count = services .rooms .state_cache @@ -271,10 +267,6 @@ pub(crate) async fn build_sync_events( .state_cache .rooms_knocked(sender_user) .fold_default(|mut knocked_rooms: BTreeMap<_, _>, (room_id, knock_state)| async move { - // Get and drop the lock to wait for remaining operations to finish - let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await; - drop(insert_lock); - let knock_count = services .rooms .state_cache @@ -470,10 +462,6 @@ async fn handle_left_room( full_state: bool, lazy_load_enabled: bool, ) -> Result> { - // Get and drop the lock to wait for remaining operations to finish - let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; - drop(insert_lock); - let left_count = services .rooms .state_cache @@ -627,11 +615,6 @@ async fn load_joined_room( lazy_load_send_redundant: bool, full_state: bool, ) -> Result<(JoinedRoom, HashSet, HashSet)> { - // Get and drop the lock to wait for remaining operations to finish - // This will make sure the we have all events until next_batch - let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await; - drop(insert_lock); - let sincecount = PduCount::Normal(since); let next_batchcount = PduCount::Normal(next_batch); From fc1170e12a17f47cb1c0b60fc873dcfae127ccea Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 
05:20:42 +0000 Subject: [PATCH 0534/1248] additional tracing span tweaks Signed-off-by: Jason Volk --- src/api/server/send.rs | 10 ++++- src/database/pool.rs | 5 +-- src/service/rooms/auth_chain/mod.rs | 4 +- src/service/rooms/event_handler/fetch_prev.rs | 2 +- .../rooms/event_handler/fetch_state.rs | 2 +- .../event_handler/handle_incoming_pdu.rs | 2 +- .../rooms/event_handler/handle_prev_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 2 +- .../rooms/event_handler/state_at_incoming.rs | 4 +- src/service/rooms/state_compressor/mod.rs | 43 +++++++++++-------- src/service/rooms/timeline/mod.rs | 4 +- 11 files changed, 47 insertions(+), 33 deletions(-) diff --git a/src/api/server/send.rs b/src/api/server/send.rs index c0c8a0c9..56a17c22 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -39,7 +39,15 @@ type ResolvedMap = BTreeMap>; /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. -#[tracing::instrument(skip_all, fields(%client, origin = body.origin().as_str()), name = "send")] +#[tracing::instrument( + name = "send", + level = "debug", + skip_all, + fields( + %client, + origin = body.origin().as_str() + ), +)] pub(crate) async fn send_transaction_message_route( State(services): State, InsecureClientIp(client): InsecureClientIp, diff --git a/src/database/pool.rs b/src/database/pool.rs index b972e763..f5600c36 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -12,7 +12,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - debug, debug_warn, defer, err, error, implement, + debug, debug_warn, err, error, implement, result::DebugInspect, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, @@ -271,9 +271,6 @@ async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { ), )] fn worker(self: Arc, id: usize, recv: Receiver) { - defer! 
{{ trace!("worker finished"); }} - trace!("worker spawned"); - self.worker_init(id); self.worker_loop(&recv); } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index f6534825..74064701 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -79,7 +79,7 @@ impl Service { Ok(event_ids) } - #[tracing::instrument(skip_all, name = "auth_chain")] + #[tracing::instrument(name = "auth_chain", level = "debug", skip_all)] pub async fn get_auth_chain<'a, I>( &'a self, room_id: &RoomId, @@ -179,7 +179,7 @@ impl Service { Ok(full_auth_chain) } - #[tracing::instrument(skip(self, room_id))] + #[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] async fn get_auth_chain_inner( &self, room_id: &RoomId, diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 0d64e98e..5966aeba 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -16,7 +16,7 @@ use super::check_room_id; #[implement(super::Service)] #[tracing::instrument( - level = "warn", + level = "debug", skip_all, fields(%origin), )] diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index cc4a3e46..0892655e 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -14,7 +14,7 @@ use crate::rooms::short::ShortStateKey; /// on the events #[implement(super::Service)] #[tracing::instrument( - level = "warn", + level = "debug", skip_all, fields(%origin), )] diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index c2e6ccc9..4e6f0b0c 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -41,7 +41,7 @@ use crate::rooms::timeline::RawPduId; 
#[implement(super::Service)] #[tracing::instrument( name = "pdu", - level = "warn", + level = "debug", skip_all, fields(%room_id, %event_id), )] diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index ad71c173..2bec4eba 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -14,7 +14,7 @@ use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName}; #[allow(clippy::too_many_arguments)] #[tracing::instrument( name = "prev", - level = "warn", + level = "debug", skip_all, fields(%prev_id), )] diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index f21f7b66..8640c582 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -18,7 +18,7 @@ use ruma::{ use crate::rooms::state_compressor::CompressedStateEvent; #[implement(super::Service)] -#[tracing::instrument(skip_all, name = "resolve")] +#[tracing::instrument(name = "resolve", level = "debug", skip_all)] pub async fn resolve_state( &self, room_id: &RoomId, diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index fa2ce1cd..9e7f8d2a 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -16,7 +16,7 @@ use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; // TODO: if we know the prev_events of the incoming event we can avoid the #[implement(super::Service)] // request and build the state from a known point and resolve if > 1 prev_event -#[tracing::instrument(skip_all, name = "state")] +#[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_degree_one( &self, incoming_pdu: &Arc, @@ -66,7 +66,7 @@ pub(super) async fn state_at_incoming_degree_one( } 
#[implement(super::Service)] -#[tracing::instrument(skip_all, name = "state")] +#[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_resolved( &self, incoming_pdu: &Arc, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index a61a66a1..532df360 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -7,7 +7,7 @@ use std::{ use arrayvec::ArrayVec; use conduwuit::{ - at, checked, debug, err, expected, utils, + at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, Result, }; @@ -117,35 +117,44 @@ impl crate::Service for Service { impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and /// removed diff for the selected shortstatehash and each parent layer. + #[tracing::instrument(name = "load", level = "debug", skip(self))] pub async fn load_shortstatehash_info( &self, shortstatehash: ShortStateHash, ) -> Result { - if let Some(r) = self - .stateinfo_cache - .lock() - .expect("locked") - .get_mut(&shortstatehash) - { + if let Some(r) = self.stateinfo_cache.lock()?.get_mut(&shortstatehash) { return Ok(r.clone()); } let stack = self.new_shortstatehash_info(shortstatehash).await?; - debug!( - ?shortstatehash, - len = %stack.len(), - "cache update" - ); - - self.stateinfo_cache - .lock() - .expect("locked") - .insert(shortstatehash, stack.clone()); + self.cache_shortstatehash_info(shortstatehash, stack.clone()) + .await?; Ok(stack) } + /// Returns a stack with info on shortstatehash, full state, added diff and + /// removed diff for the selected shortstatehash and each parent layer. 
+ #[tracing::instrument( + name = "cache", + level = "debug", + skip_all, + fields( + ?shortstatehash, + stack = stack.len(), + ), + )] + async fn cache_shortstatehash_info( + &self, + shortstatehash: ShortStateHash, + stack: ShortStateInfoVec, + ) -> Result { + self.stateinfo_cache.lock()?.insert(shortstatehash, stack); + + Ok(()) + } + async fn new_shortstatehash_info( &self, shortstatehash: ShortStateHash, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 3ebc432f..bd60e40e 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -259,7 +259,7 @@ impl Service { /// happens in `append_pdu`. /// /// Returns pdu id - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "debug", skip_all)] pub async fn append_pdu( &self, pdu: &PduEvent, @@ -942,7 +942,7 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. - #[tracing::instrument(skip_all)] + #[tracing::instrument(level = "debug", skip_all)] pub async fn append_incoming_pdu( &self, pdu: &PduEvent, From 96e85adc32f68e2080da9cf0088d9da84858747e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 01:34:14 +0000 Subject: [PATCH 0535/1248] use cache builder for row and table cache options add cache check using multi-get path Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 2 +- src/database/engine/context.rs | 19 +++++++--- src/database/engine/descriptor.rs | 2 ++ src/database/map/get.rs | 60 +++++++++++++++++++++---------- src/database/map/get_batch.rs | 41 ++++++++++++++++----- 5 files changed, 90 insertions(+), 34 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 158fb3c8..7b3a1d49 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -178,7 +178,7 @@ fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { .try_into() .expect("u32 to i32 conversion"); - 
debug_assert!(shard_bits <= 6, "cache shards limited to 64"); + debug_assert!(shard_bits <= 10, "cache shards probably too large"); let mut cache_opts = LruCacheOptions::default(); cache_opts.set_num_shard_bits(shard_bits); cache_opts.set_capacity(size); diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs index 76238f7d..04e08854 100644 --- a/src/database/engine/context.rs +++ b/src/database/engine/context.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{debug, utils::math::usize_from_f64, Result, Server}; -use rocksdb::{Cache, Env}; +use rocksdb::{Cache, Env, LruCacheOptions}; use crate::{or_else, pool::Pool}; @@ -25,12 +25,21 @@ impl Context { let config = &server.config; let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0; - let row_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; - let row_cache = Cache::new_lru_cache(row_cache_capacity_bytes); - + let col_shard_bits = 7; let col_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; - let col_cache = Cache::new_lru_cache(col_cache_capacity_bytes); + let row_shard_bits = 7; + let row_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; + + let mut row_cache_opts = LruCacheOptions::default(); + row_cache_opts.set_num_shard_bits(row_shard_bits); + row_cache_opts.set_capacity(row_cache_capacity_bytes); + let row_cache = Cache::new_lru_cache_opts(&row_cache_opts); + + let mut col_cache_opts = LruCacheOptions::default(); + col_cache_opts.set_num_shard_bits(col_shard_bits); + col_cache_opts.set_capacity(col_cache_capacity_bytes); + let col_cache = Cache::new_lru_cache_opts(&col_cache_opts); let col_cache: BTreeMap<_, _> = [("Shared".to_owned(), col_cache)].into(); let mut env = Env::new().or_else(or_else)?; diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index d668862b..234ca2bf 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -67,6 +67,7 @@ 
pub(crate) static BASE: Descriptor = Descriptor { pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, write_size: 1024 * 1024 * 32, + cache_shards: 128, ..BASE }; @@ -75,6 +76,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { write_size: 1024 * 1024 * 64, level_size: 1024 * 1024 * 32, file_size: 1024 * 1024 * 2, + cache_shards: 128, ..BASE }; diff --git a/src/database/map/get.rs b/src/database/map/get.rs index e64ef2ec..73182042 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -3,6 +3,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; use arrayvec::ArrayVec; use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; use futures::{future::ready, Future, FutureExt, TryFutureExt}; +use rocksdb::{DBPinnableSlice, ReadOptions}; use serde::Serialize; use tokio::task; @@ -90,6 +91,17 @@ where .boxed() } +/// Fetch a value from the cache without I/O. +#[implement(super::Map)] +#[tracing::instrument(skip(self, key), name = "cache", level = "trace")] +pub(crate) fn get_cached(&self, key: &K) -> Result>> +where + K: AsRef<[u8]> + Debug + ?Sized, +{ + let res = self.get_blocking_opts(key, &self.cache_read_options); + cached_handle_from(res) +} + /// Fetch a value from the database into cache, returning a reference-handle. /// The key is referenced directly to perform the query. This is a thread- /// blocking call. 
@@ -99,37 +111,47 @@ pub fn get_blocking(&self, key: &K) -> Result> where K: AsRef<[u8]> + ?Sized, { - self.db - .db - .get_pinned_cf_opt(&self.cf(), key, &self.read_options) + let res = self.get_blocking_opts(key, &self.read_options); + handle_from(res) +} + +#[implement(super::Map)] +fn get_blocking_opts( + &self, + key: &K, + read_options: &ReadOptions, +) -> Result>, rocksdb::Error> +where + K: AsRef<[u8]> + ?Sized, +{ + self.db.db.get_pinned_cf_opt(&self.cf(), key, read_options) +} + +#[inline] +pub(super) fn handle_from( + result: Result>, rocksdb::Error>, +) -> Result> { + result .map_err(map_err)? .map(Handle::from) .ok_or(err!(Request(NotFound("Not found in database")))) } -/// Fetch a value from the cache without I/O. -#[implement(super::Map)] -#[tracing::instrument(skip(self, key), name = "cache", level = "trace")] -pub(crate) fn get_cached(&self, key: &K) -> Result>> -where - K: AsRef<[u8]> + Debug + ?Sized, -{ - let res = self - .db - .db - .get_pinned_cf_opt(&self.cf(), key, &self.cache_read_options); - - match res { +#[inline] +pub(super) fn cached_handle_from( + result: Result>, rocksdb::Error>, +) -> Result>> { + match result { // cache hit; not found | Ok(None) => Err!(Request(NotFound("Not found in database"))), // cache hit; value found - | Ok(Some(res)) => Ok(Some(Handle::from(res))), + | Ok(Some(result)) => Ok(Some(Handle::from(result))), // cache miss; unknown - | Err(e) if is_incomplete(&e) => Ok(None), + | Err(error) if is_incomplete(&error) => Ok(None), // some other error occurred - | Err(e) => or_else(e), + | Err(error) => or_else(error), } } diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index 452697f1..ee9269e3 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,7 +1,7 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; use conduwuit::{ - err, implement, + implement, utils::{ stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, @@ -9,9 +9,11 @@ use 
conduwuit::{ Result, }; use futures::{Stream, StreamExt, TryStreamExt}; +use rocksdb::{DBPinnableSlice, ReadOptions}; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, util::map_err, Handle}; +use super::get::{cached_handle_from, handle_from}; +use crate::{keyval::KeyBuf, ser, Handle}; #[implement(super::Map)] #[tracing::instrument(skip(self, keys), level = "trace")] @@ -66,12 +68,40 @@ where .try_flatten() } +#[implement(super::Map)] +#[tracing::instrument(name = "batch_cached", level = "trace", skip_all)] +pub(crate) fn get_batch_cached<'a, I, K>( + &self, + keys: I, +) -> impl Iterator>>> + Send +where + I: Iterator + ExactSizeIterator + Send, + K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, +{ + self.get_batch_blocking_opts(keys, &self.cache_read_options) + .map(cached_handle_from) +} + #[implement(super::Map)] #[tracing::instrument(name = "batch_blocking", level = "trace", skip_all)] pub(crate) fn get_batch_blocking<'a, I, K>( &self, keys: I, ) -> impl Iterator>> + Send +where + I: Iterator + ExactSizeIterator + Send, + K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, +{ + self.get_batch_blocking_opts(keys, &self.read_options) + .map(handle_from) +} + +#[implement(super::Map)] +fn get_batch_blocking_opts<'a, I, K>( + &self, + keys: I, + read_options: &ReadOptions, +) -> impl Iterator>, rocksdb::Error>> + Send where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -80,15 +110,8 @@ where // comparator**. const SORTED: bool = false; - let read_options = &self.read_options; self.db .db .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) .into_iter() - .map(|result| { - result - .map_err(map_err)? 
- .map(Handle::from) - .ok_or(err!(Request(NotFound("Not found in database")))) - }) } From abf33013e37e89e1581c8ca9ab9b1411d51d7513 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 01:38:09 +0000 Subject: [PATCH 0536/1248] check-in additional database test related Signed-off-by: Jason Volk --- src/database/tests.rs | 97 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/src/database/tests.rs b/src/database/tests.rs index 0a42ad60..2f143698 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -35,6 +35,29 @@ fn ser_tuple() { assert_eq!(a, b); } +#[test] +fn ser_tuple_option() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut a = Vec::::new(); + a.push(0xFF); + a.extend_from_slice(user_id.as_bytes()); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let b: (Option<&RoomId>, &UserId) = (None, user_id); + let b = serialize_to_vec(&b).expect("failed to serialize tuple"); + assert_eq!(a, b); + + let bb: (Option<&RoomId>, &UserId) = (Some(room_id), user_id); + let bb = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bb); +} + #[test] #[should_panic(expected = "I/O error: failed to write whole buffer")] fn ser_overflow() { @@ -284,6 +307,8 @@ fn ser_array() { let b: u64 = 987_654; let arr: &[u64] = &[a, b]; + let vec: Vec = vec![a, b]; + let arv: ArrayVec = [a, b].into(); let mut v = Vec::new(); v.extend_from_slice(&a.to_be_bytes()); @@ -291,4 +316,76 @@ fn ser_array() { let s = serialize_to_vec(arr).expect("failed to serialize"); assert_eq!(&s, &v, "serialization does not match"); + + let s = serialize_to_vec(arv.as_slice()).expect("failed to serialize arrayvec"); + assert_eq!(&s, &v, "arrayvec serialization does not match"); + + let s = serialize_to_vec(&vec).expect("failed to serialize 
vec"); + assert_eq!(&s, &v, "vec serialization does not match"); +} + +#[cfg(todo)] +#[test] +fn de_array() { + let a: u64 = 123_456; + let b: u64 = 987_654; + + let mut v: Vec = Vec::new(); + v.extend_from_slice(&a.to_be_bytes()); + v.extend_from_slice(&b.to_be_bytes()); + + let arv: ArrayVec = de::from_slice::>(v.as_slice()) + .map(TryInto::try_into) + .expect("failed to deserialize to arrayvec") + .expect("failed to deserialize into"); + + assert_eq!(arv[0], a, "deserialized arv [0] does not match"); + assert_eq!(arv[1], b, "deserialized arv [1] does not match"); + + let arr: [u64; 2] = de::from_slice::<[u64; 2]>(v.as_slice()) + .map(TryInto::try_into) + .expect("failed to deserialize to array") + .expect("failed to deserialize into"); + + assert_eq!(arr[0], a, "deserialized arr [0] does not match"); + assert_eq!(arr[1], b, "deserialized arr [1] does not match"); + + let vec: Vec = de::from_slice(v.as_slice()).expect("failed to deserialize to vec"); + + assert_eq!(vec[0], a, "deserialized vec [0] does not match"); + assert_eq!(vec[1], b, "deserialized vec [1] does not match"); +} + +#[cfg(todo)] +#[test] +fn de_complex() { + type Key<'a> = (&'a UserId, ArrayVec, &'a RoomId); + + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let a: u64 = 123_456; + let b: u64 = 987_654; + + let mut v = Vec::new(); + v.extend_from_slice(user_id.as_bytes()); + v.extend_from_slice(b"\xFF"); + v.extend_from_slice(&a.to_be_bytes()); + v.extend_from_slice(&b.to_be_bytes()); + v.extend_from_slice(b"\xFF"); + v.extend_from_slice(room_id.as_bytes()); + + let arr: &[u64] = &[a, b]; + let key = (user_id, arr, room_id); + let s = serialize_to_vec(&key).expect("failed to serialize"); + + assert_eq!(&s, &v, "serialization does not match"); + + let key = (user_id, [a, b].into(), room_id); + let arr: Key<'_> = de::from_slice(&v).expect("failed to deserialize"); + + assert_eq!(arr, key, "deserialization does not 
match"); + + let arr: Key<'_> = de::from_slice(&s).expect("failed to deserialize"); + + assert_eq!(arr, key, "deserialization of serialization does not match"); } From 8141ca34448452fd8fd910c85626a2568a3ebe55 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 4 Jan 2025 16:57:07 +0000 Subject: [PATCH 0537/1248] refactor admin outputs to asyncwrite Signed-off-by: Jason Volk --- src/admin/admin.rs | 12 +- src/admin/check/mod.rs | 15 +- src/admin/command.rs | 30 +- src/admin/debug/commands.rs | 39 ++- src/admin/debug/mod.rs | 12 +- src/admin/debug/tester.rs | 2 +- src/admin/processor.rs | 43 ++- src/admin/query/account_data.rs | 74 ++--- src/admin/query/appservice.rs | 15 +- src/admin/query/globals.rs | 24 +- src/admin/query/mod.rs | 20 +- src/admin/query/presence.rs | 16 +- src/admin/query/pusher.rs | 12 +- src/admin/query/raw.rs | 457 ++++++++++++++++++++++++++++ src/admin/query/resolver.rs | 82 ++--- src/admin/query/room_alias.rs | 20 +- src/admin/query/room_state_cache.rs | 47 +-- src/admin/query/sending.rs | 9 +- src/admin/query/short.rs | 45 +++ src/admin/query/users.rs | 46 +++ src/admin/room/alias.rs | 9 +- src/admin/room/directory.rs | 8 +- src/admin/user/commands.rs | 51 ++-- src/macros/admin.rs | 16 +- 24 files changed, 877 insertions(+), 227 deletions(-) create mode 100644 src/admin/query/raw.rs create mode 100644 src/admin/query/short.rs diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 9097a613..b6de1ec6 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -1,6 +1,5 @@ use clap::Parser; use conduwuit::Result; -use ruma::events::room::message::RoomMessageEventContent; use crate::{ appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command, @@ -50,13 +49,10 @@ pub(super) enum AdminCommand { } #[tracing::instrument(skip_all, name = "command")] -pub(super) async fn process( - command: AdminCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(command: AdminCommand, context: &Command<'_>) 
-> Result { use AdminCommand::*; - Ok(match command { + match command { | Appservices(command) => appservice::process(command, context).await?, | Media(command) => media::process(command, context).await?, | Users(command) => user::process(command, context).await?, @@ -66,5 +62,7 @@ pub(super) async fn process( | Debug(command) => debug::process(command, context).await?, | Query(command) => query::process(command, context).await?, | Check(command) => check::process(command, context).await?, - }) + }; + + Ok(()) } diff --git a/src/admin/check/mod.rs b/src/admin/check/mod.rs index 4790a6de..30b335c4 100644 --- a/src/admin/check/mod.rs +++ b/src/admin/check/mod.rs @@ -2,20 +2,11 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::events::room::message::RoomMessageEventContent; -use crate::Command; +use crate::admin_command_dispatch; +#[admin_command_dispatch] #[derive(Debug, Subcommand)] pub(super) enum CheckCommand { - AllUsers, -} - -pub(super) async fn process( - command: CheckCommand, - context: &Command<'_>, -) -> Result { - Ok(match command { - | CheckCommand::AllUsers => context.check_all_users().await?, - }) + CheckAllUsers, } diff --git a/src/admin/command.rs b/src/admin/command.rs index 5277b976..5ad9e581 100644 --- a/src/admin/command.rs +++ b/src/admin/command.rs @@ -1,6 +1,12 @@ -use std::time::SystemTime; +use std::{fmt, time::SystemTime}; +use conduwuit::Result; use conduwuit_service::Services; +use futures::{ + io::{AsyncWriteExt, BufWriter}, + lock::Mutex, + Future, FutureExt, +}; use ruma::EventId; pub(crate) struct Command<'a> { @@ -8,4 +14,26 @@ pub(crate) struct Command<'a> { pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, pub(crate) reply_id: Option<&'a EventId>, + pub(crate) output: Mutex>>, +} + +impl Command<'_> { + pub(crate) fn write_fmt( + &self, + arguments: fmt::Arguments<'_>, + ) -> impl Future + Send + '_ { + let buf = format!("{arguments}"); + self.output.lock().then(|mut output| async move { + 
output.write_all(buf.as_bytes()).await.map_err(Into::into) + }) + } + + pub(crate) fn write_str<'a>( + &'a self, + s: &'a str, + ) -> impl Future + Send + 'a { + self.output.lock().then(move |mut output| async move { + output.write_all(s.as_bytes()).await.map_err(Into::into) + }) + } } diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index d027fa73..b6189f6a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,8 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, Result, + debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, PduId, + RawPduId, Result, }; use futures::{FutureExt, StreamExt}; use ruma::{ @@ -15,7 +16,10 @@ use ruma::{ CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName, }; -use service::rooms::state_compressor::HashSetCompressStateEvent; +use service::rooms::{ + short::{ShortEventId, ShortRoomId}, + state_compressor::HashSetCompressStateEvent, +}; use tracing_subscriber::EnvFilter; use crate::admin_command; @@ -131,6 +135,35 @@ pub(super) async fn get_pdu(&self, event_id: Box) -> Result Result { + let pdu_id: RawPduId = PduId { + shortroomid, + shorteventid: shorteventid.into(), + } + .into(); + + let pdu_json = self + .services + .rooms + .timeline + .get_pdu_json_from_id(&pdu_id) + .await; + + match pdu_json { + | Ok(json) => { + let json_text = + serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",))) + }, + | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), + } +} + #[admin_command] pub(super) async fn get_remote_pdu_list( &self, @@ -895,7 +928,7 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result, }, + /// - Retrieve and print a PDU by PduId from the conduwuit database + GetShortPdu { + /// 
Shortroomid integer + shortroomid: ShortRoomId, + + /// Shorteventid integer + shorteventid: ShortEventId, + }, + /// - Attempts to retrieve a PDU from a remote server. Inserts it into our /// database/timeline if found and we do not have this PDU already /// (following normal event auth rules, handles it as an incoming PDU). diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 5f922ece..5200fa0d 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -31,7 +31,7 @@ async fn failure(&self) -> Result { #[admin_command] async fn tester(&self) -> Result { - Ok(RoomMessageEventContent::notice_plain("completed")) + Ok(RoomMessageEventContent::notice_plain("legacy")) } #[inline(never)] diff --git a/src/admin/processor.rs b/src/admin/processor.rs index ed7d5ed1..eefcdcd6 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -1,5 +1,6 @@ use std::{ fmt::Write, + mem::take, panic::AssertUnwindSafe, sync::{Arc, Mutex}, time::SystemTime, @@ -17,7 +18,7 @@ use conduwuit::{ utils::string::{collect_stream, common_prefix}, warn, Error, Result, }; -use futures::future::FutureExt; +use futures::{future::FutureExt, io::BufWriter, AsyncWriteExt}; use ruma::{ events::{ relation::InReplyTo, @@ -62,9 +63,32 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce body: &body, timer: SystemTime::now(), reply_id: input.reply_id.as_deref(), + output: BufWriter::new(Vec::new()).into(), }; - process(&context, command, &args).await + let (result, mut logs) = process(&context, command, &args).await; + + let output = &mut context.output.lock().await; + output.flush().await.expect("final flush of output stream"); + + let output = + String::from_utf8(take(output.get_mut())).expect("invalid utf8 in command output stream"); + + match result { + | Ok(()) if logs.is_empty() => + Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))), + + | Ok(()) => { + logs.write_str(output.as_str()).expect("output 
buffer"); + Ok(Some(reply(RoomMessageEventContent::notice_markdown(logs), context.reply_id))) + }, + | Err(error) => { + write!(&mut logs, "Command failed with error:\n```\n{error:#?}\n```") + .expect("output buffer"); + + Err(reply(RoomMessageEventContent::notice_markdown(logs), context.reply_id)) + }, + } } fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { @@ -81,7 +105,7 @@ async fn process( context: &Command<'_>, command: AdminCommand, args: &[String], -) -> ProcessorResult { +) -> (Result, String) { let (capture, logs) = capture_create(context); let capture_scope = capture.start(); @@ -104,18 +128,7 @@ async fn process( } drop(logs); - match result { - | Ok(content) => { - write!(&mut output, "{0}", content.body()) - .expect("failed to format command result to output buffer"); - Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))) - }, - | Err(error) => { - write!(&mut output, "Command failed with error:\n```\n{error:#?}\n```") - .expect("failed to format command result to output"); - Err(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id)) - }, - } + (result, output) } fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index 43762789..b75d8234 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -3,8 +3,9 @@ use conduwuit::Result; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; -use crate::Command; +use crate::{admin_command, admin_command_dispatch}; +#[admin_command_dispatch] #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/account_data.rs pub(crate) enum AccountDataCommand { @@ -19,7 +20,7 @@ pub(crate) enum AccountDataCommand { }, /// - Searches the account data for a specific kind. 
- Get { + AccountDataGet { /// Full user ID user_id: Box, /// Account data event type @@ -29,38 +30,43 @@ pub(crate) enum AccountDataCommand { }, } -/// All the getters and iterators from src/database/key_value/account_data.rs -pub(super) async fn process( - subcommand: AccountDataCommand, - context: &Command<'_>, +#[admin_command] +async fn changes_since( + &self, + user_id: Box, + since: u64, + room_id: Option>, ) -> Result { - let services = context.services; + let timer = tokio::time::Instant::now(); + let results: Vec<_> = self + .services + .account_data + .changes_since(room_id.as_deref(), &user_id, since) + .collect() + .await; + let query_time = timer.elapsed(); - match subcommand { - | AccountDataCommand::ChangesSince { user_id, since, room_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .account_data - .changes_since(room_id.as_deref(), &user_id, since) - .collect() - .await; - let query_time = timer.elapsed(); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) - }, - | AccountDataCommand::Get { user_id, kind, room_id } => { - let timer = tokio::time::Instant::now(); - let results = services - .account_data - .get_raw(room_id.as_deref(), &user_id, &kind) - .await; - let query_time = timer.elapsed(); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) - }, - } + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + ))) +} + +#[admin_command] +async fn account_data_get( + &self, + user_id: Box, + kind: String, + room_id: Option>, +) -> Result { + let timer = tokio::time::Instant::now(); + let results = self + .services + .account_data + .get_raw(room_id.as_deref(), &user_id, &kind) + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query 
completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + ))) } diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index fe4861bc..f9e1fd2c 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -1,6 +1,5 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::events::room::message::RoomMessageEventContent; use crate::Command; @@ -18,10 +17,7 @@ pub(crate) enum AppserviceCommand { } /// All the getters and iterators from src/database/key_value/appservice.rs -pub(super) async fn process( - subcommand: AppserviceCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -31,18 +27,15 @@ pub(super) async fn process( let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | AppserviceCommand::All => { let timer = tokio::time::Instant::now(); let results = services.appservice.all().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, } + .await } diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index e356453f..1642f7cd 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{events::room::message::RoomMessageEventContent, ServerName}; +use ruma::ServerName; use crate::Command; @@ -21,10 +21,7 @@ pub(crate) enum GlobalsCommand { } /// All the getters and iterators from src/database/key_value/globals.rs -pub(super) async fn process( - subcommand: GlobalsCommand, - context: 
&Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -33,36 +30,29 @@ pub(super) async fn process( let results = services.globals.db.database_version().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | GlobalsCommand::CurrentCount => { let timer = tokio::time::Instant::now(); let results = services.globals.db.current_count(); let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | GlobalsCommand::LastCheckForUpdatesId => { let timer = tokio::time::Instant::now(); let results = services.updates.last_check_for_updates_id().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | GlobalsCommand::SigningKeysFor { origin } => { let timer = tokio::time::Instant::now(); let results = services.server_keys.verify_keys_for(&origin).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, } + .await } diff --git a/src/admin/query/mod.rs b/src/admin/query/mod.rs index ab269a40..da27eb1d 100644 --- a/src/admin/query/mod.rs +++ b/src/admin/query/mod.rs @@ -3,10 +3,13 @@ mod appservice; mod globals; mod presence; mod pusher; +mod raw; mod resolver; mod room_alias; mod 
room_state_cache; +mod room_timeline; mod sending; +mod short; mod users; use clap::Subcommand; @@ -14,9 +17,10 @@ use conduwuit::Result; use self::{ account_data::AccountDataCommand, appservice::AppserviceCommand, globals::GlobalsCommand, - presence::PresenceCommand, pusher::PusherCommand, resolver::ResolverCommand, + presence::PresenceCommand, pusher::PusherCommand, raw::RawCommand, resolver::ResolverCommand, room_alias::RoomAliasCommand, room_state_cache::RoomStateCacheCommand, - sending::SendingCommand, users::UsersCommand, + room_timeline::RoomTimelineCommand, sending::SendingCommand, short::ShortCommand, + users::UsersCommand, }; use crate::admin_command_dispatch; @@ -44,6 +48,10 @@ pub(super) enum QueryCommand { #[command(subcommand)] RoomStateCache(RoomStateCacheCommand), + /// - rooms/timeline iterators and getters + #[command(subcommand)] + RoomTimeline(RoomTimelineCommand), + /// - globals.rs iterators and getters #[command(subcommand)] Globals(GlobalsCommand), @@ -63,4 +71,12 @@ pub(super) enum QueryCommand { /// - pusher service #[command(subcommand)] Pusher(PusherCommand), + + /// - short service + #[command(subcommand)] + Short(ShortCommand), + + /// - raw service + #[command(subcommand)] + Raw(RawCommand), } diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 0de6b696..38272749 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, UserId}; +use ruma::UserId; use crate::Command; @@ -23,10 +23,7 @@ pub(crate) enum PresenceCommand { } /// All the getters and iterators in key_value/presence.rs -pub(super) async fn process( - subcommand: PresenceCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -35,9 +32,7 @@ pub(super) 
async fn process( let results = services.presence.get_presence(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | PresenceCommand::PresenceSince { since } => { let timer = tokio::time::Instant::now(); @@ -49,9 +44,8 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, } + .await } diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 55532e54..34edf4db 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{events::room::message::RoomMessageEventContent, UserId}; +use ruma::UserId; use crate::Command; @@ -13,10 +13,7 @@ pub(crate) enum PusherCommand { }, } -pub(super) async fn process( - subcommand: PusherCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -25,9 +22,8 @@ pub(super) async fn process( let results = services.pusher.get_pushers(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, } + .await } diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs new file mode 100644 index 00000000..678d21c9 --- /dev/null +++ b/src/admin/query/raw.rs @@ -0,0 +1,457 @@ +use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; + +use clap::Subcommand; +use 
conduwuit::{ + apply, at, + utils::{ + stream::{ReadyExt, TryIgnore}, + string::EMPTY, + IterStream, + }, + Result, +}; +use futures::{FutureExt, StreamExt, TryStreamExt}; +use ruma::events::room::message::RoomMessageEventContent; +use tokio::time::Instant; + +use crate::{admin_command, admin_command_dispatch}; + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +#[allow(clippy::enum_variant_names)] +/// Query tables from database +pub(crate) enum RawCommand { + /// - List database maps + RawMaps, + + /// - Raw database query + RawGet { + /// Map name + map: String, + + /// Key + key: String, + }, + + /// - Raw database keys iteration + RawKeys { + /// Map name + map: String, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database key size breakdown + RawKeysSizes { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database keys total bytes + RawKeysTotal { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database values size breakdown + RawValsSizes { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database values total bytes + RawValsTotal { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database items iteration + RawIter { + /// Map name + map: String, + + /// Key prefix + prefix: Option, + }, + + /// - Raw database keys iteration + RawKeysFrom { + /// Map name + map: String, + + /// Lower-bound + start: String, + + /// Limit + #[arg(short, long)] + limit: Option, + }, + + /// - Raw database items iteration + RawIterFrom { + /// Map name + map: String, + + /// Lower-bound + start: String, + + /// Limit + #[arg(short, long)] + limit: Option, + }, + + /// - Raw database record count + RawCount { + /// Map name + map: Option, + + /// Key prefix + prefix: Option, + }, +} + +#[admin_command] +pub(super) async fn raw_count( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = 
prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let count = maps + .iter() + .stream() + .then(|map| map.raw_count_prefix(&prefix)) + .ready_fold(0_usize, usize::saturating_add) + .await; + + let query_time = timer.elapsed(); + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_keys( + &self, + map: String, + prefix: Option, +) -> Result { + writeln!(self, "```").boxed().await?; + + let map = self.services.db.get(map.as_str())?; + let timer = Instant::now(); + prefix + .as_deref() + .map_or_else(|| map.raw_keys().boxed(), |prefix| map.raw_keys_prefix(prefix).boxed()) + .map_ok(String::from_utf8_lossy) + .try_for_each(|str| writeln!(self, "{str:?}")) + .boxed() + .await?; + + let query_time = timer.elapsed(); + let out = format!("\n```\n\nQuery completed in {query_time:?}"); + self.write_str(out.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_keys_sizes( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let result = maps + .iter() + .stream() + .map(|map| map.raw_keys_prefix(&prefix)) + .flatten() + .ignore_err() + .map(<[u8]>::len) + 
.ready_fold_default(|mut map: BTreeMap<_, usize>, len| { + let entry = map.entry(len).or_default(); + *entry = entry.saturating_add(1); + map + }) + .await; + + let query_time = timer.elapsed(); + let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); + self.write_str(result.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_keys_total( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let result = maps + .iter() + .stream() + .map(|map| map.raw_keys_prefix(&prefix)) + .flatten() + .ignore_err() + .map(<[u8]>::len) + .ready_fold_default(|acc: usize, len| acc.saturating_add(len)) + .await; + + let query_time = timer.elapsed(); + + self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_vals_sizes( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let result = maps + .iter() + .stream() + .map(|map| map.raw_stream_prefix(&prefix)) + .flatten() + .ignore_err() + .map(at!(1)) + .map(<[u8]>::len) + .ready_fold_default(|mut map: BTreeMap<_, usize>, len| { + let 
entry = map.entry(len).or_default(); + *entry = entry.saturating_add(1); + map + }) + .await; + + let query_time = timer.elapsed(); + let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); + self.write_str(result.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_vals_total( + &self, + map: Option, + prefix: Option, +) -> Result { + let prefix = prefix.as_deref().unwrap_or(EMPTY); + + let default_all_maps = map + .is_none() + .then(|| self.services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .iter() + .map(String::as_str) + .chain(default_all_maps) + .map(|map| self.services.db.get(map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + let timer = Instant::now(); + let result = maps + .iter() + .stream() + .map(|map| map.raw_stream_prefix(&prefix)) + .flatten() + .ignore_err() + .map(at!(1)) + .map(<[u8]>::len) + .ready_fold_default(|acc: usize, len| acc.saturating_add(len)) + .await; + + let query_time = timer.elapsed(); + + self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_iter( + &self, + map: String, + prefix: Option, +) -> Result { + writeln!(self, "```").await?; + + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + prefix + .as_deref() + .map_or_else(|| map.raw_stream().boxed(), |prefix| map.raw_stream_prefix(prefix).boxed()) + .map_ok(apply!(2, String::from_utf8_lossy)) + .map_ok(apply!(2, Cow::into_owned)) + .try_for_each(|keyval| writeln!(self, "{keyval:?}")) + .boxed() + .await?; + + let query_time = timer.elapsed(); + self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_keys_from( + &self, + map: String, + start: String, + 
limit: Option, +) -> Result { + writeln!(self, "```").await?; + + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + map.raw_keys_from(&start) + .map_ok(String::from_utf8_lossy) + .take(limit.unwrap_or(usize::MAX)) + .try_for_each(|str| writeln!(self, "{str:?}")) + .boxed() + .await?; + + let query_time = timer.elapsed(); + self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) +} + +#[admin_command] +pub(super) async fn raw_iter_from( + &self, + map: String, + start: String, + limit: Option, +) -> Result { + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + let result = map + .raw_stream_from(&start) + .map_ok(apply!(2, String::from_utf8_lossy)) + .map_ok(apply!(2, Cow::into_owned)) + .take(limit.unwrap_or(usize::MAX)) + .try_collect::>() + .await?; + + let query_time = timer.elapsed(); + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) +} + +#[admin_command] +pub(super) async fn raw_get(&self, map: String, key: String) -> Result { + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + let handle = map.get(&key).await?; + let query_time = timer.elapsed(); + let result = String::from_utf8_lossy(&handle); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" + ))) +} + +#[admin_command] +pub(super) async fn raw_maps(&self) -> Result { + let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect(); + + Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) +} diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 3b950d13..b53661fc 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -28,56 +28,66 @@ async fn destinations_cache( ) -> Result { use service::resolver::cache::CachedDest; + writeln!(self, "| Server Name | 
Destination | Hostname | Expires |").await?; + writeln!(self, "| ----------- | ----------- | -------- | ------- |").await?; + let mut out = String::new(); - writeln!(out, "| Server Name | Destination | Hostname | Expires |")?; - writeln!(out, "| ----------- | ----------- | -------- | ------- |")?; - let row = |(name, &CachedDest { ref dest, ref host, expire })| { - let expire = time::format(expire, "%+"); - writeln!(out, "| {name} | {dest} | {host} | {expire} |").expect("wrote line"); - }; + { + let map = self + .services + .resolver + .cache + .destinations + .read() + .expect("locked"); - let map = self - .services - .resolver - .cache - .destinations - .read() - .expect("locked"); + for (name, &CachedDest { ref dest, ref host, expire }) in map.iter() { + if let Some(server_name) = server_name.as_ref() { + if name != server_name { + continue; + } + } - if let Some(server_name) = server_name.as_ref() { - map.get_key_value(server_name).map(row); - } else { - map.iter().for_each(row); + let expire = time::format(expire, "%+"); + writeln!(out, "| {name} | {dest} | {host} | {expire} |")?; + } } - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(out.as_str()).await?; + + Ok(RoomMessageEventContent::notice_plain("")) } #[admin_command] async fn overrides_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedOverride; + writeln!(self, "| Server Name | IP | Port | Expires |").await?; + writeln!(self, "| ----------- | --- | ----:| ------- |").await?; + let mut out = String::new(); - writeln!(out, "| Server Name | IP | Port | Expires |")?; - writeln!(out, "| ----------- | --- | ----:| ------- |")?; - let row = |(name, &CachedOverride { ref ips, port, expire })| { - let expire = time::format(expire, "%+"); - writeln!(out, "| {name} | {ips:?} | {port} | {expire} |").expect("wrote line"); - }; + { + let map = self + .services + .resolver + .cache + .overrides + .read() + .expect("locked"); - let map = self - .services - .resolver - 
.cache - .overrides - .read() - .expect("locked"); + for (name, &CachedOverride { ref ips, port, expire }) in map.iter() { + if let Some(server_name) = server_name.as_ref() { + if name != server_name { + continue; + } + } - if let Some(server_name) = server_name.as_ref() { - map.get_key_value(server_name).map(row); - } else { - map.iter().for_each(row); + let expire = time::format(expire, "%+"); + writeln!(out, "| {name} | {ips:?} | {port} | {expire} |")?; + } } - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(out.as_str()).await?; + + Ok(RoomMessageEventContent::notice_plain("")) } diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs index e1bf1622..2d4d8104 100644 --- a/src/admin/query/room_alias.rs +++ b/src/admin/query/room_alias.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId}; +use ruma::{RoomAliasId, RoomId}; use crate::Command; @@ -24,10 +24,7 @@ pub(crate) enum RoomAliasCommand { } /// All the getters and iterators in src/database/key_value/rooms/alias.rs -pub(super) async fn process( - subcommand: RoomAliasCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) -> Result { let services = context.services; match subcommand { @@ -36,9 +33,7 @@ pub(super) async fn process( let results = services.rooms.alias.resolve_local_alias(&alias).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, | RoomAliasCommand::LocalAliasesForRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -51,9 +46,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - 
"Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```") }, | RoomAliasCommand::AllLocalAliases => { let timer = tokio::time::Instant::now(); @@ -66,9 +59,8 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```" - ))) + write!(context, "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```") }, } + .await } diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index cd7f5af7..71dadc99 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,5 +1,5 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Error, Result}; use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId}; @@ -76,13 +76,10 @@ pub(crate) enum RoomStateCacheCommand { }, } -pub(super) async fn process( - subcommand: RoomStateCacheCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result { let services = context.services; - match subcommand { + let c = match subcommand { | RoomStateCacheCommand::ServerInRoom { server, room_id } => { let timer = tokio::time::Instant::now(); let result = services @@ -92,7 +89,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" ))) }, @@ -107,7 +104,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in 
{query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -122,7 +119,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -137,7 +134,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -152,7 +149,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -167,7 +164,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -176,7 +173,7 @@ pub(super) async fn process( let results = services.rooms.state_cache.room_joined_count(&room_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -189,7 +186,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -204,7 +201,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - 
Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -219,7 +216,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -232,7 +229,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -245,7 +242,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -260,7 +257,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -274,7 +271,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -288,7 +285,7 @@ pub(super) async fn process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, @@ -301,9 +298,13 @@ pub(super) async fn 
process( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( + Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" ))) }, - } + }?; + + context.write_str(c.body()).await?; + + Ok(()) } diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 3edbbe87..8c6fb25f 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -62,7 +62,14 @@ pub(crate) enum SendingCommand { } /// All the getters and iterators in key_value/sending.rs -pub(super) async fn process( +pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result { + let c = reprocess(subcommand, context).await?; + context.write_str(c.body()).await?; + Ok(()) +} + +/// All the getters and iterators in key_value/sending.rs +pub(super) async fn reprocess( subcommand: SendingCommand, context: &Command<'_>, ) -> Result { diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs new file mode 100644 index 00000000..7f0f3449 --- /dev/null +++ b/src/admin/query/short.rs @@ -0,0 +1,45 @@ +use clap::Subcommand; +use conduwuit::Result; +use ruma::{events::room::message::RoomMessageEventContent, OwnedEventId, OwnedRoomOrAliasId}; + +use crate::{admin_command, admin_command_dispatch}; + +#[admin_command_dispatch] +#[derive(Debug, Subcommand)] +/// Query tables from database +pub(crate) enum ShortCommand { + ShortEventId { + event_id: OwnedEventId, + }, + + ShortRoomId { + room_id: OwnedRoomOrAliasId, + }, +} + +#[admin_command] +pub(super) async fn short_event_id( + &self, + event_id: OwnedEventId, +) -> Result { + let shortid = self + .services + .rooms + .short + .get_shorteventid(&event_id) + .await?; + + Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) +} + +#[admin_command] +pub(super) async fn short_room_id( + &self, + room_id: OwnedRoomOrAliasId, +) -> Result { + let room_id = 
self.services.rooms.alias.resolve(&room_id).await?; + + let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?; + + Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) +} diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 2149a103..3715ac25 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -15,6 +15,8 @@ pub(crate) enum UsersCommand { IterUsers, + IterUsers2, + PasswordHash { user_id: OwnedUserId, }, @@ -89,6 +91,33 @@ pub(crate) enum UsersCommand { room_id: OwnedRoomId, session_id: String, }, + + GetSharedRooms { + user_a: OwnedUserId, + user_b: OwnedUserId, + }, +} + +#[admin_command] +async fn get_shared_rooms( + &self, + user_a: OwnedUserId, + user_b: OwnedUserId, +) -> Result { + let timer = tokio::time::Instant::now(); + let result: Vec<_> = self + .services + .rooms + .state_cache + .get_shared_rooms(&user_a, &user_b) + .map(ToOwned::to_owned) + .collect() + .await; + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + ))) } #[admin_command] @@ -207,6 +236,23 @@ async fn iter_users(&self) -> Result { ))) } +#[admin_command] +async fn iter_users2(&self) -> Result { + let timer = tokio::time::Instant::now(); + let result: Vec<_> = self.services.users.stream().collect().await; + let result: Vec<_> = result + .into_iter() + .map(ruma::UserId::as_bytes) + .map(String::from_utf8_lossy) + .collect(); + + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" + ))) +} + #[admin_command] async fn count_users(&self) -> Result { let timer = tokio::time::Instant::now(); diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 4490600d..9710cfc8 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -44,7 +44,14 @@ pub(crate) enum RoomAliasCommand { }, } 
-pub(super) async fn process( +pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result { + let c = reprocess(command, context).await?; + context.write_str(c.body()).await?; + + Ok(()) +} + +pub(super) async fn reprocess( command: RoomAliasCommand, context: &Command<'_>, ) -> Result { diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 81f25478..791b9204 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -25,7 +25,13 @@ pub(crate) enum RoomDirectoryCommand { }, } -pub(super) async fn process( +pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result { + let c = reprocess(command, context).await?; + context.write_str(c.body()).await?; + Ok(()) +} + +pub(super) async fn reprocess( command: RoomDirectoryCommand, context: &Command<'_>, ) -> Result { diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 5758d937..57aedd9c 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -31,19 +31,21 @@ const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the #[admin_command] pub(super) async fn list_users(&self) -> Result { - let users = self + let users: Vec<_> = self .services .users .list_local_users() .map(ToString::to_string) - .collect::>() + .collect() .await; let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len()); plain_msg += users.join("\n").as_str(); plain_msg += "\n```"; - Ok(RoomMessageEventContent::notice_markdown(plain_msg)) + self.write_str(plain_msg.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) } #[admin_command] @@ -912,29 +914,30 @@ pub(super) async fn redact_event( self.services.globals.server_name() ); - let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; + let redaction_event_id = { + let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; - let redaction_event_id = self - .services - .rooms - 
.timeline - .build_and_append_pdu( - PduBuilder { - redacts: Some(event.event_id.clone()), - ..PduBuilder::timeline(&RoomRedactionEventContent { + self.services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { redacts: Some(event.event_id.clone()), - reason: Some(reason), - }) - }, - &sender_user, - &room_id, - &state_lock, - ) - .await?; + ..PduBuilder::timeline(&RoomRedactionEventContent { + redacts: Some(event.event_id.clone()), + reason: Some(reason), + }) + }, + &sender_user, + &room_id, + &state_lock, + ) + .await? + }; - drop(state_lock); + let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}"); - Ok(RoomMessageEventContent::text_plain(format!( - "Successfully redacted event. Redaction event ID: {redaction_event_id}" - ))) + self.write_str(out.as_str()).await?; + + Ok(RoomMessageEventContent::text_plain("")) } diff --git a/src/macros/admin.rs b/src/macros/admin.rs index e98e914c..e35bd586 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -22,7 +22,7 @@ pub(super) fn command_dispatch(item: ItemEnum, _args: &[Meta]) -> Result - ) -> Result { + ) -> Result { use #name::*; #[allow(non_snake_case)] Ok(match command { @@ -46,7 +46,10 @@ fn dispatch_arm(v: &Variant) -> Result { let field = fields.named.iter().filter_map(|f| f.ident.as_ref()); let arg = field.clone(); quote! { - #name { #( #field ),* } => Box::pin(context.#handler(#( #arg ),*)).await?, + #name { #( #field ),* } => { + let c = Box::pin(context.#handler(#( #arg ),*)).await?; + Box::pin(context.write_str(c.body())).await?; + }, } }, | Fields::Unnamed(fields) => { @@ -54,12 +57,17 @@ fn dispatch_arm(v: &Variant) -> Result { return Err(Error::new(Span::call_site().into(), "One unnamed field required")); }; quote! { - #name ( #field ) => Box::pin(#handler::process(#field, context)).await?, + #name ( #field ) => { + Box::pin(#handler::process(#field, context)).await?; + } } }, | Fields::Unit => { quote! 
{ - #name => Box::pin(context.#handler()).await?, + #name => { + let c = Box::pin(context.#handler()).await?; + Box::pin(context.write_str(c.body())).await?; + }, } }, }; From f9e76d6239632bd3e74cd0b1c76dd72dbc24dc7a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 01:32:37 +0000 Subject: [PATCH 0538/1248] improve debug memory-stats options Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 30 +++++++++++++++++++----------- src/admin/debug/mod.rs | 8 +++++++- src/core/alloc/default.rs | 2 +- src/core/alloc/hardened.rs | 4 ++-- src/core/alloc/je.rs | 14 ++++++-------- 5 files changed, 35 insertions(+), 23 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index b6189f6a..ad61440c 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -843,19 +843,27 @@ pub(super) async fn resolve_true_destination( } #[admin_command] -pub(super) async fn memory_stats(&self) -> Result { - let html_body = conduwuit::alloc::memory_stats(); +pub(super) async fn memory_stats(&self, opts: Option) -> Result { + const OPTS: &str = "abcdefghijklmnopqrstuvwxyz"; - if html_body.is_none() { - return Ok(RoomMessageEventContent::text_plain( - "malloc stats are not supported on your compiled malloc.", - )); - } + let opts: String = OPTS + .chars() + .filter(|&c| { + let allow_any = opts.as_ref().is_some_and(|opts| opts == "*"); - Ok(RoomMessageEventContent::text_html( - "This command's output can only be viewed by clients that render HTML.".to_owned(), - html_body.expect("string result"), - )) + let allow = allow_any || opts.as_ref().is_some_and(|opts| opts.contains(c)); + + !allow + }) + .collect(); + + let stats = conduwuit::alloc::memory_stats(&opts).unwrap_or_default(); + + self.write_str("```\n").await?; + self.write_str(&stats).await?; + self.write_str("\n```").await?; + + Ok(RoomMessageEventContent::text_plain("")) } #[cfg(tokio_unstable)] diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 
2367f80d..07f7296b 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -191,7 +191,13 @@ pub(super) enum DebugCommand { }, /// - Print extended memory usage - MemoryStats, + /// + /// Optional argument is a character mask (a sequence of characters in any + /// order) which enable additional extended statistics. Known characters are + /// "abdeglmx". For convenience, a '*' will enable everything. + MemoryStats { + opts: Option, + }, /// - Print general tokio runtime metric totals. RuntimeMetrics, diff --git a/src/core/alloc/default.rs b/src/core/alloc/default.rs index 5db02884..56e8c407 100644 --- a/src/core/alloc/default.rs +++ b/src/core/alloc/default.rs @@ -5,7 +5,7 @@ pub fn trim() -> crate::Result { Ok(()) } /// Always returns None #[must_use] -pub fn memory_stats() -> Option { None } +pub fn memory_stats(_opts: &str) -> Option { None } /// Always returns None #[must_use] diff --git a/src/core/alloc/hardened.rs b/src/core/alloc/hardened.rs index e2d9b28e..ff10cf2b 100644 --- a/src/core/alloc/hardened.rs +++ b/src/core/alloc/hardened.rs @@ -7,9 +7,9 @@ pub fn trim() -> crate::Result { Ok(()) } #[must_use] //TODO: get usage -pub fn memory_usage() -> Option { None } +pub fn memory_usage() -> Option { None } #[must_use] -pub fn memory_stats() -> Option { +pub fn memory_stats(_opts: &str) -> Option { Some("Extended statistics are not available from hardened_malloc.".to_owned()) } diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index b2c1fe85..ccb213c9 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -3,7 +3,7 @@ use std::{ cell::OnceCell, ffi::{c_char, c_void}, - fmt::{Debug, Write}, + fmt::Debug, }; use arrayvec::ArrayVec; @@ -66,15 +66,12 @@ pub fn memory_usage() -> Option { #[cfg(not(feature = "jemalloc_stats"))] pub fn memory_usage() -> Option { None } -#[must_use] -pub fn memory_stats() -> Option { - const MAX_LENGTH: usize = 65536 - 4096; +pub fn memory_stats(opts: &str) -> Option { + const MAX_LENGTH: usize = 1_048_576; 
- let opts_s = "d"; let mut str = String::new(); - let opaque = std::ptr::from_mut(&mut str).cast::(); - let opts_p: *const c_char = std::ffi::CString::new(opts_s) + let opts_p: *const c_char = std::ffi::CString::new(opts) .expect("cstring") .into_raw() .cast_const(); @@ -84,7 +81,8 @@ pub fn memory_stats() -> Option { unsafe { ffi::malloc_stats_print(Some(malloc_stats_cb), opaque, opts_p) }; str.truncate(MAX_LENGTH); - Some(format!("

    {str}
    ")) + + Some(str) } unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { From afdf5a07b52bd7102d5b98c4d3b6aa1b1fc905ce Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 05:32:17 +0000 Subject: [PATCH 0539/1248] abstract hidden line related in config generator macro --- src/macros/config.rs | 70 +++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/src/macros/config.rs b/src/macros/config.rs index 0fb79728..452abd20 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -15,6 +15,8 @@ use crate::{ const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it."; +const HIDDEN: &[&str] = &["default"]; + #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { if is_cargo_build() && !is_cargo_test() { @@ -93,7 +95,7 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { format!("{doc}\n#\n") }; - let default = get_doc_default(field) + let default = get_doc_comment_line(field, "default") .or_else(|| get_default(field)) .unwrap_or_default(); @@ -163,40 +165,40 @@ fn get_default(field: &Field) -> Option { None } -fn get_doc_default(field: &Field) -> Option { - for attr in &field.attrs { - let Meta::NameValue(MetaNameValue { path, value, .. }) = &attr.meta else { - continue; - }; +fn get_doc_comment(field: &Field) -> Option { + let comment = get_doc_comment_full(field)?; - if path.segments.iter().next().is_none_or(|s| s.ident != "doc") { - continue; - } + let out = comment + .lines() + .filter(|line| { + !HIDDEN.iter().any(|key| { + line.trim().starts_with(key) && line.trim().chars().nth(key.len()) == Some(':') + }) + }) + .fold(String::new(), |full, line| full + "#" + line + "\n"); - let Expr::Lit(ExprLit { lit, .. 
}) = &value else { - continue; - }; - - let Lit::Str(token) = &lit else { - continue; - }; - - let value = token.value(); - if !value.trim().starts_with("default:") { - continue; - } - - return value - .split_once(':') - .map(|(_, v)| v) - .map(str::trim) - .map(ToOwned::to_owned); - } - - None + (!out.is_empty()).then_some(out) } -fn get_doc_comment(field: &Field) -> Option { +fn get_doc_comment_line(field: &Field, label: &str) -> Option { + let comment = get_doc_comment_full(field)?; + + comment + .lines() + .map(str::trim) + .filter(|line| line.starts_with(label)) + .filter(|line| line.chars().nth(label.len()) == Some(':')) + .map(|line| { + line.split_once(':') + .map(|(_, v)| v) + .map(str::trim) + .map(ToOwned::to_owned) + }) + .next() + .flatten() +} + +fn get_doc_comment_full(field: &Field) -> Option { let mut out = String::new(); for attr in &field.attrs { let Meta::NameValue(MetaNameValue { path, value, .. }) = &attr.meta else { @@ -216,11 +218,7 @@ fn get_doc_comment(field: &Field) -> Option { }; let value = token.value(); - if value.trim().starts_with("default:") { - continue; - } - - writeln!(&mut out, "#{value}").expect("wrote to output string buffer"); + writeln!(&mut out, "{value}").expect("wrote to output string buffer"); } (!out.is_empty()).then_some(out) From c6ae6adc80e562a44f96e10f03eb4d14dc312984 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 09:47:17 +0000 Subject: [PATCH 0540/1248] pre-allocate some amount of media read buffer Signed-off-by: Jason Volk --- src/service/media/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 7e77090c..0d98853d 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -157,7 +157,7 @@ impl Service { if let Ok(Metadata { content_disposition, content_type, key }) = self.db.search_file_metadata(mxc, &Dim::default()).await { - let mut content = Vec::new(); + let mut content = Vec::with_capacity(8192); 
let path = self.get_media_file(&key); BufReader::new(fs::File::open(path).await?) .read_to_end(&mut content) From 7045481fae69150eea84983a55b762ecfaa04e2f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 21:58:49 +0000 Subject: [PATCH 0541/1248] add from_errno construction to Error Signed-off-by: Jason Volk --- src/core/error/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 2468811e..88ac6d09 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -131,6 +131,10 @@ pub enum Error { } impl Error { + #[inline] + #[must_use] + pub fn from_errno() -> Self { Self::Io(std::io::Error::last_os_error()) } + //#[deprecated] pub fn bad_database(message: &'static str) -> Self { crate::err!(Database(error!("{message}"))) From df3eb95d4f18f61839b296f48401ca75f61ad750 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 4 Jan 2025 01:32:45 +0000 Subject: [PATCH 0542/1248] additional affinity utils Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 2 +- src/core/utils/sys.rs | 2 +- src/core/utils/sys/compute.rs | 145 ++++++++++++++++++++++++++-------- 3 files changed, 116 insertions(+), 33 deletions(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 2bbadb50..4b5330ed 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -37,7 +37,7 @@ pub use self::{ rand::{shuffle, string as random_string}, stream::{IterStream, ReadyExt, Tools as StreamTools, TryReadyExt}, string::{str_from_bytes, string_from_bytes}, - sys::compute::parallelism as available_parallelism, + sys::compute::available_parallelism, time::{ exponential_backoff::{continue_exponential_backoff, continue_exponential_backoff_secs}, now_millis as millis_since_unix_epoch, timepoint_ago, timepoint_from_now, diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index 5c5564c4..a0d5be52 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -3,7 +3,7 @@ pub mod storage; use std::path::PathBuf; 
-pub use compute::parallelism as available_parallelism; +pub use compute::available_parallelism; use crate::{debug, Result}; diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs index 9e90fc90..ce2aa504 100644 --- a/src/core/utils/sys/compute.rs +++ b/src/core/utils/sys/compute.rs @@ -1,23 +1,31 @@ //! System utilities related to compute/processing -use std::{cell::Cell, fmt::Debug, sync::LazyLock}; +use std::{cell::Cell, fmt::Debug, path::PathBuf, sync::LazyLock}; -use crate::is_equal_to; +use crate::{is_equal_to, Result}; -/// The list of cores available to the process (at startup) -static CORES_AVAILABLE: LazyLock = LazyLock::new(|| { - core_affinity::get_core_ids() - .unwrap_or_default() - .into_iter() - .map(|core_id| core_id.id) - .inspect(|&id| debug_assert!(id < 128, "Core ID must be < 128 at least for now")) - .fold(0_u128, |mask, id| mask | (1 << id)) -}); +type Id = usize; + +type Mask = u128; +type Masks = [Mask; MASK_BITS]; + +const MASK_BITS: usize = 128; + +/// The mask of logical cores available to the process (at startup). +static CORES_AVAILABLE: LazyLock = LazyLock::new(|| into_mask(query_cores_available())); + +/// Stores the mask of logical-cores with thread/HT/SMT association. Each group +/// here makes up a physical-core. +static SMT_TOPOLOGY: LazyLock = LazyLock::new(init_smt_topology); + +/// Stores the mask of logical-core associations on a node/socket. Bits are set +/// for all logical cores within all physical cores of the node. +static NODE_TOPOLOGY: LazyLock = LazyLock::new(init_node_topology); thread_local! { /// Tracks the affinity for this thread. This is updated when affinities /// are set via our set_affinity() interface. - static CORE_AFFINITY: Cell = Cell::default(); + static CORE_AFFINITY: Cell = Cell::default(); } /// Set the core affinity for this thread. The ID should be listed in @@ -28,19 +36,19 @@ thread_local! 
{ fields( id = ?std::thread::current().id(), name = %std::thread::current().name().unwrap_or("None"), - set = ?ids.by_ref().collect::>(), + set = ?ids.clone().collect::>(), CURRENT = %format!("[b{:b}]", CORE_AFFINITY.get()), AVAILABLE = %format!("[b{:b}]", *CORES_AVAILABLE), ), )] pub fn set_affinity(mut ids: I) where - I: Iterator + Clone + Debug, + I: Iterator + Clone + Debug, { use core_affinity::{set_each_for_current, set_for_current, CoreId}; let n = ids.clone().count(); - let mask: u128 = ids.clone().fold(0, |mask, id| { + let mask: Mask = ids.clone().fold(0, |mask, id| { debug_assert!(is_core_available(id), "setting affinity to unavailable core"); mask | (1 << id) }); @@ -57,35 +65,110 @@ where } /// Get the core affinity for this thread. -pub fn get_affinity() -> impl Iterator { iter_bits(CORE_AFFINITY.get()) } +pub fn get_affinity() -> impl Iterator { from_mask(CORE_AFFINITY.get()) } + +/// List the cores sharing SMT-tier resources +pub fn smt_siblings() -> impl Iterator { + from_mask(get_affinity().fold(0_u128, |mask, id| { + mask | SMT_TOPOLOGY.get(id).expect("ID must not exceed max cpus") + })) +} + +/// List the cores sharing Node-tier resources relative to this threads current +/// affinity. +pub fn node_siblings() -> impl Iterator { + from_mask(get_affinity().fold(0_u128, |mask, id| { + mask | NODE_TOPOLOGY.get(id).expect("Id must not exceed max cpus") + })) +} + +/// Get the cores sharing SMT resources relative to id. +#[inline] +pub fn smt_affinity(id: Id) -> impl Iterator { + from_mask(*SMT_TOPOLOGY.get(id).expect("ID must not exceed max cpus")) +} + +/// Get the cores sharing Node resources relative to id. +#[inline] +pub fn node_affinity(id: Id) -> impl Iterator { + from_mask(*NODE_TOPOLOGY.get(id).expect("ID must not exceed max cpus")) +} + +/// Get the number of threads which could execute in parallel based on hardware +/// constraints of this system. 
+#[inline] +#[must_use] +pub fn available_parallelism() -> usize { cores_available().count() } /// Gets the ID of the nth core available. This bijects our sequence of cores to /// actual ID's which may have gaps for cores which are not available. #[inline] #[must_use] -pub fn nth_core_available(i: usize) -> Option { cores_available().nth(i) } +pub fn nth_core_available(i: usize) -> Option { cores_available().nth(i) } /// Determine if core (by id) is available to the process. #[inline] #[must_use] -pub fn is_core_available(id: usize) -> bool { cores_available().any(is_equal_to!(id)) } +pub fn is_core_available(id: Id) -> bool { cores_available().any(is_equal_to!(id)) } /// Get the list of cores available. The values were recorded at program start. #[inline] -pub fn cores_available() -> impl Iterator { iter_bits(*CORES_AVAILABLE) } +pub fn cores_available() -> impl Iterator { from_mask(*CORES_AVAILABLE) } -/// Get the number of threads which could execute in parallel based on the -/// hardware and administrative constraints of this system. This value should be -/// used to hint the size of thread-pools and divide-and-conquer algorithms. -/// -/// * -#[must_use] -pub fn parallelism() -> usize { - std::thread::available_parallelism() - .expect("Unable to query for available parallelism.") - .get() +#[cfg(target_os = "linux")] +#[inline] +pub fn getcpu() -> Result { + use crate::{utils::math, Error}; + + // SAFETY: This is part of an interface with many low-level calls taking many + // raw params, but it's unclear why this specific call is unsafe. Nevertheless + // the value obtained here is semantically unsafe because it can change on the + // instruction boundary trailing its own acquisition and also any other time. + let ret: i32 = unsafe { libc::sched_getcpu() }; + + #[cfg(target_os = "linux")] + // SAFETY: On modern linux systems with a vdso if we can optimize away the branch checking + // for error (see getcpu(2)) then this system call becomes a memory access. 
+ unsafe { + std::hint::assert_unchecked(ret >= 0); + }; + + if ret == -1 { + return Err(Error::from_errno()); + } + + math::try_into(ret) } -fn iter_bits(v: u128) -> impl Iterator { - (0..128).filter(move |&i| (v & (1 << i)) != 0) +#[cfg(not(target_os = "linux"))] +#[inline] +pub fn getcpu() -> Result { Err(crate::Error::Io(std::io::ErrorKind::Unsupported.into())) } + +fn query_cores_available() -> impl Iterator { + core_affinity::get_core_ids() + .unwrap_or_default() + .into_iter() + .map(|core_id| core_id.id) +} + +fn init_smt_topology() -> [Mask; MASK_BITS] { [Mask::default(); MASK_BITS] } + +fn init_node_topology() -> [Mask; MASK_BITS] { [Mask::default(); MASK_BITS] } + +fn into_mask(ids: I) -> Mask +where + I: Iterator, +{ + ids.inspect(|&id| { + debug_assert!(id < MASK_BITS, "Core ID must be < Mask::BITS at least for now"); + }) + .fold(Mask::default(), |mask, id| mask | (1 << id)) +} + +fn from_mask(v: Mask) -> impl Iterator { + (0..MASK_BITS).filter(move |&i| (v & (1 << i)) != 0) +} + +fn _sys_path(id: usize, suffix: &str) -> PathBuf { + format!("/sys/devices/system/cpu/cpu{id}/{suffix}").into() } From 4fbbfe5d3056669982becae1ae7ec0f3edd80439 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 19:56:16 +0000 Subject: [PATCH 0543/1248] add alt argument format for detecting cargo build phase Signed-off-by: Jason Volk --- src/macros/utils.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/macros/utils.rs b/src/macros/utils.rs index e33ee8b4..af2519a7 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -23,6 +23,16 @@ pub(crate) fn get_simple_settings(args: &[Meta]) -> HashMap { } pub(crate) fn is_cargo_build() -> bool { + legacy_is_cargo_build() + || std::env::args() + .skip_while(|flag| !flag.starts_with("--emit")) + .nth(1) + .iter() + .flat_map(|flag| flag.split(',')) + .any(|elem| elem == "link") +} + +pub(crate) fn legacy_is_cargo_build() -> bool { std::env::args() .find(|flag| flag.starts_with("--emit")) 
.as_ref() From 3eed408b2975564ed2c0b103a665f1a022e150b8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 04:42:01 +0000 Subject: [PATCH 0544/1248] additional util macros and reorg Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 85 ++++++++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 33 deletions(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 4b5330ed..1a4b52da 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -57,6 +57,14 @@ macro_rules! extract_variant { }; } +/// Functor for !is_empty() +#[macro_export] +macro_rules! is_not_empty { + () => { + |x| !x.is_empty() + }; +} + #[macro_export] macro_rules! apply { (1, $($idx:tt)+) => { @@ -76,24 +84,35 @@ macro_rules! apply { }; } +/// Functor for truthy #[macro_export] -macro_rules! at { - ($idx:tt) => { - |t| t.$idx +macro_rules! is_true { + () => { + |x| !!x }; } +/// Functor for falsy #[macro_export] -macro_rules! ref_at { - ($idx:tt) => { - |ref t| &t.$idx +macro_rules! is_false { + () => { + |x| !x }; } +/// Functor for equality to non-zero #[macro_export] -macro_rules! deref_at { - ($idx:tt) => { - |t| *t.$idx +macro_rules! is_nonzero { + () => { + |x| x != 0 + }; +} + +/// Functor for equality to zero +#[macro_export] +macro_rules! is_zero { + () => { + $crate::is_matching!(0) }; } @@ -121,14 +140,6 @@ macro_rules! is_less_than { }; } -/// Functor for equality to zero -#[macro_export] -macro_rules! is_zero { - () => { - $crate::is_matching!(0) - }; -} - /// Functor for matches! i.e. .is_some_and(is_matching!('A'..='Z')) #[macro_export] macro_rules! is_matching { @@ -141,14 +152,6 @@ macro_rules! is_matching { }; } -/// Functor for !is_empty() -#[macro_export] -macro_rules! is_not_empty { - () => { - |x| !x.is_empty() - }; -} - /// Functor for equality i.e. (a, b).map(is_equal!()) #[macro_export] macro_rules! is_equal { @@ -157,18 +160,34 @@ macro_rules! 
is_equal { }; } -/// Functor for truthy +/// Functor for |x| *x.$i #[macro_export] -macro_rules! is_true { - () => { - |x| !!x +macro_rules! deref_at { + ($idx:tt) => { + |t| *t.$idx }; } -/// Functor for falsy +/// Functor for |ref x| x.$i #[macro_export] -macro_rules! is_false { - () => { - |x| !x +macro_rules! ref_at { + ($idx:tt) => { + |ref t| &t.$idx + }; +} + +/// Functor for |&x| x.$i +#[macro_export] +macro_rules! val_at { + ($idx:tt) => { + |&t| t.$idx + }; +} + +/// Functor for |x| x.$i +#[macro_export] +macro_rules! at { + ($idx:tt) => { + |t| t.$idx }; } From 3dae02b886a3428c58d0a11e1cc19271722b4b47 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 01:30:41 +0000 Subject: [PATCH 0545/1248] add preferred jemalloc config add muzzy/dirty configuration mallctl interface add program argument for --gc-muzzy=false Signed-off-by: Jason Volk --- Cargo.lock | 57 +++++----- Cargo.toml | 16 ++- deps/rust-rocksdb/Cargo.toml | 2 +- src/admin/debug/commands.rs | 2 +- src/core/Cargo.toml | 1 + src/core/alloc/default.rs | 2 +- src/core/alloc/hardened.rs | 2 +- src/core/alloc/je.rs | 194 +++++++++++++++++++++++++++++------ src/database/pool.rs | 16 ++- src/main/Cargo.toml | 4 + src/main/clap.rs | 16 +++ src/main/runtime.rs | 59 ++++++++--- 12 files changed, 289 insertions(+), 82 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18bd7aab..8de3abf4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -175,9 +175,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f409eb70b561706bf8abba8ca9c112729c481595893fd06a2dd9af8ed8441148" +checksum = "1ea835662a0af02443aa1396d39be523bbf8f11ee6fad20329607c480bea48c3" dependencies = [ "aws-lc-sys", "paste", @@ -186,9 +186,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.24.1" +version = "0.25.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "923ded50f602b3007e5e63e3f094c479d9c8a9b42d7f4034e4afe456aa48bfd2" +checksum = "71b2ddd3ada61a305e1d8bb6c005d1eaa7d14d903681edfc400406d523a9b491" dependencies = [ "bindgen", "cc", @@ -368,7 +368,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -393,9 +393,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be3f42a67d6d345ecd59f675f3f012d6974981560836e938c22b424b85ce1be" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "blake2" @@ -495,9 +495,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.9" +version = "1.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8293772165d9345bdaaa39b45b2109591e63fe5e6fbc23c6ff930a048aa310b" +checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" dependencies = [ "jobserver", "libc", @@ -1047,7 +1047,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "crossterm_winapi", "futures-core", "mio", @@ -1122,9 +1122,9 @@ checksum = "817fa642fb0ee7fe42e95783e00e0969927b96091bdd4b9b1af082acd943913b" [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" [[package]] 
name = "date_header" @@ -2378,7 +2378,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "cfg-if", "cfg_aliases", "libc", @@ -2911,7 +2911,7 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "memchr", "pulldown-cmark-escape", "unicase", @@ -3032,7 +3032,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", ] [[package]] @@ -3377,7 +3377,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.31.0+9.9.3" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=123d6302fed23fc706344becb2f19623265a83f8#123d6302fed23fc706344becb2f19623265a83f8" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2d31cf323df7c6d95396ef0213e28936c2218bd6#2d31cf323df7c6d95396ef0213e28936c2218bd6" dependencies = [ "bindgen", "bzip2-sys", @@ -3394,7 +3394,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.35.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=123d6302fed23fc706344becb2f19623265a83f8#123d6302fed23fc706344becb2f19623265a83f8" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2d31cf323df7c6d95396ef0213e28936c2218bd6#2d31cf323df7c6d95396ef0213e28936c2218bd6" dependencies = [ "libc", "rust-librocksdb-sys", @@ -3441,7 +3441,7 @@ version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "errno", 
"libc", "linux-raw-sys", @@ -3559,9 +3559,12 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sd-notify" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be20c5f7f393ee700f8b2f28ea35812e4e212f40774b550cd2a93ea91684451" +checksum = "561e6b346a5e59e0b8a07894004897d7160567e3352d2ebd6c3741d4e086b6f5" +dependencies = [ + "libc", +] [[package]] name = "security-framework" @@ -3569,7 +3572,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.8.0", "core-foundation", "core-foundation-sys", "libc", @@ -4171,7 +4174,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=d87938bfddc26377dd7fdf14bbcd345f3ab19442#d87938bfddc26377dd7fdf14bbcd345f3ab19442" +source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "paste", @@ -4181,7 +4184,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" -source = "git+https://github.com/girlbossceo/jemallocator?rev=d87938bfddc26377dd7fdf14bbcd345f3ab19442#d87938bfddc26377dd7fdf14bbcd345f3ab19442" +source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "cc", "libc", @@ -4190,7 +4193,7 @@ dependencies = [ [[package]] name = "tikv-jemallocator" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=d87938bfddc26377dd7fdf14bbcd345f3ab19442#d87938bfddc26377dd7fdf14bbcd345f3ab19442" +source = 
"git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -4445,7 +4448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.7.0", + "bitflags 2.8.0", "bytes", "futures-core", "futures-util", @@ -4709,9 +4712,9 @@ dependencies = [ [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" diff --git a/Cargo.toml b/Cargo.toml index c0b31a69..4d738a11 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -431,17 +431,23 @@ version = "0.35.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] git = "https://github.com/girlbossceo/jemallocator" -rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442" +rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false -features = ["unprefixed_malloc_on_supported_platforms"] +features = [ + "background_threads_runtime_support", + "unprefixed_malloc_on_supported_platforms", +] [workspace.dependencies.tikv-jemallocator] git = "https://github.com/girlbossceo/jemallocator" -rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442" +rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false -features = ["unprefixed_malloc_on_supported_platforms"] +features = [ + "background_threads_runtime_support", + "unprefixed_malloc_on_supported_platforms", +] [workspace.dependencies.tikv-jemalloc-ctl] git = "https://github.com/girlbossceo/jemallocator" -rev = "d87938bfddc26377dd7fdf14bbcd345f3ab19442" +rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = ["use_std"] diff --git 
a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index f06c44e8..96554aed 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "123d6302fed23fc706344becb2f19623265a83f8" +rev = "2d31cf323df7c6d95396ef0213e28936c2218bd6" #branch = "master" default-features = false diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index ad61440c..a77587b0 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -967,7 +967,7 @@ pub(super) async fn database_stats( #[admin_command] pub(super) async fn trim_memory(&self) -> Result { - conduwuit::alloc::trim()?; + conduwuit::alloc::trim(None)?; writeln!(self, "done").await?; diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index c716e9c2..ef2df4ff 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -36,6 +36,7 @@ jemalloc_stats = [ "tikv-jemalloc-ctl/stats", "tikv-jemallocator/stats", ] +jemalloc_conf = [] hardened_malloc = [ "dep:hardened_malloc-rs" ] diff --git a/src/core/alloc/default.rs b/src/core/alloc/default.rs index 56e8c407..65354b7d 100644 --- a/src/core/alloc/default.rs +++ b/src/core/alloc/default.rs @@ -1,7 +1,7 @@ //! 
Default allocator with no special features /// Always returns Ok -pub fn trim() -> crate::Result { Ok(()) } +pub fn trim>>(_: I) -> crate::Result { Ok(()) } /// Always returns None #[must_use] diff --git a/src/core/alloc/hardened.rs b/src/core/alloc/hardened.rs index ff10cf2b..5f850673 100644 --- a/src/core/alloc/hardened.rs +++ b/src/core/alloc/hardened.rs @@ -3,7 +3,7 @@ #[global_allocator] static HMALLOC: hardened_malloc_rs::HardenedMalloc = hardened_malloc_rs::HardenedMalloc; -pub fn trim() -> crate::Result { Ok(()) } +pub fn trim>>(_: I) -> crate::Result { Ok(()) } #[must_use] //TODO: get usage diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index ccb213c9..119ff45e 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -2,8 +2,9 @@ use std::{ cell::OnceCell, - ffi::{c_char, c_void}, + ffi::{c_char, c_void, CStr}, fmt::Debug, + sync::RwLock, }; use arrayvec::ArrayVec; @@ -11,10 +12,14 @@ use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; -use crate::{err, is_equal_to, utils::math::Tried, Result}; +use crate::{ + err, is_equal_to, is_nonzero, + utils::{math, math::Tried}, + Result, +}; #[cfg(feature = "jemalloc_conf")] -#[no_mangle] +#[unsafe(no_mangle)] pub static malloc_conf: &[u8] = b"\ metadata_thp:always\ ,percpu_arena:percpu\ @@ -22,19 +27,26 @@ metadata_thp:always\ ,max_background_threads:-1\ ,lg_extent_max_active_fit:4\ ,oversize_threshold:33554432\ -,tcache_max:2097152\ +,tcache_max:1048576\ ,dirty_decay_ms:16000\ ,muzzy_decay_ms:144000\ \0"; #[global_allocator] static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; +static CONTROL: RwLock<()> = RwLock::new(()); -type Key = ArrayVec; type Name = ArrayVec; +type Key = ArrayVec; -const KEY_SEGS: usize = 8; const NAME_MAX: usize = 128; +const KEY_SEGS: usize = 8; + +#[crate::ctor] +fn _static_initialization() { + acq_epoch().expect("pre-initialization of jemalloc failed"); + acq_epoch().expect("pre-initialization of jemalloc failed"); +} 
#[must_use] #[cfg(feature = "jemalloc_stats")] @@ -49,6 +61,9 @@ pub fn memory_usage() -> Option { kibs / 1024.0 }; + // Acquire the epoch; ensure latest stats are pulled in + acq_epoch().ok()?; + let allocated = mibs(stats::allocated::read()); let active = mibs(stats::active::read()); let mapped = mibs(stats::mapped::read()); @@ -76,6 +91,9 @@ pub fn memory_stats(opts: &str) -> Option { .into_raw() .cast_const(); + // Acquire the epoch; ensure latest stats are pulled in + acq_epoch().ok()?; + // SAFETY: calls malloc_stats_print() with our string instance which must remain // in this frame. https://docs.rs/tikv-jemalloc-sys/latest/tikv_jemalloc_sys/fn.malloc_stats_print.html unsafe { ffi::malloc_stats_print(Some(malloc_stats_cb), opaque, opts_p) }; @@ -95,7 +113,7 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { }; // SAFETY: we have to trust the string is null terminated. - let msg = unsafe { std::ffi::CStr::from_ptr(msg) }; + let msg = unsafe { CStr::from_ptr(msg) }; let msg = String::from_utf8_lossy(msg.to_bytes()); res.push_str(msg.as_ref()); @@ -114,58 +132,168 @@ macro_rules! mallctl { }}; } -pub fn trim() -> Result { set(&mallctl!("arena.4096.purge"), ()) } - -pub fn decay() -> Result { set(&mallctl!("arena.4096.purge"), ()) } - -pub fn set_by_name(name: &str, val: T) -> Result { set(&key(name)?, val) } - -pub fn get_by_name(name: &str) -> Result { get(&key(name)?) 
} - pub mod this_thread { - use super::{get, key, set, Key, OnceCell, Result}; + use super::{is_nonzero, key, math, Debug, Key, OnceCell, Result}; - pub fn trim() -> Result { - let mut key = mallctl!("arena.0.purge"); - key[1] = arena_id()?.try_into()?; - set(&key, ()) + pub fn trim() -> Result { notify(mallctl!("arena.0.purge")) } + + pub fn decay() -> Result { notify(mallctl!("arena.0.decay")) } + + pub fn flush() -> Result { super::notify(&mallctl!("thread.tcache.flush")) } + + pub fn set_muzzy_decay(decay_ms: isize) -> Result { + set(mallctl!("arena.0.muzzy_decay_ms"), decay_ms) } - pub fn decay() -> Result { - let mut key = mallctl!("arena.0.decay"); - key[1] = arena_id()?.try_into()?; - set(&key, ()) + pub fn get_muzzy_decay() -> Result { get(mallctl!("arena.0.muzzy_decay_ms")) } + + pub fn set_dirty_decay(decay_ms: isize) -> Result { + set(mallctl!("arena.0.dirty_decay_ms"), decay_ms) } - pub fn cache(enable: bool) -> Result { - set(&mallctl!("thread.tcache.enabled"), u8::from(enable)) + pub fn get_dirty_decay() -> Result { get(mallctl!("arena.0.dirty_decay_ms")) } + + pub fn enable_cache(enable: bool) -> Result { + super::set::(&mallctl!("thread.tcache.enabled"), enable.into()).map(is_nonzero!()) } - pub fn flush() -> Result { set(&mallctl!("thread.tcache.flush"), ()) } + pub fn is_cache_enabled() -> Result { + super::get::(&mallctl!("thread.tcache.enabled")).map(is_nonzero!()) + } - pub fn allocated() -> Result { get::(&mallctl!("thread.allocated")) } + pub fn set_arena(id: usize) -> Result { + super::set::(&mallctl!("thread.arena"), id.try_into()?).and_then(math::try_into) + } - pub fn deallocated() -> Result { get::(&mallctl!("thread.deallocated")) } + pub fn arena_id() -> Result { + super::get::(&mallctl!("thread.arena")).and_then(math::try_into) + } - pub fn arena_id() -> Result { get::(&mallctl!("thread.arena")) } + pub fn allocated() -> Result { super::get(&mallctl!("thread.allocated")) } + + pub fn deallocated() -> Result { 
super::get(&mallctl!("thread.deallocated")) } + + fn notify(key: Key) -> Result { super::notify_by_arena(Some(arena_id()?), key) } + + fn set(key: Key, val: T) -> Result + where + T: Copy + Debug, + { + super::set_by_arena(Some(arena_id()?), key, val) + } + + fn get(key: Key) -> Result + where + T: Copy + Debug, + { + super::get_by_arena(Some(arena_id()?), key) + } } -fn set(key: &Key, val: T) -> Result +pub fn trim>>(arena: I) -> Result { + notify_by_arena(arena.into(), mallctl!("arena.4096.purge")) +} + +pub fn decay>>(arena: I) -> Result { + notify_by_arena(arena.into(), mallctl!("arena.4096.decay")) +} + +pub fn set_muzzy_decay>>(arena: I, decay_ms: isize) -> Result { + if let Some(arena) = arena.into() { + set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms) + } else { + set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms) + } +} + +pub fn set_dirty_decay>>(arena: I, decay_ms: isize) -> Result { + if let Some(arena) = arena.into() { + set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms) + } else { + set(&mallctl!("arenas.dirty_decay_ms"), decay_ms) + } +} + +#[inline] +#[must_use] +pub fn is_affine_arena() -> bool { is_percpu_arena() || is_phycpu_arena() } + +#[inline] +#[must_use] +pub fn is_percpu_arena() -> bool { percpu_arenas().is_ok_and(is_equal_to!("percpu")) } + +#[inline] +#[must_use] +pub fn is_phycpu_arena() -> bool { percpu_arenas().is_ok_and(is_equal_to!("phycpu")) } + +pub fn percpu_arenas() -> Result<&'static str> { + let ptr = get::<*const c_char>(&mallctl!("opt.percpu_arena"))?; + //SAFETY: ptr points to a null-terminated string returned for opt.percpu_arena. 
+ let cstr = unsafe { CStr::from_ptr(ptr) }; + cstr.to_str().map_err(Into::into) +} + +pub fn arenas() -> Result { + get::(&mallctl!("arenas.narenas")).and_then(math::try_into) +} + +pub fn inc_epoch() -> Result { xchg(&mallctl!("epoch"), 1_u64) } + +pub fn acq_epoch() -> Result { xchg(&mallctl!("epoch"), 0_u64) } + +fn notify_by_arena(id: Option, mut key: Key) -> Result { + key[1] = id.unwrap_or(4096); + notify(&key) +} + +fn set_by_arena(id: Option, mut key: Key, val: T) -> Result where T: Copy + Debug, { - // SAFETY: T must be the exact expected type. - unsafe { mallctl::raw::write_mib(key.as_slice(), val) }.map_err(map_err) + key[1] = id.unwrap_or(4096); + set(&key, val) +} + +fn get_by_arena(id: Option, mut key: Key) -> Result +where + T: Copy + Debug, +{ + key[1] = id.unwrap_or(4096); + get(&key) +} + +fn notify(key: &Key) -> Result { xchg(key, ()) } + +fn set(key: &Key, val: T) -> Result +where + T: Copy + Debug, +{ + let _lock = CONTROL.write()?; + let res = xchg(key, val)?; + inc_epoch()?; + + Ok(res) } fn get(key: &Key) -> Result where T: Copy + Debug, { + acq_epoch()?; + acq_epoch()?; + // SAFETY: T must be perfectly valid to receive value. unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) } +fn xchg(key: &Key, val: T) -> Result +where + T: Copy + Debug, +{ + // SAFETY: T must be the exact expected type. + unsafe { mallctl::raw::update_mib(key.as_slice(), val) }.map_err(map_err) +} + fn key(name: &str) -> Result { // tikv asserts the output buffer length is tight to the number of required mibs // so we slice that down here. 
diff --git a/src/database/pool.rs b/src/database/pool.rs index f5600c36..86516c31 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -13,7 +13,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, err, error, implement, - result::DebugInspect, + result::{DebugInspect, LogDebugErr}, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, Error, Result, Server, @@ -289,6 +289,20 @@ fn worker_init(&self, id: usize) { // affinity is empty (no-op) if there's only one queue set_affinity(affinity.clone()); + + #[cfg(feature = "jemalloc")] + if affinity.clone().count() == 1 && conduwuit::alloc::je::is_affine_arena() { + use conduwuit::alloc::je::this_thread::{arena_id, set_arena}; + + let id = affinity.clone().next().expect("at least one id"); + + if let Ok(arena) = arena_id() { + if arena != id { + set_arena(id).log_debug_err().ok(); + } + } + } + debug!( ?group, affinity = ?affinity.collect::>(), diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index baf5336f..f774c37a 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -41,6 +41,7 @@ default = [ "gzip_compression", "io_uring", "jemalloc", + "jemalloc_conf", "media_thumbnail", "release_max_log_level", "systemd", @@ -85,6 +86,9 @@ jemalloc_prof = [ jemalloc_stats = [ "conduwuit-core/jemalloc_stats", ] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", +] media_thumbnail = [ "conduwuit-service/media_thumbnail", ] diff --git a/src/main/clap.rs b/src/main/clap.rs index d3d40491..2bb6f3f2 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -92,6 +92,22 @@ pub(crate) struct Args { require_equals(false), )] pub(crate) gc_on_park: Option, + + /// Toggles muzzy decay for jemalloc arenas associated with a tokio + /// worker (when worker-affinity is enabled). Setting to false releases + /// memory to the operating system using MADV_FREE without MADV_DONTNEED. 
+ /// Setting to false increases performance by reducing pagefaults, but + /// resident memory usage appears high until there is memory pressure. The + /// default is true unless the system has four or more cores. + #[arg( + long, + hide(true), + env = "CONDUWUIT_RUNTIME_GC_MUZZY", + action = ArgAction::Set, + num_args = 0..=1, + require_equals(false), + )] + pub(crate) gc_muzzy: Option, } /// Parse commandline arguments into structured data diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 315336b0..9f4f60f8 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -9,8 +9,12 @@ use std::{ }; use conduwuit::{ - result::LogErr, - utils::sys::compute::{nth_core_available, set_affinity}, + is_true, + result::LogDebugErr, + utils::{ + available_parallelism, + sys::compute::{nth_core_available, set_affinity}, + }, Result, }; use tokio::runtime::Builder; @@ -21,9 +25,11 @@ const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +const DISABLE_MUZZY_THRESHOLD: usize = 4; static WORKER_AFFINITY: OnceLock = OnceLock::new(); static GC_ON_PARK: OnceLock> = OnceLock::new(); +static GC_MUZZY: OnceLock> = OnceLock::new(); pub(super) fn new(args: &Args) -> Result { WORKER_AFFINITY @@ -34,6 +40,10 @@ pub(super) fn new(args: &Args) -> Result { .set(args.gc_on_park) .expect("set GC_ON_PARK from program argument"); + GC_MUZZY + .set(args.gc_muzzy) + .expect("set GC_MUZZY from program argument"); + let mut builder = Builder::new_multi_thread(); builder .enable_io() @@ -83,11 +93,13 @@ fn enable_histogram(builder: &mut Builder, args: &Args) { ), )] fn thread_start() { - if WORKER_AFFINITY - .get() - .copied() - .expect("WORKER_AFFINITY initialized by runtime::new()") - { + debug_assert_eq!( + Some(WORKER_NAME), + thread::current().name(), + "tokio worker name mismatch at thread start" + ); + + if WORKER_AFFINITY.get().is_some_and(is_true!()) { set_worker_affinity(); } } @@ 
-95,10 +107,6 @@ fn thread_start() { fn set_worker_affinity() { static CORES_OCCUPIED: AtomicUsize = AtomicUsize::new(0); - if thread::current().name() != Some(WORKER_NAME) { - return; - } - let handle = tokio::runtime::Handle::current(); let num_workers = handle.metrics().num_workers(); let i = CORES_OCCUPIED.fetch_add(1, Ordering::Relaxed); @@ -111,8 +119,33 @@ fn set_worker_affinity() { }; set_affinity(once(id)); + set_worker_mallctl(id); } +#[cfg(feature = "jemalloc")] +fn set_worker_mallctl(id: usize) { + use conduwuit::alloc::je::{ + is_affine_arena, + this_thread::{set_arena, set_muzzy_decay}, + }; + + if is_affine_arena() { + set_arena(id).log_debug_err().ok(); + } + + let muzzy_option = GC_MUZZY + .get() + .expect("GC_MUZZY initialized by runtime::new()"); + + let muzzy_auto_disable = available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { + set_muzzy_decay(-1).log_debug_err().ok(); + } +} + +#[cfg(not(feature = "jemalloc"))] +fn set_worker_mallctl(_: usize) {} + #[tracing::instrument( name = "join", level = "debug", @@ -157,7 +190,9 @@ fn thread_park() { fn gc_on_park() { #[cfg(feature = "jemalloc")] - conduwuit::alloc::je::this_thread::decay().log_err().ok(); + conduwuit::alloc::je::this_thread::decay() + .log_debug_err() + .ok(); } #[cfg(tokio_unstable)] From ac944496c15bc476bc9964e034b7fcea737cc733 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 14 Jan 2025 19:17:45 +0000 Subject: [PATCH 0546/1248] optimize statekey-from-short loopsite Signed-off-by: Jason Volk --- .../rooms/event_handler/resolve_state.rs | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 8640c582..edce880d 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -6,10 +6,10 @@ use std::{ use conduwuit::{ debug, 
err, implement, - utils::stream::{automatic_width, IterStream, WidebandExt}, + utils::stream::{automatic_width, IterStream, ReadyExt, WidebandExt}, Result, }; -use futures::{FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt}; use ruma::{ state_res::{self, StateMap}, OwnedEventId, RoomId, RoomVersionId, @@ -59,17 +59,18 @@ pub async fn resolve_state( let fork_states: Vec> = fork_states .into_iter() .stream() - .wide_then(|fork_state| { - fork_state - .into_iter() - .stream() - .wide_filter_map(|(k, id)| { - self.services - .short - .get_statekey_from_short(k) - .map_ok_or_else(|_| None, move |(ty, st_key)| Some(((ty, st_key), id))) - }) + .wide_then(|fork_state| async move { + let shortstatekeys = fork_state.keys().copied().stream(); + + let event_ids = fork_state.values().cloned().stream().boxed(); + + self.services + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(event_ids) + .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) .collect() + .await }) .collect() .await; From 388730d6dd7dc69e1243218e556946bb35fd7461 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 07:38:19 +0000 Subject: [PATCH 0547/1248] add TryWideband trait to similar to TryBroadband Signed-off-by: Jason Volk --- src/core/utils/stream/mod.rs | 2 + src/core/utils/stream/try_wideband.rs | 57 +++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 src/core/utils/stream/try_wideband.rs diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 0fee0a3a..c7bfa021 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -9,6 +9,7 @@ mod tools; mod try_broadband; mod try_ready; mod try_tools; +mod try_wideband; mod wideband; pub use band::{ @@ -25,4 +26,5 @@ pub use tools::Tools; pub use try_broadband::TryBroadbandExt; pub use try_ready::TryReadyExt; pub use try_tools::TryTools; +pub use try_wideband::TryWidebandExt; pub use wideband::WidebandExt; diff --git 
a/src/core/utils/stream/try_wideband.rs b/src/core/utils/stream/try_wideband.rs new file mode 100644 index 00000000..0af3c8ec --- /dev/null +++ b/src/core/utils/stream/try_wideband.rs @@ -0,0 +1,57 @@ +//! Synchronous combinator extensions to futures::TryStream + +use futures::{TryFuture, TryStream, TryStreamExt}; + +use super::automatic_width; +use crate::Result; + +/// Concurrency extensions to augment futures::TryStreamExt. wide_ combinators +/// produce in-order results +pub trait TryWidebandExt +where + Self: TryStream> + Send + Sized, +{ + fn widen_and_then( + self, + n: N, + f: F, + ) -> impl TryStream> + Send + where + N: Into>, + F: Fn(Self::Ok) -> Fut + Send, + Fut: TryFuture> + Send, + U: Send; + + fn wide_and_then( + self, + f: F, + ) -> impl TryStream> + Send + where + F: Fn(Self::Ok) -> Fut + Send, + Fut: TryFuture> + Send, + U: Send, + { + self.widen_and_then(None, f) + } +} + +impl TryWidebandExt for S +where + S: TryStream> + Send + Sized, + E: Send, +{ + fn widen_and_then( + self, + n: N, + f: F, + ) -> impl TryStream> + Send + where + N: Into>, + F: Fn(Self::Ok) -> Fut + Send, + Fut: TryFuture> + Send, + U: Send, + { + self.map_ok(f) + .try_buffered(n.into().unwrap_or_else(automatic_width)) + } +} From ea25dc04b26fb4c3dffb456fd1c8e4b7c066b95f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 07:38:32 +0000 Subject: [PATCH 0548/1248] parallelize current and incoming fork-state fetch Signed-off-by: Jason Volk --- .../rooms/event_handler/resolve_state.rs | 32 +++++++++++-------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index edce880d..0526d31c 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -6,10 +6,10 @@ use std::{ use conduwuit::{ debug, err, implement, - utils::stream::{automatic_width, IterStream, ReadyExt, WidebandExt}, + 
utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ state_res::{self, StateMap}, OwnedEventId, RoomId, RoomVersionId, @@ -40,20 +40,24 @@ pub async fn resolve_state( .await?; let fork_states = [current_state_ids, incoming_state]; - let mut auth_chain_sets = Vec::with_capacity(fork_states.len()); - for state in &fork_states { - let starting_events = state.values().map(Borrow::borrow); + let auth_chain_sets: Vec> = fork_states + .iter() + .try_stream() + .wide_and_then(|state| async move { + let starting_events = state.values().map(Borrow::borrow); - let auth_chain: HashSet = self - .services - .auth_chain - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .collect(); + let auth_chain = self + .services + .auth_chain + .get_event_ids(room_id, starting_events) + .await? + .into_iter() + .collect(); - auth_chain_sets.push(auth_chain); - } + Ok(auth_chain) + }) + .try_collect() + .await?; debug!("Loading fork states"); let fork_states: Vec> = fork_states From 4c0ae8c2f708cc2d950f6a8269844ae42069d55a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 09:02:50 +0000 Subject: [PATCH 0549/1248] parallelize get_auth_chain outer Signed-off-by: Jason Volk --- src/service/rooms/auth_chain/mod.rs | 112 ++++++++++++++-------------- 1 file changed, 55 insertions(+), 57 deletions(-) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 74064701..fb7b6163 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -7,11 +7,14 @@ use std::{ }; use conduwuit::{ - debug, debug_error, trace, - utils::{stream::ReadyExt, IterStream}, + at, debug, debug_error, trace, + utils::{ + stream::{ReadyExt, TryBroadbandExt}, + IterStream, + }, validated, warn, Err, Result, }; -use futures::{Stream, StreamExt}; +use futures::{Stream, StreamExt, 
TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; @@ -112,66 +115,61 @@ impl Service { "start", ); - let mut hits: usize = 0; - let mut misses: usize = 0; - let mut full_auth_chain = Vec::with_capacity(buckets.len()); - for chunk in buckets { - if chunk.is_empty() { - continue; - } + let full_auth_chain: Vec<_> = buckets + .into_iter() + .try_stream() + .broad_and_then(|chunk| async move { + let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - let chunk_key: Vec = - chunk.iter().map(|(short, _)| short).copied().collect(); - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - trace!("Found cache entry for whole chunk"); - full_auth_chain.extend(cached.iter().copied()); - hits = hits.saturating_add(1); - continue; - } + if chunk_key.is_empty() { + return Ok(Vec::new()); + } - let mut hits2: usize = 0; - let mut misses2: usize = 0; - let mut chunk_cache = Vec::with_capacity(chunk.len()); - for (sevent_id, event_id) in chunk { - if let Ok(cached) = self.get_cached_eventid_authchain(&[sevent_id]).await { - trace!(?event_id, "Found cache entry for event"); - chunk_cache.extend(cached.iter().copied()); - hits2 = hits2.saturating_add(1); - } else { - let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain(vec![sevent_id], &auth_chain); - chunk_cache.extend(auth_chain.iter()); - misses2 = misses2.saturating_add(1); - debug!( - event_id = ?event_id, - chain_length = ?auth_chain.len(), - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed event" - ); - }; - } + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { + return Ok(cached.to_vec()); + } - chunk_cache.sort_unstable(); - chunk_cache.dedup(); - self.cache_auth_chain_vec(chunk_key, &chunk_cache); - full_auth_chain.extend(chunk_cache.iter()); - misses = misses.saturating_add(1); - debug!( - chunk_cache_length = ?chunk_cache.len(), - hits = ?hits2, - misses = ?misses2, 
- elapsed = ?started.elapsed(), - "Chunk missed", - ); - } + let chunk_cache: Vec<_> = chunk + .into_iter() + .try_stream() + .broad_and_then(|(shortid, event_id)| async move { + if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { + return Ok(cached.to_vec()); + } + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; + self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); + debug!( + ?event_id, + elapsed = ?started.elapsed(), + "Cache missed event" + ); + + Ok(auth_chain) + }) + .try_collect() + .await?; + + let mut chunk_cache: Vec<_> = chunk_cache.into_iter().flatten().collect(); + chunk_cache.sort_unstable(); + chunk_cache.dedup(); + self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); + debug!( + chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed chunk", + ); + + Ok(chunk_cache) + }) + .try_collect() + .await?; + + let mut full_auth_chain: Vec<_> = full_auth_chain.into_iter().flatten().collect(); full_auth_chain.sort_unstable(); full_auth_chain.dedup(); debug!( chain_length = ?full_auth_chain.len(), - hits = ?hits, - misses = ?misses, elapsed = ?started.elapsed(), "done", ); @@ -184,7 +182,7 @@ impl Service { &self, room_id: &RoomId, event_id: &EventId, - ) -> Result> { + ) -> Result> { let mut todo: VecDeque<_> = [event_id.to_owned()].into(); let mut found = HashSet::new(); @@ -226,7 +224,7 @@ impl Service { } } - Ok(found) + Ok(found.into_iter().collect()) } #[inline] From 610129d16265f702b7bfbf0ada019fc77766e10f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 09:05:49 +0000 Subject: [PATCH 0550/1248] outdent auth_chain Service impl Signed-off-by: Jason Volk --- src/service/rooms/auth_chain/mod.rs | 429 ++++++++++++++-------------- 1 file changed, 219 insertions(+), 210 deletions(-) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index fb7b6163..df2663b2 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ 
b/src/service/rooms/auth_chain/mod.rs @@ -7,14 +7,14 @@ use std::{ }; use conduwuit::{ - at, debug, debug_error, trace, + at, debug, debug_error, implement, trace, utils::{ stream::{ReadyExt, TryBroadbandExt}, IterStream, }, validated, warn, Err, Result, }; -use futures::{Stream, StreamExt, TryStreamExt}; +use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; @@ -44,213 +44,222 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - pub async fn event_ids_iter<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, - ) -> Result + Send + '_> - where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, - { - let stream = self - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .stream(); +#[implement(Service)] +pub async fn event_ids_iter<'a, I>( + &'a self, + room_id: &RoomId, + starting_events: I, +) -> Result + Send + '_> +where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, +{ + let stream = self + .get_event_ids(room_id, starting_events) + .await? + .into_iter() + .stream(); - Ok(stream) - } - - pub async fn get_event_ids<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, - ) -> Result> - where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, - { - let chain = self.get_auth_chain(room_id, starting_events).await?; - let event_ids = self - .services - .short - .multi_get_eventid_from_short(chain.into_iter().stream()) - .ready_filter_map(Result::ok) - .collect() - .await; - - Ok(event_ids) - } - - #[tracing::instrument(name = "auth_chain", level = "debug", skip_all)] - pub async fn get_auth_chain<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, - ) -> Result> - where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, - { - const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? 
- const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); - - let started = std::time::Instant::now(); - let mut starting_ids = self - .services - .short - .multi_get_or_create_shorteventid(starting_events.clone()) - .zip(starting_events.clone().stream()) - .boxed(); - - let mut buckets = [BUCKET; NUM_BUCKETS]; - while let Some((short, starting_event)) = starting_ids.next().await { - let bucket: usize = short.try_into()?; - let bucket: usize = validated!(bucket % NUM_BUCKETS); - buckets[bucket].insert((short, starting_event)); - } - - debug!( - starting_events = ?starting_events.count(), - elapsed = ?started.elapsed(), - "start", - ); - - let full_auth_chain: Vec<_> = buckets - .into_iter() - .try_stream() - .broad_and_then(|chunk| async move { - let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - - if chunk_key.is_empty() { - return Ok(Vec::new()); - } - - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - return Ok(cached.to_vec()); - } - - let chunk_cache: Vec<_> = chunk - .into_iter() - .try_stream() - .broad_and_then(|(shortid, event_id)| async move { - if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { - return Ok(cached.to_vec()); - } - - let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); - debug!( - ?event_id, - elapsed = ?started.elapsed(), - "Cache missed event" - ); - - Ok(auth_chain) - }) - .try_collect() - .await?; - - let mut chunk_cache: Vec<_> = chunk_cache.into_iter().flatten().collect(); - chunk_cache.sort_unstable(); - chunk_cache.dedup(); - self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); - debug!( - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed chunk", - ); - - Ok(chunk_cache) - }) - .try_collect() - .await?; - - let mut full_auth_chain: Vec<_> = full_auth_chain.into_iter().flatten().collect(); - full_auth_chain.sort_unstable(); - 
full_auth_chain.dedup(); - debug!( - chain_length = ?full_auth_chain.len(), - elapsed = ?started.elapsed(), - "done", - ); - - Ok(full_auth_chain) - } - - #[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] - async fn get_auth_chain_inner( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> Result> { - let mut todo: VecDeque<_> = [event_id.to_owned()].into(); - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop_front() { - trace!(?event_id, "processing auth event"); - - match self.services.timeline.get_pdu(&event_id).await { - | Err(e) => { - debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"); - }, - | Ok(pdu) => { - if pdu.room_id != room_id { - return Err!(Request(Forbidden(error!( - ?event_id, - ?room_id, - wrong_room_id = ?pdu.room_id, - "auth event for incorrect room" - )))); - } - - for auth_event in &pdu.auth_events { - let sauthevent = self - .services - .short - .get_or_create_shorteventid(auth_event) - .await; - - if found.insert(sauthevent) { - trace!( - ?event_id, - ?auth_event, - "adding auth event to processing queue" - ); - - todo.push_back(auth_event.clone()); - } - } - }, - } - } - - Ok(found.into_iter().collect()) - } - - #[inline] - pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { - self.db.get_cached_eventid_authchain(key).await - } - - #[tracing::instrument(skip_all, level = "debug")] - pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { - let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); - - self.db.cache_auth_chain(key, val); - } - - #[tracing::instrument(skip_all, level = "debug")] - pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &[ShortEventId]) { - let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); - - self.db.cache_auth_chain(key, val); - } - - pub fn get_cache_usage(&self) -> (usize, usize) { - let cache = self.db.auth_chain_cache.lock().expect("locked"); - - (cache.len(), 
cache.capacity()) - } - - pub fn clear_cache(&self) { self.db.auth_chain_cache.lock().expect("locked").clear(); } + Ok(stream) } + +#[implement(Service)] +pub async fn get_event_ids<'a, I>( + &'a self, + room_id: &RoomId, + starting_events: I, +) -> Result> +where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, +{ + let chain = self.get_auth_chain(room_id, starting_events).await?; + let event_ids = self + .services + .short + .multi_get_eventid_from_short(chain.into_iter().stream()) + .ready_filter_map(Result::ok) + .collect() + .await; + + Ok(event_ids) +} + +#[implement(Service)] +#[tracing::instrument(name = "auth_chain", level = "debug", skip_all)] +pub async fn get_auth_chain<'a, I>( + &'a self, + room_id: &RoomId, + starting_events: I, +) -> Result> +where + I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, +{ + const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? + const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); + + let started = std::time::Instant::now(); + let mut starting_ids = self + .services + .short + .multi_get_or_create_shorteventid(starting_events.clone()) + .zip(starting_events.clone().stream()) + .boxed(); + + let mut buckets = [BUCKET; NUM_BUCKETS]; + while let Some((short, starting_event)) = starting_ids.next().await { + let bucket: usize = short.try_into()?; + let bucket: usize = validated!(bucket % NUM_BUCKETS); + buckets[bucket].insert((short, starting_event)); + } + + debug!( + starting_events = ?starting_events.count(), + elapsed = ?started.elapsed(), + "start", + ); + + let full_auth_chain: Vec = buckets + .into_iter() + .try_stream() + .broad_and_then(|chunk| async move { + let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); + + if chunk_key.is_empty() { + return Ok(Vec::new()); + } + + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { + return Ok(cached.to_vec()); + } + + let chunk_cache: Vec<_> = chunk + .into_iter() + .try_stream() + 
.broad_and_then(|(shortid, event_id)| async move { + if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { + return Ok(cached.to_vec()); + } + + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; + self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); + debug!( + ?event_id, + elapsed = ?started.elapsed(), + "Cache missed event" + ); + + Ok(auth_chain) + }) + .try_collect() + .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) + .map_ok(|mut chunk_cache: Vec<_>| { + chunk_cache.sort_unstable(); + chunk_cache.dedup(); + chunk_cache + }) + .await?; + + self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); + debug!( + chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed chunk", + ); + + Ok(chunk_cache) + }) + .try_collect() + .map_ok(|auth_chain: Vec<_>| auth_chain.into_iter().flatten().collect()) + .map_ok(|mut full_auth_chain: Vec<_>| { + full_auth_chain.sort_unstable(); + full_auth_chain.dedup(); + full_auth_chain + }) + .await?; + + debug!( + chain_length = ?full_auth_chain.len(), + elapsed = ?started.elapsed(), + "done", + ); + + Ok(full_auth_chain) +} + +#[implement(Service)] +#[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] +async fn get_auth_chain_inner( + &self, + room_id: &RoomId, + event_id: &EventId, +) -> Result> { + let mut todo: VecDeque<_> = [event_id.to_owned()].into(); + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop_front() { + trace!(?event_id, "processing auth event"); + + match self.services.timeline.get_pdu(&event_id).await { + | Err(e) => { + debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"); + }, + | Ok(pdu) => { + if pdu.room_id != room_id { + return Err!(Request(Forbidden(error!( + ?event_id, + ?room_id, + wrong_room_id = ?pdu.room_id, + "auth event for incorrect room" + )))); + } + + for auth_event in &pdu.auth_events { + let sauthevent = self + 
.services + .short + .get_or_create_shorteventid(auth_event) + .await; + + if found.insert(sauthevent) { + trace!(?event_id, ?auth_event, "adding auth event to processing queue"); + + todo.push_back(auth_event.clone()); + } + } + }, + } + } + + Ok(found.into_iter().collect()) +} + +#[implement(Service)] +#[inline] +pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { + self.db.get_cached_eventid_authchain(key).await +} + +#[implement(Service)] +#[tracing::instrument(skip_all, level = "debug")] +pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { + let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); + + self.db.cache_auth_chain(key, val); +} + +#[implement(Service)] +#[tracing::instrument(skip_all, level = "debug")] +pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &[ShortEventId]) { + let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); + + self.db.cache_auth_chain(key, val); +} + +#[implement(Service)] +pub fn get_cache_usage(&self) -> (usize, usize) { + let cache = self.db.auth_chain_cache.lock().expect("locked"); + + (cache.len(), cache.capacity()) +} + +#[implement(Service)] +pub fn clear_cache(&self) { self.db.auth_chain_cache.lock().expect("locked").clear(); } From 277b4951e8e7f28e6319a17e91229e77c9db090d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 11:50:17 +0000 Subject: [PATCH 0551/1248] add compression-shaping; tweak default compression levels Signed-off-by: Jason Volk --- conduwuit-example.toml | 8 +++++++- src/core/config/mod.rs | 8 +++++++- src/database/engine/cf_opts.rs | 32 +++++++++++++++++++++++++++---- src/database/engine/descriptor.rs | 19 ++++++++++++++++-- 4 files changed, 59 insertions(+), 8 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 54143ced..79efbd14 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -818,6 +818,9 @@ # magic number and translated to the library's default compression level # as 
they all differ. See their `kDefaultCompressionLevel`. # +# Note when using the default value we may override it with a setting +# tailored specifically conduwuit. +# #rocksdb_compression_level = 32767 # Level of compression the specified compression algorithm for the @@ -831,6 +834,9 @@ # less likely for this data to be used. Research your chosen compression # algorithm. # +# Note when using the default value we may override it with a setting +# tailored specifically conduwuit. +# #rocksdb_bottommost_compression_level = 32767 # Whether to enable RocksDB's "bottommost_compression". @@ -842,7 +848,7 @@ # # See https://github.com/facebook/rocksdb/wiki/Compression for more details. # -#rocksdb_bottommost_compression = false +#rocksdb_bottommost_compression = true # Database recovery mode (for RocksDB WAL corruption). # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index cb42940b..5cfed0b9 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -958,6 +958,9 @@ pub struct Config { /// magic number and translated to the library's default compression level /// as they all differ. See their `kDefaultCompressionLevel`. /// + /// Note when using the default value we may override it with a setting + /// tailored specifically conduwuit. + /// /// default: 32767 #[serde(default = "default_rocksdb_compression_level")] pub rocksdb_compression_level: i32, @@ -973,6 +976,9 @@ pub struct Config { /// less likely for this data to be used. Research your chosen compression /// algorithm. /// + /// Note when using the default value we may override it with a setting + /// tailored specifically conduwuit. + /// /// default: 32767 #[serde(default = "default_rocksdb_bottommost_compression_level")] pub rocksdb_bottommost_compression_level: i32, @@ -985,7 +991,7 @@ pub struct Config { /// if you're trying to reduce storage usage from the database. /// /// See https://github.com/facebook/rocksdb/wiki/Compression for more details. 
- #[serde(default)] + #[serde(default = "true_fn")] pub rocksdb_bottommost_compression: bool, /// Database recovery mode (for RocksDB WAL corruption). diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 7b3a1d49..da636718 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -8,6 +8,8 @@ use rocksdb::{ use super::descriptor::{CacheDisp, Descriptor}; use crate::{util::map_err, Context}; +pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; + /// Adjust options for the specific column by name. Provide the result of /// db_options() as the argument to this function and use the return value in /// the arguments to open the specific column. @@ -45,7 +47,15 @@ fn descriptor_cf_options( opts.set_compaction_pri(desc.compaction_pri); opts.set_universal_compaction_options(&uc_options(&desc)); + let compression_shape: Vec<_> = desc + .compression_shape + .into_iter() + .map(|val| (val > 0).then_some(desc.compression)) + .map(|val| val.unwrap_or(CompressionType::None)) + .collect(); + opts.set_compression_type(desc.compression); + opts.set_compression_per_level(compression_shape.as_slice()); opts.set_compression_options(-14, desc.compression_level, 0, 0); // -14 w_bits used by zlib. 
if let Some(&bottommost_level) = desc.bottommost_level.as_ref() { opts.set_bottommost_compression_type(desc.compression); @@ -95,10 +105,24 @@ fn set_compression(desc: &mut Descriptor, config: &Config) { | _ => CompressionType::Zstd, }; - desc.compression_level = config.rocksdb_compression_level; - desc.bottommost_level = config - .rocksdb_bottommost_compression - .then_some(config.rocksdb_bottommost_compression_level); + let can_override_level = config.rocksdb_compression_level == SENTINEL_COMPRESSION_LEVEL + && desc.compression == CompressionType::Zstd; + + if !can_override_level { + desc.compression_level = config.rocksdb_compression_level; + } + + let can_override_bottom = config.rocksdb_bottommost_compression_level + == SENTINEL_COMPRESSION_LEVEL + && desc.compression == CompressionType::Zstd; + + if !can_override_bottom { + desc.bottommost_level = Some(config.rocksdb_bottommost_compression_level); + } + + if !config.rocksdb_bottommost_compression { + desc.bottommost_level = None; + } } fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 234ca2bf..2c84ac53 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -4,6 +4,8 @@ use rocksdb::{ DBCompressionType as CompressionType, }; +use super::cf_opts::SENTINEL_COMPRESSION_LEVEL; + #[derive(Debug, Clone, Copy)] pub(crate) enum CacheDisp { Unique, @@ -32,6 +34,7 @@ pub(crate) struct Descriptor { pub(crate) compaction: CompactionStyle, pub(crate) compaction_pri: CompactionPri, pub(crate) compression: CompressionType, + pub(crate) compression_shape: [i32; 7], pub(crate) compression_level: i32, pub(crate) bottommost_level: Option, pub(crate) block_index_hashing: Option, @@ -58,8 +61,9 @@ pub(crate) static BASE: Descriptor = Descriptor { compaction: CompactionStyle::Level, compaction_pri: CompactionPri::MinOverlappingRatio, compression: CompressionType::Zstd, - compression_level: 32767, 
- bottommost_level: Some(32767), + compression_shape: [0, 0, 0, 1, 1, 1, 1], + compression_level: SENTINEL_COMPRESSION_LEVEL, + bottommost_level: Some(SENTINEL_COMPRESSION_LEVEL), block_index_hashing: None, cache_shards: 64, }; @@ -68,6 +72,8 @@ pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, write_size: 1024 * 1024 * 32, cache_shards: 128, + compression_level: -3, + bottommost_level: Some(4), ..BASE }; @@ -77,6 +83,9 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { level_size: 1024 * 1024 * 32, file_size: 1024 * 1024 * 2, cache_shards: 128, + compression_level: -1, + bottommost_level: Some(6), + compression_shape: [0, 0, 1, 1, 1, 1, 1], ..BASE }; @@ -88,6 +97,9 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { index_size: 512, block_size: 512, cache_shards: 64, + compression_level: -4, + bottommost_level: Some(1), + compression_shape: [0, 0, 0, 0, 0, 1, 1], ..RANDOM }; @@ -99,5 +111,8 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { block_size: 512, cache_shards: 64, block_index_hashing: Some(false), + compression_level: -2, + bottommost_level: Some(4), + compression_shape: [0, 0, 0, 0, 1, 1, 1], ..SEQUENTIAL }; From 19f6d9d0e1dd2e40bb710bdc0e876e4f2fc02917 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 20 Jan 2025 12:12:44 +0000 Subject: [PATCH 0552/1248] add index-compression and auto-readahead to descriptor Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 16 +++++++++++----- src/database/engine/descriptor.rs | 12 ++++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index da636718..1230081c 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -83,11 +83,17 @@ fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache table.disable_cache(); } - opts.set_options_from_string( - 
"{{block_based_table_factory={num_file_reads_for_auto_readahead=0;\ - max_auto_readahead_size=524288;initial_auto_readahead_size=16384}}}", - ) - .map_err(map_err)?; + let string = format!( + "{{block_based_table_factory={{num_file_reads_for_auto_readahead={0};\ + max_auto_readahead_size={1};initial_auto_readahead_size={2};\ + enable_index_compression={3}}}}}", + desc.auto_readahead_thresh, + desc.auto_readahead_max, + desc.auto_readahead_init, + desc.compressed_index, + ); + + opts.set_options_from_string(&string).map_err(map_err)?; opts.set_block_based_table_factory(&table); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 2c84ac53..6ce8b5ad 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -34,11 +34,15 @@ pub(crate) struct Descriptor { pub(crate) compaction: CompactionStyle, pub(crate) compaction_pri: CompactionPri, pub(crate) compression: CompressionType, + pub(crate) compressed_index: bool, pub(crate) compression_shape: [i32; 7], pub(crate) compression_level: i32, pub(crate) bottommost_level: Option, pub(crate) block_index_hashing: Option, pub(crate) cache_shards: u32, + pub(crate) auto_readahead_thresh: u32, + pub(crate) auto_readahead_init: usize, + pub(crate) auto_readahead_max: usize, } pub(crate) static BASE: Descriptor = Descriptor { @@ -61,11 +65,15 @@ pub(crate) static BASE: Descriptor = Descriptor { compaction: CompactionStyle::Level, compaction_pri: CompactionPri::MinOverlappingRatio, compression: CompressionType::Zstd, + compressed_index: true, compression_shape: [0, 0, 0, 1, 1, 1, 1], compression_level: SENTINEL_COMPRESSION_LEVEL, bottommost_level: Some(SENTINEL_COMPRESSION_LEVEL), block_index_hashing: None, cache_shards: 64, + auto_readahead_thresh: 0, + auto_readahead_init: 1024 * 16, + auto_readahead_max: 1024 * 1024 * 2, }; pub(crate) static RANDOM: Descriptor = Descriptor { @@ -74,6 +82,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { cache_shards: 128, 
compression_level: -3, bottommost_level: Some(4), + compressed_index: true, ..BASE }; @@ -86,6 +95,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { compression_level: -1, bottommost_level: Some(6), compression_shape: [0, 0, 1, 1, 1, 1, 1], + compressed_index: false, ..BASE }; @@ -100,6 +110,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { compression_level: -4, bottommost_level: Some(1), compression_shape: [0, 0, 0, 0, 0, 1, 1], + compressed_index: false, ..RANDOM }; @@ -114,5 +125,6 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compression_level: -2, bottommost_level: Some(4), compression_shape: [0, 0, 0, 0, 1, 1, 1], + compressed_index: false, ..SEQUENTIAL }; From 8ab825b12c08324977898b26e9513b197750b9bb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 21 Jan 2025 20:43:38 +0000 Subject: [PATCH 0553/1248] add stream parallelism extension combinators Signed-off-by: Jason Volk --- src/core/utils/stream/mod.rs | 2 + src/core/utils/stream/try_broadband.rs | 6 +-- src/core/utils/stream/try_parallel.rs | 71 ++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 src/core/utils/stream/try_parallel.rs diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index c7bfa021..23455322 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -7,6 +7,7 @@ mod iter_stream; mod ready; mod tools; mod try_broadband; +mod try_parallel; mod try_ready; mod try_tools; mod try_wideband; @@ -24,6 +25,7 @@ pub use iter_stream::IterStream; pub use ready::ReadyExt; pub use tools::Tools; pub use try_broadband::TryBroadbandExt; +pub use try_parallel::TryParallelExt; pub use try_ready::TryReadyExt; pub use try_tools::TryTools; pub use try_wideband::TryWidebandExt; diff --git a/src/core/utils/stream/try_broadband.rs b/src/core/utils/stream/try_broadband.rs index c72fcc2c..361b4a92 100644 --- a/src/core/utils/stream/try_broadband.rs +++ 
b/src/core/utils/stream/try_broadband.rs @@ -18,7 +18,7 @@ where ) -> impl TryStream> + Send where N: Into>, - F: Fn(Self::Ok) -> Fut + Send + Sync, + F: Fn(Self::Ok) -> Fut + Send, Fut: TryFuture> + Send; fn broad_and_then( @@ -26,7 +26,7 @@ where f: F, ) -> impl TryStream> + Send where - F: Fn(Self::Ok) -> Fut + Send + Sync, + F: Fn(Self::Ok) -> Fut + Send, Fut: TryFuture> + Send, { self.broadn_and_then(None, f) @@ -44,7 +44,7 @@ where ) -> impl TryStream> + Send where N: Into>, - F: Fn(Self::Ok) -> Fut + Send + Sync, + F: Fn(Self::Ok) -> Fut + Send, Fut: TryFuture> + Send, { self.map_ok(f) diff --git a/src/core/utils/stream/try_parallel.rs b/src/core/utils/stream/try_parallel.rs new file mode 100644 index 00000000..7f8a63b1 --- /dev/null +++ b/src/core/utils/stream/try_parallel.rs @@ -0,0 +1,71 @@ +//! Parallelism stream combinator extensions to futures::Stream + +use futures::{stream::TryStream, TryFutureExt}; +use tokio::{runtime, task::JoinError}; + +use super::TryBroadbandExt; +use crate::{utils::sys::available_parallelism, Error, Result}; + +/// Parallelism extensions to augment futures::StreamExt. These combinators are +/// for computation-oriented workloads, unlike -band combinators for I/O +/// workloads; these default to the available compute parallelism for the +/// system. Threads are currently drawn from the tokio-spawn pool. Results are +/// unordered. 
+pub trait TryParallelExt +where + Self: TryStream> + Send + Sized, + E: From + From + Send + 'static, + T: Send + 'static, +{ + fn paralleln_and_then( + self, + h: H, + n: N, + f: F, + ) -> impl TryStream> + Send + where + N: Into>, + H: Into>, + F: Fn(Self::Ok) -> Result + Clone + Send + 'static, + U: Send + 'static; + + fn parallel_and_then( + self, + h: H, + f: F, + ) -> impl TryStream> + Send + where + H: Into>, + F: Fn(Self::Ok) -> Result + Clone + Send + 'static, + U: Send + 'static, + { + self.paralleln_and_then(h, None, f) + } +} + +impl TryParallelExt for S +where + S: TryStream> + Send + Sized, + E: From + From + Send + 'static, + T: Send + 'static, +{ + fn paralleln_and_then( + self, + h: H, + n: N, + f: F, + ) -> impl TryStream> + Send + where + N: Into>, + H: Into>, + F: Fn(Self::Ok) -> Result + Clone + Send + 'static, + U: Send + 'static, + { + let n = n.into().unwrap_or_else(available_parallelism); + let h = h.into().unwrap_or_else(runtime::Handle::current); + self.broadn_and_then(n, move |val| { + let (h, f) = (h.clone(), f.clone()); + async move { h.spawn_blocking(move || f(val)).map_err(E::from).await? 
} + }) + } +} From dda27ffcb1a6d9f1ff6dafebb6203cb9cb8c2f22 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 12:05:07 +0000 Subject: [PATCH 0554/1248] add some compaction related interfaces Signed-off-by: Jason Volk --- Cargo.toml | 1 + src/admin/mod.rs | 1 + src/admin/query/raw.rs | 104 ++++++++++++++++++++++++++++++++++-- src/database/engine.rs | 23 ++++++-- src/database/map.rs | 1 + src/database/map/compact.rs | 62 +++++++++++++++++++++ src/database/mod.rs | 4 +- 7 files changed, 188 insertions(+), 8 deletions(-) create mode 100644 src/database/map/compact.rs diff --git a/Cargo.toml b/Cargo.toml index 4d738a11..f9e3b6db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -875,6 +875,7 @@ enum_glob_use = { level = "allow", priority = 1 } if_not_else = { level = "allow", priority = 1 } if_then_some_else_none = { level = "allow", priority = 1 } inline_always = { level = "allow", priority = 1 } +match_bool = { level = "allow", priority = 1 } missing_docs_in_private_items = { level = "allow", priority = 1 } missing_errors_doc = { level = "allow", priority = 1 } missing_panics_doc = { level = "allow", priority = 1 } diff --git a/src/admin/mod.rs b/src/admin/mod.rs index ac51104a..695155e8 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -1,6 +1,7 @@ #![recursion_limit = "192"] #![allow(clippy::wildcard_imports)] #![allow(clippy::enum_glob_use)] +#![allow(clippy::too_many_arguments)] pub(crate) mod admin; pub(crate) mod command; diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 678d21c9..ac5e8976 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -2,13 +2,13 @@ use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; use clap::Subcommand; use conduwuit::{ - apply, at, + apply, at, is_zero, utils::{ - stream::{ReadyExt, TryIgnore}, + stream::{ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, IterStream, }, - Result, + Err, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use 
ruma::events::room::message::RoomMessageEventContent; @@ -121,6 +121,104 @@ pub(crate) enum RawCommand { /// Key prefix prefix: Option, }, + + /// - Compact database + Compact { + #[arg(short, long, alias("column"))] + map: Option>, + + #[arg(long)] + start: Option, + + #[arg(long)] + stop: Option, + + #[arg(long)] + from: Option, + + #[arg(long)] + into: Option, + + /// There is one compaction job per column; then this controls how many + /// columns are compacted in parallel. If zero, one compaction job is + /// still run at a time here, but in exclusive-mode blocking any other + /// automatic compaction jobs until complete. + #[arg(long)] + parallelism: Option, + + #[arg(long, default_value("false"))] + exhaustive: bool, + }, +} + +#[admin_command] +pub(super) async fn compact( + &self, + map: Option>, + start: Option, + stop: Option, + from: Option, + into: Option, + parallelism: Option, + exhaustive: bool, +) -> Result { + use conduwuit_database::compact::Options; + + let default_all_maps = map + .is_none() + .then(|| { + self.services + .db + .keys() + .map(Deref::deref) + .map(ToOwned::to_owned) + }) + .into_iter() + .flatten(); + + let maps: Vec<_> = map + .unwrap_or_default() + .into_iter() + .chain(default_all_maps) + .map(|map| self.services.db.get(&map)) + .filter_map(Result::ok) + .cloned() + .collect(); + + if maps.is_empty() { + return Err!("--map argument invalid. 
not found in database"); + } + + let range = ( + start.as_ref().map(String::as_bytes).map(Into::into), + stop.as_ref().map(String::as_bytes).map(Into::into), + ); + + let options = Options { + range, + level: (from, into), + exclusive: parallelism.is_some_and(is_zero!()), + exhaustive, + }; + + let runtime = self.services.server.runtime().clone(); + let parallelism = parallelism.unwrap_or(1); + let results = maps + .into_iter() + .try_stream() + .paralleln_and_then(runtime, parallelism, move |map| { + map.compact_blocking(options.clone())?; + Ok(map.name().to_owned()) + }) + .collect::>(); + + let timer = Instant::now(); + let results = results.await; + let query_time = timer.elapsed(); + self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await?; + + Ok(RoomMessageEventContent::text_plain("")) } #[admin_command] diff --git a/src/database/engine.rs b/src/database/engine.rs index 2958f73f..8be9eecc 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -18,9 +18,16 @@ use std::{ }; use conduwuit::{debug, info, warn, Err, Result}; -use rocksdb::{AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded}; +use rocksdb::{ + AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded, + WaitForCompactOptions, +}; -use crate::{pool::Pool, result, Context}; +use crate::{ + pool::Pool, + util::{map_err, result}, + Context, +}; pub struct Engine { pub(super) read_only: bool, @@ -55,12 +62,22 @@ impl Engine { #[tracing::instrument(skip(self), level = "debug")] pub fn flush(&self) -> Result { result(DBCommon::flush_wal(&self.db, false)) } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self), level = "info")] pub fn sort(&self) -> Result { let flushoptions = rocksdb::FlushOptions::default(); result(DBCommon::flush_opt(&self.db, &flushoptions)) } + #[tracing::instrument(skip(self), level = "info")] + pub fn wait_compactions(&self) -> Result { + let 
mut opts = WaitForCompactOptions::default(); + opts.set_abort_on_pause(true); + opts.set_flush(false); + opts.set_timeout(0); + + self.db.wait_for_compact(&opts).map_err(map_err) + } + /// Query for database property by null-terminated name which is expected to /// have a result with an integer representation. This is intended for /// low-overhead programmatic use. diff --git a/src/database/map.rs b/src/database/map.rs index 60d66585..33cae594 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,3 +1,4 @@ +pub mod compact; mod contains; mod count; mod get; diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs new file mode 100644 index 00000000..c0381eb4 --- /dev/null +++ b/src/database/map/compact.rs @@ -0,0 +1,62 @@ +use conduwuit::{implement, Err, Result}; +use rocksdb::{BottommostLevelCompaction, CompactOptions}; + +use crate::keyval::KeyBuf; + +#[derive(Clone, Debug, Default)] +pub struct Options { + /// Key range to start and stop compaction. + pub range: (Option, Option), + + /// (None, None) - all levels to all necessary levels + /// (None, Some(1)) - compact all levels into level 1 + /// (Some(1), None) - compact level 1 into level 1 + /// (Some(_), Some(_) - currently unsupported + pub level: (Option, Option), + + /// run compaction until complete. if false only one pass is made, and the + /// results of that pass are not further recompacted. + pub exhaustive: bool, + + /// waits for other compactions to complete, then runs this compaction + /// exclusively before allowing automatic compactions to resume. 
+ pub exclusive: bool, +} + +#[implement(super::Map)] +#[tracing::instrument( + name = "compact", + level = "info" + skip(self), + fields(%self), +)] +pub fn compact_blocking(&self, opts: Options) -> Result { + let mut co = CompactOptions::default(); + co.set_exclusive_manual_compaction(opts.exclusive); + co.set_bottommost_level_compaction(match opts.exhaustive { + | true => BottommostLevelCompaction::Force, + | false => BottommostLevelCompaction::ForceOptimized, + }); + + match opts.level { + | (None, None) => { + co.set_change_level(true); + co.set_target_level(-1); + }, + | (None, Some(level)) => { + co.set_change_level(true); + co.set_target_level(level.try_into()?); + }, + | (Some(level), None) => { + co.set_change_level(false); + co.set_target_level(level.try_into()?); + }, + | (Some(_), Some(_)) => return Err!("compacting between specific levels not supported"), + }; + + self.db + .db + .compact_range_cf_opt(&self.cf(), opts.range.0, opts.range.1, &co); + + Ok(()) +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 6e3f8c96..8ae8dcf5 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -30,12 +30,12 @@ pub use self::{ deserialized::Deserialized, handle::Handle, keyval::{serialize_key, serialize_val, KeyVal, Slice}, - map::Map, + map::{compact, Map}, ser::{serialize, serialize_to, serialize_to_vec, Interfix, Json, Separator, SEP}, }; pub(crate) use self::{ engine::{context::Context, Engine}, - util::{or_else, result}, + util::or_else, }; use crate::maps::{Maps, MapsKey, MapsVal}; From 9ab381e4ebf8b2953c3cd697185c79e14c0ae309 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 18 Jan 2025 07:12:20 +0000 Subject: [PATCH 0555/1248] generate fmt::Display for Config Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 4 +- src/core/config/mod.rs | 401 ----------------------------------- src/macros/config.rs | 93 +++++--- 3 files changed, 63 insertions(+), 435 deletions(-) diff --git a/src/admin/server/commands.rs 
b/src/admin/server/commands.rs index 8d3358a8..6469a0e9 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -22,8 +22,8 @@ pub(super) async fn uptime(&self) -> Result { pub(super) async fn show_config(&self) -> Result { // Construct and send the response Ok(RoomMessageEventContent::text_markdown(format!( - "```\n{}\n```", - self.services.globals.config + "{}", + self.services.server.config ))) } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5cfed0b9..d6983540 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -3,7 +3,6 @@ pub mod proxy; use std::{ collections::{BTreeMap, BTreeSet, HashSet}, - fmt, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, path::PathBuf, }; @@ -15,7 +14,6 @@ use either::{ }; use figment::providers::{Env, Format, Toml}; pub use figment::{value::Value as FigmentValue, Figment}; -use itertools::Itertools; use regex::RegexSet; use ruma::{ api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, @@ -1859,405 +1857,6 @@ impl Config { pub fn check(&self) -> Result<(), Error> { check(self) } } -impl fmt::Display for Config { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "Active config values:\n").expect("wrote line to formatter stream"); - let mut line = |key: &str, val: &str| { - writeln!(f, "{key}: {val}").expect("wrote line to formatter stream"); - }; - - line("Server name", self.server_name.host()); - line("Database path", &self.database_path.to_string_lossy()); - line( - "Database backup path", - self.database_backup_path - .as_ref() - .map_or("", |path| path.to_str().unwrap_or("")), - ); - line("Database backups to keep", &self.database_backups_to_keep.to_string()); - line("Database cache capacity (MB)", &self.db_cache_capacity_mb.to_string()); - line("Cache capacity modifier", &self.cache_capacity_modifier.to_string()); - line("PDU cache capacity", &self.pdu_cache_capacity.to_string()); - line("Auth chain cache 
capacity", &self.auth_chain_cache_capacity.to_string()); - line("Short eventid cache capacity", &self.shorteventid_cache_capacity.to_string()); - line("Eventid short cache capacity", &self.eventidshort_cache_capacity.to_string()); - line("Short statekey cache capacity", &self.shortstatekey_cache_capacity.to_string()); - line("Statekey short cache capacity", &self.statekeyshort_cache_capacity.to_string()); - line( - "Server visibility cache capacity", - &self.server_visibility_cache_capacity.to_string(), - ); - line( - "User visibility cache capacity", - &self.user_visibility_cache_capacity.to_string(), - ); - line("Stateinfo cache capacity", &self.stateinfo_cache_capacity.to_string()); - line( - "Roomid space hierarchy cache capacity", - &self.roomid_spacehierarchy_cache_capacity.to_string(), - ); - line("DNS cache entry limit", &self.dns_cache_entries.to_string()); - line("DNS minimum TTL", &self.dns_min_ttl.to_string()); - line("DNS minimum NXDOMAIN TTL", &self.dns_min_ttl_nxdomain.to_string()); - line("DNS attempts", &self.dns_attempts.to_string()); - line("DNS timeout", &self.dns_timeout.to_string()); - line("DNS fallback to TCP", &self.dns_tcp_fallback.to_string()); - line("DNS query over TCP only", &self.query_over_tcp_only.to_string()); - line("Query all nameservers", &self.query_all_nameservers.to_string()); - line("Maximum request size (bytes)", &self.max_request_size.to_string()); - line("Sender retry backoff limit", &self.sender_retry_backoff_limit.to_string()); - line("Request connect timeout", &self.request_conn_timeout.to_string()); - line("Request timeout", &self.request_timeout.to_string()); - line("Request total timeout", &self.request_total_timeout.to_string()); - line("Idle connections per host", &self.request_idle_per_host.to_string()); - line("Request pool idle timeout", &self.request_idle_timeout.to_string()); - line("Well_known connect timeout", &self.well_known_conn_timeout.to_string()); - line("Well_known timeout", 
&self.well_known_timeout.to_string()); - line("Federation timeout", &self.federation_timeout.to_string()); - line("Federation pool idle per host", &self.federation_idle_per_host.to_string()); - line("Federation pool idle timeout", &self.federation_idle_timeout.to_string()); - line("Sender timeout", &self.sender_timeout.to_string()); - line("Sender pool idle timeout", &self.sender_idle_timeout.to_string()); - line("Appservice timeout", &self.appservice_timeout.to_string()); - line("Appservice pool idle timeout", &self.appservice_idle_timeout.to_string()); - line("Pusher pool idle timeout", &self.pusher_idle_timeout.to_string()); - line("Allow registration", &self.allow_registration.to_string()); - line( - "Registration token", - if self.registration_token.is_none() - && self.registration_token_file.is_none() - && self.allow_registration - { - "not set (⚠️ open registration!)" - } else if self.registration_token.is_none() && self.registration_token_file.is_none() - { - "not set" - } else { - "set" - }, - ); - line( - "Registration token file path", - self.registration_token_file - .as_ref() - .map_or("", |path| path.to_str().unwrap_or_default()), - ); - line( - "Allow guest registration (inherently false if allow registration is false)", - &self.allow_guest_registration.to_string(), - ); - line( - "Log guest registrations in admin room", - &self.log_guest_registrations.to_string(), - ); - line( - "Allow guests to auto join rooms", - &self.allow_guests_auto_join_rooms.to_string(), - ); - line("New user display name suffix", &self.new_user_displayname_suffix); - line("Allow encryption", &self.allow_encryption.to_string()); - line("Allow federation", &self.allow_federation.to_string()); - line("Federation loopback", &self.federation_loopback.to_string()); - line( - "Require authentication for profile requests", - &self.require_auth_for_profile_requests.to_string(), - ); - line( - "Allow incoming federated presence requests (updates)", - 
&self.allow_incoming_presence.to_string(), - ); - line( - "Allow outgoing federated presence requests (updates)", - &self.allow_outgoing_presence.to_string(), - ); - line( - "Allow local presence requests (updates)", - &self.allow_local_presence.to_string(), - ); - line( - "Allow incoming remote read receipts", - &self.allow_incoming_read_receipts.to_string(), - ); - line( - "Allow outgoing remote read receipts", - &self.allow_outgoing_read_receipts.to_string(), - ); - line( - "Block non-admin room invites (local and remote, admins can still send and receive \ - invites)", - &self.block_non_admin_invites.to_string(), - ); - line("Enable admin escape commands", &self.admin_escape_commands.to_string()); - line( - "Activate admin console after startup", - &self.admin_console_automatic.to_string(), - ); - line("Execute admin commands after startup", &self.admin_execute.join(", ")); - line( - "Continue startup even if some commands fail", - &self.admin_execute_errors_ignore.to_string(), - ); - line("Filter for admin command log capture", &self.admin_log_capture); - line("Admin room tag", &self.admin_room_tag); - line("Allow outgoing federated typing", &self.allow_outgoing_typing.to_string()); - line("Allow incoming federated typing", &self.allow_incoming_typing.to_string()); - line( - "Incoming federated typing timeout", - &self.typing_federation_timeout_s.to_string(), - ); - line("Client typing timeout minimum", &self.typing_client_timeout_min_s.to_string()); - line("Client typing timeout maxmimum", &self.typing_client_timeout_max_s.to_string()); - line("Allow device name federation", &self.allow_device_name_federation.to_string()); - line( - "Allow incoming profile lookup federation requests", - &self - .allow_inbound_profile_lookup_federation_requests - .to_string(), - ); - line( - "Auto deactivate banned room join attempts", - &self.auto_deactivate_banned_room_attempts.to_string(), - ); - line("Notification push path", &self.notification_push_path); - line("Allow 
room creation", &self.allow_room_creation.to_string()); - line( - "Allow public room directory over federation", - &self.allow_public_room_directory_over_federation.to_string(), - ); - line( - "Allow public room directory without authentication", - &self.allow_public_room_directory_without_auth.to_string(), - ); - line( - "Lockdown public room directory (only allow admins to publish)", - &self.lockdown_public_room_directory.to_string(), - ); - line( - "Trusted key servers", - &self - .trusted_servers - .iter() - .map(|server| server.host()) - .join(", "), - ); - line("OpenID Token TTL", &self.openid_token_ttl.to_string()); - line( - "TURN username", - if self.turn_username.is_empty() { - "not set" - } else { - &self.turn_username - }, - ); - line("TURN password", { - if self.turn_password.is_empty() { - "not set" - } else { - "set" - } - }); - line("TURN secret", { - if self.turn_secret.is_empty() && self.turn_secret_file.is_none() { - "not set" - } else { - "set" - } - }); - line("TURN secret file path", { - self.turn_secret_file - .as_ref() - .map_or("", |path| path.to_str().unwrap_or_default()) - }); - line("Turn TTL", &self.turn_ttl.to_string()); - line("Turn URIs", { - let mut lst = Vec::with_capacity(self.turn_uris.len()); - for item in self.turn_uris.iter().cloned().enumerate() { - let (_, uri): (usize, String) = item; - lst.push(uri); - } - &lst.join(", ") - }); - line("Auto Join Rooms", { - let mut lst = Vec::with_capacity(self.auto_join_rooms.len()); - for room in &self.auto_join_rooms { - lst.push(room); - } - &lst.into_iter().join(", ") - }); - line("Zstd HTTP Compression", &self.zstd_compression.to_string()); - line("Gzip HTTP Compression", &self.gzip_compression.to_string()); - line("Brotli HTTP Compression", &self.brotli_compression.to_string()); - line("RocksDB database LOG level", &self.rocksdb_log_level); - line("RocksDB database LOG to stderr", &self.rocksdb_log_stderr.to_string()); - line("RocksDB database LOG time-to-roll", 
&self.rocksdb_log_time_to_roll.to_string()); - line("RocksDB Max LOG Files", &self.rocksdb_max_log_files.to_string()); - line( - "RocksDB database max LOG file size", - &self.rocksdb_max_log_file_size.to_string(), - ); - line( - "RocksDB database optimize for spinning disks", - &self.rocksdb_optimize_for_spinning_disks.to_string(), - ); - line("RocksDB Direct-IO", &self.rocksdb_direct_io.to_string()); - line("RocksDB Parallelism Threads", &self.rocksdb_parallelism_threads.to_string()); - line("RocksDB Compression Algorithm", &self.rocksdb_compression_algo); - line("RocksDB Compression Level", &self.rocksdb_compression_level.to_string()); - line( - "RocksDB Bottommost Compression Level", - &self.rocksdb_bottommost_compression_level.to_string(), - ); - line( - "RocksDB Bottommost Level Compression", - &self.rocksdb_bottommost_compression.to_string(), - ); - line("RocksDB Recovery Mode", &self.rocksdb_recovery_mode.to_string()); - line("RocksDB Repair Mode", &self.rocksdb_repair.to_string()); - line("RocksDB Read-only Mode", &self.rocksdb_read_only.to_string()); - line("RocksDB Secondary Mode", &self.rocksdb_secondary.to_string()); - line( - "RocksDB Compaction Idle Priority", - &self.rocksdb_compaction_prio_idle.to_string(), - ); - line( - "RocksDB Compaction Idle IOPriority", - &self.rocksdb_compaction_ioprio_idle.to_string(), - ); - line("RocksDB Compaction enabled", &self.rocksdb_compaction.to_string()); - line("RocksDB Statistics level", &self.rocksdb_stats_level.to_string()); - line("Media integrity checks on startup", &self.media_startup_check.to_string()); - line("Media compatibility filesystem links", &self.media_compat_file_link.to_string()); - line("Prune missing media from database", &self.prune_missing_media.to_string()); - line("Allow legacy (unauthenticated) media", &self.allow_legacy_media.to_string()); - line("Freeze legacy (unauthenticated) media", &self.freeze_legacy_media.to_string()); - line("Prevent Media Downloads From", { - let mut lst = 
Vec::with_capacity(self.prevent_media_downloads_from.len()); - for domain in &self.prevent_media_downloads_from { - lst.push(domain.host()); - } - &lst.join(", ") - }); - line("Forbidden Remote Server Names (\"Global\" ACLs)", { - let mut lst = Vec::with_capacity(self.forbidden_remote_server_names.len()); - for domain in &self.forbidden_remote_server_names { - lst.push(domain.host()); - } - &lst.join(", ") - }); - line("Forbidden Remote Room Directory Server Names", { - let mut lst = - Vec::with_capacity(self.forbidden_remote_room_directory_server_names.len()); - for domain in &self.forbidden_remote_room_directory_server_names { - lst.push(domain.host()); - } - &lst.join(", ") - }); - line("Outbound Request IP Range (CIDR) Denylist", { - let mut lst = Vec::with_capacity(self.ip_range_denylist.len()); - for item in self.ip_range_denylist.iter().cloned().enumerate() { - let (_, ip): (usize, String) = item; - lst.push(ip); - } - &lst.join(", ") - }); - line("Forbidden usernames", { - &self.forbidden_usernames.patterns().iter().join(", ") - }); - line("Forbidden room aliases", { - &self.forbidden_alias_names.patterns().iter().join(", ") - }); - line( - "URL preview bound interface", - self.url_preview_bound_interface - .as_ref() - .map(Either::as_ref) - .map(|either| either.map_left(ToString::to_string)) - .map(Either::either_into::) - .unwrap_or_default() - .as_str(), - ); - line( - "URL preview domain contains allowlist", - &self.url_preview_domain_contains_allowlist.join(", "), - ); - line( - "URL preview domain explicit allowlist", - &self.url_preview_domain_explicit_allowlist.join(", "), - ); - line( - "URL preview domain explicit denylist", - &self.url_preview_domain_explicit_denylist.join(", "), - ); - line( - "URL preview URL contains allowlist", - &self.url_preview_url_contains_allowlist.join(", "), - ); - line("URL preview maximum spider size", &self.url_preview_max_spider_size.to_string()); - line("URL preview check root domain", 
&self.url_preview_check_root_domain.to_string()); - line( - "Allow check for updates / announcements check", - &self.allow_check_for_updates.to_string(), - ); - line("Enable netburst on startup", &self.startup_netburst.to_string()); - #[cfg(feature = "sentry_telemetry")] - line("Sentry.io reporting and tracing", &self.sentry.to_string()); - #[cfg(feature = "sentry_telemetry")] - line("Sentry.io send server_name in logs", &self.sentry_send_server_name.to_string()); - #[cfg(feature = "sentry_telemetry")] - line("Sentry.io tracing sample rate", &self.sentry_traces_sample_rate.to_string()); - line("Sentry.io attach stacktrace", &self.sentry_attach_stacktrace.to_string()); - line("Sentry.io send panics", &self.sentry_send_panic.to_string()); - line("Sentry.io send errors", &self.sentry_send_error.to_string()); - line("Sentry.io tracing filter", &self.sentry_filter); - line( - "Well-known server name", - self.well_known - .server - .as_ref() - .map_or("", |server| server.as_str()), - ); - line( - "Well-known client URL", - self.well_known - .client - .as_ref() - .map_or("", |url| url.as_str()), - ); - line( - "Well-known support email", - self.well_known - .support_email - .as_ref() - .map_or("", |str| str.as_ref()), - ); - line( - "Well-known support Matrix ID", - self.well_known - .support_mxid - .as_ref() - .map_or("", |mxid| mxid.as_str()), - ); - line( - "Well-known support role", - self.well_known - .support_role - .as_ref() - .map_or("", |role| role.as_str()), - ); - line( - "Well-known support page/URL", - self.well_known - .support_page - .as_ref() - .map_or("", |url| url.as_str()), - ); - line("Enable the tokio-console", &self.tokio_console.to_string()); - line("Admin room notices", &self.admin_room_notices.to_string()); - - Ok(()) - } -} - fn true_fn() -> bool { true } fn default_address() -> ListeningAddr { diff --git a/src/macros/config.rs b/src/macros/config.rs index 452abd20..90d6ef15 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -1,8 
+1,8 @@ use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _}; use proc_macro::TokenStream; -use proc_macro2::Span; -use quote::ToTokens; +use proc_macro2::{Span, TokenStream as TokenStream2}; +use quote::{quote, ToTokens}; use syn::{ parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, @@ -19,18 +19,24 @@ const HIDDEN: &[&str] = &["default"]; #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { - if is_cargo_build() && !is_cargo_test() { - generate_example(&input, args)?; - } + let write = is_cargo_build() && !is_cargo_test(); + let additional = generate_example(&input, args, write)?; - Ok(input.to_token_stream().into()) + Ok([input.to_token_stream(), additional] + .into_iter() + .collect::() + .into()) } #[allow(clippy::needless_pass_by_value)] #[allow(unused_variables)] -fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { +fn generate_example(input: &ItemStruct, args: &[Meta], write: bool) -> Result { let settings = get_simple_settings(args); + let section = settings.get("section").ok_or_else(|| { + Error::new(args[0].span(), "missing required 'section' attribute argument") + })?; + let filename = settings.get("filename").ok_or_else(|| { Error::new(args[0].span(), "missing required 'filename' attribute argument") })?; @@ -45,31 +51,33 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { .split(' ') .collect(); - let section = settings.get("section").ok_or_else(|| { - Error::new(args[0].span(), "missing required 'section' attribute argument") - })?; - - let mut file = OpenOptions::new() + let fopts = OpenOptions::new() .write(true) .create(section == "global") .truncate(section == "global") .append(section != "global") - .open(filename) - .map_err(|e| { - Error::new( - Span::call_site(), - format!("Failed to open 
config file for generation: {e}"), - ) - })?; + .clone(); - if let Some(header) = settings.get("header") { - file.write_all(header.as_bytes()) + let mut file = write + .then(|| { + fopts.open(filename).map_err(|e| { + let msg = format!("Failed to open file for config generation: {e}"); + Error::new(Span::call_site(), msg) + }) + }) + .transpose()?; + + if let Some(file) = file.as_mut() { + if let Some(header) = settings.get("header") { + file.write_all(header.as_bytes()) + .expect("written to config file"); + } + + file.write_fmt(format_args!("\n[{section}]\n")) .expect("written to config file"); } - file.write_fmt(format_args!("\n[{section}]\n")) - .expect("written to config file"); - + let mut summary: Vec = Vec::new(); if let Fields::Named(FieldsNamed { named, .. }) = &input.fields { for field in named { let Some(ident) = &field.ident else { @@ -105,20 +113,41 @@ fn generate_example(input: &ItemStruct, args: &[Meta]) -> Result<()> { default }; - file.write_fmt(format_args!("\n{doc}")) - .expect("written to config file"); + if let Some(file) = file.as_mut() { + file.write_fmt(format_args!("\n{doc}")) + .expect("written to config file"); - file.write_fmt(format_args!("#{ident} ={default}\n")) + file.write_fmt(format_args!("#{ident} ={default}\n")) + .expect("written to config file"); + } + + let name = ident.to_string(); + summary.push(quote! { + writeln!(out, "| {} | {:?} |", #name, self.#ident)?; + }); + } + } + + if let Some(file) = file.as_mut() { + if let Some(footer) = settings.get("footer") { + file.write_all(footer.as_bytes()) .expect("written to config file"); } } - if let Some(footer) = settings.get("footer") { - file.write_all(footer.as_bytes()) - .expect("written to config file"); - } + let struct_name = &input.ident; + let display = quote! 
{ + impl std::fmt::Display for #struct_name { + fn fmt(&self, out: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(out, "| name | value |")?; + writeln!(out, "| :--- | :--- |")?; + #( #summary )* + Ok(()) + } + } + }; - Ok(()) + Ok(display) } fn get_default(field: &Field) -> Option { From 1f31e74024bcdc23efcbafc32c0d9572df83fb82 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 03:50:51 +0000 Subject: [PATCH 0556/1248] add del to raw suite Signed-off-by: Jason Volk --- src/admin/query/raw.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index ac5e8976..5a6006ec 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -33,6 +33,15 @@ pub(crate) enum RawCommand { key: String, }, + /// - Raw database delete (for string keys) + RawDel { + /// Map name + map: String, + + /// Key + key: String, + }, + /// - Raw database keys iteration RawKeys { /// Map name @@ -534,6 +543,18 @@ pub(super) async fn raw_iter_from( ))) } +#[admin_command] +pub(super) async fn raw_del(&self, map: String, key: String) -> Result { + let map = self.services.db.get(&map)?; + let timer = Instant::now(); + map.remove(&key); + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::notice_markdown(format!( + "Operation completed in {query_time:?}" + ))) +} + #[admin_command] pub(super) async fn raw_get(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; From 0c96891008713b1e121f1896fdba59f94570cc29 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 00:52:48 +0000 Subject: [PATCH 0557/1248] add CBOR support to database schema Signed-off-by: Jason Volk --- Cargo.lock | 32 ++++++++++++++++++++++++++++++++ Cargo.toml | 8 ++++++++ src/database/Cargo.toml | 2 ++ src/database/de.rs | 4 ++++ src/database/mod.rs | 2 +- src/database/ser.rs | 14 +++++++++++++- 6 files changed, 60 insertions(+), 2 deletions(-) diff --git a/Cargo.lock 
b/Cargo.lock index 8de3abf4..d9758e6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -761,6 +761,8 @@ dependencies = [ "const-str", "futures", "log", + "minicbor", + "minicbor-serde", "rust-rocksdb-uwu", "serde", "serde_json", @@ -2329,6 +2331,36 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minicbor" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0452a60c1863c1f50b5f77cd295e8d2786849f35883f0b9e18e7e6e1b5691b0" +dependencies = [ + "minicbor-derive", +] + +[[package]] +name = "minicbor-derive" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "minicbor-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "becf18ac384ecf6f53b2db3b1549eebff664c67ecf259ae99be5912193291686" +dependencies = [ + "minicbor", + "serde", +] + [[package]] name = "minimad" version = "0.13.1" diff --git a/Cargo.toml b/Cargo.toml index f9e3b6db..042587fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -506,6 +506,14 @@ version = "0.2" [workspace.dependencies.num-traits] version = "0.2" +[workspace.dependencies.minicbor] +version = "0.25.1" +features = ["std"] + +[workspace.dependencies.minicbor-serde] +version = "0.3.2" +features = ["std"] + # # Patches # diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 09eedaf4..557c9a3e 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -40,6 +40,8 @@ conduwuit-core.workspace = true const-str.workspace = true futures.workspace = true log.workspace = true +minicbor.workspace = true +minicbor-serde.workspace = true rust-rocksdb.workspace = true serde.workspace = true serde_json.workspace = 
true diff --git a/src/database/de.rs b/src/database/de.rs index 48bc9f64..4fdc2251 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -248,6 +248,10 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { { match name { | "$serde_json::private::RawValue" => visitor.visit_map(self), + | "Cbor" => visitor + .visit_newtype_struct(&mut minicbor_serde::Deserializer::new(self.record_trail())) + .map_err(|e| Self::Error::SerdeDe(e.to_string().into())), + | _ => visitor.visit_newtype_struct(self), } } diff --git a/src/database/mod.rs b/src/database/mod.rs index 8ae8dcf5..42b7f5e3 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -31,7 +31,7 @@ pub use self::{ handle::Handle, keyval::{serialize_key, serialize_val, KeyVal, Slice}, map::{compact, Map}, - ser::{serialize, serialize_to, serialize_to_vec, Interfix, Json, Separator, SEP}, + ser::{serialize, serialize_to, serialize_to_vec, Cbor, Interfix, Json, Separator, SEP}, }; pub(crate) use self::{ engine::{context::Context, Engine}, diff --git a/src/database/ser.rs b/src/database/ser.rs index e6de5f7f..372b7522 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,7 +1,7 @@ use std::io::Write; use conduwuit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; -use serde::{ser, Serialize}; +use serde::{ser, Deserialize, Serialize}; use crate::util::unhandled; @@ -55,6 +55,10 @@ pub(crate) struct Serializer<'a, W: Write> { #[derive(Debug, Serialize)] pub struct Json(pub T); +/// Newtype for CBOR serialization. +#[derive(Debug, Deserialize, Serialize)] +pub struct Cbor(pub T); + /// Directive to force separator serialization specifically for prefix keying /// use. This is a quirk of the database schema and prefix iterations. 
#[derive(Debug, Serialize)] @@ -189,6 +193,14 @@ impl ser::Serializer for &mut Serializer<'_, W> { match name { | "Json" => serde_json::to_writer(&mut self.out, value).map_err(Into::into), + | "Cbor" => { + use minicbor::encode::write::Writer; + use minicbor_serde::Serializer; + + value + .serialize(&mut Serializer::new(&mut Writer::new(&mut self.out))) + .map_err(|e| Self::Error::SerdeSer(e.to_string().into())) + }, | _ => unhandled!("Unrecognized serialization Newtype {name:?}"), } } From 49023aa295da8b4d975389a1494611696d4cc63d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 02:16:51 +0000 Subject: [PATCH 0558/1248] use database for resolver caches Signed-off-by: Jason Volk --- src/admin/query/resolver.rs | 59 +++++--------- src/database/maps.rs | 8 ++ src/service/resolver/actual.rs | 6 +- src/service/resolver/cache.rs | 135 ++++++++++++++++----------------- src/service/resolver/dns.rs | 24 +++--- src/service/resolver/fed.rs | 3 +- src/service/resolver/mod.rs | 39 +--------- src/service/sending/send.rs | 2 +- 8 files changed, 114 insertions(+), 162 deletions(-) diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index b53661fc..0b6da6fd 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,6 @@ -use std::fmt::Write; - use clap::Subcommand; use conduwuit::{utils::time, Result}; +use futures::StreamExt; use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName}; use crate::{admin_command, admin_command_dispatch}; @@ -31,29 +30,19 @@ async fn destinations_cache( writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?; writeln!(self, "| ----------- | ----------- | -------- | ------- |").await?; - let mut out = String::new(); - { - let map = self - .services - .resolver - .cache - .destinations - .read() - .expect("locked"); + let mut destinations = self.services.resolver.cache.destinations().boxed(); - for (name, &CachedDest { ref dest, ref host, expire }) in 
map.iter() { - if let Some(server_name) = server_name.as_ref() { - if name != server_name { - continue; - } + while let Some((name, CachedDest { dest, host, expire })) = destinations.next().await { + if let Some(server_name) = server_name.as_ref() { + if name != server_name { + continue; } - - let expire = time::format(expire, "%+"); - writeln!(out, "| {name} | {dest} | {host} | {expire} |")?; } - } - self.write_str(out.as_str()).await?; + let expire = time::format(expire, "%+"); + self.write_str(&format!("| {name} | {dest} | {host} | {expire} |\n")) + .await?; + } Ok(RoomMessageEventContent::notice_plain("")) } @@ -65,29 +54,19 @@ async fn overrides_cache(&self, server_name: Option) -> Result Result { - let (result, cached) = if let Some(result) = self.get_cached_destination(server_name) { + let (result, cached) = if let Ok(result) = self.cache.get_destination(server_name).await { (result, true) } else { self.validate_dest(server_name)?; @@ -232,7 +232,7 @@ impl super::Service { #[tracing::instrument(skip_all, name = "well-known")] async fn request_well_known(&self, dest: &str) -> Result> { - if !self.has_cached_override(dest) { + if !self.cache.has_override(dest).await { self.query_and_cache_override(dest, dest, 8448).await?; } @@ -315,7 +315,7 @@ impl super::Service { debug_info!("{overname:?} overriden by {hostname:?}"); } - self.set_cached_override(overname, CachedOverride { + self.cache.set_override(overname, CachedOverride { ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index e309a129..11e6c9bd 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,108 +1,103 @@ -use std::{ - collections::HashMap, - net::IpAddr, - sync::{Arc, RwLock}, - time::SystemTime, -}; +use std::{net::IpAddr, sync::Arc, time::SystemTime}; use arrayvec::ArrayVec; use conduwuit::{ - trace, - utils::{math::Expected, rand}, + at, 
implement, + utils::{math::Expected, rand, stream::TryIgnore}, + Result, }; -use ruma::{OwnedServerName, ServerName}; +use database::{Cbor, Deserialized, Map}; +use futures::{Stream, StreamExt}; +use ruma::ServerName; +use serde::{Deserialize, Serialize}; use super::fed::FedDest; pub struct Cache { - pub destinations: RwLock, // actual_destination, host - pub overrides: RwLock, + destinations: Arc, + overrides: Arc, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct CachedDest { pub dest: FedDest, pub host: String, pub expire: SystemTime, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct CachedOverride { pub ips: IpAddrs, pub port: u16, pub expire: SystemTime, } -pub type WellKnownMap = HashMap; -pub type TlsNameMap = HashMap; - pub type IpAddrs = ArrayVec; pub(crate) const MAX_IPS: usize = 3; impl Cache { - pub(super) fn new() -> Arc { + pub(super) fn new(args: &crate::Args<'_>) -> Arc { Arc::new(Self { - destinations: RwLock::new(WellKnownMap::new()), - overrides: RwLock::new(TlsNameMap::new()), + destinations: args.db["servername_destination"].clone(), + overrides: args.db["servername_override"].clone(), }) } } -impl super::Service { - pub fn set_cached_destination( - &self, - name: OwnedServerName, - dest: CachedDest, - ) -> Option { - trace!(?name, ?dest, "set cached destination"); - self.cache - .destinations - .write() - .expect("locked for writing") - .insert(name, dest) - } +#[implement(Cache)] +pub fn set_destination(&self, name: &ServerName, dest: CachedDest) { + self.destinations.raw_put(name, Cbor(dest)); +} - #[must_use] - pub fn get_cached_destination(&self, name: &ServerName) -> Option { - self.cache - .destinations - .read() - .expect("locked for reading") - .get(name) - .cloned() - } +#[implement(Cache)] +pub fn set_override(&self, name: &str, over: CachedOverride) { + self.overrides.raw_put(name, Cbor(over)); +} - pub fn set_cached_override( - &self, - name: &str, - over: 
CachedOverride, - ) -> Option { - trace!(?name, ?over, "set cached override"); - self.cache - .overrides - .write() - .expect("locked for writing") - .insert(name.into(), over) - } +#[implement(Cache)] +pub async fn get_destination(&self, name: &ServerName) -> Result { + self.destinations + .get(name) + .await + .deserialized::>() + .map(at!(0)) +} - #[must_use] - pub fn get_cached_override(&self, name: &str) -> Option { - self.cache - .overrides - .read() - .expect("locked for reading") - .get(name) - .cloned() - } +#[implement(Cache)] +pub async fn get_override(&self, name: &str) -> Result { + self.overrides + .get(name) + .await + .deserialized::>() + .map(at!(0)) +} - #[must_use] - pub fn has_cached_override(&self, name: &str) -> bool { - self.cache - .overrides - .read() - .expect("locked for reading") - .contains_key(name) - } +#[implement(Cache)] +#[must_use] +pub async fn has_destination(&self, destination: &str) -> bool { + self.destinations.exists(destination).await.is_ok() +} + +#[implement(Cache)] +#[must_use] +pub async fn has_override(&self, destination: &str) -> bool { + self.overrides.exists(destination).await.is_ok() +} + +#[implement(Cache)] +pub fn destinations(&self) -> impl Stream + Send + '_ { + self.destinations + .stream() + .ignore_err() + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) +} + +#[implement(Cache)] +pub fn overrides(&self) -> impl Stream + Send + '_ { + self.overrides + .stream() + .ignore_err() + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) } impl CachedDest { diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index 5c9018ab..ad7768bc 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -88,18 +88,20 @@ impl Resolve for Resolver { impl Resolve for Hooked { fn resolve(&self, name: Name) -> Resolving { - let cached: Option = self - .cache - .overrides - .read() - .expect("locked for reading") - .get(name.as_str()) - .cloned(); + hooked_resolve(self.cache.clone(), 
self.server.clone(), self.resolver.clone(), name) + .boxed() + } +} - cached.map_or_else( - || resolve_to_reqwest(self.server.clone(), self.resolver.clone(), name).boxed(), - |cached| cached_to_reqwest(cached).boxed(), - ) +async fn hooked_resolve( + cache: Arc, + server: Arc, + resolver: Arc, + name: Name, +) -> Result> { + match cache.get_override(name.as_str()).await { + | Ok(cached) => cached_to_reqwest(cached).await, + | Err(_) => resolve_to_reqwest(server, resolver, name).boxed().await, } } diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index 76fc6894..bfe100e7 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -6,8 +6,9 @@ use std::{ use arrayvec::ArrayString; use conduwuit::utils::math::Expected; +use serde::{Deserialize, Serialize}; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] pub enum FedDest { Literal(SocketAddr), Named(String, PortString), diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 6a6289b6..3163b0d0 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -4,9 +4,9 @@ mod dns; pub mod fed; mod tests; -use std::{fmt::Write, sync::Arc}; +use std::sync::Arc; -use conduwuit::{utils, utils::math::Expected, Result, Server}; +use conduwuit::{Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; @@ -25,7 +25,7 @@ struct Services { impl crate::Service for Service { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] fn build(args: crate::Args<'_>) -> Result> { - let cache = Cache::new(); + let cache = Cache::new(&args); Ok(Arc::new(Self { cache: cache.clone(), resolver: Resolver::build(args.server, cache)?, @@ -36,38 +36,5 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { - use utils::bytes::pretty; - - let (oc_count, oc_bytes) = self.cache.overrides.read()?.iter().fold( - 
(0_usize, 0_usize), - |(count, bytes), (key, val)| { - (count.expected_add(1), bytes.expected_add(key.len()).expected_add(val.size())) - }, - ); - - let (dc_count, dc_bytes) = self.cache.destinations.read()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, val)| { - (count.expected_add(1), bytes.expected_add(key.len()).expected_add(val.size())) - }, - ); - - writeln!(out, "resolver_overrides_cache: {oc_count} ({})", pretty(oc_bytes))?; - writeln!(out, "resolver_destinations_cache: {dc_count} ({})", pretty(dc_bytes))?; - - Ok(()) - } - - fn clear_cache(&self) { - self.cache.overrides.write().expect("write locked").clear(); - self.cache - .destinations - .write() - .expect("write locked") - .clear(); - self.resolver.resolver.clear_cache(); - } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index e2981068..831a1dd8 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -125,7 +125,7 @@ where let result = T::IncomingResponse::try_from_http_response(response); if result.is_ok() && !actual.cached { - resolver.set_cached_destination(dest.to_owned(), CachedDest { + resolver.cache.set_destination(dest, CachedDest { dest: actual.dest.clone(), host: actual.host.clone(), expire: CachedDest::default_expire(), From 7c0c029a4a90ded986d76910c162377f73360b64 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 06:40:07 +0000 Subject: [PATCH 0559/1248] add try_lock to MutexMap; allow TryFrom constructions Signed-off-by: Jason Volk --- Cargo.lock | 26 ++++++++-------- Cargo.toml | 2 +- src/admin/debug/commands.rs | 2 +- src/core/utils/mutex_map.rs | 60 ++++++++++++++++++++++++++++++++++--- 4 files changed, 71 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9758e6f..7985a411 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3188,7 +3188,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "assign", "js_int", @@ -3210,7 +3210,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "js_int", "ruma-common", @@ -3222,7 +3222,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "as_variant", "assign", @@ -3245,7 +3245,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "as_variant", "base64 0.22.1", @@ -3276,7 +3276,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3301,7 
+3301,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "bytes", "http", @@ -3319,7 +3319,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3328,7 +3328,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "js_int", "ruma-common", @@ -3338,7 +3338,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3353,7 +3353,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "js_int", "ruma-common", @@ -3365,7 +3365,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "headers", "http", @@ -3378,7 +3378,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3394,7 +3394,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c4f55b39900b33b2d443dd12a6a2dab50961fdfb#c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 042587fc..b8c145ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -332,7 +332,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "c4f55b39900b33b2d443dd12a6a2dab50961fdfb" +rev = "427877d5bc14988ed877e500bbb27f8bc08b84e8" features = [ "compat", "rand", diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index a77587b0..49078dde 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -725,7 +725,7 @@ 
pub(super) async fn force_set_room_state_from_server( .save_state(room_id.clone().as_ref(), new_room_state) .await?; - let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; + let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await; self.services .rooms .state diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index 9b9821fe..03a4adf1 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -1,7 +1,13 @@ -use std::{fmt::Debug, hash::Hash, sync::Arc}; +use std::{ + fmt::Debug, + hash::Hash, + sync::{Arc, TryLockError::WouldBlock}, +}; use tokio::sync::OwnedMutexGuard as Omg; +use crate::{err, Result}; + /// Map of Mutexes pub struct MutexMap { map: Map, @@ -30,16 +36,17 @@ where } #[tracing::instrument(level = "trace", skip(self))] - pub async fn lock(&self, k: &K) -> Guard + pub async fn lock<'a, K>(&'a self, k: &'a K) -> Guard where K: Debug + Send + ?Sized + Sync, - Key: for<'a> From<&'a K>, + Key: TryFrom<&'a K>, + >::Error: Debug, { let val = self .map .lock() .expect("locked") - .entry(k.into()) + .entry(k.try_into().expect("failed to construct key")) .or_default() .clone(); @@ -49,6 +56,51 @@ where } } + #[tracing::instrument(level = "trace", skip(self))] + pub fn try_lock<'a, K>(&self, k: &'a K) -> Result> + where + K: Debug + Send + ?Sized + Sync, + Key: TryFrom<&'a K>, + >::Error: Debug, + { + let val = self + .map + .lock() + .expect("locked") + .entry(k.try_into().expect("failed to construct key")) + .or_default() + .clone(); + + Ok(Guard:: { + map: Arc::clone(&self.map), + val: val.try_lock_owned().map_err(|_| err!("would yield"))?, + }) + } + + #[tracing::instrument(level = "trace", skip(self))] + pub fn try_try_lock<'a, K>(&self, k: &'a K) -> Result> + where + K: Debug + Send + ?Sized + Sync, + Key: TryFrom<&'a K>, + >::Error: Debug, + { + let val = self + .map + .try_lock() + .map_err(|e| match e { + | WouldBlock => err!("would block"), + | _ => panic!("{e:?}"), + })? 
+ .entry(k.try_into().expect("failed to construct key")) + .or_default() + .clone(); + + Ok(Guard:: { + map: Arc::clone(&self.map), + val: val.try_lock_owned().map_err(|_| err!("would yield"))?, + }) + } + #[must_use] pub fn contains(&self, k: &Key) -> bool { self.map.lock().expect("locked").contains_key(k) } From f75d9fa79e1ef1bfbd7454cfa470acf1910d6a99 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 06:57:18 +0000 Subject: [PATCH 0560/1248] deduplicate name resolutions Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 55 +++++++++++++++++++++++----------- src/service/resolver/mod.rs | 8 ++++- 2 files changed, 44 insertions(+), 19 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 5676d7b1..afe5a1e5 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; -use futures::FutureExt; +use futures::{FutureExt, TryFutureExt}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; @@ -29,18 +29,31 @@ impl ActualDest { impl super::Service { #[tracing::instrument(skip_all, level = "debug", name = "resolve")] pub(crate) async fn get_actual_dest(&self, server_name: &ServerName) -> Result { - let (result, cached) = if let Ok(result) = self.cache.get_destination(server_name).await { - (result, true) - } else { - self.validate_dest(server_name)?; - (self.resolve_actual_dest(server_name, true).boxed().await?, false) - }; - - let CachedDest { dest, host, .. } = result; + let (CachedDest { dest, host, .. 
}, cached) = + self.lookup_actual_dest(server_name).await?; Ok(ActualDest { dest, host, cached }) } + pub(crate) async fn lookup_actual_dest( + &self, + server_name: &ServerName, + ) -> Result<(CachedDest, bool)> { + if let Ok(result) = self.cache.get_destination(server_name).await { + return Ok((result, true)); + } + + let _dedup = self.resolving.lock(server_name.as_str()); + if let Ok(result) = self.cache.get_destination(server_name).await { + return Ok((result, true)); + } + + self.resolve_actual_dest(server_name, true) + .map_ok(|result| (result, false)) + .boxed() + .await + } + /// Returns: `actual_destination`, host header /// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of @@ -51,7 +64,7 @@ impl super::Service { dest: &ServerName, cache: bool, ) -> Result { - trace!("Finding actual destination for {dest}"); + self.validate_dest(dest)?; let mut host = dest.as_str().to_owned(); let actual_dest = match get_ip_with_port(dest.as_str()) { | Some(host_port) => Self::actual_dest_1(host_port)?, @@ -106,6 +119,7 @@ impl super::Service { cache, ) .await?; + Ok(FedDest::Named( host.to_owned(), port.try_into().unwrap_or_else(|_| FedDest::default_port()), @@ -156,6 +170,7 @@ impl super::Service { cache, ) .await?; + Ok(FedDest::Named( host.to_owned(), port.try_into().unwrap_or_else(|_| FedDest::default_port()), @@ -177,17 +192,18 @@ impl super::Service { cache, ) .await?; + if let Some(port) = force_port { - Ok(FedDest::Named( + return Ok(FedDest::Named( delegated, format!(":{port}") .as_str() .try_into() .unwrap_or_else(|_| FedDest::default_port()), - )) - } else { - Ok(add_port_to_hostname(&delegated)) + )); } + + Ok(add_port_to_hostname(&delegated)) } async fn actual_dest_3_4(&self, cache: bool, delegated: String) -> Result { @@ -212,21 +228,24 @@ impl super::Service { cache, ) .await?; + if let Some(port) = force_port { let port = format!(":{port}"); - Ok(FedDest::Named( + + return 
Ok(FedDest::Named( host.to_owned(), PortString::from(port.as_str()).unwrap_or_else(|_| FedDest::default_port()), - )) - } else { - Ok(add_port_to_hostname(host)) + )); } + + Ok(add_port_to_hostname(host)) } async fn actual_dest_5(&self, dest: &ServerName, cache: bool) -> Result { debug!("5: No SRV record found"); self.conditional_query_and_cache_override(dest.as_str(), dest.as_str(), 8448, cache) .await?; + Ok(add_port_to_hostname(dest.as_str())) } diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 3163b0d0..090e562d 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,7 +6,8 @@ mod tests; use std::sync::Arc; -use conduwuit::{Result, Server}; +use arrayvec::ArrayString; +use conduwuit::{utils::MutexMap, Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; @@ -14,6 +15,7 @@ use crate::{client, Dep}; pub struct Service { pub cache: Arc, pub resolver: Arc, + resolving: Resolving, services: Services, } @@ -22,6 +24,9 @@ struct Services { client: Dep, } +type Resolving = MutexMap; +type NameBuf = ArrayString<256>; + impl crate::Service for Service { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] fn build(args: crate::Args<'_>) -> Result> { @@ -29,6 +34,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { cache: cache.clone(), resolver: Resolver::build(args.server, cache)?, + resolving: MutexMap::new(), services: Services { server: args.server.clone(), client: args.depend::("client"), From 607e338ac2bdb03b9e08cfe207bc7253aa8a8a2e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 07:56:24 +0000 Subject: [PATCH 0561/1248] cache result of resolution at completion of resolution Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 8 ++++---- src/service/resolver/cache.rs | 4 ++-- src/service/sending/send.rs | 29 ++++------------------------- 3 files changed, 10 insertions(+), 31 deletions(-) diff --git 
a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index afe5a1e5..1a36936d 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -18,7 +18,6 @@ use super::{ pub(crate) struct ActualDest { pub(crate) dest: FedDest, pub(crate) host: String, - pub(crate) cached: bool, } impl ActualDest { @@ -29,10 +28,10 @@ impl ActualDest { impl super::Service { #[tracing::instrument(skip_all, level = "debug", name = "resolve")] pub(crate) async fn get_actual_dest(&self, server_name: &ServerName) -> Result { - let (CachedDest { dest, host, .. }, cached) = + let (CachedDest { dest, host, .. }, _cached) = self.lookup_actual_dest(server_name).await?; - Ok(ActualDest { dest, host, cached }) + Ok(ActualDest { dest, host }) } pub(crate) async fn lookup_actual_dest( @@ -49,6 +48,7 @@ impl super::Service { } self.resolve_actual_dest(server_name, true) + .inspect_ok(|result| self.cache.set_destination(server_name, result)) .map_ok(|result| (result, false)) .boxed() .await @@ -334,7 +334,7 @@ impl super::Service { debug_info!("{overname:?} overriden by {hostname:?}"); } - self.cache.set_override(overname, CachedOverride { + self.cache.set_override(overname, &CachedOverride { ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 11e6c9bd..657718b3 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -45,12 +45,12 @@ impl Cache { } #[implement(Cache)] -pub fn set_destination(&self, name: &ServerName, dest: CachedDest) { +pub fn set_destination(&self, name: &ServerName, dest: &CachedDest) { self.destinations.raw_put(name, Cbor(dest)); } #[implement(Cache)] -pub fn set_override(&self, name: &str, over: CachedOverride) { +pub fn set_override(&self, name: &str, over: &CachedOverride) { self.overrides.raw_put(name, Cbor(over)); } diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs 
index 831a1dd8..c8a64f3c 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -18,10 +18,7 @@ use ruma::{ CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, }; -use crate::{ - resolver, - resolver::{actual::ActualDest, cache::CachedDest}, -}; +use crate::resolver::actual::ActualDest; impl super::Service { #[tracing::instrument( @@ -73,16 +70,7 @@ impl super::Service { debug!(?method, ?url, "Sending request"); match client.execute(request).await { - | Ok(response) => - handle_response::( - &self.services.resolver, - dest, - actual, - &method, - &url, - response, - ) - .await, + | Ok(response) => handle_response::(dest, actual, &method, &url, response).await, | Err(error) => Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), } @@ -111,7 +99,6 @@ impl super::Service { } async fn handle_response( - resolver: &resolver::Service, dest: &ServerName, actual: &ActualDest, method: &Method, @@ -122,17 +109,9 @@ where T: OutgoingRequest + Send, { let response = into_http_response(dest, actual, method, url, response).await?; - let result = T::IncomingResponse::try_from_http_response(response); - if result.is_ok() && !actual.cached { - resolver.cache.set_destination(dest, CachedDest { - dest: actual.dest.clone(), - host: actual.host.clone(), - expire: CachedDest::default_expire(), - }); - } - - result.map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}"))) + T::IncomingResponse::try_from_http_response(response) + .map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}"))) } async fn into_http_response( From da9f1ae5d7daf68d8e8568f07d38cbf8c065634a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 08:51:15 +0000 Subject: [PATCH 0562/1248] expire resolver cache entries Signed-off-by: Jason Volk --- src/core/utils/rand.rs | 2 +- src/service/resolver/cache.rs | 44 ++++++++++++++++++----------------- 2 files changed, 24 insertions(+), 22 
deletions(-) diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs index 9e6fc7a8..1d289c6e 100644 --- a/src/core/utils/rand.rs +++ b/src/core/utils/rand.rs @@ -33,7 +33,7 @@ pub fn string_array() -> ArrayString { #[inline] #[must_use] -pub fn timepoint_secs(range: Range) -> SystemTime { +pub fn time_from_now_secs(range: Range) -> SystemTime { SystemTime::now() .checked_add(secs(range)) .expect("range does not overflow SystemTime") diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 657718b3..e64878d4 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -2,7 +2,7 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; use arrayvec::ArrayVec; use conduwuit::{ - at, implement, + at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, Result, }; @@ -54,6 +54,18 @@ pub fn set_override(&self, name: &str, over: &CachedOverride) { self.overrides.raw_put(name, Cbor(over)); } +#[implement(Cache)] +#[must_use] +pub async fn has_destination(&self, destination: &ServerName) -> bool { + self.get_destination(destination).await.is_ok() +} + +#[implement(Cache)] +#[must_use] +pub async fn has_override(&self, destination: &str) -> bool { + self.get_override(destination).await.is_ok() +} + #[implement(Cache)] pub async fn get_destination(&self, name: &ServerName) -> Result { self.destinations @@ -61,6 +73,9 @@ pub async fn get_destination(&self, name: &ServerName) -> Result { .await .deserialized::>() .map(at!(0)) + .into_iter() + .find(CachedDest::valid) + .ok_or(err!(Request(NotFound("Expired from cache")))) } #[implement(Cache)] @@ -70,18 +85,9 @@ pub async fn get_override(&self, name: &str) -> Result { .await .deserialized::>() .map(at!(0)) -} - -#[implement(Cache)] -#[must_use] -pub async fn has_destination(&self, destination: &str) -> bool { - self.destinations.exists(destination).await.is_ok() -} - -#[implement(Cache)] -#[must_use] -pub async fn has_override(&self, destination: &str) -> bool { - 
self.overrides.exists(destination).await.is_ok() + .into_iter() + .find(CachedOverride::valid) + .ok_or(err!(Request(NotFound("Expired from cache")))) } #[implement(Cache)] @@ -103,13 +109,11 @@ pub fn overrides(&self) -> impl Stream + S impl CachedDest { #[inline] #[must_use] - pub fn valid(&self) -> bool { true } - - //pub fn valid(&self) -> bool { self.expire > SystemTime::now() } + pub fn valid(&self) -> bool { self.expire > SystemTime::now() } #[must_use] pub(crate) fn default_expire() -> SystemTime { - rand::timepoint_secs(60 * 60 * 18..60 * 60 * 36) + rand::time_from_now_secs(60 * 60 * 18..60 * 60 * 36) } #[inline] @@ -125,13 +129,11 @@ impl CachedDest { impl CachedOverride { #[inline] #[must_use] - pub fn valid(&self) -> bool { true } - - //pub fn valid(&self) -> bool { self.expire > SystemTime::now() } + pub fn valid(&self) -> bool { self.expire > SystemTime::now() } #[must_use] pub(crate) fn default_expire() -> SystemTime { - rand::timepoint_secs(60 * 60 * 6..60 * 60 * 12) + rand::time_from_now_secs(60 * 60 * 6..60 * 60 * 12) } #[inline] From 265802d54608eb10295560f54f26bd106e4930e9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 21:38:15 +0000 Subject: [PATCH 0563/1248] additional prof/stats jemalloc related interface tweaks Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 61 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 6 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 119ff45e..81fbd3ea 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -26,10 +26,11 @@ metadata_thp:always\ ,background_thread:true\ ,max_background_threads:-1\ ,lg_extent_max_active_fit:4\ -,oversize_threshold:33554432\ -,tcache_max:1048576\ +,oversize_threshold:16777216\ +,tcache_max:2097152\ ,dirty_decay_ms:16000\ ,muzzy_decay_ms:144000\ +,prof_active:false\ \0"; #[global_allocator] @@ -120,7 +121,7 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { } 
macro_rules! mallctl { - ($name:literal) => {{ + ($name:expr) => {{ thread_local! { static KEY: OnceCell = OnceCell::default(); }; @@ -135,6 +136,13 @@ macro_rules! mallctl { pub mod this_thread { use super::{is_nonzero, key, math, Debug, Key, OnceCell, Result}; + thread_local! { + static ALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; + static DEALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; + } + + pub fn idle() -> Result { super::notify(&mallctl!("thread.idle")) } + pub fn trim() -> Result { notify(mallctl!("arena.0.purge")) } pub fn decay() -> Result { notify(mallctl!("arena.0.decay")) } @@ -153,7 +161,7 @@ pub mod this_thread { pub fn get_dirty_decay() -> Result { get(mallctl!("arena.0.dirty_decay_ms")) } - pub fn enable_cache(enable: bool) -> Result { + pub fn cache_enable(enable: bool) -> Result { super::set::(&mallctl!("thread.tcache.enabled"), enable.into()).map(is_nonzero!()) } @@ -169,9 +177,29 @@ pub mod this_thread { super::get::(&mallctl!("thread.arena")).and_then(math::try_into) } - pub fn allocated() -> Result { super::get(&mallctl!("thread.allocated")) } + pub fn prof_enable(enable: bool) -> Result { + super::set::(&mallctl!("thread.prof.active"), enable.into()).map(is_nonzero!()) + } - pub fn deallocated() -> Result { super::get(&mallctl!("thread.deallocated")) } + pub fn is_prof_enabled() -> Result { + super::get::(&mallctl!("thread.prof.active")).map(is_nonzero!()) + } + + pub fn reset_peak() -> Result { super::notify(&mallctl!("thread.peak.reset")) } + + pub fn peak() -> Result { super::get(&mallctl!("thread.peak.read")) } + + #[inline] + #[must_use] + pub fn allocated() -> u64 { + *ALLOCATED_BYTES.with(|once| init_tls_cell(once, "thread.allocatedp")) + } + + #[inline] + #[must_use] + pub fn deallocated() -> u64 { + *DEALLOCATED_BYTES.with(|once| init_tls_cell(once, "thread.deallocatedp")) + } fn notify(key: Key) -> Result { super::notify_by_arena(Some(arena_id()?), key) } @@ -188,6 +216,27 @@ pub mod 
this_thread { { super::get_by_arena(Some(arena_id()?), key) } + + fn init_tls_cell(cell: &OnceCell<&'static u64>, name: &str) -> &'static u64 { + cell.get_or_init(|| { + let ptr: *const u64 = super::get(&mallctl!(name)).expect("failed to obtain pointer"); + + // SAFETY: ptr points directly to the internal state of jemalloc for this thread + unsafe { ptr.as_ref() }.expect("pointer must not be null") + }) + } +} + +pub fn stats_reset() -> Result { notify(&mallctl!("stats.mutexes.reset")) } + +pub fn prof_reset() -> Result { notify(&mallctl!("prof.reset")) } + +pub fn prof_enable(enable: bool) -> Result { + set::(&mallctl!("prof.active"), enable.into()).map(is_nonzero!()) +} + +pub fn is_prof_enabled() -> Result { + get::(&mallctl!("prof.active")).map(is_nonzero!()) } pub fn trim>>(arena: I) -> Result { From a5520e8b1bc1c4ddb9090dc9b93ef76899e58d9a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 22 Jan 2025 23:07:13 +0000 Subject: [PATCH 0564/1248] fix SRV override loss on cache expiration Signed-off-by: Jason Volk --- src/admin/query/resolver.rs | 10 +++--- src/service/resolver/actual.rs | 62 ++++++++++++++++++---------------- src/service/resolver/cache.rs | 9 ++--- src/service/resolver/dns.rs | 22 ++++++++++-- 4 files changed, 63 insertions(+), 40 deletions(-) diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 0b6da6fd..08b5d171 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -51,12 +51,14 @@ async fn destinations_cache( async fn overrides_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedOverride; - writeln!(self, "| Server Name | IP | Port | Expires |").await?; - writeln!(self, "| ----------- | --- | ----:| ------- |").await?; + writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?; + writeln!(self, "| ----------- | --- | ----:| ------- | ---------- |").await?; let mut overrides = self.services.resolver.cache.overrides().boxed(); - while let 
Some((name, CachedOverride { ips, port, expire })) = overrides.next().await { + while let Some((name, CachedOverride { ips, port, expire, overriding })) = + overrides.next().await + { if let Some(server_name) = server_name.as_ref() { if name != server_name { continue; @@ -64,7 +66,7 @@ async fn overrides_cache(&self, server_name: Option) -> Result Result { debug!("2: Hostname with included port"); let (host, port) = dest.as_str().split_at(pos); - self.conditional_query_and_cache_override( - host, - host, - port.parse::().unwrap_or(8448), - cache, - ) - .await?; + self.conditional_query_and_cache(host, port.parse::().unwrap_or(8448), cache) + .await?; Ok(FedDest::Named( host.to_owned(), @@ -163,13 +158,8 @@ impl super::Service { ) -> Result { debug!("3.2: Hostname with port in .well-known file"); let (host, port) = delegated.split_at(pos); - self.conditional_query_and_cache_override( - host, - host, - port.parse::().unwrap_or(8448), - cache, - ) - .await?; + self.conditional_query_and_cache(host, port.parse::().unwrap_or(8448), cache) + .await?; Ok(FedDest::Named( host.to_owned(), @@ -208,7 +198,7 @@ impl super::Service { async fn actual_dest_3_4(&self, cache: bool, delegated: String) -> Result { debug!("3.4: No SRV records, just use the hostname from .well-known"); - self.conditional_query_and_cache_override(&delegated, &delegated, 8448, cache) + self.conditional_query_and_cache(&delegated, 8448, cache) .await?; Ok(add_port_to_hostname(&delegated)) } @@ -243,7 +233,7 @@ impl super::Service { async fn actual_dest_5(&self, dest: &ServerName, cache: bool) -> Result { debug!("5: No SRV record found"); - self.conditional_query_and_cache_override(dest.as_str(), dest.as_str(), 8448, cache) + self.conditional_query_and_cache(dest.as_str(), 8448, cache) .await?; Ok(add_port_to_hostname(dest.as_str())) @@ -251,9 +241,7 @@ impl super::Service { #[tracing::instrument(skip_all, name = "well-known")] async fn request_well_known(&self, dest: &str) -> Result> { - if 
!self.cache.has_override(dest).await { - self.query_and_cache_override(dest, dest, 8448).await?; - } + self.conditional_query_and_cache(dest, 8448, true).await?; self.services.server.check_running()?; trace!("Requesting well known for {dest}"); @@ -301,6 +289,17 @@ impl super::Service { Ok(Some(m_server.to_owned())) } + #[inline] + async fn conditional_query_and_cache( + &self, + hostname: &str, + port: u16, + cache: bool, + ) -> Result { + self.conditional_query_and_cache_override(hostname, hostname, port, cache) + .await + } + #[inline] async fn conditional_query_and_cache_override( &self, @@ -308,13 +307,17 @@ impl super::Service { hostname: &str, port: u16, cache: bool, - ) -> Result<()> { - if cache { - self.query_and_cache_override(overname, hostname, port) - .await - } else { - Ok(()) + ) -> Result { + if !cache { + return Ok(()); } + + if self.cache.has_override(overname).await { + return Ok(()); + } + + self.query_and_cache_override(overname, hostname, port) + .await } #[tracing::instrument(skip(self, overname, port), name = "ip")] @@ -323,21 +326,20 @@ impl super::Service { overname: &'_ str, hostname: &'_ str, port: u16, - ) -> Result<()> { + ) -> Result { self.services.server.check_running()?; debug!("querying IP for {overname:?} ({hostname:?}:{port})"); match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { | Err(e) => Self::handle_resolve_error(&e, hostname), | Ok(override_ip) => { - if hostname != overname { - debug_info!("{overname:?} overriden by {hostname:?}"); - } - self.cache.set_override(overname, &CachedOverride { ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), + overriding: (hostname != overname) + .then_some(hostname.into()) + .inspect(|_| debug_info!("{overname:?} overriden by {hostname:?}")), }); Ok(()) diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index e64878d4..22a92865 100644 --- a/src/service/resolver/cache.rs +++ 
b/src/service/resolver/cache.rs @@ -30,6 +30,7 @@ pub struct CachedOverride { pub ips: IpAddrs, pub port: u16, pub expire: SystemTime, + pub overriding: Option, } pub type IpAddrs = ArrayVec; @@ -63,7 +64,10 @@ pub async fn has_destination(&self, destination: &ServerName) -> bool { #[implement(Cache)] #[must_use] pub async fn has_override(&self, destination: &str) -> bool { - self.get_override(destination).await.is_ok() + self.get_override(destination) + .await + .iter() + .any(CachedOverride::valid) } #[implement(Cache)] @@ -85,9 +89,6 @@ pub async fn get_override(&self, name: &str) -> Result { .await .deserialized::>() .map(at!(0)) - .into_iter() - .find(CachedOverride::valid) - .ok_or(err!(Request(NotFound("Expired from cache")))) } #[implement(Cache)] diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index ad7768bc..ca6106e2 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -93,6 +93,11 @@ impl Resolve for Hooked { } } +#[tracing::instrument( + level = "debug", + skip_all, + fields(name = ?name.as_str()) +)] async fn hooked_resolve( cache: Arc, server: Arc, @@ -100,8 +105,21 @@ async fn hooked_resolve( name: Name, ) -> Result> { match cache.get_override(name.as_str()).await { - | Ok(cached) => cached_to_reqwest(cached).await, - | Err(_) => resolve_to_reqwest(server, resolver, name).boxed().await, + | Ok(cached) if cached.valid() => cached_to_reqwest(cached).await, + | Ok(CachedOverride { overriding, .. 
}) if overriding.is_some() => + resolve_to_reqwest( + server, + resolver, + overriding + .as_deref() + .map(str::parse) + .expect("overriding is set for this record") + .expect("overriding is a valid internet name"), + ) + .boxed() + .await, + + | _ => resolve_to_reqwest(server, resolver, name).boxed().await, } } From 52adae7553e896bb07aacce4224a7fe8ff1bc992 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 23 Jan 2025 20:05:20 +0000 Subject: [PATCH 0565/1248] add sequence method to db engine; improve engine interface/tracing Signed-off-by: Jason Volk --- src/database/engine.rs | 87 +++++++++++++++++++++++++++++------------- 1 file changed, 60 insertions(+), 27 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index 8be9eecc..76b2889b 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -41,12 +41,49 @@ pub struct Engine { pub(crate) type Db = DBWithThreadMode; impl Engine { - pub(crate) fn cf(&self, name: &str) -> Arc> { - self.db - .cf_handle(name) - .expect("column must be described prior to database open") + #[tracing::instrument( + level = "info", + skip_all, + fields( + sequence = ?self.current_sequence(), + ), + )] + pub fn wait_compactions_blocking(&self) -> Result { + let mut opts = WaitForCompactOptions::default(); + opts.set_abort_on_pause(true); + opts.set_flush(false); + opts.set_timeout(0); + + self.db.wait_for_compact(&opts).map_err(map_err) } + #[tracing::instrument( + level = "info", + skip_all, + fields( + sequence = ?self.current_sequence(), + ), + )] + pub fn sort(&self) -> Result { + let flushoptions = rocksdb::FlushOptions::default(); + result(DBCommon::flush_opt(&self.db, &flushoptions)) + } + + #[tracing::instrument( + level = "debug", + skip_all, + fields( + sequence = ?self.current_sequence(), + ), + )] + pub fn update(&self) -> Result { self.db.try_catch_up_with_primary().map_err(map_err) } + + #[tracing::instrument(level = "info", skip_all)] + pub fn sync(&self) -> Result { 
result(DBCommon::flush_wal(&self.db, true)) } + + #[tracing::instrument(level = "debug", skip_all)] + pub fn flush(&self) -> Result { result(DBCommon::flush_wal(&self.db, false)) } + #[inline] pub(crate) fn cork(&self) { self.corks.fetch_add(1, Ordering::Relaxed); } @@ -56,28 +93,6 @@ impl Engine { #[inline] pub fn corked(&self) -> bool { self.corks.load(Ordering::Relaxed) > 0 } - #[tracing::instrument(skip(self))] - pub fn sync(&self) -> Result { result(DBCommon::flush_wal(&self.db, true)) } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn flush(&self) -> Result { result(DBCommon::flush_wal(&self.db, false)) } - - #[tracing::instrument(skip(self), level = "info")] - pub fn sort(&self) -> Result { - let flushoptions = rocksdb::FlushOptions::default(); - result(DBCommon::flush_opt(&self.db, &flushoptions)) - } - - #[tracing::instrument(skip(self), level = "info")] - pub fn wait_compactions(&self) -> Result { - let mut opts = WaitForCompactOptions::default(); - opts.set_abort_on_pause(true); - opts.set_flush(false); - opts.set_timeout(0); - - self.db.wait_for_compact(&opts).map_err(map_err) - } - /// Query for database property by null-terminated name which is expected to /// have a result with an integer representation. This is intended for /// low-overhead programmatic use. 
@@ -96,6 +111,24 @@ impl Engine { .and_then(|val| val.map_or_else(|| Err!("Property {name:?} not found."), Ok)) } + pub(crate) fn cf(&self, name: &str) -> Arc> { + self.db + .cf_handle(name) + .expect("column must be described prior to database open") + } + + #[inline] + #[must_use] + #[tracing::instrument(name = "sequence", level = "debug", skip_all, fields(sequence))] + pub fn current_sequence(&self) -> u64 { + let sequence = self.db.latest_sequence_number(); + + #[cfg(debug_assertions)] + tracing::Span::current().record("sequence", sequence); + + sequence + } + #[inline] #[must_use] pub fn is_read_only(&self) -> bool { self.secondary || self.read_only } @@ -114,7 +147,7 @@ impl Drop for Engine { self.db.cancel_all_background_work(BLOCKING); info!( - sequence = %self.db.latest_sequence_number(), + sequence = %self.current_sequence(), "Closing database..." ); } From 6e7c73336c49bd43cdb143212f36fe82f749209a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 06:12:52 +0000 Subject: [PATCH 0566/1248] move room version config check out of services.globals make available_room_versions() non-member associated Signed-off-by: Jason Volk --- src/api/client/capabilities.rs | 5 +++-- src/core/config/check.rs | 12 +++++++++++- src/core/info/room_version.rs | 10 +++++----- src/service/globals/mod.rs | 8 -------- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 87cdb43d..7188aa23 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; +use conduwuit::{Result, Server}; use ruma::{ api::client::discovery::get_capabilities::{ self, Capabilities, GetLoginTokenCapability, RoomVersionStability, @@ -10,7 +11,7 @@ use ruma::{ }; use serde_json::json; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/v3/capabilities` /// @@ -21,7 +22,7 @@ pub(crate) async fn 
get_capabilities_route( _body: Ruma, ) -> Result { let available: BTreeMap = - services.server.available_room_versions().collect(); + Server::available_room_versions().collect(); let mut capabilities = Capabilities::default(); capabilities.room_versions = RoomVersionsCapability { diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 06ae5ebb..d7be54b1 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -4,7 +4,7 @@ use either::Either; use figment::Figment; use super::DEPRECATED_KEYS; -use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result}; +use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result, Server}; #[allow(clippy::cognitive_complexity)] pub fn check(config: &Config) -> Result<()> { @@ -233,6 +233,16 @@ pub fn check(config: &Config) -> Result<()> { } } + if !Server::available_room_versions() + .any(|(version, _)| version == config.default_room_version) + { + return Err!(Config( + "default_room_version", + "Room version {:?} is not available", + config.default_room_version + )); + } + Ok(()) } diff --git a/src/core/info/room_version.rs b/src/core/info/room_version.rs index 40f0cf0a..b33a8562 100644 --- a/src/core/info/room_version.rs +++ b/src/core/info/room_version.rs @@ -20,6 +20,8 @@ pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[ pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] = &[RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; +type RoomVersion = (RoomVersionId, RoomVersionStability); + impl crate::Server { #[inline] pub fn supported_room_version(&self, version: &RoomVersionId) -> bool { @@ -28,15 +30,13 @@ impl crate::Server { #[inline] pub fn supported_room_versions(&self) -> impl Iterator + '_ { - self.available_room_versions() + Self::available_room_versions() .filter(|(_, stability)| self.supported_stability(stability)) .map(at!(0)) } #[inline] - pub fn available_room_versions( - &self, - ) -> impl Iterator { + pub fn 
available_room_versions() -> impl Iterator { available_room_versions() } @@ -46,7 +46,7 @@ impl crate::Server { } } -pub fn available_room_versions() -> impl Iterator { +pub fn available_room_versions() -> impl Iterator { let unstable_room_versions = UNSTABLE_ROOM_VERSIONS .iter() .cloned() diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f6ff2b09..fe84578a 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -72,14 +72,6 @@ impl crate::Service for Service { registration_token, }; - if !args - .server - .supported_room_version(&config.default_room_version) - { - error!(config=?s.config.default_room_version, fallback=?conduwuit::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); - s.config.default_room_version = conduwuit::config::default_default_room_version(); - }; - Ok(Arc::new(s)) } From 1351d07735719525da6af3485afcc6039de67b8c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 06:58:26 +0000 Subject: [PATCH 0567/1248] improve path argument to Config::load and constructions Signed-off-by: Jason Volk --- src/core/config/mod.rs | 17 ++++++++++------- src/main/server.rs | 15 +++++++++++---- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index d6983540..beaabe5d 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -4,7 +4,7 @@ pub mod proxy; use std::{ collections::{BTreeMap, BTreeSet, HashSet}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - path::PathBuf, + path::{Path, PathBuf}, }; use conduwuit_macros::config_example_generator; @@ -1797,14 +1797,17 @@ const DEPRECATED_KEYS: &[&str; 9] = &[ impl Config { /// Pre-initialize config - pub fn load(paths: Option<&[PathBuf]>) -> Result { - let paths_files = paths.into_iter().flatten().map(Toml::file); - + pub fn load<'a, I>(paths: I) -> Result + where + I: Iterator, + { let envs = [Env::var("CONDUIT_CONFIG"), 
Env::var("CONDUWUIT_CONFIG")]; - let envs_files = envs.into_iter().flatten().map(Toml::file); - let config = envs_files - .chain(paths_files) + let config = envs + .into_iter() + .flatten() + .map(Toml::file) + .chain(paths.map(Toml::file)) .fold(Figment::new(), |config, file| config.merge(file.nested())) .merge(Env::prefixed("CONDUIT_").global().split("__")) .merge(Env::prefixed("CONDUWUIT_").global().split("__")); diff --git a/src/main/server.rs b/src/main/server.rs index 359a029c..74859f2b 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{path::PathBuf, sync::Arc}; use conduwuit::{ config::Config, @@ -35,9 +35,16 @@ impl Server { ) -> Result, Error> { let _runtime_guard = runtime.map(runtime::Handle::enter); - let raw_config = Config::load(args.config.as_deref())?; - let raw_config = crate::clap::update(raw_config, args)?; - let config = Config::new(&raw_config)?; + let config_paths = args + .config + .as_deref() + .into_iter() + .flat_map(<[_]>::iter) + .map(PathBuf::as_path); + + let config = Config::load(config_paths) + .and_then(|raw| crate::clap::update(raw, args)) + .and_then(|raw| Config::new(&raw))?; #[cfg(feature = "sentry_telemetry")] let sentry_guard = crate::sentry::init(&config); From 7c6b8b132aea086fc95c5a6def4af14d1c35d0f8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 06:15:10 +0000 Subject: [PATCH 0568/1248] add config reloading indirector Signed-off-by: Jason Volk --- src/core/config/manager.rs | 128 +++++++++++++++++++++++++++++++++++++ src/core/config/mod.rs | 3 +- 2 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 src/core/config/manager.rs diff --git a/src/core/config/manager.rs b/src/core/config/manager.rs new file mode 100644 index 00000000..0c95ca15 --- /dev/null +++ b/src/core/config/manager.rs @@ -0,0 +1,128 @@ +use std::{ + cell::{Cell, RefCell}, + ops::Deref, + ptr, + ptr::null_mut, + sync::{ + atomic::{AtomicPtr, Ordering}, + Arc, + }, +}; + 
+use super::Config; +use crate::{implement, Result}; + +/// The configuration manager is an indirection to reload the configuration for +/// the server while it is running. In order to not burden or clutter the many +/// callsites which query for configuration items, this object implements Deref +/// for the actively loaded configuration. +pub struct Manager { + active: AtomicPtr, +} + +thread_local! { + static INDEX: Cell = 0.into(); + static HANDLE: RefCell = const { + RefCell::new([const { None }; HISTORY]) + }; +} + +type Handle = Option>; +type Handles = [Handle; HISTORY]; + +const HISTORY: usize = 8; + +impl Manager { + pub(crate) fn new(config: Config) -> Self { + let config = Arc::new(config); + Self { + active: AtomicPtr::new(Arc::into_raw(config).cast_mut()), + } + } +} + +impl Drop for Manager { + fn drop(&mut self) { + let config = self.active.swap(null_mut(), Ordering::AcqRel); + + // SAFETY: The active pointer was set using an Arc::into_raw(). We're obliged to + // reconstitute that into Arc otherwise it will leak. + unsafe { Arc::from_raw(config) }; + } +} + +impl Deref for Manager { + type Target = Arc; + + fn deref(&self) -> &Self::Target { HANDLE.with_borrow_mut(|handle| self.load(handle)) } +} + +/// Update the active configuration, returning prior configuration. +#[implement(Manager)] +#[tracing::instrument(skip_all)] +pub fn update(&self, config: Config) -> Result> { + let config = Arc::new(config); + let new = Arc::into_raw(config); + let old = self.active.swap(new.cast_mut(), Ordering::AcqRel); + + // SAFETY: The old active pointer was set using an Arc::into_raw(). We're + // obliged to reconstitute that into Arc otherwise it will leak. + Ok(unsafe { Arc::from_raw(old) }) +} + +#[implement(Manager)] +fn load(&self, handle: &mut [Option>]) -> &'static Arc { + let config = self.active.load(Ordering::Acquire); + + // Branch taken after config reload or first access by this thread. 
+ if handle[INDEX.get()] + .as_ref() + .is_none_or(|handle| !ptr::eq(config, Arc::as_ptr(handle))) + { + INDEX.set(INDEX.get().wrapping_add(1).wrapping_rem(HISTORY)); + return load_miss(handle, INDEX.get(), config); + } + + let config: &Arc = handle[INDEX.get()] + .as_ref() + .expect("handle was already cached for this thread"); + + // SAFETY: The caller should not hold multiple references at a time directly + // into Config, as a subsequent reference might invalidate the thread's cache + // causing another reference to dangle. + // + // This is a highly unusual pattern as most config values are copied by value or + // used immediately without running overlap with another value. Even if it does + // actually occur somewhere, the window of danger is limited to the config being + // reloaded while the reference is held and another access is made by the same + // thread into a different config value. This is mitigated by creating a buffer + // of old configs rather than discarding at the earliest opportunity; the odds + // of this scenario are thus astronomical. + unsafe { std::mem::transmute(config) } +} + +#[tracing::instrument( + name = "miss", + level = "trace", + skip_all, + fields(%index, ?config) +)] +#[allow(clippy::transmute_ptr_to_ptr)] +fn load_miss( + handle: &mut [Option>], + index: usize, + config: *const Config, +) -> &'static Arc { + // SAFETY: The active pointer was set prior and always remains valid. We're + // reconstituting the Arc here but as a new reference, so the count is + // incremented. This instance will be cached in the thread-local. + let config = unsafe { + Arc::increment_strong_count(config); + Arc::from_raw(config) + }; + + // SAFETY: See the note on the transmute above. The caller should not hold more + // than one reference at a time directly into Config, as the second access + // might invalidate the thread's cache, dangling the reference to the first. 
+ unsafe { std::mem::transmute(handle[index].insert(config)) } +} diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index beaabe5d..e459f50b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1,4 +1,5 @@ pub mod check; +pub mod manager; pub mod proxy; use std::{ @@ -22,8 +23,8 @@ use ruma::{ use serde::{de::IgnoredAny, Deserialize}; use url::Url; -pub use self::check::check; use self::proxy::ProxyConfig; +pub use self::{check::check, manager::Manager}; use crate::{err, error::Error, utils::sys, Result}; /// All the config options for conduwuit. From 5be07ebc0f4bbedb3f0d93d35d290720d042fd0d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 07:02:56 +0000 Subject: [PATCH 0569/1248] eliminate references to services.globals.config Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 18 +++---- src/admin/federation/commands.rs | 2 +- src/admin/user/commands.rs | 8 +-- src/api/client/account.rs | 18 +++---- src/api/client/directory.rs | 6 +-- src/api/client/media.rs | 2 +- src/api/client/media_legacy.rs | 2 +- src/api/client/membership.rs | 12 ++--- src/api/client/openid.rs | 2 +- src/api/client/report.rs | 2 +- src/api/client/room/create.rs | 8 +-- src/api/client/typing.rs | 12 ++--- src/api/client/voip.rs | 2 +- src/api/router/auth.rs | 6 +-- src/api/router/request.rs | 2 +- src/api/server/invite.rs | 4 +- src/api/server/make_join.rs | 4 +- src/api/server/make_knock.rs | 4 +- src/api/server/publicrooms.rs | 2 +- src/api/server/query.rs | 2 +- src/api/server/send.rs | 4 +- src/api/server/send_join.rs | 8 +-- src/api/server/send_knock.rs | 4 +- src/service/admin/mod.rs | 2 +- src/service/globals/mod.rs | 84 ++++++++++++++++---------------- src/service/rooms/alias/mod.rs | 10 ++-- 26 files changed, 116 insertions(+), 114 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 49078dde..af7bd79f 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -170,7 +170,7 @@ 
pub(super) async fn get_remote_pdu_list( server: Box, force: bool, ) -> Result { - if !self.services.globals.config.allow_federation { + if !self.services.server.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( "Federation is disabled on this homeserver.", )); @@ -235,7 +235,7 @@ pub(super) async fn get_remote_pdu( event_id: Box, server: Box, ) -> Result { - if !self.services.globals.config.allow_federation { + if !self.services.server.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( "Federation is disabled on this homeserver.", )); @@ -419,7 +419,7 @@ pub(super) async fn change_log_level( let handles = &["console"]; if reset { - let old_filter_layer = match EnvFilter::try_new(&self.services.globals.config.log) { + let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) { | Ok(s) => s, | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( @@ -438,7 +438,7 @@ pub(super) async fn change_log_level( | Ok(()) => { return Ok(RoomMessageEventContent::text_plain(format!( "Successfully changed log level back to config value {}", - self.services.globals.config.log + self.services.server.config.log ))); }, | Err(e) => { @@ -554,7 +554,7 @@ pub(super) async fn first_pdu_in_room( .services .rooms .state_cache - .server_in_room(&self.services.globals.config.server_name, &room_id) + .server_in_room(&self.services.server.config.server_name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -583,7 +583,7 @@ pub(super) async fn latest_pdu_in_room( .services .rooms .state_cache - .server_in_room(&self.services.globals.config.server_name, &room_id) + .server_in_room(&self.services.server.config.server_name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -613,7 +613,7 @@ pub(super) async fn force_set_room_state_from_server( .services .rooms .state_cache - .server_in_room(&self.services.globals.config.server_name, &room_id) + 
.server_in_room(&self.services.server.config.server_name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -818,13 +818,13 @@ pub(super) async fn resolve_true_destination( server_name: Box, no_cache: bool, ) -> Result { - if !self.services.globals.config.allow_federation { + if !self.services.server.config.allow_federation { return Ok(RoomMessageEventContent::text_plain( "Federation is disabled on this homeserver.", )); } - if server_name == self.services.globals.config.server_name { + if server_name == self.services.server.config.server_name { return Ok(RoomMessageEventContent::text_plain( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs.", diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 75635b1b..be91ef0a 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -92,7 +92,7 @@ pub(super) async fn remote_user_in_rooms( &self, user_id: Box, ) -> Result { - if user_id.server_name() == self.services.globals.config.server_name { + if user_id.server_name() == self.services.server.config.server_name { return Ok(RoomMessageEventContent::text_plain( "User belongs to our server, please use `list-joined-rooms` user admin command \ instead.", diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 57aedd9c..64767a36 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -83,12 +83,12 @@ pub(super) async fn create_user( // content is set to the user's display name with a space before it if !self .services - .globals + .server .config .new_user_displayname_suffix .is_empty() { - write!(displayname, " {}", self.services.globals.config.new_user_displayname_suffix) + write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix) .expect("should be able to write to string buffer"); } @@ -114,8 +114,8 @@ pub(super) async fn create_user( ) .await?; - if 
!self.services.globals.config.auto_join_rooms.is_empty() { - for room in &self.services.globals.config.auto_join_rooms { + if !self.services.server.config.auto_join_rooms.is_empty() { + for room in &self.services.server.config.auto_join_rooms { let Ok(room_id) = self.services.rooms.alias.resolve(room).await else { error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"); continue; diff --git a/src/api/client/account.rs b/src/api/client/account.rs index e6748124..cb25b276 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -299,7 +299,7 @@ pub(crate) async fn register_route( if !services.globals.new_user_displayname_suffix().is_empty() && body.appservice_info.is_none() { - write!(displayname, " {}", services.globals.config.new_user_displayname_suffix) + write!(displayname, " {}", services.server.config.new_user_displayname_suffix) .expect("should be able to write to string buffer"); } @@ -365,7 +365,7 @@ pub(crate) async fn register_route( \"{device_display_name}\"" ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -378,7 +378,7 @@ pub(crate) async fn register_route( } else { info!("New user \"{user_id}\" registered on this server."); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -395,7 +395,7 @@ pub(crate) async fn register_route( info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -407,7 +407,7 @@ pub(crate) async fn register_route( } } else { #[allow(clippy::collapsible_else_if)] - if 
services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -438,10 +438,10 @@ pub(crate) async fn register_route( } if body.appservice_info.is_none() - && !services.globals.config.auto_join_rooms.is_empty() + && !services.server.config.auto_join_rooms.is_empty() && (services.globals.allow_guests_auto_join_rooms() || !is_guest) { - for room in &services.globals.config.auto_join_rooms { + for room in &services.server.config.auto_join_rooms { let Ok(room_id) = services.rooms.alias.resolve(room).await else { error!( "Failed to resolve room alias to room ID when attempting to auto join \ @@ -570,7 +570,7 @@ pub(crate) async fn change_password_route( info!("User {sender_user} changed their password."); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( @@ -673,7 +673,7 @@ pub(crate) async fn deactivate_route( info!("User {sender_user} deactivated their account."); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::notice_plain(format!( diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index c8faaa46..9166eed9 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -152,7 +152,7 @@ pub(crate) async fn set_room_visibility_route( match &body.visibility { | room::Visibility::Public => { - if services.globals.config.lockdown_public_room_directory + if services.server.config.lockdown_public_room_directory && !services.users.is_admin(sender_user).await && body.appservice_info.is_none() { @@ -162,7 +162,7 @@ pub(crate) async fn set_room_visibility_route( body.room_id ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin 
.send_text(&format!( @@ -181,7 +181,7 @@ pub(crate) async fn set_room_visibility_route( services.rooms.directory.set_public(&body.room_id); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_text(&format!( diff --git a/src/api/client/media.rs b/src/api/client/media.rs index e58ba626..afbc218a 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -31,7 +31,7 @@ pub(crate) async fn get_media_config_route( _body: Ruma, ) -> Result { Ok(get_media_config::v1::Response { - upload_size: ruma_from_usize(services.globals.config.max_request_size), + upload_size: ruma_from_usize(services.server.config.max_request_size), }) } diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index 6f54a683..29cf3069 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -27,7 +27,7 @@ pub(crate) async fn get_media_config_legacy_route( _body: Ruma, ) -> Result { Ok(get_media_config::v3::Response { - upload_size: ruma_from_usize(services.globals.config.max_request_size), + upload_size: ruma_from_usize(services.server.config.max_request_size), }) } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d94fc3c7..2e23dab9 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -71,7 +71,7 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .globals + .server .config .forbidden_remote_server_names .contains(&room_id.server_name().unwrap().to_owned()) @@ -81,12 +81,12 @@ async fn banned_room_check( attempted to join a banned room or banned room server name: {room_id}" ); - if services.globals.config.auto_deactivate_banned_room_attempts { + if services.server.config.auto_deactivate_banned_room_attempts { warn!( "Automatically deactivating user {user_id} due to attempted banned room join" ); - if services.globals.config.admin_room_notices 
{ + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::text_plain(format!( @@ -112,7 +112,7 @@ async fn banned_room_check( } } else if let Some(server_name) = server_name { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server_name.to_owned()) @@ -122,12 +122,12 @@ async fn banned_room_check( name {server_name} that is globally forbidden. Rejecting.", ); - if services.globals.config.auto_deactivate_banned_room_attempts { + if services.server.config.auto_deactivate_banned_room_attempts { warn!( "Automatically deactivating user {user_id} due to attempted banned room join" ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_message(RoomMessageEventContent::text_plain(format!( diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 4b2ff727..3547d284 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -37,7 +37,7 @@ pub(crate) async fn create_openid_token_route( Ok(account::request_openid_token::v3::Response { access_token, token_type: TokenType::Bearer, - matrix_server_name: services.globals.config.server_name.clone(), + matrix_server_name: services.server.config.server_name.clone(), expires_in: Duration::from_secs(expires_in), }) } diff --git a/src/api/client/report.rs b/src/api/client/report.rs index fe23b7bd..2b25b518 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -50,7 +50,7 @@ pub(crate) async fn report_room_route( if !services .rooms .state_cache - .server_in_room(&services.globals.config.server_name, &body.room_id) + .server_in_room(&services.server.config.server_name, &body.room_id) .await { return Err!(Request(NotFound( diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index a78242ca..1b6e8667 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -71,7 +71,7 @@ pub(crate) async fn 
create_room_route( let room_id: OwnedRoomId = if let Some(custom_room_id) = &body.room_id { custom_room_id_check(&services, custom_room_id)? } else { - RoomId::new(&services.globals.config.server_name) + RoomId::new(&services.server.config.server_name) }; // check if room ID doesn't already exist instead of erroring on auth check @@ -83,7 +83,7 @@ pub(crate) async fn create_room_route( } if body.visibility == room::Visibility::Public - && services.globals.config.lockdown_public_room_directory + && services.server.config.lockdown_public_room_directory && !services.users.is_admin(sender_user).await && body.appservice_info.is_none() { @@ -93,7 +93,7 @@ pub(crate) async fn create_room_route( &room_id ); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_text(&format!( @@ -450,7 +450,7 @@ pub(crate) async fn create_room_route( if body.visibility == room::Visibility::Public { services.rooms.directory.set_public(&room_id); - if services.globals.config.admin_room_notices { + if services.server.config.admin_room_notices { services .admin .send_text(&format!( diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index 6eabe96a..b311295b 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::Err; +use conduwuit::{utils::math::Tried, Err}; use ruma::api::client::typing::create_typing_event; use crate::{utils, Result, Ruma}; @@ -31,17 +31,15 @@ pub(crate) async fn create_typing_event_route( let duration = utils::clamp( duration.as_millis().try_into().unwrap_or(u64::MAX), services - .globals + .server .config .typing_client_timeout_min_s - .checked_mul(1000) - .unwrap(), + .try_mul(1000)?, services - .globals + .server .config .typing_client_timeout_max_s - .checked_mul(1000) - .unwrap(), + .try_mul(1000)?, ); services .rooms diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index ec804570..c08b1fdf 100644 --- 
a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -38,7 +38,7 @@ pub(crate) async fn turn_server_route( let user = body.sender_user.unwrap_or_else(|| { UserId::parse_with_server_name( utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), - &services.globals.config.server_name, + &services.server.config.server_name, ) .unwrap() }); diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index dd25e091..ecea305b 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -71,7 +71,7 @@ pub(super) async fn auth( match metadata { | &get_public_rooms::v3::Request::METADATA => { if !services - .globals + .server .config .allow_public_room_directory_without_auth { @@ -94,7 +94,7 @@ pub(super) async fn auth( | &get_display_name::v3::Request::METADATA | &get_avatar_url::v3::Request::METADATA | &get_timezone_key::unstable::Request::METADATA => { - if services.globals.config.require_auth_for_profile_requests { + if services.server.config.require_auth_for_profile_requests { match token { | Token::Appservice(_) | Token::User(_) => { // we should have validated the token above @@ -127,7 +127,7 @@ pub(super) async fn auth( }), | (AuthScheme::AccessToken, Token::None) => match metadata { | &get_turn_server_info::v3::Request::METADATA => { - if services.globals.config.turn_allow_guests { + if services.server.config.turn_allow_guests { Ok(Auth { origin: None, sender_user: None, diff --git a/src/api/router/request.rs b/src/api/router/request.rs index 627abd30..615a8bff 100644 --- a/src/api/router/request.rs +++ b/src/api/router/request.rs @@ -32,7 +32,7 @@ pub(super) async fn from( let query = serde_html_form::from_str(query) .map_err(|e| err!(Request(Unknown("Failed to read query parameters: {e}"))))?; - let max_body_size = services.globals.config.max_request_size; + let max_body_size = services.server.config.max_request_size; let body = axum::body::to_bytes(body, max_body_size) .await diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 
1fea268b..27a4485c 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -37,7 +37,7 @@ pub(crate) async fn create_invite_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) @@ -47,7 +47,7 @@ pub(crate) async fn create_invite_route( } if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index 3900c418..b753346c 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -42,7 +42,7 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -59,7 +59,7 @@ pub(crate) async fn create_join_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 90b9b629..423e202d 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -34,7 +34,7 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -51,7 +51,7 @@ pub(crate) async fn create_knock_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index 77cde15f..2c09385b 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -20,7 +20,7 @@ pub(crate) async fn get_public_rooms_filtered_route( body: Ruma, ) -> Result { if !services - .globals + .server .config .allow_public_room_directory_over_federation { diff --git 
a/src/api/server/query.rs b/src/api/server/query.rs index 0e5f7e56..69f62e94 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -63,7 +63,7 @@ pub(crate) async fn get_profile_information_route( body: Ruma, ) -> Result { if !services - .globals + .server .config .allow_inbound_profile_lookup_federation_requests { diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 56a17c22..eec9bd11 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -309,7 +309,7 @@ async fn handle_edu_typing( origin: &ServerName, typing: TypingContent, ) { - if !services.globals.config.allow_incoming_typing { + if !services.server.config.allow_incoming_typing { return; } @@ -344,7 +344,7 @@ async fn handle_edu_typing( if typing.typing { let timeout = utils::millis_since_unix_epoch().saturating_add( services - .globals + .server .config .typing_federation_timeout_s .saturating_mul(1000), diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 97a65bf8..e62089b4 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -268,7 +268,7 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> Result { if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -284,7 +284,7 @@ pub(crate) async fn create_join_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) @@ -316,7 +316,7 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -326,7 +326,7 @@ pub(crate) async fn create_join_event_v2_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index 
95478081..b07620af 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -22,7 +22,7 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .globals + .server .config .forbidden_remote_server_names .contains(body.origin()) @@ -38,7 +38,7 @@ pub(crate) async fn create_knock_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .globals + .server .config .forbidden_remote_server_names .contains(&server.to_owned()) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 399055aa..bc410631 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -338,7 +338,7 @@ impl Service { } // Check if server-side command-escape is disabled by configuration - if is_public_escape && !self.services.globals.config.admin_escape_commands { + if is_public_escape && !self.services.server.config.admin_escape_commands { return false; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index fe84578a..ef34054f 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,7 +7,7 @@ use std::{ time::Instant, }; -use conduwuit::{error, utils::bytes::pretty, Config, Result}; +use conduwuit::{error, utils::bytes::pretty, Result, Server}; use data::Data; use regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; @@ -16,8 +16,8 @@ use crate::service; pub struct Service { pub db: Data, + server: Arc, - pub config: Config, pub bad_event_ratelimiter: Arc>>, pub server_user: OwnedUserId, pub admin_alias: OwnedRoomAliasId, @@ -57,9 +57,9 @@ impl crate::Service for Service { }, ); - let mut s = Self { + Ok(Arc::new(Self { db, - config: config.clone(), + server: args.server.clone(), bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &config.server_name)) .expect("#admins:server_name is valid alias name"), @@ -70,9 +70,7 @@ 
impl crate::Service for Service { .expect("@conduit:server_name is valid"), turn_secret, registration_token, - }; - - Ok(Arc::new(s)) + })) } fn memory_usage(&self, out: &mut dyn Write) -> Result { @@ -109,93 +107,97 @@ impl Service { pub fn current_count(&self) -> Result { Ok(self.db.current_count()) } #[inline] - pub fn server_name(&self) -> &ServerName { self.config.server_name.as_ref() } + pub fn server_name(&self) -> &ServerName { self.server.config.server_name.as_ref() } - pub fn allow_registration(&self) -> bool { self.config.allow_registration } + pub fn allow_registration(&self) -> bool { self.server.config.allow_registration } - pub fn allow_guest_registration(&self) -> bool { self.config.allow_guest_registration } + pub fn allow_guest_registration(&self) -> bool { self.server.config.allow_guest_registration } pub fn allow_guests_auto_join_rooms(&self) -> bool { - self.config.allow_guests_auto_join_rooms + self.server.config.allow_guests_auto_join_rooms } - pub fn log_guest_registrations(&self) -> bool { self.config.log_guest_registrations } + pub fn log_guest_registrations(&self) -> bool { self.server.config.log_guest_registrations } - pub fn allow_encryption(&self) -> bool { self.config.allow_encryption } + pub fn allow_encryption(&self) -> bool { self.server.config.allow_encryption } - pub fn allow_federation(&self) -> bool { self.config.allow_federation } + pub fn allow_federation(&self) -> bool { self.server.config.allow_federation } pub fn allow_public_room_directory_over_federation(&self) -> bool { - self.config.allow_public_room_directory_over_federation + self.server + .config + .allow_public_room_directory_over_federation } pub fn allow_device_name_federation(&self) -> bool { - self.config.allow_device_name_federation + self.server.config.allow_device_name_federation } - pub fn allow_room_creation(&self) -> bool { self.config.allow_room_creation } + pub fn allow_room_creation(&self) -> bool { self.server.config.allow_room_creation } pub fn 
new_user_displayname_suffix(&self) -> &String { - &self.config.new_user_displayname_suffix + &self.server.config.new_user_displayname_suffix } - pub fn allow_check_for_updates(&self) -> bool { self.config.allow_check_for_updates } + pub fn allow_check_for_updates(&self) -> bool { self.server.config.allow_check_for_updates } - pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } + pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.server.config.trusted_servers } - pub fn turn_password(&self) -> &String { &self.config.turn_password } + pub fn turn_password(&self) -> &String { &self.server.config.turn_password } - pub fn turn_ttl(&self) -> u64 { self.config.turn_ttl } + pub fn turn_ttl(&self) -> u64 { self.server.config.turn_ttl } - pub fn turn_uris(&self) -> &[String] { &self.config.turn_uris } + pub fn turn_uris(&self) -> &[String] { &self.server.config.turn_uris } - pub fn turn_username(&self) -> &String { &self.config.turn_username } + pub fn turn_username(&self) -> &String { &self.server.config.turn_username } - pub fn notification_push_path(&self) -> &String { &self.config.notification_push_path } + pub fn notification_push_path(&self) -> &String { &self.server.config.notification_push_path } - pub fn emergency_password(&self) -> &Option { &self.config.emergency_password } + pub fn emergency_password(&self) -> &Option { &self.server.config.emergency_password } pub fn url_preview_domain_contains_allowlist(&self) -> &Vec { - &self.config.url_preview_domain_contains_allowlist + &self.server.config.url_preview_domain_contains_allowlist } pub fn url_preview_domain_explicit_allowlist(&self) -> &Vec { - &self.config.url_preview_domain_explicit_allowlist + &self.server.config.url_preview_domain_explicit_allowlist } pub fn url_preview_domain_explicit_denylist(&self) -> &Vec { - &self.config.url_preview_domain_explicit_denylist + &self.server.config.url_preview_domain_explicit_denylist } pub fn 
url_preview_url_contains_allowlist(&self) -> &Vec { - &self.config.url_preview_url_contains_allowlist + &self.server.config.url_preview_url_contains_allowlist } - pub fn url_preview_max_spider_size(&self) -> usize { self.config.url_preview_max_spider_size } + pub fn url_preview_max_spider_size(&self) -> usize { + self.server.config.url_preview_max_spider_size + } pub fn url_preview_check_root_domain(&self) -> bool { - self.config.url_preview_check_root_domain + self.server.config.url_preview_check_root_domain } - pub fn forbidden_alias_names(&self) -> &RegexSet { &self.config.forbidden_alias_names } + pub fn forbidden_alias_names(&self) -> &RegexSet { &self.server.config.forbidden_alias_names } - pub fn forbidden_usernames(&self) -> &RegexSet { &self.config.forbidden_usernames } + pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames } - pub fn allow_local_presence(&self) -> bool { self.config.allow_local_presence } + pub fn allow_local_presence(&self) -> bool { self.server.config.allow_local_presence } - pub fn allow_incoming_presence(&self) -> bool { self.config.allow_incoming_presence } + pub fn allow_incoming_presence(&self) -> bool { self.server.config.allow_incoming_presence } - pub fn allow_outgoing_presence(&self) -> bool { self.config.allow_outgoing_presence } + pub fn allow_outgoing_presence(&self) -> bool { self.server.config.allow_outgoing_presence } pub fn allow_incoming_read_receipts(&self) -> bool { - self.config.allow_incoming_read_receipts + self.server.config.allow_incoming_read_receipts } pub fn allow_outgoing_read_receipts(&self) -> bool { - self.config.allow_outgoing_read_receipts + self.server.config.allow_outgoing_read_receipts } - pub fn block_non_admin_invites(&self) -> bool { self.config.block_non_admin_invites } + pub fn block_non_admin_invites(&self) -> bool { self.server.config.block_non_admin_invites } /// checks if `user_id` is local to us via server_name comparison #[inline] @@ -205,7 +207,7 @@ impl 
Service { #[inline] pub fn server_is_ours(&self, server_name: &ServerName) -> bool { - server_name == self.config.server_name + server_name == self.server.config.server_name } #[inline] diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 0acbb116..91797d01 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use conduwuit::{ err, utils::{stream::TryIgnore, ReadyExt}, - Err, Result, + Err, Result, Server, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{Stream, StreamExt, TryFutureExt}; @@ -31,6 +31,7 @@ struct Data { } struct Services { + server: Arc, admin: Dep, appservice: Dep, globals: Dep, @@ -47,6 +48,7 @@ impl crate::Service for Service { aliasid_alias: args.db["aliasid_alias"].clone(), }, services: Services { + server: args.server.clone(), admin: args.depend::("admin"), appservice: args.depend::("appservice"), globals: args.depend::("globals"), @@ -146,9 +148,9 @@ impl Service { let server_name = room_alias.server_name(); let server_is_ours = self.services.globals.server_is_ours(server_name); let servers_contains_ours = || { - servers.as_ref().is_some_and(|servers| { - servers.contains(&self.services.globals.config.server_name) - }) + servers + .as_ref() + .is_some_and(|servers| servers.contains(&self.services.server.config.server_name)) }; if !server_is_ours && !servers_contains_ours() { From b5c167de121e17696c2542b34a6b7904dade8c21 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 11:29:36 +0000 Subject: [PATCH 0570/1248] call decay prior to purge for trim-memory Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 81fbd3ea..6bdf8b33 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -141,12 +141,14 @@ pub mod this_thread { static DEALLOCATED_BYTES: OnceCell<&'static u64> = const { 
OnceCell::new() }; } - pub fn idle() -> Result { super::notify(&mallctl!("thread.idle")) } + pub fn trim() -> Result { decay().and_then(|()| purge()) } - pub fn trim() -> Result { notify(mallctl!("arena.0.purge")) } + pub fn purge() -> Result { notify(mallctl!("arena.0.purge")) } pub fn decay() -> Result { notify(mallctl!("arena.0.decay")) } + pub fn idle() -> Result { super::notify(&mallctl!("thread.idle")) } + pub fn flush() -> Result { super::notify(&mallctl!("thread.tcache.flush")) } pub fn set_muzzy_decay(decay_ms: isize) -> Result { @@ -239,7 +241,11 @@ pub fn is_prof_enabled() -> Result { get::(&mallctl!("prof.active")).map(is_nonzero!()) } -pub fn trim>>(arena: I) -> Result { +pub fn trim> + Copy>(arena: I) -> Result { + decay(arena).and_then(|()| purge(arena)) +} + +pub fn purge>>(arena: I) -> Result { notify_by_arena(arena.into(), mallctl!("arena.4096.purge")) } From 184a3b0f0cccfbd0a6f4d95f65504b4d1d9bb21f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 23 Jan 2025 19:07:13 +0000 Subject: [PATCH 0571/1248] reduce some tracing span levels; bump ruma Signed-off-by: Jason Volk --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- src/api/client/media_legacy.rs | 10 +++++----- src/api/server/media.rs | 14 ++++++++++++-- src/service/resolver/actual.rs | 24 ++++++++++++------------ src/service/rooms/timeline/mod.rs | 4 ++-- 6 files changed, 45 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7985a411..cd914dfd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3188,7 +3188,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "assign", "js_int", @@ -3210,7 +3210,7 @@ dependencies = [ [[package]] name = 
"ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "js_int", "ruma-common", @@ -3222,7 +3222,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "as_variant", "assign", @@ -3245,7 +3245,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "as_variant", "base64 0.22.1", @@ -3276,7 +3276,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3301,7 +3301,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" 
dependencies = [ "bytes", "http", @@ -3319,7 +3319,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3328,7 +3328,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "js_int", "ruma-common", @@ -3338,7 +3338,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3353,7 +3353,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "js_int", "ruma-common", @@ -3365,7 +3365,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "headers", "http", @@ -3378,7 +3378,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3394,7 +3394,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=427877d5bc14988ed877e500bbb27f8bc08b84e8#427877d5bc14988ed877e500bbb27f8bc08b84e8" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index b8c145ca..d52ce974 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -332,7 +332,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "427877d5bc14988ed877e500bbb27f8bc08b84e8" +rev = "b560338b2a50dbf61ecfe80808b9b095ad4cec00" features = [ "compat", "rand", diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index 29cf3069..4fa0b52e 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -50,7 +50,7 @@ pub(crate) async fn get_media_config_legacy_legacy_route( /// # `GET /_matrix/media/v3/preview_url` /// /// Returns URL preview. 
-#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy", level = "debug")] pub(crate) async fn get_media_preview_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -131,7 +131,7 @@ pub(crate) async fn create_content_legacy_route( /// - Only redirects if `allow_redirect` is true /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] pub(crate) async fn get_content_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -197,7 +197,7 @@ pub(crate) async fn get_content_legacy_route( /// - Only redirects if `allow_redirect` is true /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] pub(crate) async fn get_content_legacy_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -216,7 +216,7 @@ pub(crate) async fn get_content_legacy_legacy_route( /// - Only redirects if `allow_redirect` is true /// - Uses client-provided `timeout_ms` if available, else defaults to 20 /// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] pub(crate) async fn get_content_as_filename_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -303,7 +303,7 @@ pub(crate) async fn get_content_as_filename_legacy_legacy_route( /// - Only redirects if `allow_redirect` is true /// - Uses client-provided `timeout_ms` if available, else defaults to 20 
/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy")] +#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy", level = "debug")] pub(crate) async fn get_content_thumbnail_legacy_route( State(services): State, InsecureClientIp(client): InsecureClientIp, diff --git a/src/api/server/media.rs b/src/api/server/media.rs index 03ec7b51..e56f5b9d 100644 --- a/src/api/server/media.rs +++ b/src/api/server/media.rs @@ -14,7 +14,12 @@ use crate::Ruma; /// # `GET /_matrix/federation/v1/media/download/{mediaId}` /// /// Load media from our server. -#[tracing::instrument(skip_all, fields(%client), name = "media_get")] +#[tracing::instrument( + name = "media_get", + level = "debug", + skip_all, + fields(%client) +)] pub(crate) async fn get_content_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -51,7 +56,12 @@ pub(crate) async fn get_content_route( /// # `GET /_matrix/federation/v1/media/thumbnail/{mediaId}` /// /// Load media thumbnail from our server. 
-#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")] +#[tracing::instrument( + name = "media_thumbnail_get", + level = "debug", + skip_all, + fields(%client) +)] pub(crate) async fn get_content_thumbnail_route( State(services): State, InsecureClientIp(client): InsecureClientIp, diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index c5451c58..33374240 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -58,7 +58,7 @@ impl super::Service { /// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of /// specification - #[tracing::instrument(skip(self, cache), name = "actual")] + #[tracing::instrument(name = "actual", level = "debug", skip(self, cache))] pub async fn resolve_actual_dest( &self, dest: &ServerName, @@ -239,7 +239,7 @@ impl super::Service { Ok(add_port_to_hostname(dest.as_str())) } - #[tracing::instrument(skip_all, name = "well-known")] + #[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] async fn request_well_known(&self, dest: &str) -> Result> { self.conditional_query_and_cache(dest, 8448, true).await?; @@ -303,7 +303,7 @@ impl super::Service { #[inline] async fn conditional_query_and_cache_override( &self, - overname: &str, + untername: &str, hostname: &str, port: u16, cache: bool, @@ -312,34 +312,34 @@ impl super::Service { return Ok(()); } - if self.cache.has_override(overname).await { + if self.cache.has_override(untername).await { return Ok(()); } - self.query_and_cache_override(overname, hostname, port) + self.query_and_cache_override(untername, hostname, port) .await } - #[tracing::instrument(skip(self, overname, port), name = "ip")] + #[tracing::instrument(name = "ip", level = "debug", skip(self))] async fn query_and_cache_override( &self, - overname: &'_ str, + untername: &'_ str, hostname: &'_ str, port: u16, ) -> Result { self.services.server.check_running()?; - 
debug!("querying IP for {overname:?} ({hostname:?}:{port})"); + debug!("querying IP for {untername:?} ({hostname:?}:{port})"); match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { | Err(e) => Self::handle_resolve_error(&e, hostname), | Ok(override_ip) => { - self.cache.set_override(overname, &CachedOverride { + self.cache.set_override(untername, &CachedOverride { ips: override_ip.into_iter().take(MAX_IPS).collect(), port, expire: CachedOverride::default_expire(), - overriding: (hostname != overname) + overriding: (hostname != untername) .then_some(hostname.into()) - .inspect(|_| debug_info!("{overname:?} overriden by {hostname:?}")), + .inspect(|_| debug_info!("{untername:?} overriden by {hostname:?}")), }); Ok(()) @@ -347,7 +347,7 @@ impl super::Service { } } - #[tracing::instrument(skip_all, name = "srv")] + #[tracing::instrument(name = "srv", level = "debug", skip(self))] async fn query_srv_record(&self, hostname: &'_ str) -> Result> { let hostnames = [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index bd60e40e..362bfab5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1017,7 +1017,7 @@ impl Service { } /// Replace a PDU with the redacted form. 
- #[tracing::instrument(skip(self, reason))] + #[tracing::instrument(name = "redact", level = "debug", skip(self))] pub async fn redact_pdu( &self, event_id: &EventId, @@ -1053,7 +1053,7 @@ impl Service { self.replace_pdu(&pdu_id, &obj, &pdu).await } - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(name = "backfill", level = "debug", skip(self))] pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { if self .services From b1b6dc0479538a207b3cf62ac90b58abb38ae103 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 07:04:29 +0000 Subject: [PATCH 0572/1248] reloadable configuration Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 22 +++++++++++++++++++--- src/admin/server/mod.rs | 7 +++++++ src/core/server.rs | 6 +++--- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 6469a0e9..3ea27883 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,6 @@ -use std::{fmt::Write, sync::Arc}; +use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{info, utils::time, warn, Err, Result}; +use conduwuit::{info, utils::time, warn, Config, Err, Result}; use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; @@ -23,10 +23,26 @@ pub(super) async fn show_config(&self) -> Result { // Construct and send the response Ok(RoomMessageEventContent::text_markdown(format!( "{}", - self.services.server.config + *self.services.server.config ))) } +#[admin_command] +pub(super) async fn reload_config( + &self, + path: Option, +) -> Result { + let path = path.as_deref().into_iter(); + let config = Config::load(path).and_then(|raw| Config::new(&raw))?; + if config.server_name != self.services.server.config.server_name { + return Err!("You can't change the server name."); + } + + let _old = self.services.server.config.update(config)?; + + 
Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) +} + #[admin_command] pub(super) async fn list_features( &self, diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 69ad7953..3f3d6c5e 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -1,5 +1,7 @@ mod commands; +use std::path::PathBuf; + use clap::Subcommand; use conduwuit::Result; @@ -14,6 +16,11 @@ pub(super) enum ServerCommand { /// - Show configuration values ShowConfig, + /// - Reload configuration values + ReloadConfig { + path: Option, + }, + /// - List the features built into the server ListFeatures { #[arg(short, long)] diff --git a/src/core/server.rs b/src/core/server.rs index 948eea36..6838c9c9 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -8,12 +8,12 @@ use std::{ use tokio::{runtime, sync::broadcast}; -use crate::{config::Config, err, log::Log, metrics::Metrics, Err, Result}; +use crate::{config, config::Config, err, log::Log, metrics::Metrics, Err, Result}; /// Server runtime state; public portion pub struct Server { /// Server-wide configuration instance - pub config: Config, + pub config: config::Manager, /// Timestamp server was started; used for uptime. 
pub started: SystemTime, @@ -46,7 +46,7 @@ impl Server { #[must_use] pub fn new(config: Config, runtime: Option, log: Log) -> Self { Self { - config, + config: config::Manager::new(config), started: SystemTime::now(), stopping: AtomicBool::new(false), reloading: AtomicBool::new(false), From d59f68a51aa35e7e5491da79d667ad4dd497be5e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 24 Jan 2025 22:49:10 +0000 Subject: [PATCH 0573/1248] add sensitive-field directives to config display Signed-off-by: Jason Volk --- src/core/config/mod.rs | 9 +++++++++ src/macros/config.rs | 27 ++++++++++++++++++++++----- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e459f50b..c541c7e4 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -502,6 +502,8 @@ pub struct Config { /// YOU NEED TO EDIT THIS OR USE registration_token_file. /// /// example: "o&^uCtes4HPf0Vu@F20jQeeWE7" + /// + /// display: sensitive pub registration_token: Option, /// Path to a file on the system that gets read for the registration token. @@ -793,6 +795,8 @@ pub struct Config { /// Static TURN password to provide the client if not using a shared secret /// ("turn_secret"). It is recommended to use a shared secret over static /// credentials. + /// + /// display: sensitive #[serde(default)] pub turn_password: String, @@ -814,6 +818,8 @@ pub struct Config { /// /// This is more secure, but if needed you can use traditional static /// username/password credentials. + /// + /// display: sensitive #[serde(default)] pub turn_secret: String, @@ -1111,6 +1117,8 @@ pub struct Config { /// security purposes. /// /// example: "F670$2CP@Hw8mG7RY1$%!#Ic7YA" + /// + /// display: sensitive pub emergency_password: Option, /// default: "/_matrix/push/v1/notify" @@ -1560,6 +1568,7 @@ pub struct Config { /// Sentry reporting URL, if a custom one is desired. 
/// + /// display: sensitive /// default: "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, diff --git a/src/macros/config.rs b/src/macros/config.rs index 90d6ef15..50feefa8 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -15,7 +15,7 @@ use crate::{ const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it."; -const HIDDEN: &[&str] = &["default"]; +const HIDDEN: &[&str] = &["default", "display"]; #[allow(clippy::needless_pass_by_value)] pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { @@ -121,10 +121,27 @@ fn generate_example(input: &ItemStruct, args: &[Meta], write: bool) -> Result Date: Fri, 24 Jan 2025 23:45:35 +0000 Subject: [PATCH 0574/1248] fix missing iteration-optimized read options on several stream types Signed-off-by: Jason Volk --- src/database/map.rs | 3 ++- src/database/map/options.rs | 29 +++++++++++++++++++---------- src/database/map/rev_stream.rs | 4 ++-- src/database/map/rev_stream_from.rs | 2 +- src/database/map/stream.rs | 4 ++-- src/database/map/stream_from.rs | 4 ++-- 6 files changed, 28 insertions(+), 18 deletions(-) diff --git a/src/database/map.rs b/src/database/map.rs index 33cae594..97e90659 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -34,7 +34,8 @@ use conduwuit::Result; use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteOptions}; pub(crate) use self::options::{ - cache_read_options_default, iter_options_default, read_options_default, write_options_default, + cache_iter_options_default, cache_read_options_default, iter_options_default, + read_options_default, write_options_default, }; use crate::{watchers::Watchers, Engine}; diff --git a/src/database/map/options.rs b/src/database/map/options.rs index 90dc0261..f726036d 100644 --- a/src/database/map/options.rs +++ b/src/database/map/options.rs @@ -2,24 
+2,33 @@ use rocksdb::{ReadOptions, ReadTier, WriteOptions}; #[inline] pub(crate) fn iter_options_default() -> ReadOptions { - let mut read_options = read_options_default(); - read_options.set_background_purge_on_iterator_cleanup(true); - //read_options.set_pin_data(true); - read_options + let mut options = read_options_default(); + options.set_background_purge_on_iterator_cleanup(true); + //options.set_pin_data(true); + options +} + +#[inline] +pub(crate) fn cache_iter_options_default() -> ReadOptions { + let mut options = cache_read_options_default(); + options.set_background_purge_on_iterator_cleanup(true); + //options.set_pin_data(true); + options } #[inline] pub(crate) fn cache_read_options_default() -> ReadOptions { - let mut read_options = read_options_default(); - read_options.set_read_tier(ReadTier::BlockCache); - read_options + let mut options = read_options_default(); + options.set_read_tier(ReadTier::BlockCache); + options.fill_cache(false); + options } #[inline] pub(crate) fn read_options_default() -> ReadOptions { - let mut read_options = ReadOptions::default(); - read_options.set_total_order_seek(true); - read_options + let mut options = ReadOptions::default(); + options.set_total_order_seek(true); + options } #[inline] diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index 1d5d3d10..56b20b9b 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -31,7 +31,7 @@ where pub fn rev_raw_stream(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_rev(None); @@ -66,7 +66,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> fields(%map), )] pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_read_options_default(); + let opts = super::cache_iter_options_default(); let state = stream::State::new(map, 
opts).init_rev(None); !state.is_incomplete() diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index 1b66e8cc..83832bdd 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -118,7 +118,7 @@ pub(super) fn is_cached

    (map: &Arc, from: &P) -> bool where P: AsRef<[u8]> + ?Sized, { - let cache_opts = super::cache_read_options_default(); + let cache_opts = super::cache_iter_options_default(); let cache_status = stream::State::new(map, cache_opts) .init_rev(from.as_ref().into()) .status(); diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index fa3b0ad7..f1b5fdc3 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -30,7 +30,7 @@ where pub fn raw_stream(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_fwd(None); @@ -65,7 +65,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + fields(%map), )] pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_read_options_default(); + let opts = super::cache_iter_options_default(); let state = stream::State::new(map, opts).init_fwd(None); !state.is_incomplete() diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 4296b6f6..562ab6b1 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -77,7 +77,7 @@ where { use crate::pool::Seek; - let opts = super::read_options_default(); + let opts = super::iter_options_default(); let state = stream::State::new(self, opts); if is_cached(self, from) { let state = state.init_fwd(from.as_ref().into()); @@ -115,7 +115,7 @@ pub(super) fn is_cached

    (map: &Arc, from: &P) -> bool where P: AsRef<[u8]> + ?Sized, { - let opts = super::cache_read_options_default(); + let opts = super::cache_iter_options_default(); let state = stream::State::new(map, opts).init_fwd(from.as_ref().into()); !state.is_incomplete() From 72daf7ea6816ebf46c43b02e137d58bed5bee883 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 02:01:20 -0500 Subject: [PATCH 0575/1248] bump rocksdb to v9.10.0, reverts upstream rocksdb regression causing deadlocks on corrupt WAL files Signed-off-by: June Clementine Strawberry --- Cargo.lock | 8 +++---- deps/rust-rocksdb/Cargo.toml | 2 +- flake.lock | 44 ++++++++++++++++++------------------ flake.nix | 2 +- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd914dfd..cb1458db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3408,8 +3408,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.31.0+9.9.3" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2d31cf323df7c6d95396ef0213e28936c2218bd6#2d31cf323df7c6d95396ef0213e28936c2218bd6" +version = "0.32.0+9.10.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e#24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" dependencies = [ "bindgen", "bzip2-sys", @@ -3425,8 +3425,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.35.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2d31cf323df7c6d95396ef0213e28936c2218bd6#2d31cf323df7c6d95396ef0213e28936c2218bd6" +version = "0.36.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e#24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 96554aed..40c0541e 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ 
-27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2d31cf323df7c6d95396ef0213e28936c2218bd6" +rev = "24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index 210e8e08..d245ccdd 100644 --- a/flake.lock +++ b/flake.lock @@ -32,11 +32,11 @@ "nixpkgs": "nixpkgs_4" }, "locked": { - "lastModified": 1733424942, - "narHash": "sha256-5t7Sl6EkOaoP4FvzLmH7HFDbdl9SizmLh53RjDQCbWQ=", + "lastModified": 1737621947, + "narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=", "owner": "cachix", "repo": "cachix", - "rev": "8b6b0e4694b9aa78b2ea4c93bff6e1a222dc7e4a", + "rev": "f65a3cd5e339c223471e64c051434616e18cc4f5", "type": "github" }, "original": { @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1736566337, - "narHash": "sha256-SC0eDcZPqISVt6R0UfGPyQLrI0+BppjjtQ3wcSlk0oI=", + "lastModified": 1737689766, + "narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=", "owner": "ipetkov", "repo": "crane", - "rev": "9172acc1ee6c7e1cbafc3044ff850c568c75a5a3", + "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1736836313, - "narHash": "sha256-zdZ7/T6yG0/hzoVOiNpDiR/sW3zR6oSMrfIFJK2BrrE=", + "lastModified": 1737700483, + "narHash": "sha256-1778bR4GDDc51/iZQvcshGLZ4JU87zCzqei8Hn7vU1A=", "owner": "nix-community", "repo": "fenix", - "rev": "056c9393c821a4df356df6ce7f14c722dc8717ec", + "rev": "bab2a2840bc2d5ae7c6a133602185edbe4ca7daa", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1736719310, - "narHash": "sha256-Turvx60THwzTiUHb49WV3upUgsPuktr7tVy2Lwu2xJg=", + "lastModified": 1737600516, + "narHash": "sha256-EKyLQ3pbcjoU5jH5atge59F4fzuhTsb6yalUj6Ve2t8=", "owner": "axboe", 
"repo": "liburing", - "rev": "3124a4619e4daf26b06d48ccf0186a947070c415", + "rev": "6c509e2b0c881a13b83b259a221bf15fc9b3f681", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1736817698, - "narHash": "sha256-1m+JP9RUsbeLVv/tF1DX3Ew9Vl/fatXnlh/g5k3jcSk=", + "lastModified": 1737717945, + "narHash": "sha256-ET91TMkab3PmOZnqiJQYOtSGvSTvGeHoegAv4zcTefM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2b1fca3296ddd1602d2c4f104a4050e006f4b0cb", + "rev": "ecd26a469ac56357fd333946a99086e992452b6a", "type": "github" }, "original": { @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1734469478, - "narHash": "sha256-IcQ4N8xADYal79K+ONmNq4RLlIwdgUqgrVzgNgiIaG8=", + "lastModified": 1737761947, + "narHash": "sha256-FqpAOeFGuA+luV36jaf5aVz3UB183n6wUrTbFxCwjjQ=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "8b4808e7de2fbb5d119d8d72cdca76d8ab84bc47", + "rev": "d078ca31e802696b26d972bda7bed86ee1382156", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.10.0", "repo": "rocksdb", "type": "github" } @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1736690231, - "narHash": "sha256-g9gyxX+F6CrkT5gRIMKPnCPom0o9ZDzYnzzeNF86D6Q=", + "lastModified": 1737634189, + "narHash": "sha256-AG5G9KDsl0Ngby9EfWvlemma7WWG0KCADTIccPJuzUE=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "8364ef299790cb6ec22b9e09e873c97dbe9f2cb5", + "rev": "84d44d0a574630aa8500ed62b6c01ccd3fae2473", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 920d3d14..1d38f80f 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = 
"github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 6a7fe3ab7c0ddd489250f1c2922c7808d67bff43 Mon Sep 17 00:00:00 2001 From: morguldir Date: Tue, 21 Jan 2025 12:11:49 +0100 Subject: [PATCH 0576/1248] limit wal archive size to 1gb Signed-off-by: morguldir --- src/database/engine/db_opts.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 26f53825..01847257 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -55,7 +55,7 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul // Files opts.set_table_cache_num_shard_bits(7); - opts.set_wal_size_limit_mb(1024 * 1024 * 1024); + opts.set_wal_size_limit_mb(1024); opts.set_max_total_wal_size(1024 * 1024 * 512); opts.set_writable_file_max_buffer_size(1024 * 1024 * 2); From eed3291625d6b6f454ff24e41888d17ed492e1b7 Mon Sep 17 00:00:00 2001 From: morguldir Date: Tue, 21 Jan 2025 13:32:10 +0100 Subject: [PATCH 0577/1248] ci: set variable after ssh has been configured, mainly for draft pull requests --- .github/workflows/ci.yml | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 007adace..ce662101 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -306,6 +306,8 @@ jobs: echo "Checking connection" ssh -q website "echo test" + echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" + - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store @@ -489,7 +491,7 @@ jobs: - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! 
-z $SSH_WEBSITE ]; then chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-x86_64-linux-musl-x86_64-haswell-optimised fi @@ -497,7 +499,7 @@ jobs: - name: Upload static-${{ matrix.target }}-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then chmod +x static-${{ matrix.target }} scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }} fi @@ -505,14 +507,14 @@ jobs: - name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/x86_64-linux-musl-x86_64-haswell-optimised.deb fi - name: Upload static deb ${{ matrix.target }}-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! 
-z $SSH_WEBSITE ]; then scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}.deb fi @@ -534,14 +536,14 @@ jobs: - name: Upload static-${{ matrix.target }}-debug-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }}-debug fi - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}-debug.deb fi @@ -583,21 +585,21 @@ jobs: - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz to webserver if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | - if [ ! 
-z $SSH_WEBSITE ]; then scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}-debug.tar.gz fi @@ -693,7 +695,7 @@ jobs: - name: Upload macOS x86_64 binary to webserver if: ${{ matrix.os == 'macos-13' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! -z $SSH_WEBSITE ]; then chmod +x conduwuit-macos-x86_64 scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-x86_64 fi @@ -701,7 +703,7 @@ jobs: - name: Upload macOS arm64 binary to webserver if: ${{ matrix.os == 'macos-latest' }} run: | - if [ ! -z $WEB_UPLOAD_SSH_USERNAME ]; then + if [ ! 
-z $SSH_WEBSITE ]; then chmod +x conduwuit-macos-arm64 scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-arm64 fi From cd5d4f48bec719a938f50cb17b667668105a1141 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 03:08:35 -0500 Subject: [PATCH 0578/1248] add mau.dev mirror of conduwuit Signed-off-by: June Clementine Strawberry --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 796f96f8..74b6bddf 100644 --- a/README.md +++ b/README.md @@ -109,7 +109,8 @@ Both, but I prefer conduwuit. - GitLab: - git.girlcock.ceo: - git.gay: -- Codeberg: +- mau.dev: +- Codeberg: - sourcehut: From 2abf15b9e9587e6b625c0f40bf29bef75368630e Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 13:14:53 -0500 Subject: [PATCH 0579/1248] revert back to v9.9.3 due to upstream build issue with macos Signed-off-by: June Clementine Strawberry --- Cargo.lock | 12 ++++++------ deps/rust-rocksdb/Cargo.toml | 2 +- flake.lock | 20 ++++++++++---------- flake.nix | 2 +- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb1458db..5848cc46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1250,7 +1250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2183,7 +2183,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -3016,7 +3016,7 @@ dependencies = [ "once_cell", "socket2", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3409,7 +3409,7 @@ dependencies = [ [[package]] name = 
"rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e#24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" dependencies = [ "bindgen", "bzip2-sys", @@ -3426,7 +3426,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e#24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" dependencies = [ "libc", "rust-librocksdb-sys", @@ -3477,7 +3477,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 40c0541e..ba8259a3 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "24826ed7e9cf9ed9ba7ba2f1ec63091006025c9e" +rev = "1f032427d3a0e7b0f13c04b4e34712bd8610291b" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index d245ccdd..5af6ec43 100644 --- a/flake.lock +++ b/flake.lock @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1737700483, - "narHash": "sha256-1778bR4GDDc51/iZQvcshGLZ4JU87zCzqei8Hn7vU1A=", + "lastModified": 1737786656, + "narHash": "sha256-ubCW9Jy7ZUOF354bWxTgLDpVnTvIpNr6qR4H/j7I0oo=", "owner": "nix-community", "repo": "fenix", - "rev": "bab2a2840bc2d5ae7c6a133602185edbe4ca7daa", + "rev": "2f721f527886f801403f389a9cabafda8f1e3b7f", 
"type": "github" }, "original": { @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1737761947, - "narHash": "sha256-FqpAOeFGuA+luV36jaf5aVz3UB183n6wUrTbFxCwjjQ=", + "lastModified": 1737828695, + "narHash": "sha256-8Ev6zzhNPU798JNvU27a7gj5X+6SDG3jBweUkQ59DbA=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "d078ca31e802696b26d972bda7bed86ee1382156", + "rev": "a4d9230dcc9d03be428b9a728133f8f646c0065c", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.10.0", + "ref": "v9.9.3", "repo": "rocksdb", "type": "github" } @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1737634189, - "narHash": "sha256-AG5G9KDsl0Ngby9EfWvlemma7WWG0KCADTIccPJuzUE=", + "lastModified": 1737728869, + "narHash": "sha256-U4pl3Hi0lT6GP4ecN3q9wdD2sdaKMbmD/5NJ1NdJ9AM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "84d44d0a574630aa8500ed62b6c01ccd3fae2473", + "rev": "6e4c29f7ce18cea7d3d31237a4661ab932eab636", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 1d38f80f..920d3d14 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 9514064c1c709dc7c437b1478b224bb0d711ec05 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 13:40:31 -0500 Subject: [PATCH 0580/1248] use --locked for macOS builds Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ce662101..de6dbc77 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -669,7 +669,7 @@ jobs: - name: Build macOS x86_64 binary if: ${{ matrix.os == 'macos-13' }} run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release + CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls cp -v -f target/release/conduwuit conduwuit-macos-x86_64 otool -L conduwuit-macos-x86_64 @@ -677,12 +677,13 @@ jobs: - name: Run x86_64 macOS release binary if: ${{ matrix.os == 'macos-13' }} run: | + ./conduwuit-macos-x86_64 --help ./conduwuit-macos-x86_64 --version - name: Build macOS arm64 binary if: ${{ matrix.os == 'macos-latest' }} run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release + CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls cp -v -f target/release/conduwuit conduwuit-macos-arm64 otool -L conduwuit-macos-arm64 @@ -690,6 +691,7 @@ jobs: - name: Run arm64 macOS release binary if: ${{ matrix.os == 'macos-latest' }} run: | + ./conduwuit-macos-arm64 --help ./conduwuit-macos-arm64 --version - name: Upload macOS x86_64 binary to webserver From 1d26eec82d8d75f3d67cd973482bc7aa604e6381 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 11 Jan 2025 14:12:08 +0000 Subject: [PATCH 0581/1248] publish README to docker hub --- .github/workflows/docker-hub-description.yml | 36 ++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 .github/workflows/docker-hub-description.yml diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml new file mode 100644 index 00000000..5ff5f666 --- /dev/null +++ b/.github/workflows/docker-hub-description.yml @@ -0,0 +1,36 @@ +name: Update Docker Hub Description + +on: + push: + branches: + - main + paths: + - 
README.md + - .github/workflows/docker-hub-description.yml + +jobs: + dockerHubDescription: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Setting variables + uses: actions/github-script@v7 + id: var + with: + script: | + const githubRepo = '${{ github.repository }}'.toLowerCase() + const repoId = githubRepo.split('/')[1] + + core.setOutput('github_repository', githubRepo) + const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId + core.setOutput('docker_repo', dockerRepo) + - name: Docker Hub Description + uses: peter-evans/dockerhub-description@v4 + with: + username: ${{ vars.DOCKER_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + repository: ${{ steps.var.outputs.docker_repo }} + short-description: ${{ github.event.repository.description }} + enable-url-completion: true From d86061084cf2d544b99e16890914001b116ab2ca Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 01:05:41 +0000 Subject: [PATCH 0582/1248] Publish haswell images to image registries (#674) * push haswell images to docker repos * Add OCI labels to image * fixup! Add OCI labels to image * fixup! push haswell images to docker repos * fixup! Add OCI labels to image * fixup! Add OCI labels to image * fixup! 
Add OCI labels to image --- .github/workflows/ci.yml | 174 ++++++++++++++++++++------------- nix/pkgs/oci-image/default.nix | 6 ++ 2 files changed, 114 insertions(+), 66 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de6dbc77..345713aa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -566,6 +566,14 @@ jobs: cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz + - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub + if: ${{ matrix.target == 'x86_64-linux-musl' }} + uses: actions/upload-artifact@v4 + with: + name: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised + path: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz + if-no-files-found: error + compression-level: 0 - name: Upload OCI image ${{ matrix.target }}-all-features to GitHub uses: actions/upload-artifact@v4 with: @@ -745,18 +753,11 @@ jobs: contents: read if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' env: - DOCKER_ARM64: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 - DOCKER_AMD64: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 - DOCKER_TAG: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - DOCKER_BRANCH: docker.io/${{ needs.variables.outputs.github_repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && 
!endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} - GHCR_ARM64: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 - GHCR_AMD64: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 - GHCR_TAG: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - GHCR_BRANCH: ghcr.io/${{ needs.variables.outputs.github_repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} - GLCR_ARM64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8 - GLCR_AMD64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64 - GLCR_TAG: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - GLCR_BRANCH: registry.gitlab.com/conduwuit/conduwuit:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', 
github.event.number, github.event.pull_request.user.login)) || github.ref_name }} + DOCKER_HUB_REPO: docker.io/${{ needs.variables.outputs.github_repository }} + GHCR_REPO: ghcr.io/${{ needs.variables.outputs.github_repository }} + GLCR_REPO: registry.gitlab.com/conduwuit/conduwuit + UNIQUE_TAG: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} + BRANCH_TAG: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} @@ -790,143 +791,184 @@ jobs: - name: Move OCI images into position run: | + mv -v oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised/*.tar.gz oci-image-amd64-haswell-optimised.tar.gz mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz + - name: Load and push amd64 haswell image + run: | + docker load -i oci-image-amd64-haswell-optimised.tar.gz + if [ ! -z $DOCKERHUB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + fi + if [ $GHCR_ENABLED = "true" ]; then + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-haswell + docker push ${GHCR_REPO}:${UNIQUE_TAG}-haswell + fi + if [ ! -z $GITLAB_TOKEN ]; then + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-haswell + docker push ${GLCR_REPO}:${UNIQUE_TAG}-haswell + fi + - name: Load and push amd64 image run: | docker load -i oci-image-amd64.tar.gz if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64} - docker push ${DOCKER_AMD64} + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 fi if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64} - docker push ${GHCR_AMD64} + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64 + docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64 fi if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64} - docker push ${GLCR_AMD64} + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64 + docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64 fi - name: Load and push arm64 image run: | docker load -i oci-image-arm64v8.tar.gz if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64} - docker push ${DOCKER_ARM64} + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 fi if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64} - docker push ${GHCR_ARM64} + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 + docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 fi if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64} - docker push ${GLCR_ARM64} + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 + docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 fi - name: Load and push amd64 debug image run: | docker load -i oci-image-amd64-debug.tar.gz if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_AMD64}-debug - docker push ${DOCKER_AMD64}-debug + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug fi if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_AMD64}-debug - docker push ${GHCR_AMD64}-debug + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug + docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug fi if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_AMD64}-debug - docker push ${GLCR_AMD64}-debug + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug + docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug fi - name: Load and push arm64 debug image run: | docker load -i oci-image-arm64v8-debug.tar.gz if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_ARM64}-debug - docker push ${DOCKER_ARM64}-debug + docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug + docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug fi if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_ARM64}-debug - docker push ${GHCR_ARM64}-debug + docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug + docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug fi if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_ARM64}-debug - docker push ${GLCR_ARM64}-debug + docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug + docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug + fi + + - name: Create Docker haswell manifests + run: | + # Dockerhub Container Registry + if [ ! 
-z $DOCKERHUB_TOKEN ]; then + docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + fi + # GitHub Container Registry + if [ $GHCR_ENABLED = "true" ]; then + docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell + fi + # GitLab Container Registry + if [ ! -z $GITLAB_TOKEN ]; then + docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell fi - name: Create Docker combined manifests run: | # Dockerhub Container Registry if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_TAG} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} - docker manifest create ${DOCKER_BRANCH} --amend ${DOCKER_ARM64} --amend ${DOCKER_AMD64} + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 + docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_TAG} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} - docker manifest create ${GHCR_BRANCH} --amend ${GHCR_ARM64} --amend ${GHCR_AMD64} + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 + docker manifest create ${GHCR_REPO}:${BRANCH_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 fi # GitLab Container Registry if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_TAG} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} - docker manifest create ${GLCR_BRANCH} --amend ${GLCR_ARM64} --amend ${GLCR_AMD64} + docker manifest create ${GLCR_REPO}:${UNIQUE_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 + docker manifest create ${GLCR_REPO}:${BRANCH_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 fi - name: Create Docker combined debug manifests run: | # Dockerhub Container Registry if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_TAG}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug - docker manifest create ${DOCKER_BRANCH}-debug --amend ${DOCKER_ARM64}-debug --amend ${DOCKER_AMD64}-debug + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug + docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_TAG}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug - docker manifest create ${GHCR_BRANCH}-debug --amend ${GHCR_ARM64}-debug --amend ${GHCR_AMD64}-debug + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug + docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug fi # GitLab Container Registry if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_TAG}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug - docker manifest create ${GLCR_BRANCH}-debug --amend ${GLCR_ARM64}-debug --amend ${GLCR_AMD64}-debug + docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug + docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug fi - name: Push manifests to Docker registries run: | if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest push ${DOCKER_TAG} - docker manifest push ${DOCKER_BRANCH} - docker manifest push ${DOCKER_TAG}-debug - docker manifest push ${DOCKER_BRANCH}-debug + docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG} + docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG} + docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug + docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug + docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell fi if [ $GHCR_ENABLED = "true" ]; then - docker manifest push ${GHCR_TAG} - docker manifest push ${GHCR_BRANCH} - docker manifest push ${GHCR_TAG}-debug - docker manifest push ${GHCR_BRANCH}-debug + docker manifest push ${GHCR_REPO}:${UNIQUE_TAG} + docker manifest push ${GHCR_REPO}:${BRANCH_TAG} + docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-debug + docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-debug + docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-haswell + docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-haswell fi if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest push ${GLCR_TAG} - docker manifest push ${GLCR_BRANCH} - docker manifest push ${GLCR_TAG}-debug - docker manifest push ${GLCR_BRANCH}-debug + docker manifest push ${GLCR_REPO}:${UNIQUE_TAG} + docker manifest push ${GLCR_REPO}:${BRANCH_TAG} + docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-debug + docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-debug + docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-haswell + docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-haswell fi - name: Add Image Links to Job Summary run: | if [ ! -z $DOCKERHUB_TOKEN ]; then - echo "- \`docker pull ${DOCKER_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY fi if [ $GHCR_ENABLED = "true" ]; then - echo "- \`docker pull ${GHCR_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY fi if [ ! 
-z $GITLAB_TOKEN ]; then - echo "- \`docker pull ${GLCR_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY + echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY fi diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 152e00d1..d378d017 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -28,5 +28,11 @@ dockerTools.buildLayeredImage { Env = [ "RUST_BACKTRACE=full" ]; + Labels = { + "org.opencontainers.image.title" = main.pname; + "org.opencontainers.image.version" = main.version; + "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; + # "org.opencontainers.image.created" = builtins.formatTime "%Y-%m-%dT%H:%M:%SZ" inputs.self.lastModified; + }; }; } From 9dcf289c7a1f57bfb512f406ddcfb22895e30846 Mon Sep 17 00:00:00 2001 From: bumpsoo Date: Sun, 5 Jan 2025 11:37:40 +0900 Subject: [PATCH 0583/1248] (doc): Update docker-compose.yml and conduwuit-example.toml The server cannot start without a registration token when registration is configured Signed-off-by: bumpsoo --- conduwuit-example.toml | 7 +++++-- docs/deploying/docker-compose.for-traefik.yml | 2 ++ docs/deploying/docker-compose.with-caddy.yml | 2 ++ docs/deploying/docker-compose.yml | 2 ++ src/core/config/mod.rs | 6 +++++- 5 files changed, 16 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 79efbd14..3ecc1628 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -389,13 +389,16 @@ # #allow_registration = false -# This item is undocumented. Please contribute documentation for it. +# Enabling this setting opens registration to anyone without restrictions. 
+# This makes your server vulnerable to abuse # #yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false # A static registration token that new users will have to provide when # creating an account. If unset and `allow_registration` is true, -# registration is open without any condition. +# you must set +# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` +# to true to allow open registration without any conditions. # # YOU NEED TO EDIT THIS OR USE registration_token_file. # diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index b4316426..366f6999 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -17,6 +17,8 @@ services: CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' + CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index c080293f..431cf2d4 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -33,6 +33,8 @@ services: CONDUWUIT_PORT: 6167 CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' + CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. 
+ #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index 3b7d84ed..ca33b5f5 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -17,6 +17,8 @@ services: CONDUWUIT_PORT: 6167 CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB CONDUWUIT_ALLOW_REGISTRATION: 'true' + CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' CONDUWUIT_ALLOW_FEDERATION: 'true' CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index c541c7e4..133f0887 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -492,12 +492,16 @@ pub struct Config { #[serde(default)] pub allow_registration: bool, + /// Enabling this setting opens registration to anyone without restrictions. + /// This makes your server vulnerable to abuse #[serde(default)] pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool, /// A static registration token that new users will have to provide when /// creating an account. If unset and `allow_registration` is true, - /// registration is open without any condition. + /// you must set + /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` + /// to true to allow open registration without any conditions. /// /// YOU NEED TO EDIT THIS OR USE registration_token_file. 
/// From 5b5ccba64e3d36a9235f4e0d449f40d859046dad Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 21:14:38 -0500 Subject: [PATCH 0584/1248] make conduwuit lowercase in the user-agent again Signed-off-by: June Clementine Strawberry --- src/core/info/version.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/info/version.rs b/src/core/info/version.rs index fb71d4e1..37580210 100644 --- a/src/core/info/version.rs +++ b/src/core/info/version.rs @@ -7,7 +7,7 @@ use std::sync::OnceLock; -static BRANDING: &str = "Conduwuit"; +static BRANDING: &str = "conduwuit"; static SEMANTIC: &str = env!("CARGO_PKG_VERSION"); static VERSION: OnceLock = OnceLock::new(); From c323894497e263514e92bfe0dee8397085305bc0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 21:20:29 -0500 Subject: [PATCH 0585/1248] use test in postrm deb script before deleting Signed-off-by: June Clementine Strawberry --- debian/postrm | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/debian/postrm b/debian/postrm index f516f2a2..3c0b1c09 100644 --- a/debian/postrm +++ b/debian/postrm @@ -10,21 +10,33 @@ CONDUWUIT_DATABASE_PATH_SYMLINK=/var/lib/matrix-conduit case $1 in purge) # Remove debconf changes from the db - db_purge + #db_purge # Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior # "configuration files must be preserved when the package is removed, and # only deleted when the package is purged." 
+ + # + if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then - rm -v -r "$CONDUWUIT_CONFIG_PATH" + if test -L "$CONDUWUIT_CONFIG_PATH"; then + echo "Deleting conduwuit configuration files" + rm -v -r "$CONDUWUIT_CONFIG_PATH" + fi fi if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then - rm -v -r "$CONDUWUIT_DATABASE_PATH" + if test -L "$CONDUWUIT_DATABASE_PATH"; then + echo "Deleting conduwuit database directory" + rm -r "$CONDUWUIT_DATABASE_PATH" + fi fi if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then - rm -v -r "$CONDUWUIT_DATABASE_PATH_SYMLINK" + if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then + echo "Removing matrix-conduit symlink" + rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK" + fi fi ;; esac From 4b331fe50e568241f6703d92b005149da9dc4a52 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 22:42:16 -0500 Subject: [PATCH 0586/1248] update README.md, crate metadata, and OCI image metadata Signed-off-by: June Clementine Strawberry --- Cargo.toml | 9 +++-- README.md | 72 ++++++++++++++++++++++++---------- nix/pkgs/oci-image/default.nix | 13 ++++-- 3 files changed, 66 insertions(+), 28 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d52ce974..c4af4a7c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,14 +7,15 @@ default-members = ["src/*"] [workspace.package] authors = [ - "strawberry ", - "timokoesters ", + "June Clementine Strawberry ", + "strawberry ", # woof + "Jason Volk ", ] categories = ["network-programming"] -description = "a very cool fork of Conduit, a Matrix homeserver written in Rust" +description = "a very cool Matrix chat homeserver written in Rust" edition = "2021" homepage = "https://conduwuit.puppyirl.gay/" -keywords = ["chat", "matrix", "server", "uwu"] +keywords = ["chat", "matrix", "networking", "server", "uwu"] license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" diff --git a/README.md b/README.md index 74b6bddf..13a1c67f 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ -### a very cool, featureful fork 
of [Conduit](https://conduit.rs/) +### a very cool [Matrix](https://matrix.org/) chat homeserver written in Rust @@ -15,16 +15,15 @@ information and how to deploy/setup conduwuit. #### What is Matrix? -[Matrix](https://matrix.org) is an open network for secure and decentralized -communication. Users from every Matrix homeserver can chat with users from all -other Matrix servers. You can even use bridges (also called Matrix Appservices) -to communicate with users outside of Matrix, like a community on Discord. +[Matrix](https://matrix.org) is an open, federated, and extensible network for +decentralised communication. Users from any Matrix homeserver can chat with users from all +other homeservers over federation. Matrix is designed to be extensible and built on top of. +You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord. #### What is the goal? -A high-performance and efficient Matrix homeserver that's easy to set up and -just works. You can install it on a mini-computer like the Raspberry Pi to -host Matrix for your family, friends or company. +A high-performance, efficient, low-cost, and featureful Matrix homeserver that's +easy to set up and just works with minimal configuration needed. #### Can I try it out? @@ -37,17 +36,22 @@ homeserver". This means there are rules, so please read the rules: [https://transfem.dev/homeserver_rules.txt](https://transfem.dev/homeserver_rules.txt) transfem.dev is also listed at -[servers.joinmatrix.org](https://servers.joinmatrix.org/) +[servers.joinmatrix.org](https://servers.joinmatrix.org/), which is a list of +popular public Matrix homeservers, including some others that run conduwuit. #### What is the current status? -conduwuit is technically a hard fork of Conduit, which is in Beta. 
The Beta status -initially was inherited from Conduit, however overtime this Beta status is rapidly -becoming less and less relevant as our codebase significantly diverges more and more. +conduwuit is technically a hard fork of [Conduit](https://conduit.rs/), which is in beta. +The beta status initially was inherited from Conduit, however the huge amount of +codebase divergance, changes, fixes, and improvements have effectively made this +beta status not entirely applicable to us anymore. -conduwuit is quite stable and very usable as a daily driver and for a low-medium -sized homeserver. There is still a lot of more work to be done, but it is in a far -better place than the project was in early 2024. +conduwuit is very stable based on our rapidly growing userbase, has lots of features that users +expect, and very usable as a daily driver for small, medium, and upper-end medium sized homeservers. + +A lot of critical stability and performance issues have been fixed, and a lot of +necessary groundwork has finished; making this project way better than it was +back in the start at ~early 2024. #### How is conduwuit funded? Is conduwuit sustainable? @@ -72,16 +76,37 @@ Conduit like before. If you are truly finding yourself wanting to migrate back to Conduit, we would appreciate all your feedback and if we can assist with any issues or concerns. +#### Can I migrate from Synapse or Dendrite? + +Currently there is no known way to seamlessly migrate all user data from the old +homeserver to conduwuit. However it is perfectly acceptable to replace the old +homeserver software with conduwuit using the same server name and there will not +be any issues with federation. + +There is an interest in developing a built-in seamless user data migration +method into conduwuit, however there is no concrete ETA or timeline for this. 
+ + #### Contact -If you run into any question, feel free to +[`#conduwuit:puppygock.gay`](https://matrix.to/#/#conduwuit:puppygock.gay) +is the official project Matrix room. You can get support here, ask questions or +concerns, get assistance setting up conduwuit, etc. -- Ask us in `#conduwuit:puppygock.gay` on Matrix -- [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) +This room should stay relevant and focused on conduwuit. An offtopic general +chatter room can be found there as well. + +Please keep the issue trackers focused on bug reports and enhancement requests. +General support is extremely difficult to be offered over an issue tracker, and +simple questions should be asked directly in an interactive platform like our +Matrix room above as they can turn into a relevant discussion and/or may not be +simple to answer. If you're not sure, just ask in the Matrix room. + +If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) #### Donate @@ -89,9 +114,11 @@ conduwuit development is purely made possible by myself and contributors. I do not get paid to work on this, and I work on it in my free time. Donations are heavily appreciated! 💜🥺 -- Liberapay: -- Ko-fi (note they take a fee): -- GitHub Sponsors: +- Liberapay (preferred): +- GitHub Sponsors (preferred): +- Ko-fi: + +I do not and will not accept cryptocurrency donations, including things related. #### Logo @@ -105,6 +132,9 @@ Both, but I prefer conduwuit. 
#### Mirrors of conduwuit +If GitHub is unavailable in your country, or has poor connectivity, conduwuit's +source code is mirrored onto the following additional platforms I maintain: + - GitHub: - GitLab: - git.girlcock.ceo: diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index d378d017..5520c920 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -29,10 +29,17 @@ dockerTools.buildLayeredImage { "RUST_BACKTRACE=full" ]; Labels = { - "org.opencontainers.image.title" = main.pname; - "org.opencontainers.image.version" = main.version; + "org.opencontainers.image.authors" = "June Clementine Strawberry and Jason Volk + "; + "org.opencontainers.image.created" ="@${toString inputs.self.lastModified}"; + "org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust"; + "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; + "org.opencontainers.image.licenses" = "Apache-2.0"; "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; - # "org.opencontainers.image.created" = builtins.formatTime "%Y-%m-%dT%H:%M:%SZ" inputs.self.lastModified; + "org.opencontainers.image.title" = main.pname; + "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; + "org.opencontainers.image.vendor" = "girlbossceo"; + "org.opencontainers.image.version" = main.version; }; }; } From 3b0195e6b387364d8919ce90e2f461e82d2f51d1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 25 Jan 2025 21:04:52 -0500 Subject: [PATCH 0587/1248] misc various github actions ci fixes Signed-off-by: June Clementine Strawberry --- .gitea/PULL_REQUEST_TEMPLATE.md | 8 - .gitea/workflows/ci.yml | 264 ------------------- .github/workflows/ci.yml | 80 +++--- .github/workflows/docker-hub-description.yml | 5 + .github/workflows/documentation.yml | 3 + 5 files changed, 56 insertions(+), 304 deletions(-) delete mode 100644 .gitea/PULL_REQUEST_TEMPLATE.md 
delete mode 100644 .gitea/workflows/ci.yml diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 4210554b..00000000 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,8 +0,0 @@ - - - ------------------------------------------------------------------------------ - -- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test` -- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license - diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml deleted file mode 100644 index ef436734..00000000 --- a/.gitea/workflows/ci.yml +++ /dev/null @@ -1,264 +0,0 @@ -name: CI and Artifacts - -on: - pull_request: - push: - # documentation workflow deals with this or is not relevant for this workflow - paths-ignore: - - '*.md' - - 'conduwuit-example.toml' - - 'book.toml' - - '.gitlab-ci.yml' - - '.gitignore' - - 'renovate.json' - - 'docs/**' - - 'debian/**' - - 'docker/**' - branches: - - main - tags: - - '*' - # Allows you to run this workflow manually from the Actions tab - #workflow_dispatch: - -#concurrency: -# group: ${{ gitea.head_ref || gitea.ref_name }} -# cancel-in-progress: true - -env: - # Required to make some things output color - TERM: ansi - # Publishing to my nix binary cache - ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} - # conduwuit.cachix.org - CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - # Just in case incremental is still being set to true, speeds up CI - CARGO_INCREMENTAL: 0 - # Custom nix binary cache if fork is being used - ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} - ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} - # Get error output from nix that we can actually use - NIX_CONFIG: show-trace = true - -#permissions: -# packages: write -# contents: read - -jobs: - tests: - name: Test - runs-on: ubuntu-latest - steps: - - name: Sync repository - uses: https://github.com/actions/checkout@v4 - - - name: Tag comparison check - if: startsWith(gitea.ref, 
'refs/tags/v') - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ $LATEST_TAG != ${{ gitea.ref_name }} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - name: Install Nix - uses: https://github.com/DeterminateSystems/nix-installer-action@main - with: - diagnostic-endpoint: "" - extra-conf: | - experimental-features = nix-command flakes - accept-flake-config = true - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Configure Magic Nix Cache - uses: https://github.com/DeterminateSystems/magic-nix-cache-action@main - with: - diagnostic-endpoint: "" - upstream-cache: "https://attic.kennel.juneis.dog/conduwuit" - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a /etc/nix/nix.conf > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv - direnv allow - nix develop .#all-features --command true - - - name: Cache CI dependencies - run: | - bin/nix-build-and-cache ci - - - name: Run CI tests - run: | - direnv exec . engage > >(tee -a test_output.log) - - - name: Sync Complement repository - uses: https://github.com/actions/checkout@v4 - with: - repository: 'matrix-org/complement' - path: complement_src - - - name: Run Complement tests - run: | - direnv exec . 
bin/complement 'complement_src' 'complement_test_logs.jsonl' 'complement_test_results.jsonl' - cp -v -f result complement_oci_image.tar.gz - - - name: Upload Complement OCI image - uses: https://github.com/actions/upload-artifact@v4 - with: - name: complement_oci_image.tar.gz - path: complement_oci_image.tar.gz - if-no-files-found: error - - - name: Upload Complement logs - uses: https://github.com/actions/upload-artifact@v4 - with: - name: complement_test_logs.jsonl - path: complement_test_logs.jsonl - if-no-files-found: error - - - name: Upload Complement results - uses: https://github.com/actions/upload-artifact@v4 - with: - name: complement_test_results.jsonl - path: complement_test_results.jsonl - if-no-files-found: error - - - name: Diff Complement results with checked-in repo results - run: | - diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_test_output.log) - echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - - name: Update Job Summary - if: success() || failure() - run: | - if [ ${{ job.status }} == 'success' ]; then - echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY - else - echo '```' >> $GITHUB_STEP_SUMMARY - tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - fi - - build: - name: Build - runs-on: ubuntu-latest - needs: tests - strategy: - matrix: - include: - - target: aarch64-unknown-linux-musl - - target: x86_64-unknown-linux-musl - steps: - - name: Sync repository - uses: https://github.com/actions/checkout@v4 - - - name: Install Nix - uses: https://github.com/DeterminateSystems/nix-installer-action@main - with: - diagnostic-endpoint: "" - extra-conf: | - experimental-features = nix-command flakes - accept-flake-config = 
true - - - name: Install and enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Configure Magic Nix Cache - uses: https://github.com/DeterminateSystems/magic-nix-cache-action@main - with: - diagnostic-endpoint: "" - upstream-cache: "https://attic.kennel.juneis.dog/conduwuit" - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a /etc/nix/nix.conf > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv - direnv allow - nix develop .#all-features --command true - - - name: Build static ${{ matrix.target }} - run: | - CARGO_DEB_TARGET_TUPLE=$(echo ${{ matrix.target }} | grep -o -E '^([^-]*-){3}[^-]*') - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-${{ matrix.target }} - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduit target/release/conduwuit - cp -v -f result/bin/conduit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - # -p conduit is the main crate name - direnv exec . 
cargo deb --verbose --no-build --no-strip -p conduit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb - mv -v target/release/conduwuit static-${{ matrix.target }} - mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb - - - name: Upload static-${{ matrix.target }} - uses: https://github.com/actions/upload-artifact@v4 - with: - name: static-${{ matrix.target }} - path: static-${{ matrix.target }} - if-no-files-found: error - - - name: Upload deb ${{ matrix.target }} - uses: https://github.com/actions/upload-artifact@v4 - with: - name: deb-${{ matrix.target }} - path: ${{ matrix.target }}.deb - if-no-files-found: error - compression-level: 0 - - - name: Build OCI image ${{ matrix.target }} - run: | - bin/nix-build-and-cache just .#oci-image-${{ matrix.target }} - cp -v -f result oci-image-${{ matrix.target }}.tar.gz - - - name: Upload OCI image ${{ matrix.target }} - uses: https://github.com/actions/upload-artifact@v4 - with: - name: oci-image-${{ matrix.target }} - path: oci-image-${{ matrix.target }}.tar.gz - if-no-files-found: error - compression-level: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 345713aa..b0b0bd53 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,8 +22,8 @@ concurrency: env: # sccache only on main repo - SCCACHE_GHA_ENABLED: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}" - RUSTC_WRAPPER: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" + SCCACHE_GHA_ENABLED: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && 
(vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}" + RUSTC_WRAPPER: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" SCCACHE_BUCKET: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" SCCACHE_S3_USE_SSL: ${{ vars.SCCACHE_S3_USE_SSL }} SCCACHE_REGION: ${{ vars.SCCACHE_REGION }} @@ -51,8 +51,8 @@ env: extra-experimental-features = nix-command flakes accept-flake-config = true WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} - GH_SHA: ${{ github.sha }} GH_REF_NAME: ${{ github.ref_name }} + WEBSERVER_DIR_NAME: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} permissions: {} @@ -85,11 +85,13 @@ jobs: END echo "Checking connection" - ssh -q website "echo test" + ssh -q website "echo test" || ssh -q website "echo test" echo "Creating commit rev directory on web server" - ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/$GITHUB_SHA/" - ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/$GITHUB_SHA/" + ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" + ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" + + echo 
"SSH_WEBSITE=1" >> "$GITHUB_ENV" - name: Install liburing run: | @@ -124,6 +126,9 @@ jobs: - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -184,11 +189,16 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (env.SCCACHE_GHA_ENABLED == 'true') + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -304,13 +314,16 @@ jobs: END echo "Checking connection" - ssh -q website "echo test" + ssh -q website "echo test" || ssh -q website "echo test" echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -364,11 +377,16 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (env.SCCACHE_GHA_ENABLED == 'true') + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: 
mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -493,29 +511,27 @@ jobs: run: | if [ ! -z $SSH_WEBSITE ]; then chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised - scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-x86_64-linux-musl-x86_64-haswell-optimised + scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-x86_64-linux-musl-x86_64-haswell-optimised fi - name: Upload static-${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $SSH_WEBSITE ]; then chmod +x static-${{ matrix.target }} - scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }} + scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }} fi - name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | if [ ! 
-z $SSH_WEBSITE ]; then - scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/x86_64-linux-musl-x86_64-haswell-optimised.deb + scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/x86_64-linux-musl-x86_64-haswell-optimised.deb fi - name: Upload static deb ${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $SSH_WEBSITE ]; then - scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}.deb + scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}.deb fi - name: Upload static-${{ matrix.target }}-debug-all-features to GitHub @@ -534,17 +550,15 @@ jobs: compression-level: 0 - name: Upload static-${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! 
-z $SSH_WEBSITE ]; then - scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/static-${{ matrix.target }}-debug + scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}-debug fi - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | - if [ ! -z $SSH_WEBSITE]; then - scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/${{ matrix.target }}-debug.deb + if [ ! -z $SSH_WEBSITE ]; then + scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}-debug.deb fi - name: Build OCI image ${{ matrix.target }}-all-features @@ -594,21 +608,19 @@ jobs: if: ${{ matrix.target == 'x86_64-linux-musl' }} run: | if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz + scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! 
-z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}.tar.gz + scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}.tar.gz fi - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' run: | if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/oci-image-${{ matrix.target }}-debug.tar.gz + scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz fi build_mac_binaries: @@ -647,7 +659,9 @@ jobs: END echo "Checking connection" - ssh -q website "echo test" + ssh -q website "echo test" || ssh -q website "echo test" + + echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - name: Tag comparison check if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} @@ -663,7 +677,9 @@ jobs: # use sccache for Rust - name: Run sccache-cache - if: (env.SCCACHE_GHA_ENABLED == 'true') + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache @@ -707,7 +723,7 @@ jobs: run: | if [ ! 
-z $SSH_WEBSITE ]; then chmod +x conduwuit-macos-x86_64 - scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-x86_64 + scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-x86_64 fi - name: Upload macOS arm64 binary to webserver @@ -715,7 +731,7 @@ jobs: run: | if [ ! -z $SSH_WEBSITE ]; then chmod +x conduwuit-macos-arm64 - scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${GH_SHA}/conduwuit-macos-arm64 + scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-arm64 fi - name: Upload macOS x86_64 binary @@ -881,15 +897,15 @@ jobs: run: | # Dockerhub Container Registry if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${BRANCH_TAG}-haswell fi # GitLab Container Registry if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${BRANCH_TAG}-haswell fi - name: Create Docker combined manifests diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml index 5ff5f666..96b2d38b 100644 --- a/.github/workflows/docker-hub-description.yml +++ b/.github/workflows/docker-hub-description.yml @@ -8,13 +8,17 @@ on: - README.md - .github/workflows/docker-hub-description.yml + workflow_dispatch: + jobs: dockerHubDescription: runs-on: ubuntu-latest + if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' steps: - uses: actions/checkout@v4 with: persist-credentials: false + - name: Setting variables uses: actions/github-script@v7 id: var @@ -26,6 +30,7 @@ jobs: core.setOutput('github_repository', githubRepo) const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId core.setOutput('docker_repo', dockerRepo) + - name: Docker Hub Description uses: peter-evans/dockerhub-description@v4 with: diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index b0ccdb47..0eefe0a4 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -73,6 +73,9 @@ jobs: - uses: nixbuild/nix-quick-install-action@master - name: Restore and cache Nix store + # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting + # releases and tags + if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key From 29a19ba437c6b387f3f250a2d91e2edd6d751a18 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 02:06:20 +0000 Subject: [PATCH 0588/1248] 
add write_to_cache to descriptor Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 6 +++++- src/database/engine/descriptor.rs | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 1230081c..ab11b9e1 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -77,20 +77,24 @@ fn descriptor_cf_options( fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache>) -> Result { let mut table = table_options(desc, cache.is_some()); + if let Some(cache) = cache { table.set_block_cache(cache); } else { table.disable_cache(); } + let prepopulate = if desc.write_to_cache { "kFlushOnly" } else { "kDisable" }; + let string = format!( "{{block_based_table_factory={{num_file_reads_for_auto_readahead={0};\ max_auto_readahead_size={1};initial_auto_readahead_size={2};\ - enable_index_compression={3}}}}}", + enable_index_compression={3};prepopulate_block_cache={4}}}}}", desc.auto_readahead_thresh, desc.auto_readahead_max, desc.auto_readahead_init, desc.compressed_index, + prepopulate, ); opts.set_options_from_string(&string).map_err(map_err)?; diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 6ce8b5ad..c4dc2901 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -40,6 +40,7 @@ pub(crate) struct Descriptor { pub(crate) bottommost_level: Option, pub(crate) block_index_hashing: Option, pub(crate) cache_shards: u32, + pub(crate) write_to_cache: bool, pub(crate) auto_readahead_thresh: u32, pub(crate) auto_readahead_init: usize, pub(crate) auto_readahead_max: usize, @@ -71,6 +72,7 @@ pub(crate) static BASE: Descriptor = Descriptor { bottommost_level: Some(SENTINEL_COMPRESSION_LEVEL), block_index_hashing: None, cache_shards: 64, + write_to_cache: false, auto_readahead_thresh: 0, auto_readahead_init: 1024 * 16, auto_readahead_max: 1024 * 1024 * 2, From 
186c459584f3a25f00d873c5f73c820161226791 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 02:06:44 +0000 Subject: [PATCH 0589/1248] use scalar for file shape; increase shape for small-type columns Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 2 +- src/database/engine/descriptor.rs | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index ab11b9e1..382bc169 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -33,7 +33,7 @@ fn descriptor_cf_options( opts.set_write_buffer_size(desc.write_size); opts.set_target_file_size_base(desc.file_size); - opts.set_target_file_size_multiplier(desc.file_shape[0]); + opts.set_target_file_size_multiplier(desc.file_shape); opts.set_level_zero_file_num_compaction_trigger(desc.level0_width); opts.set_level_compaction_dynamic_level_bytes(false); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index c4dc2901..c735f402 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -27,7 +27,7 @@ pub(crate) struct Descriptor { pub(crate) level_size: u64, pub(crate) level_shape: [i32; 7], pub(crate) file_size: u64, - pub(crate) file_shape: [i32; 1], + pub(crate) file_shape: i32, pub(crate) level0_width: i32, pub(crate) merge_width: (i32, i32), pub(crate) ttl: u64, @@ -59,7 +59,7 @@ pub(crate) static BASE: Descriptor = Descriptor { level_size: 1024 * 1024 * 8, level_shape: [1, 1, 1, 3, 7, 15, 31], file_size: 1024 * 1024, - file_shape: [2], + file_shape: 2, level0_width: 2, merge_width: (2, 16), ttl: 60 * 60 * 24 * 21, @@ -106,6 +106,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { write_size: 1024 * 1024 * 16, level_size: 1024 * 512, file_size: 1024 * 128, + file_shape: 3, index_size: 512, block_size: 512, cache_shards: 64, @@ -121,6 +122,7 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { write_size: 1024 
* 1024 * 16, level_size: 1024 * 1024, file_size: 1024 * 512, + file_shape: 3, block_size: 512, cache_shards: 64, block_index_hashing: Some(false), From 9ad4f20da4547f66720e363c92fac13a5c3af343 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 08:59:48 +0000 Subject: [PATCH 0590/1248] propagate underflow as error result, not index bounds panic Signed-off-by: Jason Volk --- src/database/de.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index 4fdc2251..7cc8f00a 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -298,9 +298,11 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { fn deserialize_i64>(self, visitor: V) -> Result { const BYTES: usize = size_of::(); - let end = self.pos.saturating_add(BYTES); + let end = self.pos.saturating_add(BYTES).min(self.buf.len()); let bytes: ArrayVec = self.buf[self.pos..end].try_into()?; - let bytes = bytes.into_inner().expect("array size matches i64"); + let bytes = bytes + .into_inner() + .map_err(|_| Self::Error::SerdeDe("i64 buffer underflow".into()))?; self.inc_pos(BYTES); visitor.visit_i64(i64::from_be_bytes(bytes)) @@ -328,9 +330,11 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { fn deserialize_u64>(self, visitor: V) -> Result { const BYTES: usize = size_of::(); - let end = self.pos.saturating_add(BYTES); + let end = self.pos.saturating_add(BYTES).min(self.buf.len()); let bytes: ArrayVec = self.buf[self.pos..end].try_into()?; - let bytes = bytes.into_inner().expect("array size matches u64"); + let bytes = bytes + .into_inner() + .map_err(|_| Self::Error::SerdeDe("u64 buffer underflow".into()))?; self.inc_pos(BYTES); visitor.visit_u64(u64::from_be_bytes(bytes)) From 68856645ee4f5a6a437b01c4c94bdc233d99f140 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 07:18:33 +0000 Subject: [PATCH 0591/1248] refactor lazy-loading Signed-off-by: Jason Volk --- 
src/api/client/context.rs | 109 ++++--- src/api/client/message.rs | 151 ++++----- src/api/client/sync/v3.rs | 422 ++++++++++---------------- src/service/rooms/lazy_loading/mod.rs | 198 +++++++----- 4 files changed, 418 insertions(+), 462 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index b957561c..388bcf4d 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - at, err, ref_at, + at, deref_at, err, ref_at, utils::{ future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, @@ -8,15 +8,15 @@ use conduwuit::{ }, Err, PduEvent, Result, }; -use futures::{join, try_join, FutureExt, StreamExt, TryFutureExt}; -use ruma::{ - api::client::{context::get_context, filter::LazyLoadOptions}, - events::StateEventType, - OwnedEventId, UserId, +use futures::{ + future::{join, join3, try_join3, OptionFuture}, + FutureExt, StreamExt, TryFutureExt, }; +use ruma::{api::client::context::get_context, events::StateEventType, OwnedEventId, UserId}; +use service::rooms::{lazy_loading, lazy_loading::Options}; use crate::{ - client::message::{event_filter, ignored_filter, update_lazy, visibility_filter, LazySet}, + client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, Ruma, }; @@ -33,10 +33,10 @@ pub(crate) async fn get_context_route( State(services): State, body: Ruma, ) -> Result { - let filter = &body.filter; let sender = body.sender(); - let (sender_user, _) = sender; + let (sender_user, sender_device) = sender; let room_id = &body.room_id; + let filter = &body.filter; // Use limit or else 10, with maximum 100 let limit: usize = body @@ -45,18 +45,6 @@ pub(crate) async fn get_context_route( .unwrap_or(LIMIT_DEFAULT) .min(LIMIT_MAX); - // some clients, at least element, seem to require knowledge of redundant - // members for "inline" profiles on the timeline to work properly - let lazy_load_enabled = 
matches!(filter.lazy_load_options, LazyLoadOptions::Enabled { .. }); - - let lazy_load_redundant = if let LazyLoadOptions::Enabled { include_redundant_members } = - filter.lazy_load_options - { - include_redundant_members - } else { - false - }; - let base_id = services .rooms .timeline @@ -75,7 +63,7 @@ pub(crate) async fn get_context_route( .user_can_see_event(sender_user, &body.room_id, &body.event_id) .map(Ok); - let (base_id, base_pdu, visible) = try_join!(base_id, base_pdu, visible)?; + let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?; if base_pdu.room_id != body.room_id || base_pdu.event_id != body.event_id { return Err!(Request(NotFound("Base event not found."))); @@ -112,12 +100,32 @@ pub(crate) async fn get_context_route( .collect(); let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) = - join!(base_event, events_before, events_after); + join3(base_event, events_before, events_after).await; + + let lazy_loading_context = lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: Some(base_count.into_unsigned()), + options: Some(&filter.lazy_load_options), + }; + + let lazy_loading_witnessed: OptionFuture<_> = filter + .lazy_load_options + .is_enabled() + .then_some( + base_event + .iter() + .chain(events_before.iter()) + .chain(events_after.iter()), + ) + .map(|witnessed| lazy_loading_witness(&services, &lazy_loading_context, witnessed)) + .into(); let state_at = events_after .last() .map(ref_at!(1)) - .map_or(body.event_id.as_ref(), |e| e.event_id.as_ref()); + .map_or(body.event_id.as_ref(), |pdu| pdu.event_id.as_ref()); let state_ids = services .rooms @@ -126,41 +134,32 @@ pub(crate) async fn get_context_route( .or_else(|_| services.rooms.state.get_room_shortstatehash(room_id)) .and_then(|shortstatehash| services.rooms.state_accessor.state_full_ids(shortstatehash)) .map_err(|e| err!(Database("State not found: {e}"))) - .await?; + .boxed(); - let lazy = base_event - 
.iter() - .chain(events_before.iter()) - .chain(events_after.iter()) - .stream() - .fold(LazySet::new(), |lazy, item| { - update_lazy(&services, room_id, sender, lazy, item, lazy_load_redundant) - }) - .await; + let (lazy_loading_witnessed, state_ids) = join(lazy_loading_witnessed, state_ids).await; - let lazy = &lazy; - let state: Vec<_> = state_ids - .iter() - .stream() - .broad_filter_map(|(shortstatekey, event_id)| { - services - .rooms - .short - .get_statekey_from_short(*shortstatekey) - .map_ok(move |(event_type, state_key)| (event_type, state_key, event_id)) - .ok() - }) - .ready_filter_map(|(event_type, state_key, event_id)| { - if !lazy_load_enabled || event_type != StateEventType::RoomMember { - return Some(event_id); + let state_ids = state_ids?; + let lazy_loading_witnessed = lazy_loading_witnessed.unwrap_or_default(); + let shortstatekeys = state_ids.iter().stream().map(deref_at!(0)); + + let state: Vec<_> = services + .rooms + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(state_ids.iter().stream().map(at!(1))) + .ready_filter_map(|item| Some((item.0.ok()?, item.1))) + .ready_filter_map(|((event_type, state_key), event_id)| { + if filter.lazy_load_options.is_enabled() + && event_type == StateEventType::RoomMember + && state_key + .as_str() + .try_into() + .is_ok_and(|user_id: &UserId| !lazy_loading_witnessed.contains(user_id)) + { + return None; } - state_key - .as_str() - .try_into() - .ok() - .filter(|&user_id: &&UserId| lazy.contains(user_id)) - .map(|_| event_id) + Some(event_id) }) .broad_filter_map(|event_id: &OwnedEventId| { services.rooms.timeline.get_pdu(event_id).ok() diff --git a/src/api/client/message.rs b/src/api/client/message.rs index ec9a14d5..a508b5da 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,5 +1,3 @@ -use std::collections::HashSet; - use axum::extract::State; use conduwuit::{ at, is_equal_to, @@ -10,7 +8,7 @@ use conduwuit::{ }, Event, PduCount, Result, }; -use futures::{FutureExt, 
StreamExt}; +use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt}; use ruma::{ api::{ client::{filter::RoomEventFilter, message::get_message_events}, @@ -18,14 +16,19 @@ use ruma::{ }, events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, serde::Raw, - DeviceId, OwnedUserId, RoomId, UserId, + RoomId, UserId, +}; +use service::{ + rooms::{ + lazy_loading, + lazy_loading::{Options, Witness}, + timeline::PdusIterItem, + }, + Services, }; -use service::{rooms::timeline::PdusIterItem, Services}; use crate::Ruma; -pub(crate) type LazySet = HashSet; - /// list of safe and common non-state events to ignore if the user is ignored const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 17] = &[ Audio, @@ -84,13 +87,6 @@ pub(crate) async fn get_message_events_route( .unwrap_or(LIMIT_DEFAULT) .min(LIMIT_MAX); - services.rooms.lazy_loading.lazy_load_confirm_delivery( - sender_user, - sender_device, - room_id, - from, - ); - if matches!(body.dir, Direction::Backward) { services .rooms @@ -127,35 +123,34 @@ pub(crate) async fn get_message_events_route( .collect() .await; - let lazy = events - .iter() - .stream() - .fold(LazySet::new(), |lazy, item| { - update_lazy(&services, room_id, sender, lazy, item, false) - }) - .await; + let lazy_loading_context = lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: Some(from.into_unsigned()), + options: Some(&filter.lazy_load_options), + }; - let state = lazy - .iter() - .stream() - .broad_filter_map(|user_id| get_member_event(&services, room_id, user_id)) + let witness: OptionFuture<_> = filter + .lazy_load_options + .is_enabled() + .then(|| lazy_loading_witness(&services, &lazy_loading_context, events.iter())) + .into(); + + let state = witness + .map(Option::into_iter) + .map(|option| option.flat_map(Witness::into_iter)) + .map(IterStream::stream) + .into_stream() + .flatten() + .broad_filter_map(|user_id| async move { + get_member_event(&services, room_id, 
&user_id).await + }) .collect() .await; let next_token = events.last().map(at!(0)); - if !cfg!(feature = "element_hacks") { - if let Some(next_token) = next_token { - services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy, - next_token, - ); - } - } - let chunk = events .into_iter() .map(at!(1)) @@ -170,6 +165,52 @@ pub(crate) async fn get_message_events_route( }) } +pub(crate) async fn lazy_loading_witness<'a, I>( + services: &Services, + lazy_loading_context: &lazy_loading::Context<'_>, + events: I, +) -> Witness +where + I: Iterator + Clone + Send, +{ + let oldest = events + .clone() + .map(|(count, _)| count) + .copied() + .min() + .unwrap_or_else(PduCount::max); + + let newest = events + .clone() + .map(|(count, _)| count) + .copied() + .max() + .unwrap_or_else(PduCount::max); + + let receipts = services + .rooms + .read_receipt + .readreceipts_since(lazy_loading_context.room_id, oldest.into_unsigned()); + + pin_mut!(receipts); + let witness: Witness = events + .stream() + .map(|(_, pdu)| pdu.sender.clone()) + .chain( + receipts + .ready_take_while(|(_, c, _)| *c <= newest.into_unsigned()) + .map(|(user_id, ..)| user_id.to_owned()), + ) + .collect() + .await; + + services + .rooms + .lazy_loading + .witness_retain(witness, lazy_loading_context) + .await +} + async fn get_member_event( services: &Services, room_id: &RoomId, @@ -184,42 +225,6 @@ async fn get_member_event( .ok() } -pub(crate) async fn update_lazy( - services: &Services, - room_id: &RoomId, - sender: (&UserId, &DeviceId), - mut lazy: LazySet, - item: &PdusIterItem, - force: bool, -) -> LazySet { - let (_, event) = &item; - let (sender_user, sender_device) = sender; - - /* TODO: Remove the "element_hacks" check when these are resolved: - * https://github.com/vector-im/element-android/issues/3417 - * https://github.com/vector-im/element-web/issues/21034 - */ - if force || cfg!(features = "element_hacks") { - lazy.insert(event.sender().into()); - return 
lazy; - } - - if lazy.contains(event.sender()) { - return lazy; - } - - if !services - .rooms - .lazy_loading - .lazy_load_was_sent_before(sender_user, sender_device, room_id, event.sender()) - .await - { - lazy.insert(event.sender().into()); - } - - lazy -} - pub(crate) async fn ignored_filter( services: &Services, item: PdusIterItem, diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index d6b9f15c..7cca9616 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,9 +6,9 @@ use std::{ use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, is_false, + at, err, error, extract_variant, is_equal_to, pdu::EventHash, - result::{FlatOk, LogDebugErr}, + result::FlatOk, utils::{ self, future::OptionExt, @@ -19,16 +19,20 @@ use conduwuit::{ Error, PduCount, PduEvent, Result, }; use conduwuit_service::{ - rooms::short::{ShortStateHash, ShortStateKey}, + rooms::{ + lazy_loading, + lazy_loading::{Options, Witness}, + short::ShortStateHash, + }, Services, }; use futures::{ - future::{join, join3, join4, join5, try_join, try_join3, OptionFuture}, + future::{join, join3, join4, join5, try_join, try_join4, OptionFuture}, FutureExt, StreamExt, TryFutureExt, }; use ruma::{ api::client::{ - filter::{FilterDefinition, LazyLoadOptions}, + filter::FilterDefinition, sync::sync_events::{ self, v3::{ @@ -152,9 +156,14 @@ pub(crate) async fn build_sync_events( let (sender_user, sender_device) = body.sender(); let next_batch = services.globals.current_count()?; - let next_batch_string = next_batch.to_string(); + let since = body + .body + .since + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); - // Load filter + let full_state = body.body.full_state; let filter = match body.body.filter.as_ref() { | None => FilterDefinition::default(), | Some(Filter::FilterDefinition(ref filter)) => filter.clone(), @@ -165,24 +174,6 @@ pub(crate) async fn build_sync_events( .unwrap_or_default(), }; - // some 
clients, at least element, seem to require knowledge of redundant - // members for "inline" profiles on the timeline to work properly - let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options - { - | LazyLoadOptions::Enabled { include_redundant_members } => - (true, include_redundant_members), - | LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")), - }; - - let full_state = body.body.full_state; - - let since = body - .body - .since - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - let joined_rooms = services .rooms .state_cache @@ -196,9 +187,8 @@ pub(crate) async fn build_sync_events( room_id.clone(), since, next_batch, - lazy_load_enabled, - lazy_load_send_redundant, full_state, + &filter, ) .map_ok(move |(joined_room, dlu, jeu)| (room_id, joined_room, dlu, jeu)) .ok() @@ -227,9 +217,9 @@ pub(crate) async fn build_sync_events( since, room_id.clone(), sender_user, - &next_batch_string, + next_batch, full_state, - lazy_load_enabled, + &filter, ) .map_ok(move |left_room| (room_id, left_room)) .ok() @@ -358,7 +348,7 @@ pub(crate) async fn build_sync_events( device_one_time_keys_count, // Fallback keys are not yet supported device_unused_fallback_key_types: None, - next_batch: next_batch_string, + next_batch: next_batch.to_string(), presence: Presence { events: presence_updates .unwrap_or_default() @@ -449,7 +439,6 @@ async fn process_presence_updates( fields( room_id = %room_id, full = %full_state, - ll = %lazy_load_enabled, ), )] #[allow(clippy::too_many_arguments)] @@ -458,9 +447,9 @@ async fn handle_left_room( since: u64, ref room_id: OwnedRoomId, sender_user: &UserId, - next_batch_string: &str, + next_batch: u64, full_state: bool, - lazy_load_enabled: bool, + filter: &FilterDefinition, ) -> Result> { let left_count = services .rooms @@ -503,7 +492,7 @@ async fn handle_left_room( account_data: RoomAccountData { events: Vec::new() }, timeline: Timeline { limited: false, - prev_batch: 
Some(next_batch_string.to_owned()), + prev_batch: Some(next_batch.to_string()), events: Vec::new(), }, state: RoomState { @@ -567,28 +556,32 @@ async fn handle_left_room( .get_statekey_from_short(shortstatekey) .await?; - // TODO: Delete "element_hacks" when this is resolved: https://github.com/vector-im/element-web/issues/22565 - if !lazy_load_enabled - || event_type != StateEventType::RoomMember - || full_state - || (cfg!(feature = "element_hacks") && *sender_user == state_key) + if filter.room.state.lazy_load_options.is_enabled() + && event_type == StateEventType::RoomMember + && !full_state + && state_key + .as_str() + .try_into() + .is_ok_and(|user_id: &UserId| sender_user != user_id) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - continue; - }; - - left_state_events.push(pdu.to_sync_state_event()); + continue; } + + let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { + error!("Pdu in state not found: {event_id}"); + continue; + }; + + left_state_events.push(pdu.to_sync_state_event()); } } Ok(Some(LeftRoom { account_data: RoomAccountData { events: Vec::new() }, timeline: Timeline { - limited: true, /* TODO: support left timeline events so we dont need to set this to - * true */ - prev_batch: Some(next_batch_string.to_owned()), + // TODO: support left timeline events so we dont need to set limited to true + limited: true, + prev_batch: Some(next_batch.to_string()), events: Vec::new(), // and so we dont need to set this to empty vec }, state: RoomState { events: left_state_events }, @@ -611,9 +604,8 @@ async fn load_joined_room( ref room_id: OwnedRoomId, since: u64, next_batch: u64, - lazy_load_enabled: bool, - lazy_load_send_redundant: bool, full_state: bool, + filter: &FilterDefinition, ) -> Result<(JoinedRoom, HashSet, HashSet)> { let sincecount = PduCount::Normal(since); let next_batchcount = PduCount::Normal(next_batch); @@ -640,17 +632,26 @@ async fn 
load_joined_room( 10_usize, ); - let (current_shortstatehash, since_shortstatehash, timeline) = - try_join3(current_shortstatehash, since_shortstatehash, timeline).await?; + let receipt_events = services + .rooms + .read_receipt + .readreceipts_since(room_id, since) + .filter_map(|(read_user, _, edu)| async move { + services + .users + .user_is_ignored(read_user, sender_user) + .await + .or_some((read_user.to_owned(), edu)) + }) + .collect::>>() + .map(Ok); + + let (current_shortstatehash, since_shortstatehash, timeline, receipt_events) = + try_join4(current_shortstatehash, since_shortstatehash, timeline, receipt_events) + .boxed() + .await?; let (timeline_pdus, limited) = timeline; - let timeline_users = - timeline_pdus - .iter() - .fold(HashSet::new(), |mut timeline_users, (_, event)| { - timeline_users.insert(event.sender.as_str().to_owned()); - timeline_users - }); let last_notification_read: OptionFuture<_> = timeline_pdus .is_empty() @@ -662,21 +663,68 @@ async fn load_joined_room( }) .into(); - let send_notification_counts = last_notification_read - .is_none_or(|&count| count > since) - .await; - - services.rooms.lazy_loading.lazy_load_confirm_delivery( - sender_user, - sender_device, - room_id, - sincecount, - ); - let no_state_changes = timeline_pdus.is_empty() && (since_shortstatehash.is_none() || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); + let since_sender_member: OptionFuture<_> = since_shortstatehash + .map(|short| { + services + .rooms + .state_accessor + .state_get_content(short, &StateEventType::RoomMember, sender_user.as_str()) + .ok() + }) + .into(); + + let joined_since_last_sync = + since_sender_member + .await + .flatten() + .is_none_or(|content: RoomMemberEventContent| { + content.membership != MembershipState::Join + }); + + let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() + || filter.room.timeline.lazy_load_options.is_enabled(); + + let generate_witness = + lazy_loading_enabled && 
(since_shortstatehash.is_none() || joined_since_last_sync); + + let lazy_reset = lazy_loading_enabled && since_shortstatehash.is_none(); + + let lazy_loading_context = &lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: None, + options: Some(&filter.room.state.lazy_load_options), + }; + + // Reset lazy loading because this is an initial sync + let lazy_load_reset: OptionFuture<_> = lazy_reset + .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) + .into(); + + lazy_load_reset.await; + let witness: Option = generate_witness.then(|| { + timeline_pdus + .iter() + .map(|(_, pdu)| pdu.sender.clone()) + .chain(receipt_events.keys().cloned()) + .collect() + }); + + let witness: OptionFuture<_> = witness + .map(|witness| { + services + .rooms + .lazy_loading + .witness_retain(witness, lazy_loading_context) + }) + .into(); + + let witness = witness.await; let mut device_list_updates = HashSet::::new(); let mut left_encrypted_users = HashSet::::new(); let StateChanges { @@ -691,19 +739,17 @@ async fn load_joined_room( calculate_state_changes( services, sender_user, - sender_device, room_id, - next_batchcount, - lazy_load_enabled, - lazy_load_send_redundant, full_state, + filter, &mut device_list_updates, &mut left_encrypted_users, since_shortstatehash, current_shortstatehash, - &timeline_pdus, - &timeline_users, + joined_since_last_sync, + witness.as_ref(), ) + .boxed() .await? 
}; @@ -728,19 +774,6 @@ async fn load_joined_room( .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); - let receipt_events = services - .rooms - .read_receipt - .readreceipts_since(room_id, since) - .filter_map(|(read_user, _, edu)| async move { - services - .users - .user_is_ignored(read_user, sender_user) - .await - .or_some((read_user.to_owned(), edu)) - }) - .collect::>>(); - let typing_events = services .rooms .typing @@ -760,6 +793,10 @@ async fn load_joined_room( }) .unwrap_or(Vec::new()); + let send_notification_counts = last_notification_read + .is_none_or(|&count| count > since) + .await; + let notification_count: OptionFuture<_> = send_notification_counts .then(|| { services @@ -782,14 +819,14 @@ async fn load_joined_room( }) .into(); - let events = join4(room_events, account_data_events, receipt_events, typing_events); + let events = join3(room_events, account_data_events, typing_events); let unread_notifications = join(notification_count, highlight_count); let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) .boxed() .await; - let (room_events, account_data_events, receipt_events, typing_events) = events; + let (room_events, account_data_events, typing_events) = events; let (notification_count, highlight_count) = unread_notifications; device_list_updates.extend(device_updates); @@ -866,7 +903,6 @@ async fn load_joined_room( skip_all, fields( full = %full_state, - ll = ?(lazy_load_enabled, lazy_load_send_redundant), cs = %current_shortstatehash, ss = ?since_shortstatehash, ) @@ -875,64 +911,38 @@ async fn load_joined_room( async fn calculate_state_changes( services: &Services, sender_user: &UserId, - sender_device: &DeviceId, room_id: &RoomId, - next_batchcount: PduCount, - lazy_load_enabled: bool, - lazy_load_send_redundant: bool, full_state: bool, + filter: &FilterDefinition, device_list_updates: &mut HashSet, left_encrypted_users: &mut HashSet, since_shortstatehash: Option, 
current_shortstatehash: ShortStateHash, - timeline_pdus: &Vec<(PduCount, PduEvent)>, - timeline_users: &HashSet, + joined_since_last_sync: bool, + witness: Option<&Witness>, ) -> Result { - let since_sender_member: OptionFuture<_> = since_shortstatehash - .map(|short| { - services - .rooms - .state_accessor - .state_get_content(short, &StateEventType::RoomMember, sender_user.as_str()) - .ok() - }) - .into(); - - let joined_since_last_sync = - since_sender_member - .await - .flatten() - .is_none_or(|content: RoomMemberEventContent| { - content.membership != MembershipState::Join - }); - if since_shortstatehash.is_none() || joined_since_last_sync { calculate_state_initial( services, sender_user, - sender_device, room_id, - next_batchcount, - lazy_load_enabled, full_state, + filter, current_shortstatehash, - timeline_users, + witness, ) .await } else { calculate_state_incremental( services, sender_user, - sender_device, room_id, - next_batchcount, - lazy_load_send_redundant, full_state, + filter, device_list_updates, left_encrypted_users, since_shortstatehash, current_shortstatehash, - timeline_pdus, joined_since_last_sync, ) .await @@ -944,87 +954,54 @@ async fn calculate_state_changes( async fn calculate_state_initial( services: &Services, sender_user: &UserId, - sender_device: &DeviceId, room_id: &RoomId, - next_batchcount: PduCount, - lazy_load_enabled: bool, full_state: bool, + filter: &FilterDefinition, current_shortstatehash: ShortStateHash, - timeline_users: &HashSet, + witness: Option<&Witness>, ) -> Result { - // Probably since = 0, we will do an initial sync - let state = services + let state_events = services .rooms .state_accessor .state_full_ids(current_shortstatehash) - .await? 
- .into_iter() - .stream() - .broad_filter_map(|(shortstatekey, event_id): (ShortStateKey, OwnedEventId)| { - services - .rooms - .short - .get_statekey_from_short(shortstatekey) - .map_ok(move |(event_type, state_key)| ((event_type, state_key), event_id)) - .ok() - }) - .fold((Vec::new(), HashSet::new()), |a, item| async move { - let (mut state_events, mut lazy_loaded) = a; - let ((event_type, state_key), event_id) = item; + .await?; - if event_type != StateEventType::RoomMember { - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - return (state_events, lazy_loaded); - }; + let shortstatekeys = state_events.keys().copied().stream(); - state_events.push(pdu); - return (state_events, lazy_loaded); + let state_events = services + .rooms + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(state_events.values().cloned().stream()) + .ready_filter_map(|item| Some((item.0.ok()?, item.1))) + .ready_filter_map(|((event_type, state_key), event_id)| { + let lazy_load_enabled = filter.room.state.lazy_load_options.is_enabled() + || filter.room.timeline.lazy_load_options.is_enabled(); + + if lazy_load_enabled + && event_type == StateEventType::RoomMember + && !full_state + && state_key.as_str().try_into().is_ok_and(|user_id: &UserId| { + sender_user != user_id + && witness.is_some_and(|witness| !witness.contains(user_id)) + }) { + return None; } - // TODO: Delete "element_hacks" when this is resolved: https://github.com/vector-im/element-web/issues/22565 - if !lazy_load_enabled - || full_state - || timeline_users.contains(&state_key) - || (cfg!(feature = "element_hacks") && *sender_user == state_key) - { - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - return (state_events, lazy_loaded); - }; - - // This check is in case a bad user ID made it into the database - if let Ok(uid) = OwnedUserId::parse(&state_key) { - 
lazy_loaded.insert(uid); - } - - state_events.push(pdu); - } - - (state_events, lazy_loaded) + Some(event_id) }) + .broad_filter_map(|event_id: OwnedEventId| async move { + services.rooms.timeline.get_pdu(&event_id).await.ok() + }) + .collect() .map(Ok); let counts = calculate_counts(services, room_id, sender_user); - let ((joined_member_count, invited_member_count, heroes), (state_events, lazy_loaded)) = - try_join(counts, state).boxed().await?; - - // Reset lazy loading because this is an initial sync - services - .rooms - .lazy_loading - .lazy_load_reset(sender_user, sender_device, room_id) - .await; + let ((joined_member_count, invited_member_count, heroes), state_events) = + try_join(counts, state_events).boxed().await?; // The state_events above should contain all timeline_users, let's mark them as // lazy loaded. - services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ); Ok(StateChanges { heroes, @@ -1040,16 +1017,13 @@ async fn calculate_state_initial( async fn calculate_state_incremental( services: &Services, sender_user: &UserId, - sender_device: &DeviceId, room_id: &RoomId, - next_batchcount: PduCount, - lazy_load_send_redundant: bool, full_state: bool, + _filter: &FilterDefinition, device_list_updates: &mut HashSet, left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, - timeline_pdus: &Vec<(PduCount, PduEvent)>, joined_since_last_sync: bool, ) -> Result { // Incremental /sync @@ -1162,76 +1136,12 @@ async fn calculate_state_incremental( (None, None, None) }; - let mut state_events = delta_state_events; - - // Mark all member events we're returning as lazy-loaded - let mut lazy_loaded = state_events - .iter() - .filter(|pdu| pdu.kind == RoomMember) - .filter_map(|pdu| { - pdu.state_key - .clone() - .map(TryInto::try_into) - .map(LogDebugErr::log_debug_err) - .flat_ok() - }) - .fold(HashSet::new(), |mut lazy_loaded, user_id| { - 
lazy_loaded.insert(user_id); - lazy_loaded - }); - - // Fetch contextual member state events for events from the timeline, and - // mark them as lazy-loaded as well. - for (_, event) in timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } - - let sent_before: OptionFuture<_> = (!lazy_load_send_redundant) - .then(|| { - services.rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - room_id, - &event.sender, - ) - }) - .into(); - - let member_event: OptionFuture<_> = sent_before - .await - .is_none_or(is_false!()) - .then(|| { - services.rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomMember, - event.sender.as_str(), - ) - }) - .into(); - - let Some(Ok(member_event)) = member_event.await else { - continue; - }; - - lazy_loaded.insert(event.sender.clone()); - state_events.push(member_event); - } - - services.rooms.lazy_loading.lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ); - Ok(StateChanges { heroes, joined_member_count, invited_member_count, joined_since_last_sync, - state_events, + state_events: delta_state_events, }) } diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index c3c27b9e..67274ff1 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,109 +1,65 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Write, - sync::{Arc, Mutex}, -}; +//! 
Lazy Loading + +use std::{collections::HashSet, sync::Arc}; use conduwuit::{ implement, - utils::{stream::TryIgnore, ReadyExt}, - PduCount, Result, + utils::{stream::TryIgnore, IterStream, ReadyExt}, + Result, }; -use database::{Interfix, Map}; -use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; +use database::{Database, Deserialized, Handle, Interfix, Map}; +use futures::{pin_mut, Stream, StreamExt}; +use ruma::{api::client::filter::LazyLoadOptions, DeviceId, OwnedUserId, RoomId, UserId}; pub struct Service { - lazy_load_waiting: Mutex, db: Data, } struct Data { lazyloadedids: Arc, + db: Arc, } -type LazyLoadWaiting = HashMap; -type LazyLoadWaitingKey = (OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount); -type LazyLoadWaitingVal = HashSet; +pub trait Options: Send + Sync { + fn is_enabled(&self) -> bool; + fn include_redundant_members(&self) -> bool; +} + +#[derive(Clone, Debug)] +pub struct Context<'a> { + pub user_id: &'a UserId, + pub device_id: &'a DeviceId, + pub room_id: &'a RoomId, + pub token: Option, + pub options: Option<&'a LazyLoadOptions>, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Status { + Unseen, + Seen(u64), +} + +pub type Witness = HashSet; +type Key<'a> = (&'a UserId, &'a DeviceId, &'a RoomId, &'a UserId); impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - lazy_load_waiting: LazyLoadWaiting::new().into(), db: Data { lazyloadedids: args.db["lazyloadedids"].clone(), + db: args.db.clone(), }, })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { - let lazy_load_waiting = self.lazy_load_waiting.lock().expect("locked").len(); - writeln!(out, "lazy_load_waiting: {lazy_load_waiting}")?; - - Ok(()) - } - - fn clear_cache(&self) { self.lazy_load_waiting.lock().expect("locked").clear(); } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } #[implement(Service)] #[tracing::instrument(skip(self), level = "debug")] 
-#[inline] -pub async fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, -) -> bool { - let key = (user_id, device_id, room_id, ll_user); - self.db.lazyloadedids.qry(&key).await.is_ok() -} - -#[implement(Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet, - count: PduCount, -) { - let key = (user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), count); - - self.lazy_load_waiting - .lock() - .expect("locked") - .insert(key, lazy_load); -} - -#[implement(Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: PduCount, -) { - let key = (user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), since); - - let Some(user_ids) = self.lazy_load_waiting.lock().expect("locked").remove(&key) else { - return; - }; - - for ll_id in &user_ids { - let key = (user_id, device_id, room_id, ll_id); - self.db.lazyloadedids.put_raw(key, []); - } -} - -#[implement(Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub async fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) { - let prefix = (user_id, device_id, room_id, Interfix); +pub async fn reset(&self, ctx: &Context<'_>) { + let prefix = (ctx.user_id, ctx.device_id, ctx.room_id, Interfix); self.db .lazyloadedids .keys_prefix_raw(&prefix) @@ -111,3 +67,89 @@ pub async fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room .ready_for_each(|key| self.db.lazyloadedids.remove(key)) .await; } + +#[implement(Service)] +#[tracing::instrument(name = "retain", level = "debug", skip_all)] +pub async fn witness_retain(&self, senders: Witness, ctx: &Context<'_>) -> Witness { + debug_assert!( + ctx.options.is_none_or(Options::is_enabled), + 
"lazy loading should be enabled by your options" + ); + + let include_redundant = cfg!(feature = "element_hacks") + || ctx.options.is_some_and(Options::include_redundant_members); + + let witness = self + .witness(ctx, senders.iter().map(AsRef::as_ref)) + .zip(senders.iter().stream()); + + pin_mut!(witness); + let _cork = self.db.db.cork(); + let mut senders = Witness::with_capacity(senders.len()); + while let Some((status, sender)) = witness.next().await { + if include_redundant || status == Status::Unseen { + senders.insert(sender.into()); + continue; + } + + if let Status::Seen(seen) = status { + if seen == 0 || ctx.token == Some(seen) { + senders.insert(sender.into()); + continue; + } + } + } + + senders +} + +#[implement(Service)] +fn witness<'a, I>( + &'a self, + ctx: &'a Context<'a>, + senders: I, +) -> impl Stream + Send + 'a +where + I: Iterator + Send + Clone + 'a, +{ + let make_key = + |sender: &'a UserId| -> Key<'a> { (ctx.user_id, ctx.device_id, ctx.room_id, sender) }; + + self.db + .lazyloadedids + .qry_batch(senders.clone().stream().map(make_key)) + .map(into_status) + .zip(senders.stream()) + .map(move |(status, sender)| { + if matches!(status, Status::Unseen) { + self.db + .lazyloadedids + .put_aput::<8, _, _>(make_key(sender), 0_u64); + } else if matches!(status, Status::Seen(0)) { + self.db + .lazyloadedids + .put_aput::<8, _, _>(make_key(sender), ctx.token.unwrap_or(0_u64)); + } + + status + }) +} + +fn into_status(result: Result>) -> Status { + match result.and_then(|handle| handle.deserialized()) { + | Ok(seen) => Status::Seen(seen), + | Err(_) => Status::Unseen, + } +} + +impl Options for LazyLoadOptions { + fn include_redundant_members(&self) -> bool { + if let Self::Enabled { include_redundant_members } = self { + *include_redundant_members + } else { + false + } + } + + fn is_enabled(&self) -> bool { !self.is_disabled() } +} From 4b3c54bbfa8340c4bab09a221f47afb0c6d04346 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 
26 Jan 2025 13:11:47 -0500 Subject: [PATCH 0592/1248] check if DOCKER_USERNAME is empty as well in dockerhub desc publish Signed-off-by: June Clementine Strawberry --- .github/workflows/docker-hub-description.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml index 96b2d38b..b4f142db 100644 --- a/.github/workflows/docker-hub-description.yml +++ b/.github/workflows/docker-hub-description.yml @@ -13,7 +13,7 @@ on: jobs: dockerHubDescription: runs-on: ubuntu-latest - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' + if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' && (vars.DOCKER_USERNAME != '') }} steps: - uses: actions/checkout@v4 with: From ee63f720c97e6047ac712ce828505e7e82923336 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 20:39:08 +0000 Subject: [PATCH 0593/1248] revert incorrect tags --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b0b0bd53..cde31232 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -897,15 +897,15 @@ jobs: run: | # Dockerhub Container Registry if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell + docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${BRANCH_TAG}-haswell + docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell fi # GitLab Container Registry if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${BRANCH_TAG}-haswell + docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell fi - name: Create Docker combined manifests From 24e6086f12d6f3421b45c13b02aac96c3a188205 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 20:44:40 +0000 Subject: [PATCH 0594/1248] load correct image file --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cde31232..42f4e12e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -815,7 +815,7 @@ jobs: - name: Load and push amd64 haswell image run: | - docker load -i oci-image-amd64.tar.gz + docker load -i oci-image-amd64-haswell-optimised.tar.gz if [ ! 
-z $DOCKERHUB_TOKEN ]; then docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell From 1c585ab1b6b55e40dadca2d1339ea5f8e4a244bb Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 22:16:32 +0000 Subject: [PATCH 0595/1248] create manifests for unique docker tags --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 42f4e12e..8a870700 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -897,14 +897,17 @@ jobs: run: | # Dockerhub Container Registry if [ ! -z $DOCKERHUB_TOKEN ]; then + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell fi # GitLab Container Registry if [ ! 
-z $GITLAB_TOKEN ]; then + docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell fi From db7d23e7804f9ed707358a0da9d8f3f4fa588bd1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 26 Jan 2025 23:52:51 +0000 Subject: [PATCH 0596/1248] fix creating manifest on wrong repo --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8a870700..de599f45 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -902,7 +902,7 @@ jobs: fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell fi # GitLab Container Registry From 71a3855af61b0071832c23085f76a8711e32b49c Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 26 Jan 2025 21:30:15 -0500 Subject: [PATCH 0597/1248] fix couple more docker manifest typos Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de599f45..35d60aa1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -902,12 +902,12 @@ jobs: fi # GitHub Container Registry if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend 
${GHCR_REPO}:${UNIQUE_TAG}-haswell fi # GitLab Container Registry if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell + docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell fi From 3e0ff2dc840cd9b3b823cf5d8d9a6739ee531896 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 23:41:39 +0000 Subject: [PATCH 0598/1248] simplify references to server_name Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 12 ++++++------ src/admin/federation/commands.rs | 2 +- src/admin/server/commands.rs | 2 +- src/api/client/openid.rs | 2 +- src/api/client/report.rs | 2 +- src/api/client/room/create.rs | 2 +- src/api/client/voip.rs | 2 +- src/core/server.rs | 6 ++++++ src/service/globals/mod.rs | 8 ++++---- src/service/migrations.rs | 10 ++++------ src/service/resolver/actual.rs | 3 +-- src/service/rooms/alias/mod.rs | 2 +- src/service/sending/sender.rs | 2 +- 13 files changed, 29 insertions(+), 26 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index af7bd79f..855941fd 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -554,7 +554,7 @@ pub(super) async fn first_pdu_in_room( .services .rooms .state_cache - .server_in_room(&self.services.server.config.server_name, &room_id) + .server_in_room(&self.services.server.name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -583,7 +583,7 @@ pub(super) async fn latest_pdu_in_room( .services .rooms .state_cache - .server_in_room(&self.services.server.config.server_name, &room_id) + .server_in_room(&self.services.server.name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -613,7 +613,7 @@ pub(super) async fn force_set_room_state_from_server( .services .rooms .state_cache - 
.server_in_room(&self.services.server.config.server_name, &room_id) + .server_in_room(&self.services.server.name, &room_id) .await { return Ok(RoomMessageEventContent::text_plain( @@ -757,7 +757,7 @@ pub(super) async fn get_signing_keys( query: bool, ) -> Result { let server_name = - server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); + server_name.unwrap_or_else(|| self.services.server.name.clone().into()); if let Some(notary) = notary { let signing_keys = self @@ -794,7 +794,7 @@ pub(super) async fn get_verify_keys( server_name: Option>, ) -> Result { let server_name = - server_name.unwrap_or_else(|| self.services.server.config.server_name.clone().into()); + server_name.unwrap_or_else(|| self.services.server.name.clone().into()); let keys = self .services @@ -824,7 +824,7 @@ pub(super) async fn resolve_true_destination( )); } - if server_name == self.services.server.config.server_name { + if server_name == self.services.server.name { return Ok(RoomMessageEventContent::text_plain( "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ fetching local PDUs.", diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index be91ef0a..13bc8da4 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -92,7 +92,7 @@ pub(super) async fn remote_user_in_rooms( &self, user_id: Box, ) -> Result { - if user_id.server_name() == self.services.server.config.server_name { + if user_id.server_name() == self.services.server.name { return Ok(RoomMessageEventContent::text_plain( "User belongs to our server, please use `list-joined-rooms` user admin command \ instead.", diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 3ea27883..47509bad 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -34,7 +34,7 @@ pub(super) async fn reload_config( ) -> Result { let path = path.as_deref().into_iter(); let config = Config::load(path).and_then(|raw| Config::new(&raw))?; - if config.server_name != self.services.server.config.server_name { + if config.server_name != self.services.server.name { return Err!("You can't change the server name."); } diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 3547d284..671d0c6d 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -37,7 +37,7 @@ pub(crate) async fn create_openid_token_route( Ok(account::request_openid_token::v3::Response { access_token, token_type: TokenType::Bearer, - matrix_server_name: services.server.config.server_name.clone(), + matrix_server_name: services.server.name.clone(), expires_in: Duration::from_secs(expires_in), }) } diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 2b25b518..57de3f12 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -50,7 +50,7 @@ pub(crate) async fn report_room_route( if !services .rooms .state_cache - .server_in_room(&services.server.config.server_name, &body.room_id) + .server_in_room(&services.server.name, &body.room_id) .await 
{ return Err!(Request(NotFound( diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 1b6e8667..a401b63d 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -71,7 +71,7 @@ pub(crate) async fn create_room_route( let room_id: OwnedRoomId = if let Some(custom_room_id) = &body.room_id { custom_room_id_check(&services, custom_room_id)? } else { - RoomId::new(&services.server.config.server_name) + RoomId::new(&services.server.name) }; // check if room ID doesn't already exist instead of erroring on auth check diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index c08b1fdf..70ad4913 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -38,7 +38,7 @@ pub(crate) async fn turn_server_route( let user = body.sender_user.unwrap_or_else(|| { UserId::parse_with_server_name( utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), - &services.server.config.server_name, + &services.server.name, ) .unwrap() }); diff --git a/src/core/server.rs b/src/core/server.rs index 6838c9c9..05a4aae7 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -6,12 +6,17 @@ use std::{ time::SystemTime, }; +use ruma::OwnedServerName; use tokio::{runtime, sync::broadcast}; use crate::{config, config::Config, err, log::Log, metrics::Metrics, Err, Result}; /// Server runtime state; public portion pub struct Server { + /// Configured name of server. This is the same as the one in the config + /// but developers can (and should) reference this string instead. 
+ pub name: OwnedServerName, + /// Server-wide configuration instance pub config: config::Manager, @@ -46,6 +51,7 @@ impl Server { #[must_use] pub fn new(config: Config, runtime: Option, log: Log) -> Self { Self { + name: config.server_name.clone(), config: config::Manager::new(config), started: SystemTime::now(), stopping: AtomicBool::new(false), diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index ef34054f..485d5020 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -61,11 +61,11 @@ impl crate::Service for Service { db, server: args.server.clone(), bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &config.server_name)) + admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &args.server.name)) .expect("#admins:server_name is valid alias name"), server_user: UserId::parse_with_server_name( String::from("conduit"), - &config.server_name, + &args.server.name, ) .expect("@conduit:server_name is valid"), turn_secret, @@ -107,7 +107,7 @@ impl Service { pub fn current_count(&self) -> Result { Ok(self.db.current_count()) } #[inline] - pub fn server_name(&self) -> &ServerName { self.server.config.server_name.as_ref() } + pub fn server_name(&self) -> &ServerName { self.server.name.as_ref() } pub fn allow_registration(&self) -> bool { self.server.config.allow_registration } @@ -207,7 +207,7 @@ impl Service { #[inline] pub fn server_is_ours(&self, server_name: &ServerName) -> bool { - server_name == self.server.config.server_name + server_name == self.server_name() } #[inline] diff --git a/src/service/migrations.rs b/src/service/migrations.rs index c42c0324..27b4ab5a 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -218,8 +218,6 @@ async fn migrate(services: &Services) -> Result<()> { } async fn db_lt_12(services: &Services) -> Result<()> { - let config = &services.server.config; - for username in &services .users 
.list_local_users() @@ -227,7 +225,8 @@ async fn db_lt_12(services: &Services) -> Result<()> { .collect::>() .await { - let user = match UserId::parse_with_server_name(username.as_str(), &config.server_name) { + let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name) + { | Ok(u) => u, | Err(e) => { warn!("Invalid username {username}: {e}"); @@ -297,8 +296,6 @@ async fn db_lt_12(services: &Services) -> Result<()> { } async fn db_lt_13(services: &Services) -> Result<()> { - let config = &services.server.config; - for username in &services .users .list_local_users() @@ -306,7 +303,8 @@ async fn db_lt_13(services: &Services) -> Result<()> { .collect::>() .await { - let user = match UserId::parse_with_server_name(username.as_str(), &config.server_name) { + let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name) + { | Ok(u) => u, | Err(e) => { warn!("Invalid username {username}: {e}"); diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 33374240..66854764 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -401,8 +401,7 @@ impl super::Service { } fn validate_dest(&self, dest: &ServerName) -> Result<()> { - let config = &self.services.server.config; - if dest == config.server_name && !config.federation_loopback { + if dest == self.services.server.name && !self.services.server.config.federation_loopback { return Err!("Won't send federation request to ourselves"); } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 91797d01..17ed5e13 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -150,7 +150,7 @@ impl Service { let servers_contains_ours = || { servers .as_ref() - .is_some_and(|servers| servers.contains(&self.services.server.config.server_name)) + .is_some_and(|servers| servers.contains(&self.services.server.name)) }; if !server_is_ours && !servers_contains_ours() { diff --git 
a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 5fd4cf91..122e75c5 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -850,7 +850,7 @@ impl Service { let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); let request = send_transaction_message::v1::Request { - origin: self.server.config.server_name.clone(), + origin: self.server.name.clone(), pdus: pdu_jsons, edus: edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), From 4a2d0d35bcae8f189774214eb850d78dd53332eb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 00:24:11 +0000 Subject: [PATCH 0599/1248] split federation request from sending service Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 6 +- .../send.rs => federation/execute.rs} | 163 ++++++++++-------- src/service/federation/mod.rs | 33 ++++ src/service/mod.rs | 1 + src/service/sending/mod.rs | 24 ++- src/service/sending/sender.rs | 2 +- src/service/services.rs | 4 +- 7 files changed, 146 insertions(+), 87 deletions(-) rename src/service/{sending/send.rs => federation/execute.rs} (65%) create mode 100644 src/service/federation/mod.rs diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 855941fd..cdd69c0f 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -756,8 +756,7 @@ pub(super) async fn get_signing_keys( notary: Option>, query: bool, ) -> Result { - let server_name = - server_name.unwrap_or_else(|| self.services.server.name.clone().into()); + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); if let Some(notary) = notary { let signing_keys = self @@ -793,8 +792,7 @@ pub(super) async fn get_verify_keys( &self, server_name: Option>, ) -> Result { - let server_name = - server_name.unwrap_or_else(|| self.services.server.name.clone().into()); + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); let keys = self .services diff 
--git a/src/service/sending/send.rs b/src/service/federation/execute.rs similarity index 65% rename from src/service/sending/send.rs rename to src/service/federation/execute.rs index c8a64f3c..27d98968 100644 --- a/src/service/sending/send.rs +++ b/src/service/federation/execute.rs @@ -1,4 +1,4 @@ -use std::mem; +use std::{fmt::Debug, mem}; use bytes::Bytes; use conduwuit::{ @@ -20,82 +20,109 @@ use ruma::{ use crate::resolver::actual::ActualDest; -impl super::Service { - #[tracing::instrument( +/// Sends a request to a federation server +#[implement(super::Service)] +#[tracing::instrument(skip_all, name = "request", level = "debug")] +pub async fn execute(&self, dest: &ServerName, request: T) -> Result +where + T: OutgoingRequest + Debug + Send, +{ + let client = &self.services.client.federation; + self.execute_on(client, dest, request).await +} + +/// Like execute() but with a very large timeout +#[implement(super::Service)] +#[tracing::instrument(skip_all, name = "synapse", level = "debug")] +pub async fn execute_synapse( + &self, + dest: &ServerName, + request: T, +) -> Result +where + T: OutgoingRequest + Debug + Send, +{ + let client = &self.services.client.synapse; + self.execute_on(client, dest, request).await +} + +#[implement(super::Service)] +#[tracing::instrument( level = "debug" skip(self, client, request), )] - pub async fn send( - &self, - client: &Client, - dest: &ServerName, - request: T, - ) -> Result - where - T: OutgoingRequest + Send, +pub async fn execute_on( + &self, + client: &Client, + dest: &ServerName, + request: T, +) -> Result +where + T: OutgoingRequest + Send, +{ + if !self.services.server.config.allow_federation { + return Err!(Config("allow_federation", "Federation is disabled.")); + } + + if self + .services + .server + .config + .forbidden_remote_server_names + .contains(dest) { - if !self.server.config.allow_federation { - return Err!(Config("allow_federation", "Federation is disabled.")); - } - - if self - .server - .config - 
.forbidden_remote_server_names - .contains(dest) - { - return Err!(Request(Forbidden(debug_warn!( - "Federation with {dest} is not allowed." - )))); - } - - let actual = self.services.resolver.get_actual_dest(dest).await?; - let request = into_http_request::(&actual, request)?; - let request = self.prepare(dest, request)?; - self.execute::(dest, &actual, request, client).await + return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } - async fn execute( - &self, - dest: &ServerName, - actual: &ActualDest, - request: Request, - client: &Client, - ) -> Result - where - T: OutgoingRequest + Send, - { - let url = request.url().clone(); - let method = request.method().clone(); + let actual = self.services.resolver.get_actual_dest(dest).await?; + let request = into_http_request::(&actual, request)?; + let request = self.prepare(dest, request)?; + self.perform::(dest, &actual, request, client).await +} - debug!(?method, ?url, "Sending request"); - match client.execute(request).await { - | Ok(response) => handle_response::(dest, actual, &method, &url, response).await, - | Err(error) => - Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), +#[implement(super::Service)] +async fn perform( + &self, + dest: &ServerName, + actual: &ActualDest, + request: Request, + client: &Client, +) -> Result +where + T: OutgoingRequest + Send, +{ + let url = request.url().clone(); + let method = request.method().clone(); + + debug!(?method, ?url, "Sending request"); + match client.execute(request).await { + | Ok(response) => handle_response::(dest, actual, &method, &url, response).await, + | Err(error) => + Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), + } +} + +#[implement(super::Service)] +fn prepare(&self, dest: &ServerName, mut request: http::Request>) -> Result { + self.sign_request(&mut request, dest); + + let request = Request::try_from(request)?; + 
self.validate_url(request.url())?; + self.services.server.check_running()?; + + Ok(request) +} + +#[implement(super::Service)] +fn validate_url(&self, url: &Url) -> Result<()> { + if let Some(url_host) = url.host_str() { + if let Ok(ip) = IPAddress::parse(url_host) { + trace!("Checking request URL IP {ip:?}"); + self.services.resolver.validate_ip(&ip)?; } } - fn prepare(&self, dest: &ServerName, mut request: http::Request>) -> Result { - self.sign_request(&mut request, dest); - - let request = Request::try_from(request)?; - self.validate_url(request.url())?; - self.server.check_running()?; - - Ok(request) - } - - fn validate_url(&self, url: &Url) -> Result<()> { - if let Some(url_host) = url.host_str() { - if let Ok(ip) = IPAddress::parse(url_host) { - trace!("Checking request URL IP {ip:?}"); - self.services.resolver.validate_ip(&ip)?; - } - } - - Ok(()) - } + Ok(()) } async fn handle_response( @@ -195,7 +222,7 @@ fn sign_request(&self, http_request: &mut http::Request>, dest: &ServerN type Value = CanonicalJsonValue; type Object = CanonicalJsonObject; - let origin = self.services.globals.server_name(); + let origin = &self.services.server.name; let body = http_request.body(); let uri = http_request .uri() diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs new file mode 100644 index 00000000..dacdb20e --- /dev/null +++ b/src/service/federation/mod.rs @@ -0,0 +1,33 @@ +mod execute; + +use std::sync::Arc; + +use conduwuit::{Result, Server}; + +use crate::{client, resolver, server_keys, Dep}; + +pub struct Service { + services: Services, +} + +struct Services { + server: Arc, + client: Dep, + resolver: Dep, + server_keys: Dep, +} + +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + services: Services { + server: args.server.clone(), + client: args.depend::("client"), + resolver: args.depend::("resolver"), + server_keys: args.depend::("server_keys"), + }, + })) + } + + fn name(&self) -> &str 
{ crate::service::make_name(std::module_path!()) } +} diff --git a/src/service/mod.rs b/src/service/mod.rs index 789994d3..2102921f 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -10,6 +10,7 @@ pub mod admin; pub mod appservice; pub mod client; pub mod emergency; +pub mod federation; pub mod globals; pub mod key_backups; pub mod media; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index e52bfb25..80bca112 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -1,7 +1,6 @@ mod appservice; mod data; mod dest; -mod send; mod sender; use std::{ @@ -30,8 +29,8 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, client, globals, presence, pusher, resolver, rooms, rooms::timeline::RawPduId, - server_keys, users, Dep, + account_data, client, federation, globals, presence, pusher, rooms, + rooms::timeline::RawPduId, users, Dep, }; pub struct Service { @@ -44,7 +43,6 @@ pub struct Service { struct Services { client: Dep, globals: Dep, - resolver: Dep, state: Dep, state_cache: Dep, user: Dep, @@ -55,7 +53,7 @@ struct Services { account_data: Dep, appservice: Dep, pusher: Dep, - server_keys: Dep, + federation: Dep, } #[derive(Clone, Debug, PartialEq, Eq)] @@ -83,7 +81,6 @@ impl crate::Service for Service { services: Services { client: args.depend::("client"), globals: args.depend::("globals"), - resolver: args.depend::("resolver"), state: args.depend::("rooms::state"), state_cache: args.depend::("rooms::state_cache"), user: args.depend::("rooms::user"), @@ -94,7 +91,7 @@ impl crate::Service for Service { account_data: args.depend::("account_data"), appservice: args.depend::("appservice"), pusher: args.depend::("pusher"), - server_keys: args.depend::("server_keys"), + federation: args.depend::("federation"), }, channels: (0..num_senders).map(|_| loole::unbounded()).collect(), })) @@ -277,7 +274,7 @@ impl Service { } /// Sends a request to a federation server - #[tracing::instrument(skip_all, 
name = "request", level = "debug")] + #[inline] pub async fn send_federation_request( &self, dest: &ServerName, @@ -286,12 +283,11 @@ impl Service { where T: OutgoingRequest + Debug + Send, { - let client = &self.services.client.federation; - self.send(client, dest, request).await + self.services.federation.execute(dest, request).await } /// Like send_federation_request() but with a very large timeout - #[tracing::instrument(skip_all, name = "synapse", level = "debug")] + #[inline] pub async fn send_synapse_request( &self, dest: &ServerName, @@ -300,8 +296,10 @@ impl Service { where T: OutgoingRequest + Debug + Send, { - let client = &self.services.client.synapse; - self.send(client, dest, request).await + self.services + .federation + .execute_synapse(dest, request) + .await } /// Sends a request to an appservice diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 122e75c5..c91e1d31 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -858,7 +858,7 @@ impl Service { }; let client = &self.services.client.sender; - self.send(client, &server, request) + self.services.federation.execute_on(client, &server, request) .await .inspect(|response| { response diff --git a/src/service/services.rs b/src/service/services.rs index 1aa87f58..cb5cc12f 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -10,7 +10,7 @@ use database::Database; use tokio::sync::Mutex; use crate::{ - account_data, admin, appservice, client, emergency, globals, key_backups, + account_data, admin, appservice, client, emergency, federation, globals, key_backups, manager::Manager, media, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, @@ -30,6 +30,7 @@ pub struct Services { pub pusher: Arc, pub resolver: Arc, pub rooms: rooms::Service, + pub federation: Arc, pub sending: Arc, pub server_keys: Arc, pub sync: Arc, @@ -95,6 +96,7 @@ impl Services { typing: 
build!(rooms::typing::Service), user: build!(rooms::user::Service), }, + federation: build!(federation::Service), sending: build!(sending::Service), server_keys: build!(server_keys::Service), sync: build!(sync::Service), From d0b4a619af08030a28f3a445fb8031bafc3cf90a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 03:30:34 +0000 Subject: [PATCH 0600/1248] furnish batch interface with trait Signed-off-by: Jason Volk --- src/database/map.rs | 3 ++ src/database/map/get.rs | 52 +--------------------- src/database/map/get_batch.rs | 45 ++++++++----------- src/database/map/qry.rs | 54 +++++++++++++++++++++++ src/database/map/qry_batch.rs | 63 +++++++++++++++++++++++++++ src/database/mod.rs | 2 +- src/service/rooms/lazy_loading/mod.rs | 10 +++-- src/service/rooms/short/mod.rs | 19 ++++---- 8 files changed, 155 insertions(+), 93 deletions(-) create mode 100644 src/database/map/qry.rs create mode 100644 src/database/map/qry_batch.rs diff --git a/src/database/map.rs b/src/database/map.rs index 97e90659..5176c529 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -9,6 +9,8 @@ mod keys_from; mod keys_prefix; mod open; mod options; +mod qry; +mod qry_batch; mod remove; mod rev_keys; mod rev_keys_from; @@ -37,6 +39,7 @@ pub(crate) use self::options::{ cache_iter_options_default, cache_read_options_default, iter_options_default, read_options_default, write_options_default, }; +pub use self::{get_batch::Get, qry_batch::Qry}; use crate::{watchers::Watchers, Engine}; pub struct Map { diff --git a/src/database/map/get.rs b/src/database/map/get.rs index 73182042..d6c65be2 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,65 +1,15 @@ -use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; +use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; use futures::{future::ready, Future, FutureExt, TryFutureExt}; use rocksdb::{DBPinnableSlice, 
ReadOptions}; -use serde::Serialize; use tokio::task; use crate::{ - keyval::KeyBuf, - ser, util::{is_incomplete, map_err, or_else}, Handle, }; -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into an allocated buffer to perform -/// the query. -#[implement(super::Map)] -#[inline] -pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send -where - K: Serialize + ?Sized + Debug, -{ - let mut buf = KeyBuf::new(); - self.bqry(key, &mut buf) -} - -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into a fixed-sized buffer to perform -/// the query. The maximum size is supplied as const generic parameter. -#[implement(super::Map)] -#[inline] -pub fn aqry( - self: &Arc, - key: &K, -) -> impl Future>> + Send -where - K: Serialize + ?Sized + Debug, -{ - let mut buf = ArrayVec::::new(); - self.bqry(key, &mut buf) -} - -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into a user-supplied Writer. -#[implement(super::Map)] -#[tracing::instrument(skip(self, buf), level = "trace")] -pub fn bqry( - self: &Arc, - key: &K, - buf: &mut B, -) -> impl Future>> + Send -where - K: Serialize + ?Sized + Debug, - B: Write + AsRef<[u8]>, -{ - let key = ser::serialize(buf, key).expect("failed to serialize query key"); - self.get(key) -} - /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. 
#[implement(super::Map)] diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index ee9269e3..ab9c1dc8 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,4 +1,4 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; +use std::{convert::AsRef, sync::Arc}; use conduwuit::{ implement, @@ -10,43 +10,34 @@ use conduwuit::{ }; use futures::{Stream, StreamExt, TryStreamExt}; use rocksdb::{DBPinnableSlice, ReadOptions}; -use serde::Serialize; use super::get::{cached_handle_from, handle_from}; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::Handle; -#[implement(super::Map)] -#[tracing::instrument(skip(self, keys), level = "trace")] -pub fn qry_batch<'a, S, K>( - self: &'a Arc, - keys: S, -) -> impl Stream>> + Send + 'a +pub trait Get<'a, K, S> where + Self: Sized, S: Stream + Send + 'a, - K: Serialize + Debug + 'a, + K: AsRef<[u8]> + Send + Sync + 'a, { - use crate::pool::Get; + fn get(self, map: &'a Arc) -> impl Stream>> + Send + 'a; +} - keys.ready_chunks(automatic_amplification()) - .widen_then(automatic_width(), |chunk| { - let keys = chunk - .iter() - .map(ser::serialize_to::) - .map(|result| result.expect("failed to serialize query key")) - .map(Into::into) - .collect(); - - self.db - .pool - .execute_get(Get { map: self.clone(), key: keys, res: None }) - }) - .map_ok(|results| results.into_iter().stream()) - .try_flatten() +impl<'a, K, S> Get<'a, K, S> for S +where + Self: Sized, + S: Stream + Send + 'a, + K: AsRef<[u8]> + Send + Sync + 'a, +{ + #[inline] + fn get(self, map: &'a Arc) -> impl Stream>> + Send + 'a { + map.get_batch(self) + } } #[implement(super::Map)] #[tracing::instrument(skip(self, keys), level = "trace")] -pub fn get_batch<'a, S, K>( +pub(crate) fn get_batch<'a, S, K>( self: &'a Arc, keys: S, ) -> impl Stream>> + Send + 'a diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs new file mode 100644 index 00000000..401eba43 --- /dev/null +++ b/src/database/map/qry.rs @@ -0,0 +1,54 
@@ +use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; + +use arrayvec::ArrayVec; +use conduwuit::{implement, Result}; +use futures::Future; +use serde::Serialize; + +use crate::{keyval::KeyBuf, ser, Handle}; + +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into an allocated buffer to perform +/// the query. +#[implement(super::Map)] +#[inline] +pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = KeyBuf::new(); + self.bqry(key, &mut buf) +} + +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into a fixed-sized buffer to perform +/// the query. The maximum size is supplied as const generic parameter. +#[implement(super::Map)] +#[inline] +pub fn aqry( + self: &Arc, + key: &K, +) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, +{ + let mut buf = ArrayVec::::new(); + self.bqry(key, &mut buf) +} + +/// Fetch a value from the database into cache, returning a reference-handle +/// asynchronously. The key is serialized into a user-supplied Writer. 
+#[implement(super::Map)] +#[tracing::instrument(skip(self, buf), level = "trace")] +pub fn bqry( + self: &Arc, + key: &K, + buf: &mut B, +) -> impl Future>> + Send +where + K: Serialize + ?Sized + Debug, + B: Write + AsRef<[u8]>, +{ + let key = ser::serialize(buf, key).expect("failed to serialize query key"); + self.get(key) +} diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs new file mode 100644 index 00000000..31817c48 --- /dev/null +++ b/src/database/map/qry_batch.rs @@ -0,0 +1,63 @@ +use std::{fmt::Debug, sync::Arc}; + +use conduwuit::{ + implement, + utils::{ + stream::{automatic_amplification, automatic_width, WidebandExt}, + IterStream, + }, + Result, +}; +use futures::{Stream, StreamExt, TryStreamExt}; +use serde::Serialize; + +use crate::{keyval::KeyBuf, ser, Handle}; + +pub trait Qry<'a, K, S> +where + S: Stream + Send + 'a, + K: Serialize + Debug, +{ + fn qry(self, map: &'a Arc) -> impl Stream>> + Send + 'a; +} + +impl<'a, K, S> Qry<'a, K, S> for S +where + Self: 'a, + S: Stream + Send + 'a, + K: Serialize + Debug + 'a, +{ + #[inline] + fn qry(self, map: &'a Arc) -> impl Stream>> + Send + 'a { + map.qry_batch(self) + } +} + +#[implement(super::Map)] +#[tracing::instrument(skip(self, keys), level = "trace")] +pub(crate) fn qry_batch<'a, S, K>( + self: &'a Arc, + keys: S, +) -> impl Stream>> + Send + 'a +where + S: Stream + Send + 'a, + K: Serialize + Debug + 'a, +{ + use crate::pool::Get; + + keys.ready_chunks(automatic_amplification()) + .widen_then(automatic_width(), |chunk| { + let keys = chunk + .iter() + .map(ser::serialize_to::) + .map(|result| result.expect("failed to serialize query key")) + .map(Into::into) + .collect(); + + self.db + .pool + .execute_get(Get { map: self.clone(), key: keys, res: None }) + }) + .map_ok(|results| results.into_iter().stream()) + .try_flatten() +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 42b7f5e3..4f8e2ad9 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ 
-30,7 +30,7 @@ pub use self::{ deserialized::Deserialized, handle::Handle, keyval::{serialize_key, serialize_val, KeyVal, Slice}, - map::{compact, Map}, + map::{compact, Get, Map, Qry}, ser::{serialize, serialize_to, serialize_to_vec, Cbor, Interfix, Json, Separator, SEP}, }; pub(crate) use self::{ diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 67274ff1..a6e00271 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -7,7 +7,7 @@ use conduwuit::{ utils::{stream::TryIgnore, IterStream, ReadyExt}, Result, }; -use database::{Database, Deserialized, Handle, Interfix, Map}; +use database::{Database, Deserialized, Handle, Interfix, Map, Qry}; use futures::{pin_mut, Stream, StreamExt}; use ruma::{api::client::filter::LazyLoadOptions, DeviceId, OwnedUserId, RoomId, UserId}; @@ -115,9 +115,11 @@ where let make_key = |sender: &'a UserId| -> Key<'a> { (ctx.user_id, ctx.device_id, ctx.room_id, sender) }; - self.db - .lazyloadedids - .qry_batch(senders.clone().stream().map(make_key)) + senders + .clone() + .stream() + .map(make_key) + .qry(&self.db.lazyloadedids) .map(into_status) .zip(senders.stream()) .map(move |(status, sender)| { diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 4a591592..dd586d02 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -2,7 +2,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId}; use conduwuit::{err, implement, utils, utils::IterStream, Result}; -use database::{Deserialized, Map}; +use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; use serde::Deserialize; @@ -67,9 +67,10 @@ pub fn multi_get_or_create_shorteventid<'a, I>( where I: Iterator + Clone + Debug + Send + 'a, { - self.db - .eventid_shorteventid - 
.get_batch(event_ids.clone().stream()) + event_ids + .clone() + .stream() + .get(&self.db.eventid_shorteventid) .zip(event_ids.into_iter().stream()) .map(|(result, event_id)| match result { | Ok(ref short) => utils::u64_from_u8(short), @@ -171,9 +172,8 @@ where Id: for<'de> Deserialize<'de> + Sized + ToOwned + 'a, ::Owned: Borrow, { - self.db - .shorteventid_eventid - .qry_batch(shorteventid) + shorteventid + .qry(&self.db.shorteventid_eventid) .map(Deserialized::deserialized) } @@ -204,9 +204,8 @@ pub fn multi_get_statekey_from_short<'a, S>( where S: Stream + Send + 'a, { - self.db - .shortstatekey_statekey - .qry_batch(shortstatekey) + shortstatekey + .qry(&self.db.shortstatekey_statekey) .map(Deserialized::deserialized) } From 6db8df5e232fad8c1e229194e6336b9f268b13c1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 04:26:40 +0000 Subject: [PATCH 0601/1248] skip redundant acl check when sender is origin Signed-off-by: Jason Volk --- src/service/rooms/event_handler/handle_incoming_pdu.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 4e6f0b0c..94d4bcc7 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -79,7 +79,9 @@ pub async fn handle_incoming_pdu<'a>( .try_into() .map_err(|e| err!(Request(InvalidParam("PDU does not have a valid sender key: {e}"))))?; - self.acl_check(sender.server_name(), room_id).await?; + if sender.server_name() != origin { + self.acl_check(sender.server_name(), room_id).await?; + } // Fetch create event let create_event = self From 13335042b715b8279cd9adbaa6425a11eb7d3e64 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 04:47:07 +0000 Subject: [PATCH 0602/1248] enable the paranoid-checks options in debug mode Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 7 +++++++ 1 file 
changed, 7 insertions(+) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 382bc169..83bce08c 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -72,6 +72,13 @@ fn descriptor_cf_options( opts.set_options_from_string("{{arena_block_size=2097152;}}") .map_err(map_err)?; + #[cfg(debug_assertions)] + opts.set_options_from_string( + "{{paranoid_checks=true;paranoid_file_checks=true;force_consistency_checks=true;\ + verify_sst_unique_id_in_manifest=true;}}", + ) + .map_err(map_err)?; + Ok(opts) } From 98f95705478dbe60a56206dd2ae9057f602040ea Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 07:05:00 +0000 Subject: [PATCH 0603/1248] add option to disable rocksdb checksums reference runtime state for default option initialization Signed-off-by: Jason Volk --- conduwuit-example.toml | 7 ++++ src/core/config/mod.rs | 9 ++++++ src/database/engine.rs | 1 + src/database/engine/open.rs | 1 + src/database/map.rs | 6 ++-- src/database/map/keys.rs | 2 +- src/database/map/keys_from.rs | 2 +- src/database/map/options.rs | 50 +++++++++++++++++------------ src/database/map/rev_keys.rs | 2 +- src/database/map/rev_keys_from.rs | 2 +- src/database/map/rev_stream.rs | 4 +-- src/database/map/rev_stream_from.rs | 4 +-- src/database/map/stream.rs | 4 +-- src/database/map/stream_from.rs | 4 +-- 14 files changed, 62 insertions(+), 36 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3ecc1628..51d948e8 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -897,6 +897,13 @@ # #rocksdb_paranoid_file_checks = false +# Enables or disables checksum verification in rocksdb at runtime. +# Checksums are usually hardware accelerated with low overhead; they are +# enabled in rocksdb by default. Older or slower platforms may see gains +# from disabling. +# +#rocksdb_checksums = true + # Database repair mode (for RocksDB SST corruption). 
# # Use this option when the server reports corruption while running or diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 133f0887..94788fa4 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1049,6 +1049,15 @@ pub struct Config { #[serde(default)] pub rocksdb_paranoid_file_checks: bool, + /// Enables or disables checksum verification in rocksdb at runtime. + /// Checksums are usually hardware accelerated with low overhead; they are + /// enabled in rocksdb by default. Older or slower platforms may see gains + /// from disabling. + /// + /// default: true + #[serde(default = "true_fn")] + pub rocksdb_checksums: bool, + /// Database repair mode (for RocksDB SST corruption). /// /// Use this option when the server reports corruption while running or diff --git a/src/database/engine.rs b/src/database/engine.rs index 76b2889b..be3d62cf 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -32,6 +32,7 @@ use crate::{ pub struct Engine { pub(super) read_only: bool, pub(super) secondary: bool, + pub(crate) checksums: bool, corks: AtomicU32, pub(crate) db: Db, pub(crate) pool: Arc, diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 6a801878..ad724765 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -58,6 +58,7 @@ pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_fwd(None); diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index b83775ac..76c76325 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -53,7 +53,7 @@ where { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = 
stream::State::new(self, opts); if is_cached(self, from) { return stream::Keys::<'_>::from(state.init_fwd(from.as_ref().into())).boxed(); diff --git a/src/database/map/options.rs b/src/database/map/options.rs index f726036d..9e2ad898 100644 --- a/src/database/map/options.rs +++ b/src/database/map/options.rs @@ -1,35 +1,43 @@ +use std::sync::Arc; + use rocksdb::{ReadOptions, ReadTier, WriteOptions}; -#[inline] -pub(crate) fn iter_options_default() -> ReadOptions { - let mut options = read_options_default(); - options.set_background_purge_on_iterator_cleanup(true); - //options.set_pin_data(true); - options -} +use crate::Engine; #[inline] -pub(crate) fn cache_iter_options_default() -> ReadOptions { - let mut options = cache_read_options_default(); - options.set_background_purge_on_iterator_cleanup(true); - //options.set_pin_data(true); - options -} - -#[inline] -pub(crate) fn cache_read_options_default() -> ReadOptions { - let mut options = read_options_default(); +pub(crate) fn cache_iter_options_default(db: &Arc) -> ReadOptions { + let mut options = iter_options_default(db); options.set_read_tier(ReadTier::BlockCache); options.fill_cache(false); options } #[inline] -pub(crate) fn read_options_default() -> ReadOptions { - let mut options = ReadOptions::default(); - options.set_total_order_seek(true); +pub(crate) fn iter_options_default(db: &Arc) -> ReadOptions { + let mut options = read_options_default(db); + options.set_background_purge_on_iterator_cleanup(true); options } #[inline] -pub(crate) fn write_options_default() -> WriteOptions { WriteOptions::default() } +pub(crate) fn cache_read_options_default(db: &Arc) -> ReadOptions { + let mut options = read_options_default(db); + options.set_read_tier(ReadTier::BlockCache); + options.fill_cache(false); + options +} + +#[inline] +pub(crate) fn read_options_default(db: &Arc) -> ReadOptions { + let mut options = ReadOptions::default(); + options.set_total_order_seek(true); + + if !db.checksums { + 
options.set_verify_checksums(false); + } + + options +} + +#[inline] +pub(crate) fn write_options_default(_db: &Arc) -> WriteOptions { WriteOptions::default() } diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index a559d04b..21558a17 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -22,7 +22,7 @@ where pub fn rev_raw_keys(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_rev(None); diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index 5b159195..65072337 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -61,7 +61,7 @@ where { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self, from) { return stream::KeysRev::<'_>::from(state.init_rev(from.as_ref().into())).boxed(); diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index 56b20b9b..f55053be 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -31,7 +31,7 @@ where pub fn rev_raw_stream(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_rev(None); @@ -66,7 +66,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> fields(%map), )] pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_iter_options_default(); + let opts = super::cache_iter_options_default(&map.db); let state = stream::State::new(map, opts).init_rev(None); !state.is_incomplete() diff --git a/src/database/map/rev_stream_from.rs 
b/src/database/map/rev_stream_from.rs index 83832bdd..ddc98607 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -80,7 +80,7 @@ where { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self, from) { let state = state.init_rev(from.as_ref().into()); @@ -118,7 +118,7 @@ pub(super) fn is_cached

    (map: &Arc, from: &P) -> bool where P: AsRef<[u8]> + ?Sized, { - let cache_opts = super::cache_iter_options_default(); + let cache_opts = super::cache_iter_options_default(&map.db); let cache_status = stream::State::new(map, cache_opts) .init_rev(from.as_ref().into()) .status(); diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index f1b5fdc3..bfc8ba04 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -30,7 +30,7 @@ where pub fn raw_stream(self: &Arc) -> impl Stream>> + Send { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self) { let state = state.init_fwd(None); @@ -65,7 +65,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + fields(%map), )] pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_iter_options_default(); + let opts = super::cache_iter_options_default(&map.db); let state = stream::State::new(map, opts).init_fwd(None); !state.is_incomplete() diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 562ab6b1..74140a65 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -77,7 +77,7 @@ where { use crate::pool::Seek; - let opts = super::iter_options_default(); + let opts = super::iter_options_default(&self.db); let state = stream::State::new(self, opts); if is_cached(self, from) { let state = state.init_fwd(from.as_ref().into()); @@ -115,7 +115,7 @@ pub(super) fn is_cached

    (map: &Arc, from: &P) -> bool where P: AsRef<[u8]> + ?Sized, { - let opts = super::cache_iter_options_default(); + let opts = super::cache_iter_options_default(&map.db); let state = stream::State::new(map, opts).init_fwd(from.as_ref().into()); !state.is_incomplete() From 2b730a30ad8f6fc0b188f79f7c45746ba46eff52 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 09:59:59 +0000 Subject: [PATCH 0604/1248] add broad_flat_map Signed-off-by: Jason Volk --- src/core/utils/stream/broadband.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs index 6d1ff6fe..282008e7 100644 --- a/src/core/utils/stream/broadband.rs +++ b/src/core/utils/stream/broadband.rs @@ -35,6 +35,13 @@ where Fut: Future> + Send, U: Send; + fn broadn_flat_map(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Stream + Send + Unpin, + U: Send; + fn broadn_then(self, n: N, f: F) -> impl Stream + Send where N: Into>, @@ -70,6 +77,16 @@ where self.broadn_filter_map(None, f) } + #[inline] + fn broad_flat_map(self, f: F) -> impl Stream + Send + where + F: Fn(Item) -> Fut + Send, + Fut: Stream + Send + Unpin, + U: Send, + { + self.broadn_flat_map(None, f) + } + #[inline] fn broad_then(self, f: F) -> impl Stream + Send where @@ -122,6 +139,17 @@ where .ready_filter_map(identity) } + #[inline] + fn broadn_flat_map(self, n: N, f: F) -> impl Stream + Send + where + N: Into>, + F: Fn(Item) -> Fut + Send, + Fut: Stream + Send + Unpin, + U: Send, + { + self.flat_map_unordered(n.into().unwrap_or_else(automatic_width), f) + } + #[inline] fn broadn_then(self, n: N, f: F) -> impl Stream + Send where From 677316631a029fdc23fb48092a1af14284e26448 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 06:15:01 +0000 Subject: [PATCH 0605/1248] pipeline prologue of handle_incoming_pdu simplify room_version/first_pdu_in_room argument passing Signed-off-by: 
Jason Volk --- .../fetch_and_handle_outliers.rs | 13 ++-- src/service/rooms/event_handler/fetch_prev.rs | 17 ++--- .../rooms/event_handler/fetch_state.rs | 5 +- .../event_handler/handle_incoming_pdu.rs | 72 +++++++++++-------- .../rooms/event_handler/handle_outlier_pdu.rs | 1 - .../rooms/event_handler/handle_prev_pdu.rs | 8 +-- .../event_handler/upgrade_outlier_pdu.rs | 2 +- 7 files changed, 62 insertions(+), 56 deletions(-) diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 84d0edd0..540ebb64 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -10,10 +10,11 @@ use conduwuit::{ }; use futures::TryFutureExt; use ruma::{ - api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId, - ServerName, + api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, ServerName, }; +use super::get_room_version_id; + /// Find the event and auth it. Once the event is validated (steps 1 - 8) /// it is appended to the outliers Tree. 
/// @@ -30,7 +31,6 @@ pub(super) async fn fetch_and_handle_outliers<'a>( events: &'a [OwnedEventId], create_event: &'a PduEvent, room_id: &'a RoomId, - room_version_id: &'a RoomVersionId, ) -> Vec<(Arc, Option>)> { let back_off = |id| match self .services @@ -113,8 +113,13 @@ pub(super) async fn fetch_and_handle_outliers<'a>( { | Ok(res) => { debug!("Got {next_id} over federation"); + let Ok(room_version_id) = get_room_version_id(create_event) else { + back_off((*next_id).to_owned()); + continue; + }; + let Ok((calculated_event_id, value)) = - pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) + pdu::gen_event_id_canonical_json(&res.pdu, &room_version_id) else { back_off((*next_id).to_owned()); continue; diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 5966aeba..aea70739 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -8,8 +8,7 @@ use futures::{future, FutureExt}; use ruma::{ int, state_res::{self}, - uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, - ServerName, + uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, }; use super::check_room_id; @@ -26,7 +25,7 @@ pub(super) async fn fetch_prev( origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, - room_version_id: &RoomVersionId, + first_ts_in_room: UInt, initial_set: Vec, ) -> Result<( Vec, @@ -36,21 +35,13 @@ pub(super) async fn fetch_prev( let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: VecDeque = initial_set.into(); - let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; - let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop_front() { self.services.server.check_running()?; if let Some((pdu, mut json_opt)) = self - .fetch_and_handle_outliers( - origin, - &[prev_event_id.clone()], - create_event, - room_id, - 
room_version_id, - ) + .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id) .boxed() .await .pop() @@ -74,7 +65,7 @@ pub(super) async fn fetch_prev( } if let Some(json) = json_opt { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + if pdu.origin_server_ts > first_ts_in_room { amount = amount.saturating_add(1); for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 0892655e..4f2580db 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -4,7 +4,7 @@ use conduwuit::{debug, debug_warn, implement, Err, Error, PduEvent, Result}; use futures::FutureExt; use ruma::{ api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId, - RoomId, RoomVersionId, ServerName, + RoomId, ServerName, }; use crate::rooms::short::ShortStateKey; @@ -23,7 +23,6 @@ pub(super) async fn fetch_state( origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, - room_version_id: &RoomVersionId, event_id: &EventId, ) -> Result>> { let res = self @@ -38,7 +37,7 @@ pub(super) async fn fetch_state( debug!("Fetching state events"); let state_vec = self - .fetch_and_handle_outliers(origin, &res.pdu_ids, create_event, room_id, room_version_id) + .fetch_and_handle_outliers(origin, &res.pdu_ids, create_event, room_id) .boxed() .await; diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 94d4bcc7..7db71961 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -1,14 +1,15 @@ use std::{ collections::{hash_map, BTreeMap}, - sync::Arc, time::Instant, }; use conduwuit::{debug, err, implement, warn, Err, Result}; -use futures::{FutureExt, TryFutureExt}; +use futures::{ + 
future::{try_join5, OptionFuture}, + FutureExt, +}; use ruma::{events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId}; -use super::{check_room_id, get_room_version_id}; use crate::rooms::timeline::RawPduId; /// When receiving an event one needs to: @@ -59,19 +60,13 @@ pub async fn handle_incoming_pdu<'a>( } // 1.1 Check the server is in the room - if !self.services.metadata.exists(room_id).await { - return Err!(Request(NotFound("Room is unknown to this server"))); - } + let meta_exists = self.services.metadata.exists(room_id).map(Ok); // 1.2 Check if the room is disabled - if self.services.metadata.is_disabled(room_id).await { - return Err!(Request(Forbidden( - "Federation of this room is currently disabled on this server." - ))); - } + let is_disabled = self.services.metadata.is_disabled(room_id).map(Ok); // 1.3.1 Check room ACL on origin field/server - self.acl_check(origin, room_id).await?; + let origin_acl_check = self.acl_check(origin, room_id); // 1.3.2 Check room ACL on sender's server name let sender: &UserId = value @@ -79,36 +74,53 @@ pub async fn handle_incoming_pdu<'a>( .try_into() .map_err(|e| err!(Request(InvalidParam("PDU does not have a valid sender key: {e}"))))?; - if sender.server_name() != origin { - self.acl_check(sender.server_name(), room_id).await?; - } + let sender_acl_check: OptionFuture<_> = sender + .server_name() + .ne(origin) + .then(|| self.acl_check(sender.server_name(), room_id)) + .into(); // Fetch create event - let create_event = self - .services - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .map_ok(Arc::new) - .await?; + let create_event = + self.services + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, ""); - // Procure the room version - let room_version_id = get_room_version_id(&create_event)?; + let (meta_exists, is_disabled, (), (), create_event) = try_join5( + meta_exists, + is_disabled, + origin_acl_check, + sender_acl_check.map(|o| 
o.unwrap_or(Ok(()))), + create_event, + ) + .await?; - let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?; + if !meta_exists { + return Err!(Request(NotFound("Room is unknown to this server"))); + } + + if is_disabled { + return Err!(Request(Forbidden("Federation of this room is disabled by this server."))); + } let (incoming_pdu, val) = self .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) - .boxed() .await?; - check_room_id(room_id, &incoming_pdu)?; - // 8. if not timeline event: stop if !is_timeline_event { return Ok(None); } + // Skip old events - if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + let first_ts_in_room = self + .services + .timeline + .first_pdu_in_room(room_id) + .await? + .origin_server_ts; + + if incoming_pdu.origin_server_ts < first_ts_in_room { return Ok(None); } @@ -119,7 +131,7 @@ pub async fn handle_incoming_pdu<'a>( origin, &create_event, room_id, - &room_version_id, + first_ts_in_room, incoming_pdu.prev_events.clone(), ) .await?; @@ -134,7 +146,7 @@ pub async fn handle_incoming_pdu<'a>( room_id, &mut eventid_info, &create_event, - &first_pdu_in_room, + first_ts_in_room, &prev_id, ) .await diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 3ad73295..a35aabe0 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -84,7 +84,6 @@ pub(super) async fn handle_outlier_pdu<'a>( &incoming_pdu.auth_events, create_event, room_id, - &room_version_id, )) .await; } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 2bec4eba..32ab505f 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -7,7 +7,7 @@ use std::{ use conduwuit::{ debug, implement, 
utils::continue_exponential_backoff_secs, Err, PduEvent, Result, }; -use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName}; +use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; #[implement(super::Service)] #[allow(clippy::type_complexity)] @@ -27,8 +27,8 @@ pub(super) async fn handle_prev_pdu<'a>( OwnedEventId, (Arc, BTreeMap), >, - create_event: &Arc, - first_pdu_in_room: &PduEvent, + create_event: &PduEvent, + first_ts_in_room: UInt, prev_id: &EventId, ) -> Result { // Check for disabled again because it might have changed @@ -62,7 +62,7 @@ pub(super) async fn handle_prev_pdu<'a>( if let Some((pdu, json)) = eventid_info.remove(prev_id) { // Skip old events - if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + if pdu.origin_server_ts < first_ts_in_room { return Ok(()); } diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 8adf4246..f0c8f0c5 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -63,7 +63,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( if state_at_incoming_event.is_none() { state_at_incoming_event = self - .fetch_state(origin, create_event, room_id, &room_version_id, &incoming_pdu.event_id) + .fetch_state(origin, create_event, room_id, &incoming_pdu.event_id) .await?; } From 94d786ac12890306be5d0577bc3fcc8f6b856558 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 10:43:53 +0000 Subject: [PATCH 0606/1248] process rooms and edus concurrently Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/api/Cargo.toml | 1 + src/api/server/send.rs | 241 ++++++++++-------- .../rooms/event_handler/parse_incoming_pdu.rs | 9 +- src/service/rooms/timeline/mod.rs | 2 +- 5 files changed, 142 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5848cc46..3a435a10 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-685,6 +685,7 @@ dependencies = [ "http-body-util", "hyper", "ipaddress", + "itertools 0.13.0", "log", "rand", "reqwest", diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 1b463fbc..385e786f 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -50,6 +50,7 @@ http.workspace = true http-body-util.workspace = true hyper.workspace = true ipaddress.workspace = true +itertools.workspace = true log.workspace = true rand.workspace = true reqwest.workspace = true diff --git a/src/api/server/send.rs b/src/api/server/send.rs index eec9bd11..016f5194 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -3,10 +3,17 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, debug_warn, err, error, result::LogErr, trace, utils::ReadyExt, warn, Err, Error, - Result, + debug, debug_warn, err, error, + result::LogErr, + trace, + utils::{ + stream::{automatic_width, BroadbandExt, TryBroadbandExt}, + IterStream, ReadyExt, + }, + warn, Err, Error, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; +use itertools::Itertools; use ruma::{ api::{ client::error::ErrorKind, @@ -19,11 +26,9 @@ use ruma::{ }, }, events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, - serde::Raw, to_device::DeviceIdOrAllDevices, - OwnedEventId, ServerName, + CanonicalJsonObject, OwnedEventId, OwnedRoomId, ServerName, }; -use serde_json::value::RawValue as RawJsonValue; use service::{ sending::{EDU_LIMIT, PDU_LIMIT}, Services, @@ -34,7 +39,8 @@ use crate::{ Ruma, }; -type ResolvedMap = BTreeMap>; +type ResolvedMap = BTreeMap; +type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); /// # `PUT /_matrix/federation/v1/send/{txnId}` /// @@ -73,91 +79,41 @@ pub(crate) async fn send_transaction_message_route( let txn_start_time = Instant::now(); trace!( - pdus = ?body.pdus.len(), - edus = 
?body.edus.len(), + pdus = body.pdus.len(), + edus = body.edus.len(), elapsed = ?txn_start_time.elapsed(), id = ?body.transaction_id, origin =?body.origin(), "Starting txn", ); - let resolved_map = - handle_pdus(&services, &client, &body.pdus, body.origin(), &txn_start_time) - .boxed() - .await?; + let pdus = body + .pdus + .iter() + .stream() + .broad_then(|pdu| services.rooms.event_handler.parse_incoming_pdu(pdu)) + .inspect_err(|e| debug_warn!("Could not parse PDU: {e}")) + .ready_filter_map(Result::ok); - handle_edus(&services, &client, &body.edus, body.origin()) - .boxed() - .await; + let edus = body + .edus + .iter() + .map(|edu| edu.json().get()) + .map(serde_json::from_str) + .filter_map(Result::ok) + .stream(); + + let results = handle(&services, &client, body.origin(), txn_start_time, pdus, edus).await?; debug!( - pdus = ?body.pdus.len(), - edus = ?body.edus.len(), + pdus = body.pdus.len(), + edus = body.edus.len(), elapsed = ?txn_start_time.elapsed(), id = ?body.transaction_id, origin =?body.origin(), "Finished txn", ); - - Ok(send_transaction_message::v1::Response { - pdus: resolved_map - .into_iter() - .map(|(e, r)| (e, r.map_err(error::sanitized_message))) - .collect(), - }) -} - -async fn handle_pdus( - services: &Services, - _client: &IpAddr, - pdus: &[Box], - origin: &ServerName, - txn_start_time: &Instant, -) -> Result { - let mut parsed_pdus = Vec::with_capacity(pdus.len()); - for pdu in pdus { - parsed_pdus.push(match services.rooms.event_handler.parse_incoming_pdu(pdu).await { - | Ok(t) => t, - | Err(e) => { - debug_warn!("Could not parse PDU: {e}"); - continue; - }, - }); - - // We do not add the event_id field to the pdu here because of signature - // and hashes checks - } - - let mut resolved_map = BTreeMap::new(); - for (event_id, value, room_id) in parsed_pdus { - services.server.check_running()?; - let pdu_start_time = Instant::now(); - let mutex_lock = services - .rooms - .event_handler - .mutex_federation - .lock(&room_id) - .await; - - 
let result = services - .rooms - .event_handler - .handle_incoming_pdu(origin, &room_id, &event_id, value, true) - .boxed() - .await - .map(|_| ()); - - drop(mutex_lock); - debug!( - pdu_elapsed = ?pdu_start_time.elapsed(), - txn_elapsed = ?txn_start_time.elapsed(), - "Finished PDU {event_id}", - ); - - resolved_map.insert(event_id, result); - } - - for (id, result) in &resolved_map { + for (id, result) in &results { if let Err(e) = result { if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { warn!("Incoming PDU failed {id}: {e:?}"); @@ -165,39 +121,112 @@ async fn handle_pdus( } } - Ok(resolved_map) + Ok(send_transaction_message::v1::Response { + pdus: results + .into_iter() + .map(|(e, r)| (e, r.map_err(error::sanitized_message))) + .collect(), + }) } -async fn handle_edus( +async fn handle( services: &Services, client: &IpAddr, - edus: &[Raw], origin: &ServerName, -) { - for edu in edus - .iter() - .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) - { - match edu { - | Edu::Presence(presence) => { - handle_edu_presence(services, client, origin, presence).await; - }, - | Edu::Receipt(receipt) => - handle_edu_receipt(services, client, origin, receipt).await, - | Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, - | Edu::DeviceListUpdate(content) => { - handle_edu_device_list_update(services, client, origin, content).await; - }, - | Edu::DirectToDevice(content) => { - handle_edu_direct_to_device(services, client, origin, content).await; - }, - | Edu::SigningKeyUpdate(content) => { - handle_edu_signing_key_update(services, client, origin, content).await; - }, - | Edu::_Custom(ref _custom) => { - debug_warn!(?edus, "received custom/unknown EDU"); - }, - } + started: Instant, + pdus: impl Stream + Send, + edus: impl Stream + Send, +) -> Result { + // group pdus by room + let pdus = pdus + .collect() + .map(|mut pdus: Vec<_>| { + pdus.sort_by(|(room_a, ..), (room_b, ..)| room_a.cmp(room_b)); + pdus.into_iter() + 
.into_grouping_map_by(|(room_id, ..)| room_id.clone()) + .collect() + }) + .await; + + // we can evaluate rooms concurrently + let results: ResolvedMap = pdus + .into_iter() + .try_stream() + .broad_and_then(|(room_id, pdus)| { + handle_room(services, client, origin, started, room_id, pdus) + .map_ok(Vec::into_iter) + .map_ok(IterStream::try_stream) + }) + .try_flatten() + .try_collect() + .boxed() + .await?; + + // evaluate edus after pdus, at least for now. + edus.for_each_concurrent(automatic_width(), |edu| handle_edu(services, client, origin, edu)) + .boxed() + .await; + + Ok(results) +} + +async fn handle_room( + services: &Services, + _client: &IpAddr, + origin: &ServerName, + txn_start_time: Instant, + room_id: OwnedRoomId, + pdus: Vec, +) -> Result> { + let _room_lock = services + .rooms + .event_handler + .mutex_federation + .lock(&room_id) + .await; + + let mut results = Vec::with_capacity(pdus.len()); + for (_, event_id, value) in pdus { + services.server.check_running()?; + let pdu_start_time = Instant::now(); + let result = services + .rooms + .event_handler + .handle_incoming_pdu(origin, &room_id, &event_id, value, true) + .await + .map(|_| ()); + + debug!( + pdu_elapsed = ?pdu_start_time.elapsed(), + txn_elapsed = ?txn_start_time.elapsed(), + "Finished PDU {event_id}", + ); + + results.push((event_id, result)); + } + + Ok(results) +} + +async fn handle_edu(services: &Services, client: &IpAddr, origin: &ServerName, edu: Edu) { + match edu { + | Edu::Presence(presence) => { + handle_edu_presence(services, client, origin, presence).await; + }, + | Edu::Receipt(receipt) => handle_edu_receipt(services, client, origin, receipt).await, + | Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, + | Edu::DeviceListUpdate(content) => { + handle_edu_device_list_update(services, client, origin, content).await; + }, + | Edu::DirectToDevice(content) => { + handle_edu_direct_to_device(services, client, origin, content).await; + }, + | 
Edu::SigningKeyUpdate(content) => { + handle_edu_signing_key_update(services, client, origin, content).await; + }, + | Edu::_Custom(ref _custom) => { + debug_warn!(?edu, "received custom/unknown EDU"); + }, } } diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 0c11314d..9b130763 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -2,11 +2,10 @@ use conduwuit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; use serde_json::value::RawValue as RawJsonValue; +type Parsed = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); + #[implement(super::Service)] -pub async fn parse_incoming_pdu( - &self, - pdu: &RawJsonValue, -) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { +pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result { let value = serde_json::from_str::(pdu.get()).map_err(|e| { err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))) })?; @@ -28,5 +27,5 @@ pub async fn parse_incoming_pdu( err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))) })?; - Ok((event_id, value, room_id)) + Ok((room_id, event_id, value)) } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 362bfab5..bf585a6b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1166,7 +1166,7 @@ impl Service { #[tracing::instrument(skip(self, pdu), level = "debug")] pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { - let (event_id, value, room_id) = + let (room_id, event_id, value) = self.services.event_handler.parse_incoming_pdu(&pdu).await?; // Lock so we cannot backfill the same pdu twice at the same time From c516a8df3e80d38656f2251204119c720b5d96f7 Mon Sep 17 00:00:00 2001 From: 
Jason Volk Date: Sun, 26 Jan 2025 15:12:08 +0000 Subject: [PATCH 0607/1248] fanout edu processing Signed-off-by: Jason Volk --- src/api/server/send.rs | 459 ++++++++++++++++++++++++----------------- 1 file changed, 265 insertions(+), 194 deletions(-) diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 016f5194..f4903447 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -20,19 +20,22 @@ use ruma::{ federation::transactions::{ edu::{ DeviceListUpdateContent, DirectDeviceContent, Edu, PresenceContent, - ReceiptContent, SigningKeyUpdateContent, TypingContent, + PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap, SigningKeyUpdateContent, + TypingContent, }, send_transaction_message, }, }, events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, + serde::Raw, to_device::DeviceIdOrAllDevices, - CanonicalJsonObject, OwnedEventId, OwnedRoomId, ServerName, + CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, }; use service::{ sending::{EDU_LIMIT, PDU_LIMIT}, Services, }; +use utils::millis_since_unix_epoch; use crate::{ utils::{self}, @@ -152,8 +155,8 @@ async fn handle( let results: ResolvedMap = pdus .into_iter() .try_stream() - .broad_and_then(|(room_id, pdus)| { - handle_room(services, client, origin, started, room_id, pdus) + .broad_and_then(|(room_id, pdus): (_, Vec<_>)| { + handle_room(services, client, origin, started, room_id, pdus.into_iter()) .map_ok(Vec::into_iter) .map_ok(IterStream::try_stream) }) @@ -176,7 +179,7 @@ async fn handle_room( origin: &ServerName, txn_start_time: Instant, room_id: OwnedRoomId, - pdus: Vec, + pdus: impl Iterator + Send, ) -> Result> { let _room_lock = services .rooms @@ -185,48 +188,53 @@ async fn handle_room( .lock(&room_id) .await; - let mut results = Vec::with_capacity(pdus.len()); - for (_, event_id, value) in pdus { - services.server.check_running()?; - let pdu_start_time = Instant::now(); - let result = services - .rooms - .event_handler - 
.handle_incoming_pdu(origin, &room_id, &event_id, value, true) - .await - .map(|_| ()); + let room_id = &room_id; + pdus.try_stream() + .and_then(|(_, event_id, value)| async move { + services.server.check_running()?; + let pdu_start_time = Instant::now(); + let result = services + .rooms + .event_handler + .handle_incoming_pdu(origin, room_id, &event_id, value, true) + .await + .map(|_| ()); - debug!( - pdu_elapsed = ?pdu_start_time.elapsed(), - txn_elapsed = ?txn_start_time.elapsed(), - "Finished PDU {event_id}", - ); + debug!( + pdu_elapsed = ?pdu_start_time.elapsed(), + txn_elapsed = ?txn_start_time.elapsed(), + "Finished PDU {event_id}", + ); - results.push((event_id, result)); - } - - Ok(results) + Ok((event_id, result)) + }) + .try_collect() + .await } async fn handle_edu(services: &Services, client: &IpAddr, origin: &ServerName, edu: Edu) { match edu { - | Edu::Presence(presence) => { - handle_edu_presence(services, client, origin, presence).await; - }, - | Edu::Receipt(receipt) => handle_edu_receipt(services, client, origin, receipt).await, - | Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await, - | Edu::DeviceListUpdate(content) => { - handle_edu_device_list_update(services, client, origin, content).await; - }, - | Edu::DirectToDevice(content) => { - handle_edu_direct_to_device(services, client, origin, content).await; - }, - | Edu::SigningKeyUpdate(content) => { - handle_edu_signing_key_update(services, client, origin, content).await; - }, - | Edu::_Custom(ref _custom) => { - debug_warn!(?edu, "received custom/unknown EDU"); - }, + | Edu::Presence(presence) if services.server.config.allow_incoming_presence => + handle_edu_presence(services, client, origin, presence).await, + + | Edu::Receipt(receipt) if services.server.config.allow_incoming_read_receipts => + handle_edu_receipt(services, client, origin, receipt).await, + + | Edu::Typing(typing) if services.server.config.allow_incoming_typing => + handle_edu_typing(services, 
client, origin, typing).await, + + | Edu::DeviceListUpdate(content) => + handle_edu_device_list_update(services, client, origin, content).await, + + | Edu::DirectToDevice(content) => + handle_edu_direct_to_device(services, client, origin, content).await, + + | Edu::SigningKeyUpdate(content) => + handle_edu_signing_key_update(services, client, origin, content).await, + + | Edu::_Custom(ref _custom) => debug_warn!(?edu, "received custom/unknown EDU"), + + | _ => trace!(?edu, "skipped"), } } @@ -236,32 +244,41 @@ async fn handle_edu_presence( origin: &ServerName, presence: PresenceContent, ) { - if !services.globals.allow_incoming_presence() { + presence + .push + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |update| { + handle_edu_presence_update(services, origin, update) + }) + .await; +} + +async fn handle_edu_presence_update( + services: &Services, + origin: &ServerName, + update: PresenceUpdate, +) { + if update.user_id.server_name() != origin { + debug_warn!( + %update.user_id, %origin, + "received presence EDU for user not belonging to origin" + ); return; } - for update in presence.push { - if update.user_id.server_name() != origin { - debug_warn!( - %update.user_id, %origin, - "received presence EDU for user not belonging to origin" - ); - continue; - } - - services - .presence - .set_presence( - &update.user_id, - &update.presence, - Some(update.currently_active), - Some(update.last_active_ago), - update.status_msg.clone(), - ) - .await - .log_err() - .ok(); - } + services + .presence + .set_presence( + &update.user_id, + &update.presence, + Some(update.currently_active), + Some(update.last_active_ago), + update.status_msg.clone(), + ) + .await + .log_err() + .ok(); } async fn handle_edu_receipt( @@ -270,66 +287,94 @@ async fn handle_edu_receipt( origin: &ServerName, receipt: ReceiptContent, ) { - if !services.globals.allow_incoming_read_receipts() { + receipt + .receipts + .into_iter() + .stream() + 
.for_each_concurrent(automatic_width(), |(room_id, room_updates)| { + handle_edu_receipt_room(services, origin, room_id, room_updates) + }) + .await; +} + +async fn handle_edu_receipt_room( + services: &Services, + origin: &ServerName, + room_id: OwnedRoomId, + room_updates: ReceiptMap, +) { + if services + .rooms + .event_handler + .acl_check(origin, &room_id) + .await + .is_err() + { + debug_warn!( + %origin, %room_id, + "received read receipt EDU from ACL'd server" + ); return; } - for (room_id, room_updates) in receipt.receipts { - if services - .rooms - .event_handler - .acl_check(origin, &room_id) - .await - .is_err() - { - debug_warn!( - %origin, %room_id, - "received read receipt EDU from ACL'd server" - ); - continue; - } + let room_id = &room_id; + room_updates + .read + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |(user_id, user_updates)| async move { + handle_edu_receipt_room_user(services, origin, room_id, &user_id, user_updates).await; + }) + .await; +} - for (user_id, user_updates) in room_updates.read { - if user_id.server_name() != origin { - debug_warn!( - %user_id, %origin, - "received read receipt EDU for user not belonging to origin" - ); - continue; - } - - if services - .rooms - .state_cache - .room_members(&room_id) - .ready_any(|member| member.server_name() == user_id.server_name()) - .await - { - for event_id in &user_updates.event_ids { - let user_receipts = - BTreeMap::from([(user_id.clone(), user_updates.data.clone())]); - let receipts = BTreeMap::from([(ReceiptType::Read, user_receipts)]); - let receipt_content = BTreeMap::from([(event_id.to_owned(), receipts)]); - let event = ReceiptEvent { - content: ReceiptEventContent(receipt_content), - room_id: room_id.clone(), - }; - - services - .rooms - .read_receipt - .readreceipt_update(&user_id, &room_id, &event) - .await; - } - } else { - debug_warn!( - %user_id, %room_id, %origin, - "received read receipt EDU from server who does not have a member in the room", - ); 
- continue; - } - } +async fn handle_edu_receipt_room_user( + services: &Services, + origin: &ServerName, + room_id: &RoomId, + user_id: &UserId, + user_updates: ReceiptData, +) { + if user_id.server_name() != origin { + debug_warn!( + %user_id, %origin, + "received read receipt EDU for user not belonging to origin" + ); + return; } + + if !services + .rooms + .state_cache + .server_in_room(origin, room_id) + .await + { + debug_warn!( + %user_id, %room_id, %origin, + "received read receipt EDU from server who does not have a member in the room", + ); + return; + } + + let data = &user_updates.data; + user_updates + .event_ids + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |event_id| async move { + let user_data = [(user_id.to_owned(), data.clone())]; + let receipts = [(ReceiptType::Read, BTreeMap::from(user_data))]; + let content = [(event_id.clone(), BTreeMap::from(receipts))]; + services + .rooms + .read_receipt + .readreceipt_update(user_id, room_id, &ReceiptEvent { + content: ReceiptEventContent(content.into()), + room_id: room_id.to_owned(), + }) + .await; + }) + .await; } async fn handle_edu_typing( @@ -338,10 +383,6 @@ async fn handle_edu_typing( origin: &ServerName, typing: TypingContent, ) { - if !services.server.config.allow_incoming_typing { - return; - } - if typing.user_id.server_name() != origin { debug_warn!( %typing.user_id, %origin, @@ -364,41 +405,38 @@ async fn handle_edu_typing( return; } - if services + if !services .rooms .state_cache .is_joined(&typing.user_id, &typing.room_id) .await { - if typing.typing { - let timeout = utils::millis_since_unix_epoch().saturating_add( - services - .server - .config - .typing_federation_timeout_s - .saturating_mul(1000), - ); - services - .rooms - .typing - .typing_add(&typing.user_id, &typing.room_id, timeout) - .await - .log_err() - .ok(); - } else { - services - .rooms - .typing - .typing_remove(&typing.user_id, &typing.room_id) - .await - .log_err() - .ok(); - } - } else { 
debug_warn!( %typing.user_id, %typing.room_id, %origin, "received typing EDU for user not in room" ); + return; + } + + if typing.typing { + let secs = services.server.config.typing_federation_timeout_s; + let timeout = millis_since_unix_epoch().saturating_add(secs.saturating_mul(1000)); + + services + .rooms + .typing + .typing_add(&typing.user_id, &typing.room_id, timeout) + .await + .log_err() + .ok(); + } else { + services + .rooms + .typing + .typing_remove(&typing.user_id, &typing.room_id) + .await + .log_err() + .ok(); } } @@ -427,7 +465,12 @@ async fn handle_edu_direct_to_device( origin: &ServerName, content: DirectDeviceContent, ) { - let DirectDeviceContent { sender, ev_type, message_id, messages } = content; + let DirectDeviceContent { + ref sender, + ref ev_type, + ref message_id, + messages, + } = content; if sender.server_name() != origin { debug_warn!( @@ -440,60 +483,88 @@ async fn handle_edu_direct_to_device( // Check if this is a new transaction id if services .transaction_ids - .existing_txnid(&sender, None, &message_id) + .existing_txnid(sender, None, message_id) .await .is_ok() { return; } - for (target_user_id, map) in &messages { - for (target_device_id_maybe, event) in map { - let Ok(event) = event.deserialize_as().map_err(|e| { - err!(Request(InvalidParam(error!("To-Device event is invalid: {e}")))) - }) else { - continue; - }; - - let ev_type = ev_type.to_string(); - match target_device_id_maybe { - | DeviceIdOrAllDevices::DeviceId(target_device_id) => { - services - .users - .add_to_device_event( - &sender, - target_user_id, - target_device_id, - &ev_type, - event, - ) - .await; - }, - - | DeviceIdOrAllDevices::AllDevices => { - let (sender, ev_type, event) = (&sender, &ev_type, &event); - services - .users - .all_device_ids(target_user_id) - .for_each(|target_device_id| { - services.users.add_to_device_event( - sender, - target_user_id, - target_device_id, - ev_type, - event.clone(), - ) - }) - .await; - }, - } - } - } + // process 
messages concurrently for different users + let ev_type = ev_type.to_string(); + messages + .into_iter() + .stream() + .for_each_concurrent(automatic_width(), |(target_user_id, map)| { + handle_edu_direct_to_device_user(services, target_user_id, sender, &ev_type, map) + }) + .await; // Save transaction id with empty data services .transaction_ids - .add_txnid(&sender, None, &message_id, &[]); + .add_txnid(sender, None, message_id, &[]); +} + +async fn handle_edu_direct_to_device_user( + services: &Services, + target_user_id: OwnedUserId, + sender: &UserId, + ev_type: &str, + map: BTreeMap>, +) { + for (target_device_id_maybe, event) in map { + let Ok(event) = event + .deserialize_as() + .map_err(|e| err!(Request(InvalidParam(error!("To-Device event is invalid: {e}"))))) + else { + continue; + }; + + handle_edu_direct_to_device_event( + services, + &target_user_id, + sender, + target_device_id_maybe, + ev_type, + event, + ) + .await; + } +} + +async fn handle_edu_direct_to_device_event( + services: &Services, + target_user_id: &UserId, + sender: &UserId, + target_device_id_maybe: DeviceIdOrAllDevices, + ev_type: &str, + event: serde_json::Value, +) { + match target_device_id_maybe { + | DeviceIdOrAllDevices::DeviceId(ref target_device_id) => { + services + .users + .add_to_device_event(sender, target_user_id, target_device_id, ev_type, event) + .await; + }, + + | DeviceIdOrAllDevices::AllDevices => { + services + .users + .all_device_ids(target_user_id) + .for_each(|target_device_id| { + services.users.add_to_device_event( + sender, + target_user_id, + target_device_id, + ev_type, + event.clone(), + ) + }) + .await; + }, + } } async fn handle_edu_signing_key_update( From b2a565b0b4e32cf998ee5877cecded31f1305240 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 15:44:52 +0000 Subject: [PATCH 0608/1248] propagate better error from server.check_running() --- src/core/error/response.rs | 1 + src/core/server.rs | 7 +++++-- 2 files changed, 6 insertions(+), 
2 deletions(-) diff --git a/src/core/error/response.rs b/src/core/error/response.rs index ede1a05d..75e4050d 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -106,6 +106,7 @@ pub(super) fn io_error_code(kind: std::io::ErrorKind) -> StatusCode { | ErrorKind::TimedOut => StatusCode::GATEWAY_TIMEOUT, | ErrorKind::FileTooLarge => StatusCode::PAYLOAD_TOO_LARGE, | ErrorKind::StorageFull => StatusCode::INSUFFICIENT_STORAGE, + | ErrorKind::Interrupted => StatusCode::SERVICE_UNAVAILABLE, | _ => StatusCode::INTERNAL_SERVER_ERROR, } } diff --git a/src/core/server.rs b/src/core/server.rs index 05a4aae7..0f2e61b0 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -9,7 +9,7 @@ use std::{ use ruma::OwnedServerName; use tokio::{runtime, sync::broadcast}; -use crate::{config, config::Config, err, log::Log, metrics::Metrics, Err, Result}; +use crate::{config, config::Config, log::Log, metrics::Metrics, Err, Result}; /// Server runtime state; public portion pub struct Server { @@ -127,9 +127,12 @@ impl Server { #[inline] pub fn check_running(&self) -> Result { + use std::{io, io::ErrorKind::Interrupted}; + self.running() .then_some(()) - .ok_or_else(|| err!(debug_warn!("Server is shutting down."))) + .ok_or_else(|| io::Error::new(Interrupted, "Server shutting down")) + .map_err(Into::into) } #[inline] From ffd0fd42424a234d4fbd564b66b79521595b5b5b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 21:46:46 +0000 Subject: [PATCH 0609/1248] pipeline pdu fetch for federation sending destination Signed-off-by: Jason Volk --- src/service/sending/sender.rs | 135 ++++++++++++++++------------------ 1 file changed, 64 insertions(+), 71 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index c91e1d31..47be01f1 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -8,12 +8,12 @@ use std::{ time::{Duration, Instant}, }; -use base64::{engine::general_purpose, Engine as _}; +use 
base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; use conduwuit::{ debug, err, error, result::LogErr, trace, - utils::{calculate_hash, continue_exponential_backoff_secs, ReadyExt}, + utils::{calculate_hash, continue_exponential_backoff_secs, stream::IterStream, ReadyExt}, warn, Error, Result, }; use futures::{ @@ -38,7 +38,9 @@ use ruma::{ push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType, }, - push, uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, + push, + serde::Raw, + uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UInt, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -633,7 +635,7 @@ impl Service { } fn send_events(&self, dest: Destination, events: Vec) -> SendingFuture<'_> { - //debug_assert!(!events.is_empty(), "sending empty transaction"); + debug_assert!(!events.is_empty(), "sending empty transaction"); match dest { | Destination::Federation(server) => self.send_events_dest_federation(server, events).boxed(), @@ -698,7 +700,7 @@ impl Service { | SendingEvent::Flush => None, })); - let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); + let txn_id = &*URL_SAFE_NO_PAD.encode(txn_hash); //debug_assert!(pdu_jsons.len() + edu_jsons.len() > 0, "sending empty // transaction"); @@ -796,81 +798,72 @@ impl Service { Ok(Destination::Push(user_id, pushkey)) } - #[tracing::instrument( - name = "fed", - level = "debug", - skip(self, events), - fields( - events = %events.len(), - ), - )] async fn send_events_dest_federation( &self, server: OwnedServerName, events: Vec, ) -> SendingResult { - let mut pdu_jsons = Vec::with_capacity( - events - .iter() - .filter(|event| matches!(event, SendingEvent::Pdu(_))) - .count(), - ); - let mut edu_jsons = Vec::with_capacity( - events - .iter() - .filter(|event| matches!(event, SendingEvent::Edu(_))) - .count(), 
- ); + let pdus: Vec<_> = events + .iter() + .filter_map(|pdu| match pdu { + | SendingEvent::Pdu(pdu) => Some(pdu), + | _ => None, + }) + .stream() + .then(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id)) + .ready_filter_map(Result::ok) + .then(|pdu| self.convert_to_outgoing_federation_event(pdu)) + .collect() + .await; - for event in &events { - match event { - // TODO: check room version and remove event_id if needed - | SendingEvent::Pdu(pdu_id) => { - if let Ok(pdu) = self.services.timeline.get_pdu_json_from_id(pdu_id).await { - pdu_jsons.push(self.convert_to_outgoing_federation_event(pdu).await); - } - }, - | SendingEvent::Edu(edu) => - if let Ok(raw) = serde_json::from_slice(edu) { - edu_jsons.push(raw); - }, - | SendingEvent::Flush => {}, // flush only; no new content + let edus: Vec> = events + .iter() + .filter_map(|edu| match edu { + | SendingEvent::Edu(edu) => Some(edu.as_ref()), + | _ => None, + }) + .map(serde_json::from_slice) + .filter_map(Result::ok) + .collect(); + + if pdus.is_empty() && edus.is_empty() { + return Ok(Destination::Federation(server)); + } + + let preimage = pdus + .iter() + .map(|raw| raw.get().as_bytes()) + .chain(edus.iter().map(|raw| raw.json().get().as_bytes())); + + let txn_hash = calculate_hash(preimage); + let txn_id = &*URL_SAFE_NO_PAD.encode(txn_hash); + let request = send_transaction_message::v1::Request { + transaction_id: txn_id.into(), + origin: self.server.name.clone(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + pdus, + edus, + }; + + let result = self + .services + .federation + .execute_on(&self.services.client.sender, &server, request) + .await; + + for (event_id, result) in result.iter().flat_map(|resp| resp.pdus.iter()) { + if let Err(e) = result { + warn!( + %txn_id, %server, + "error sending PDU {event_id} to remote server: {e:?}" + ); } } - //debug_assert!(pdu_jsons.len() + edu_jsons.len() > 0, "sending empty - // transaction"); - - let txn_hash = 
calculate_hash(events.iter().filter_map(|e| match e { - | SendingEvent::Edu(b) => Some(&**b), - | SendingEvent::Pdu(b) => Some(b.as_ref()), - | SendingEvent::Flush => None, - })); - - let txn_id = &*general_purpose::URL_SAFE_NO_PAD.encode(txn_hash); - - let request = send_transaction_message::v1::Request { - origin: self.server.name.clone(), - pdus: pdu_jsons, - edus: edu_jsons, - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: txn_id.into(), - }; - - let client = &self.services.client.sender; - self.services.federation.execute_on(client, &server, request) - .await - .inspect(|response| { - response - .pdus - .iter() - .filter(|(_, res)| res.is_err()) - .for_each( - |(pdu_id, res)| warn!(%txn_id, %server, "error sending PDU {pdu_id} to remote server: {res:?}"), - ); - }) - .map_err(|e| (Destination::Federation(server.clone()), e)) - .map(|_| Destination::Federation(server)) + match result { + | Err(error) => Err((Destination::Federation(server), error)), + | Ok(_) => Ok(Destination::Federation(server)), + } } /// This does not return a full `Pdu` it is only to satisfy ruma's types. 
From 99fe88c21e54f46aedd731786d73d7d9a721dc04 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 21:47:52 +0000 Subject: [PATCH 0610/1248] use smallvec for the edu sending event buffer Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/api/client/to_device.rs | 28 ++++++++++++++----------- src/service/Cargo.toml | 1 + src/service/rooms/typing/mod.rs | 13 ++++++------ src/service/sending/data.rs | 6 +++--- src/service/sending/mod.rs | 19 +++++++++++------ src/service/sending/sender.rs | 36 +++++++++++++++++++++------------ 7 files changed, 64 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a435a10..e379aebb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -845,6 +845,7 @@ dependencies = [ "serde_json", "serde_yaml", "sha2", + "smallvec", "termimad", "tokio", "tracing", diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 2ded04e7..1b942fba 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -10,6 +10,7 @@ use ruma::{ }, to_device::DeviceIdOrAllDevices, }; +use service::sending::EduBuf; use crate::Ruma; @@ -42,18 +43,21 @@ pub(crate) async fn send_event_to_device_route( messages.insert(target_user_id.clone(), map); let count = services.globals.next_count()?; - services.sending.send_edu_server( - target_user_id.server_name(), - serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( - DirectDeviceContent { - sender: sender_user.clone(), - ev_type: body.event_type.clone(), - message_id: count.to_string().into(), - messages, - }, - )) - .expect("DirectToDevice EDU can be serialized"), - )?; + let mut buf = EduBuf::new(); + serde_json::to_writer( + &mut buf, + &federation::transactions::edu::Edu::DirectToDevice(DirectDeviceContent { + sender: sender_user.clone(), + ev_type: body.event_type.clone(), + message_id: count.to_string().into(), + messages, + }), + ) + .expect("DirectToDevice EDU can be serialized"); + + services + .sending + 
.send_edu_server(target_user_id.server_name(), buf)?; continue; } diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 21fbb417..c4f75453 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -74,6 +74,7 @@ serde_json.workspace = true serde.workspace = true serde_yaml.workspace = true sha2.workspace = true +smallvec.workspace = true termimad.workspace = true termimad.optional = true tokio.workspace = true diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index a6123322..c710b33a 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -13,7 +13,7 @@ use ruma::{ }; use tokio::sync::{broadcast, RwLock}; -use crate::{globals, sending, users, Dep}; +use crate::{globals, sending, sending::EduBuf, users, Dep}; pub struct Service { server: Arc, @@ -228,12 +228,13 @@ impl Service { return Ok(()); } - let edu = Edu::Typing(TypingContent::new(room_id.to_owned(), user_id.to_owned(), typing)); + let content = TypingContent::new(room_id.to_owned(), user_id.to_owned(), typing); + let edu = Edu::Typing(content); - self.services - .sending - .send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing")) - .await?; + let mut buf = EduBuf::new(); + serde_json::to_writer(&mut buf, &edu).expect("Serialized Edu::Typing"); + + self.services.sending.send_edu_room(room_id, buf).await?; Ok(()) } diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 436f633e..4dd2d5aa 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -202,7 +202,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se if value.is_empty() { SendingEvent::Pdu(event.into()) } else { - SendingEvent::Edu(value.to_vec()) + SendingEvent::Edu(value.into()) }, ) } else if key.starts_with(b"$") { @@ -230,7 +230,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se SendingEvent::Pdu(event.into()) } else { // I'm pretty 
sure this should never be called - SendingEvent::Edu(value.to_vec()) + SendingEvent::Edu(value.into()) }, ) } else { @@ -252,7 +252,7 @@ fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, Se if value.is_empty() { SendingEvent::Pdu(event.into()) } else { - SendingEvent::Edu(value.to_vec()) + SendingEvent::Edu(value.into()) }, ) }) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 80bca112..b146ad49 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -21,6 +21,7 @@ use ruma::{ api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, }; +use smallvec::SmallVec; use tokio::task::JoinSet; use self::data::Data; @@ -67,10 +68,16 @@ struct Msg { #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum SendingEvent { Pdu(RawPduId), // pduid - Edu(Vec), // pdu json + Edu(EduBuf), // edu json Flush, // none } +pub type EduBuf = SmallVec<[u8; EDU_BUF_CAP]>; +pub type EduVec = SmallVec<[EduBuf; EDU_VEC_CAP]>; + +const EDU_BUF_CAP: usize = 128; +const EDU_VEC_CAP: usize = 1; + #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -177,7 +184,6 @@ impl Service { where S: Stream + Send + 'a, { - let _cork = self.db.db.cork(); let requests = servers .map(|server| { (Destination::Federation(server.into()), SendingEvent::Pdu(pdu_id.to_owned())) @@ -185,6 +191,7 @@ impl Service { .collect::>() .await; + let _cork = self.db.db.cork(); let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { @@ -195,7 +202,7 @@ impl Service { } #[tracing::instrument(skip(self, server, serialized), level = "debug")] - pub fn send_edu_server(&self, server: &ServerName, serialized: Vec) -> Result<()> { + pub fn send_edu_server(&self, server: &ServerName, serialized: EduBuf) -> Result { let dest = Destination::Federation(server.to_owned()); let event = SendingEvent::Edu(serialized); let _cork 
= self.db.db.cork(); @@ -208,7 +215,7 @@ impl Service { } #[tracing::instrument(skip(self, room_id, serialized), level = "debug")] - pub async fn send_edu_room(&self, room_id: &RoomId, serialized: Vec) -> Result<()> { + pub async fn send_edu_room(&self, room_id: &RoomId, serialized: EduBuf) -> Result { let servers = self .services .state_cache @@ -219,11 +226,10 @@ impl Service { } #[tracing::instrument(skip(self, servers, serialized), level = "debug")] - pub async fn send_edu_servers<'a, S>(&self, servers: S, serialized: Vec) -> Result<()> + pub async fn send_edu_servers<'a, S>(&self, servers: S, serialized: EduBuf) -> Result where S: Stream + Send + 'a, { - let _cork = self.db.db.cork(); let requests = servers .map(|server| { ( @@ -234,6 +240,7 @@ impl Service { .collect::>() .await; + let _cork = self.db.db.cork(); let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); for ((dest, event), queue_id) in requests.into_iter().zip(keys) { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 47be01f1..363bb994 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -45,7 +45,9 @@ use ruma::{ }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use super::{appservice, data::QueueItem, Destination, Msg, SendingEvent, Service}; +use super::{ + appservice, data::QueueItem, Destination, EduBuf, EduVec, Msg, SendingEvent, Service, +}; #[derive(Debug)] enum TransactionStatus { @@ -313,7 +315,12 @@ impl Service { if let Destination::Federation(server_name) = dest { if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); - events.extend(select_edus.into_iter().map(SendingEvent::Edu)); + let select_edus = select_edus + .into_iter() + .map(Into::into) + .map(SendingEvent::Edu); + + events.extend(select_edus); self.db.set_latest_educount(server_name, last_count); } } @@ -357,7 +364,7 @@ impl Service { 
level = "debug", skip_all, )] - async fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { + async fn select_edus(&self, server_name: &ServerName) -> Result<(EduVec, u64)> { // selection window let since = self.db.get_latest_educount(server_name).await; let since_upper = self.services.globals.current_count()?; @@ -405,8 +412,8 @@ impl Service { since: (u64, u64), max_edu_count: &AtomicU64, events_len: &AtomicUsize, - ) -> Vec> { - let mut events = Vec::new(); + ) -> EduVec { + let mut events = EduVec::new(); let server_rooms = self.services.state_cache.server_rooms(server_name); pin_mut!(server_rooms); @@ -441,10 +448,11 @@ impl Service { keys: None, }); - let edu = serde_json::to_vec(&edu) + let mut buf = EduBuf::new(); + serde_json::to_writer(&mut buf, &edu) .expect("failed to serialize device list update to JSON"); - events.push(edu); + events.push(buf); if events_len.fetch_add(1, Ordering::Relaxed) >= SELECT_EDU_LIMIT - 1 { return events; } @@ -465,7 +473,7 @@ impl Service { server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, - ) -> Option> { + ) -> Option { let server_rooms = self.services.state_cache.server_rooms(server_name); pin_mut!(server_rooms); @@ -487,10 +495,11 @@ impl Service { let receipt_content = Edu::Receipt(ReceiptContent { receipts }); - let receipt_content = serde_json::to_vec(&receipt_content) + let mut buf = EduBuf::new(); + serde_json::to_writer(&mut buf, &receipt_content) .expect("Failed to serialize Receipt EDU to JSON vec"); - Some(receipt_content) + Some(buf) } /// Look for read receipts in this room @@ -569,7 +578,7 @@ impl Service { server_name: &ServerName, since: (u64, u64), max_edu_count: &AtomicU64, - ) -> Option> { + ) -> Option { let presence_since = self.services.presence.presence_since(since.0); pin_mut!(presence_since); @@ -628,10 +637,11 @@ impl Service { push: presence_updates.into_values().collect(), }); - let presence_content = serde_json::to_vec(&presence_content) + let mut buf = 
EduBuf::new(); + serde_json::to_writer(&mut buf, &presence_content) .expect("failed to serialize Presence EDU to JSON"); - Some(presence_content) + Some(buf) } fn send_events(&self, dest: Destination, events: Vec) -> SendingFuture<'_> { From ed3cd99781f35dd7e38439ab45b78a851385ca8d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 28 Jan 2025 19:42:09 +0000 Subject: [PATCH 0611/1248] abstract the config reload checks Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 14 ++++++++------ src/core/config/check.rs | 18 +++++++++++++++++- src/main/server.rs | 6 +++--- 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 47509bad..5c0c2a10 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -32,13 +32,15 @@ pub(super) async fn reload_config( &self, path: Option, ) -> Result { - let path = path.as_deref().into_iter(); - let config = Config::load(path).and_then(|raw| Config::new(&raw))?; - if config.server_name != self.services.server.name { - return Err!("You can't change the server name."); - } + use conduwuit::config::check; - let _old = self.services.server.config.update(config)?; + let path = path.as_deref().into_iter(); + let new = Config::load(path).and_then(|raw| Config::new(&raw))?; + + let old = &self.services.server.config; + check::reload(old, &new)?; + + self.services.server.config.update(new)?; Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index d7be54b1..988d4143 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -6,8 +6,24 @@ use figment::Figment; use super::DEPRECATED_KEYS; use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result, Server}; +/// Performs check() with additional checks specific to reloading old config +/// with new config. 
+pub fn reload(old: &Config, new: &Config) -> Result { + check(new)?; + + if new.server_name != old.server_name { + return Err!(Config( + "server_name", + "You can't change the server's name from {:?}.", + old.server_name + )); + } + + Ok(()) +} + #[allow(clippy::cognitive_complexity)] -pub fn check(config: &Config) -> Result<()> { +pub fn check(config: &Config) -> Result { if cfg!(debug_assertions) { warn!("Note: conduwuit was built without optimisations (i.e. debug build)"); } diff --git a/src/main/server.rs b/src/main/server.rs index 74859f2b..7376b2fc 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -46,14 +46,14 @@ impl Server { .and_then(|raw| crate::clap::update(raw, args)) .and_then(|raw| Config::new(&raw))?; - #[cfg(feature = "sentry_telemetry")] - let sentry_guard = crate::sentry::init(&config); - let (tracing_reload_handle, tracing_flame_guard, capture) = crate::logging::init(&config)?; config.check()?; + #[cfg(feature = "sentry_telemetry")] + let sentry_guard = crate::sentry::init(&config); + #[cfg(unix)] sys::maximize_fd_limit() .expect("Unable to increase maximum soft and hard file descriptor limit"); From a567e314e96bb8efa2776770cd25a2b1190c9587 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 28 Jan 2025 20:02:29 +0000 Subject: [PATCH 0612/1248] simplify shutdown signal handlers Signed-off-by: Jason Volk --- src/core/server.rs | 2 +- src/router/run.rs | 27 +++++++-------------------- src/service/sync/watch.rs | 4 ++-- 3 files changed, 10 insertions(+), 23 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index 0f2e61b0..45ba7420 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -112,7 +112,7 @@ impl Server { } #[inline] - pub async fn until_shutdown(self: Arc) { + pub async fn until_shutdown(self: &Arc) { while self.running() { self.signal.subscribe().recv().await.ok(); } diff --git a/src/router/run.rs b/src/router/run.rs index 95d12559..ea8a7666 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -9,6 
+9,7 @@ use std::{ use axum_server::Handle as ServerHandle; use conduwuit::{debug, debug_error, debug_info, error, info, Error, Result, Server}; +use futures::FutureExt; use service::Services; use tokio::{ sync::broadcast::{self, Sender}, @@ -109,28 +110,14 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { #[tracing::instrument(skip_all)] async fn signal(server: Arc, tx: Sender<()>, handle: axum_server::Handle) { - loop { - let sig: &'static str = server - .signal - .subscribe() - .recv() - .await - .expect("channel error"); - - if !server.running() { - handle_shutdown(&server, &tx, &handle, sig).await; - break; - } - } + server + .clone() + .until_shutdown() + .then(move |()| handle_shutdown(server, tx, handle)) + .await; } -async fn handle_shutdown( - server: &Arc, - tx: &Sender<()>, - handle: &axum_server::Handle, - sig: &str, -) { - debug!("Received signal {sig}"); +async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_server::Handle) { if let Err(e) = tx.send(()) { error!("failed sending shutdown transaction to channel: {e}"); } diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs index 2b351c3a..0a9c5d15 100644 --- a/src/service/sync/watch.rs +++ b/src/service/sync/watch.rs @@ -97,8 +97,8 @@ pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result { ); // Server shutdown - let server_shutdown = self.services.server.clone().until_shutdown().boxed(); - futures.push(server_shutdown); + futures.push(self.services.server.until_shutdown().boxed()); + if !self.services.server.running() { return Ok(()); } From 2f449ba47db488ca1d3acb6f7228479af6bb97c2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 28 Jan 2025 20:55:28 +0000 Subject: [PATCH 0613/1248] support reloading config via SIGUSR1 Signed-off-by: Jason Volk --- conduwuit-example.toml | 5 ++++ src/admin/server/commands.rs | 11 ++------ src/core/config/mod.rs | 7 +++++ src/main/signal.rs | 2 ++ src/service/config/mod.rs | 55 
++++++++++++++++++++++++++++++++++++ src/service/mod.rs | 1 + src/service/services.rs | 4 ++- 7 files changed, 75 insertions(+), 10 deletions(-) create mode 100644 src/service/config/mod.rs diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 51d948e8..8534e5c6 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1524,6 +1524,11 @@ # #listening = true +# Enables configuration reload when the server receives SIGUSR1 on +# supporting platforms. +# +#config_reload_signal = true + [global.tls] # Path to a valid TLS certificate file. diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 5c0c2a10..910dce6e 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,6 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{info, utils::time, warn, Config, Err, Result}; +use conduwuit::{info, utils::time, warn, Err, Result}; use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; @@ -32,15 +32,8 @@ pub(super) async fn reload_config( &self, path: Option, ) -> Result { - use conduwuit::config::check; - let path = path.as_deref().into_iter(); - let new = Config::load(path).and_then(|raw| Config::new(&raw))?; - - let old = &self.services.server.config; - check::reload(old, &new)?; - - self.services.server.config.update(new)?; + self.services.config.reload(path)?; Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 94788fa4..8e8176ab 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1742,6 +1742,13 @@ pub struct Config { #[serde(default = "true_fn")] pub listening: bool, + /// Enables configuration reload when the server receives SIGUSR1 on + /// supporting platforms. 
+ /// + /// default: true + #[serde(default = "true_fn")] + pub config_reload_signal: bool, + #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime diff --git a/src/main/signal.rs b/src/main/signal.rs index cecb718b..dfdca1d5 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -16,6 +16,7 @@ pub(super) async fn signal(server: Arc) { let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler"); let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler"); + let mut usr1 = unix::signal(SignalKind::user_defined1()).expect("SIGUSR1 handler"); loop { trace!("Installed signal handlers"); let sig: &'static str; @@ -23,6 +24,7 @@ pub(super) async fn signal(server: Arc) { _ = signal::ctrl_c() => { sig = "SIGINT"; }, _ = quit.recv() => { sig = "SIGQUIT"; }, _ = term.recv() => { sig = "SIGTERM"; }, + _ = usr1.recv() => { sig = "SIGUSR1"; }, } warn!("Received {sig}"); diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs new file mode 100644 index 00000000..ef98f176 --- /dev/null +++ b/src/service/config/mod.rs @@ -0,0 +1,55 @@ +use std::{iter, path::Path, sync::Arc}; + +use async_trait::async_trait; +use conduwuit::{ + config::{check, Config}, + error, implement, Result, Server, +}; + +pub struct Service { + server: Arc, +} + +const SIGNAL: &str = "SIGUSR1"; + +#[async_trait] +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { server: args.server.clone() })) + } + + async fn worker(self: Arc) -> Result { + while self.server.running() { + if self.server.signal.subscribe().recv().await == Ok(SIGNAL) { + if let Err(e) = self.handle_reload() { + error!("Failed to reload config: {e}"); + } + } + } + + Ok(()) + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +#[implement(Service)] +fn handle_reload(&self) -> Result { + if self.server.config.config_reload_signal { + 
self.reload(iter::empty())?; + } + + Ok(()) +} + +#[implement(Service)] +pub fn reload<'a, I>(&self, paths: I) -> Result> +where + I: Iterator, +{ + let old = self.server.config.clone(); + let new = Config::load(paths).and_then(|raw| Config::new(&raw))?; + + check::reload(&old, &new)?; + self.server.config.update(new) +} diff --git a/src/service/mod.rs b/src/service/mod.rs index 2102921f..71bd0eb4 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -9,6 +9,7 @@ pub mod account_data; pub mod admin; pub mod appservice; pub mod client; +pub mod config; pub mod emergency; pub mod federation; pub mod globals; diff --git a/src/service/services.rs b/src/service/services.rs index cb5cc12f..fb334b96 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -10,7 +10,7 @@ use database::Database; use tokio::sync::Mutex; use crate::{ - account_data, admin, appservice, client, emergency, federation, globals, key_backups, + account_data, admin, appservice, client, config, emergency, federation, globals, key_backups, manager::Manager, media, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, @@ -21,6 +21,7 @@ pub struct Services { pub account_data: Arc, pub admin: Arc, pub appservice: Arc, + pub config: Arc, pub client: Arc, pub emergency: Arc, pub globals: Arc, @@ -68,6 +69,7 @@ impl Services { appservice: build!(appservice::Service), resolver: build!(resolver::Service), client: build!(client::Service), + config: build!(config::Service), emergency: build!(emergency::Service), globals: build!(globals::Service), key_backups: build!(key_backups::Service), From 2c5af902a3b61cf07a07ccfff82a41874d7b10ba Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 28 Jan 2025 21:30:12 +0000 Subject: [PATCH 0614/1248] support executing configurable admin commands via SIGUSR2 Signed-off-by: Jason Volk --- conduwuit-example.toml | 7 +++ src/core/config/mod.rs | 9 ++++ src/main/signal.rs | 2 + src/service/admin/{startup.rs => 
execute.rs} | 56 ++++++++++++++------ src/service/admin/mod.rs | 8 ++- src/service/config/mod.rs | 9 +++- 6 files changed, 72 insertions(+), 19 deletions(-) rename src/service/admin/{startup.rs => execute.rs} (57%) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 8534e5c6..4062ba99 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1362,6 +1362,13 @@ # #admin_execute_errors_ignore = false +# List of admin commands to execute on SIGUSR2. +# +# Similar to admin_execute, but these commands are executed when the +# server receives SIGUSR2 on supporting platforms. +# +#admin_signal_execute = [] + # Controls the max log level for admin command log captures (logs # generated from running admin commands). Defaults to "info" on release # builds, else "debug" on debug builds. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 8e8176ab..415c9ba9 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1554,6 +1554,15 @@ pub struct Config { #[serde(default)] pub admin_execute_errors_ignore: bool, + /// List of admin commands to execute on SIGUSR2. + /// + /// Similar to admin_execute, but these commands are executed when the + /// server receives SIGUSR2 on supporting platforms. + /// + /// default: [] + #[serde(default)] + pub admin_signal_execute: Vec, + /// Controls the max log level for admin command log captures (logs /// generated from running admin commands). Defaults to "info" on release /// builds, else "debug" on debug builds. 
diff --git a/src/main/signal.rs b/src/main/signal.rs index dfdca1d5..343b95c9 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -17,6 +17,7 @@ pub(super) async fn signal(server: Arc) { let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler"); let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler"); let mut usr1 = unix::signal(SignalKind::user_defined1()).expect("SIGUSR1 handler"); + let mut usr2 = unix::signal(SignalKind::user_defined2()).expect("SIGUSR2 handler"); loop { trace!("Installed signal handlers"); let sig: &'static str; @@ -25,6 +26,7 @@ pub(super) async fn signal(server: Arc) { _ = quit.recv() => { sig = "SIGQUIT"; }, _ = term.recv() => { sig = "SIGTERM"; }, _ = usr1.recv() => { sig = "SIGUSR1"; }, + _ = usr2.recv() => { sig = "SIGUSR2"; }, } warn!("Received {sig}"); diff --git a/src/service/admin/startup.rs b/src/service/admin/execute.rs similarity index 57% rename from src/service/admin/startup.rs rename to src/service/admin/execute.rs index 582e863d..462681da 100644 --- a/src/service/admin/startup.rs +++ b/src/service/admin/execute.rs @@ -2,6 +2,8 @@ use conduwuit::{debug, debug_info, error, implement, info, Err, Result}; use ruma::events::room::message::RoomMessageEventContent; use tokio::time::{sleep, Duration}; +pub(super) const SIGNAL: &str = "SIGUSR2"; + /// Possibly spawn the terminal console at startup if configured. 
#[implement(super::Service)] pub(super) async fn console_auto_start(&self) { @@ -22,7 +24,7 @@ pub(super) async fn console_auto_stop(&self) { /// Execute admin commands after startup #[implement(super::Service)] -pub(super) async fn startup_execute(&self) -> Result<()> { +pub(super) async fn startup_execute(&self) -> Result { // List of comamnds to execute let commands = &self.services.server.config.admin_execute; @@ -36,7 +38,7 @@ pub(super) async fn startup_execute(&self) -> Result<()> { sleep(Duration::from_millis(500)).await; for (i, command) in commands.iter().enumerate() { - if let Err(e) = self.startup_execute_command(i, command.clone()).await { + if let Err(e) = self.execute_command(i, command.clone()).await { if !errors { return Err(e); } @@ -59,16 +61,38 @@ pub(super) async fn startup_execute(&self) -> Result<()> { Ok(()) } -/// Execute one admin command after startup +/// Execute admin commands after signal #[implement(super::Service)] -async fn startup_execute_command(&self, i: usize, command: String) -> Result<()> { - debug!("Startup command #{i}: executing {command:?}"); +pub(super) async fn signal_execute(&self) -> Result { + // List of comamnds to execute + let commands = self.services.server.config.admin_signal_execute.clone(); + + // When true, errors are ignored and execution continues. 
+ let ignore_errors = self.services.server.config.admin_execute_errors_ignore; + + for (i, command) in commands.iter().enumerate() { + if let Err(e) = self.execute_command(i, command.clone()).await { + if !ignore_errors { + return Err(e); + } + } + + tokio::task::yield_now().await; + } + + Ok(()) +} + +/// Execute one admin command after startup or signal +#[implement(super::Service)] +async fn execute_command(&self, i: usize, command: String) -> Result { + debug!("Execute command #{i}: executing {command:?}"); match self.command_in_place(command, None).await { - | Ok(Some(output)) => Self::startup_command_output(i, &output), - | Err(output) => Self::startup_command_error(i, &output), + | Ok(Some(output)) => Self::execute_command_output(i, &output), + | Err(output) => Self::execute_command_error(i, &output), | Ok(None) => { - info!("Startup command #{i} completed (no output)."); + info!("Execute command #{i} completed (no output)."); Ok(()) }, } @@ -76,28 +100,28 @@ async fn startup_execute_command(&self, i: usize, command: String) -> Result<()> #[cfg(feature = "console")] #[implement(super::Service)] -fn startup_command_output(i: usize, content: &RoomMessageEventContent) -> Result<()> { - debug_info!("Startup command #{i} completed:"); +fn execute_command_output(i: usize, content: &RoomMessageEventContent) -> Result { + debug_info!("Execute command #{i} completed:"); super::console::print(content.body()); Ok(()) } #[cfg(feature = "console")] #[implement(super::Service)] -fn startup_command_error(i: usize, content: &RoomMessageEventContent) -> Result<()> { +fn execute_command_error(i: usize, content: &RoomMessageEventContent) -> Result { super::console::print_err(content.body()); - Err!(debug_error!("Startup command #{i} failed.")) + Err!(debug_error!("Execute command #{i} failed.")) } #[cfg(not(feature = "console"))] #[implement(super::Service)] -fn startup_command_output(i: usize, content: &RoomMessageEventContent) -> Result<()> { - info!("Startup command #{i} 
completed:\n{:#}", content.body()); +fn execute_command_output(i: usize, content: &RoomMessageEventContent) -> Result { + info!("Execute command #{i} completed:\n{:#}", content.body()); Ok(()) } #[cfg(not(feature = "console"))] #[implement(super::Service)] -fn startup_command_error(i: usize, content: &RoomMessageEventContent) -> Result<()> { - Err!(error!("Startup command #{i} failed:\n{:#}", content.body())) +fn execute_command_error(i: usize, content: &RoomMessageEventContent) -> Result { + Err!(error!("Execute command #{i} failed:\n{:#}", content.body())) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index bc410631..31b046b7 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1,7 +1,7 @@ pub mod console; mod create; +mod execute; mod grant; -mod startup; use std::{ future::Future, @@ -183,7 +183,11 @@ impl Service { .map(|complete| complete(command)) } - async fn handle_signal(&self, #[allow(unused_variables)] sig: &'static str) { + async fn handle_signal(&self, sig: &'static str) { + if sig == execute::SIGNAL { + self.signal_execute().await.ok(); + } + #[cfg(feature = "console")] self.console.handle_signal(sig).await; } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index ef98f176..8bd09a52 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -1,4 +1,4 @@ -use std::{iter, path::Path, sync::Arc}; +use std::{iter, ops::Deref, path::Path, sync::Arc}; use async_trait::async_trait; use conduwuit::{ @@ -33,6 +33,13 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +impl Deref for Service { + type Target = Arc; + + #[inline] + fn deref(&self) -> &Self::Target { &self.server.config } +} + #[implement(Service)] fn handle_reload(&self) -> Result { if self.server.config.config_reload_signal { From ad0b0af955cda8b93b6d8c9c665905a2c4dd93d3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 25 Jan 2025 23:07:50 +0000 Subject: 
[PATCH 0615/1248] combine state_accessor data into mod Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/data.rs | 253 ----------------------- src/service/rooms/state_accessor/mod.rs | 183 +++++++++++++--- 2 files changed, 149 insertions(+), 287 deletions(-) delete mode 100644 src/service/rooms/state_accessor/data.rs diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs deleted file mode 100644 index 29b27a05..00000000 --- a/src/service/rooms/state_accessor/data.rs +++ /dev/null @@ -1,253 +0,0 @@ -use std::{borrow::Borrow, collections::HashMap, sync::Arc}; - -use conduwuit::{ - at, err, - utils::stream::{BroadbandExt, IterStream, ReadyExt}, - PduEvent, Result, -}; -use database::{Deserialized, Map}; -use futures::{FutureExt, StreamExt, TryFutureExt}; -use ruma::{events::StateEventType, EventId, OwnedEventId, RoomId}; -use serde::Deserialize; - -use crate::{ - rooms, - rooms::{ - short::{ShortEventId, ShortStateHash, ShortStateKey}, - state_compressor::parse_compressed_state_event, - }, - Dep, -}; - -pub(super) struct Data { - shorteventid_shortstatehash: Arc, - services: Services, -} - -struct Services { - short: Dep, - state: Dep, - state_compressor: Dep, - timeline: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - shorteventid_shortstatehash: db["shorteventid_shortstatehash"].clone(), - services: Services { - short: args.depend::("rooms::short"), - state: args.depend::("rooms::state"), - state_compressor: args - .depend::("rooms::state_compressor"), - timeline: args.depend::("rooms::timeline"), - }, - } - } - - pub(super) async fn state_full( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - let state = self - .state_full_pdus(shortstatehash) - .await? 
- .into_iter() - .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu))) - .collect(); - - Ok(state) - } - - pub(super) async fn state_full_pdus( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - let short_ids = self.state_full_shortids(shortstatehash).await?; - - let full_pdus = self - .services - .short - .multi_get_eventid_from_short(short_ids.into_iter().map(at!(1)).stream()) - .ready_filter_map(Result::ok) - .broad_filter_map(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await.ok() - }) - .collect() - .await; - - Ok(full_pdus) - } - - pub(super) async fn state_full_ids( - &self, - shortstatehash: ShortStateHash, - ) -> Result> - where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, - ::Owned: Borrow, - { - let short_ids = self.state_full_shortids(shortstatehash).await?; - - let full_ids = self - .services - .short - .multi_get_eventid_from_short(short_ids.iter().map(at!(1)).stream()) - .zip(short_ids.iter().stream().map(at!(0))) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) - .collect() - .boxed() - .await; - - Ok(full_ids) - } - - pub(super) async fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - let shortids = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database("Missing state IDs: {e}")))? - .pop() - .expect("there is always one layer") - .full_state - .iter() - .copied() - .map(parse_compressed_state_event) - .collect(); - - Ok(shortids) - } - - /// Returns a single EventId from `room_id` with key - /// (`event_type`,`state_key`). 
- pub(super) async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let full_state = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? - .pop() - .expect("there is always one layer") - .full_state; - - let compressed = full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .ok_or(err!(Database("No shortstatekey in compressed state")))?; - - let (_, shorteventid) = parse_compressed_state_event(*compressed); - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub(super) async fn state_get( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await - }) - .await - } - - /// Returns the state hash for this pdu. - pub(super) async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Returns the full room state. 
- pub(super) async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result> { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_full(shortstatehash)) - .map_err(|e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .await - } - - /// Returns the full room state's pdus. - #[allow(unused_qualifications)] // async traits - pub(super) async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result> { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_full_pdus(shortstatehash)) - .map_err(|e| err!(Database("Missing state pdus for {room_id:?}: {e:?}"))) - .await - } - - /// Returns a single EventId from `room_id` with key - /// (`event_type`,`state_key`). - pub(super) async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub(super) async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } -} diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index d89c8835..3d87534b 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,5 +1,3 @@ -mod data; - use std::{ borrow::Borrow, collections::HashMap, @@ -8,16 +6,18 @@ use std::{ }; use conduwuit::{ - err, error, + at, err, error, pdu::PduBuilder, utils, utils::{ math::{usize_from_f64, Expected}, - ReadyExt, + stream::BroadbandExt, + IterStream, ReadyExt, }, Err, Error, PduEvent, Result, }; -use futures::StreamExt; +use database::{Deserialized, Map}; +use futures::{FutureExt, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -38,33 +38,40 @@ use ruma::{ }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, EventId, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, ServerName, UserId, + EventEncryptionAlgorithm, EventId, JsOption, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, + OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use serde::Deserialize; -use self::data::Data; use crate::{ rooms, rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, state::RoomMutexGuard, + state_compressor::parse_compressed_state_event, }, Dep, }; pub struct Service { - services: Services, - db: Data, pub server_visibility_cache: Mutex>, pub user_visibility_cache: Mutex>, + services: Services, + db: Data, } struct Services { + short: Dep, + state: Dep, + state_compressor: Dep, state_cache: Dep, timeline: Dep, } +struct Data { + shorteventid_shortstatehash: Arc, +} + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = 
&args.server.config; @@ -74,17 +81,23 @@ impl crate::Service for Service { f64::from(config.user_visibility_cache_capacity) * config.cache_capacity_modifier; Ok(Arc::new(Self { - services: Services { - state_cache: args.depend::("rooms::state_cache"), - timeline: args.depend::("rooms::timeline"), - }, - db: Data::new(&args), server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( server_visibility_cache_capacity, )?)), user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( user_visibility_cache_capacity, )?)), + services: Services { + state_cache: args.depend::("rooms::state_cache"), + timeline: args.depend::("rooms::timeline"), + short: args.depend::("rooms::short"), + state: args.depend::("rooms::state"), + state_compressor: args + .depend::("rooms::state_compressor"), + }, + db: Data { + shorteventid_shortstatehash: args.db["shorteventid_shortstatehash"].clone(), + }, })) } @@ -130,6 +143,37 @@ impl crate::Service for Service { } impl Service { + pub async fn state_full( + &self, + shortstatehash: ShortStateHash, + ) -> Result> { + let state = self + .state_full_pdus(shortstatehash) + .await? + .into_iter() + .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu))) + .collect(); + + Ok(state) + } + + pub async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result> { + let short_ids = self.state_full_shortids(shortstatehash).await?; + + let full_pdus = self + .services + .short + .multi_get_eventid_from_short(short_ids.into_iter().map(at!(1)).stream()) + .ready_filter_map(Result::ok) + .broad_filter_map(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await.ok() + }) + .collect() + .await; + + Ok(full_pdus) + } + /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
#[tracing::instrument(skip(self), level = "debug")] @@ -141,7 +185,19 @@ impl Service { Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, ::Owned: Borrow, { - self.db.state_full_ids::(shortstatehash).await + let short_ids = self.state_full_shortids(shortstatehash).await?; + + let full_ids = self + .services + .short + .multi_get_eventid_from_short(short_ids.iter().map(at!(1)).stream()) + .zip(short_ids.iter().stream().map(at!(0))) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) + .collect() + .boxed() + .await; + + Ok(full_ids) } #[inline] @@ -149,14 +205,21 @@ impl Service { &self, shortstatehash: ShortStateHash, ) -> Result> { - self.db.state_full_shortids(shortstatehash).await - } + let shortids = self + .services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| err!(Database("Missing state IDs: {e}")))? + .pop() + .expect("there is always one layer") + .full_state + .iter() + .copied() + .map(parse_compressed_state_event) + .collect(); - pub async fn state_full( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - self.db.state_full(shortstatehash).await + Ok(shortids) } /// Returns a single EventId from `room_id` with key (`event_type`, @@ -172,22 +235,47 @@ impl Service { Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { - self.db - .state_get_id(shortstatehash, event_type, state_key) + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let full_state = self + .services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? 
+ .pop() + .expect("there is always one layer") + .full_state; + + let compressed = full_state + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + .ok_or(err!(Database("No shortstatekey in compressed state")))?; + + let (_, shorteventid) = parse_compressed_state_event(*compressed); + + self.services + .short + .get_eventid_from_short(shorteventid) .await } /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). - #[inline] pub async fn state_get( &self, shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, ) -> Result { - self.db - .state_get(shortstatehash, event_type, state_key) + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) .await } @@ -375,7 +463,18 @@ impl Service { /// Returns the state hash for this pdu. pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - self.db.pdu_shortstatehash(event_id).await + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) + }) + .await + .deserialized() } /// Returns the full room state. 
@@ -384,13 +483,23 @@ impl Service { &self, room_id: &RoomId, ) -> Result> { - self.db.room_state_full(room_id).await + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_full(shortstatehash)) + .map_err(|e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .await } /// Returns the full room state pdus #[tracing::instrument(skip(self), level = "debug")] pub async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result> { - self.db.room_state_full_pdus(room_id).await + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_full_pdus(shortstatehash)) + .map_err(|e| err!(Database("Missing state pdus for {room_id:?}: {e:?}"))) + .await } /// Returns a single EventId from `room_id` with key (`event_type`, @@ -406,8 +515,10 @@ impl Service { Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { - self.db - .room_state_get_id(room_id, event_type, state_key) + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) .await } @@ -420,7 +531,11 @@ impl Service { event_type: &StateEventType, state_key: &str, ) -> Result { - self.db.room_state_get(room_id, event_type, state_key).await + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await } /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
From af399fd5179eed9c72bf0426858301af9ffc92d4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 01:04:02 +0000 Subject: [PATCH 0616/1248] flatten state accessor iterations Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 11 +- src/api/client/context.rs | 28 ++-- src/api/client/membership.rs | 18 +- src/api/client/message.rs | 6 +- src/api/client/room/initial_sync.rs | 9 +- src/api/client/search.rs | 12 +- src/api/client/state.rs | 12 +- src/api/client/sync/v3.rs | 48 +++--- src/api/client/sync/v4.rs | 8 +- src/api/client/sync/v5.rs | 8 +- src/api/server/send_join.rs | 14 +- src/api/server/state.rs | 10 +- src/api/server/state_ids.rs | 9 +- src/core/pdu/strip.rs | 12 +- .../rooms/event_handler/resolve_state.rs | 5 +- .../rooms/event_handler/state_at_incoming.rs | 17 +- src/service/rooms/spaces/mod.rs | 4 +- src/service/rooms/state_accessor/mod.rs | 155 ++++++++++-------- 18 files changed, 205 insertions(+), 181 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index cdd69c0f..cd892ded 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -9,7 +9,7 @@ use conduwuit::{ debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, PduId, RawPduId, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ api::{client::error::ErrorKind, federation::event::get_room_state}, events::room::message::RoomMessageEventContent, @@ -327,11 +327,10 @@ pub(super) async fn get_room_state( .services .rooms .state_accessor - .room_state_full(&room_id) - .await? 
- .values() - .map(PduEvent::to_state_event) - .collect(); + .room_state_full_pdus(&room_id) + .map_ok(PduEvent::into_state_event) + .try_collect() + .await?; if room_state.is_empty() { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 388bcf4d..7256683f 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - at, deref_at, err, ref_at, + at, err, ref_at, utils::{ future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, @@ -10,10 +10,10 @@ use conduwuit::{ }; use futures::{ future::{join, join3, try_join3, OptionFuture}, - FutureExt, StreamExt, TryFutureExt, + FutureExt, StreamExt, TryFutureExt, TryStreamExt, }; use ruma::{api::client::context::get_context, events::StateEventType, OwnedEventId, UserId}; -use service::rooms::{lazy_loading, lazy_loading::Options}; +use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use crate::{ client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, @@ -132,21 +132,29 @@ pub(crate) async fn get_context_route( .state_accessor .pdu_shortstatehash(state_at) .or_else(|_| services.rooms.state.get_room_shortstatehash(room_id)) - .and_then(|shortstatehash| services.rooms.state_accessor.state_full_ids(shortstatehash)) + .map_ok(|shortstatehash| { + services + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .map(Ok) + }) .map_err(|e| err!(Database("State not found: {e}"))) + .try_flatten_stream() + .try_collect() .boxed(); let (lazy_loading_witnessed, state_ids) = join(lazy_loading_witnessed, state_ids).await; - let state_ids = state_ids?; + let state_ids: Vec<(ShortStateKey, OwnedEventId)> = state_ids?; + let shortstatekeys = state_ids.iter().map(at!(0)).stream(); + let shorteventids = state_ids.iter().map(ref_at!(1)).stream(); let lazy_loading_witnessed = lazy_loading_witnessed.unwrap_or_default(); - 
let shortstatekeys = state_ids.iter().stream().map(deref_at!(0)); - let state: Vec<_> = services .rooms .short .multi_get_statekey_from_short(shortstatekeys) - .zip(state_ids.iter().stream().map(at!(1))) + .zip(shorteventids) .ready_filter_map(|item| Some((item.0.ok()?, item.1))) .ready_filter_map(|((event_type, state_key), event_id)| { if filter.lazy_load_options.is_enabled() @@ -162,9 +170,9 @@ pub(crate) async fn get_context_route( Some(event_id) }) .broad_filter_map(|event_id: &OwnedEventId| { - services.rooms.timeline.get_pdu(event_id).ok() + services.rooms.timeline.get_pdu(event_id.as_ref()).ok() }) - .map(|pdu| pdu.to_state_event()) + .map(PduEvent::into_state_event) .collect() .await; diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 2e23dab9..fccb9b53 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -8,14 +8,14 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, debug_info, debug_warn, err, info, + at, debug, debug_info, debug_warn, err, info, pdu::{gen_event_id_canonical_json, PduBuilder}, result::FlatOk, trace, utils::{self, shuffle, IterStream, ReadyExt}, warn, Err, PduEvent, Result, }; -use futures::{join, FutureExt, StreamExt}; +use futures::{join, FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::{ client::{ @@ -765,11 +765,12 @@ pub(crate) async fn get_member_events_route( .rooms .state_accessor .room_state_full(&body.room_id) - .await? 
- .iter() - .filter(|(key, _)| key.0 == StateEventType::RoomMember) - .map(|(_, pdu)| pdu.to_member_event()) - .collect(), + .ready_filter_map(Result::ok) + .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) + .map(at!(1)) + .map(PduEvent::into_member_event) + .collect() + .await, }) } @@ -1707,9 +1708,6 @@ pub async fn leave_room( room_id: &RoomId, reason: Option, ) -> Result<()> { - //use conduwuit::utils::stream::OptionStream; - use futures::TryFutureExt; - // Ask a remote server if we don't have this room and are not knocking on it if !services .rooms diff --git a/src/api/client/message.rs b/src/api/client/message.rs index a508b5da..321d8013 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -6,9 +6,9 @@ use conduwuit::{ stream::{BroadbandExt, TryIgnore, WidebandExt}, IterStream, ReadyExt, }, - Event, PduCount, Result, + Event, PduCount, PduEvent, Result, }; -use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt}; +use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt}; use ruma::{ api::{ client::{filter::RoomEventFilter, message::get_message_events}, @@ -220,8 +220,8 @@ async fn get_member_event( .rooms .state_accessor .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) + .map_ok(PduEvent::into_state_event) .await - .map(|member_event| member_event.to_state_event()) .ok() } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 301b6e8d..233d180f 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -2,7 +2,7 @@ use axum::extract::State; use conduwuit::{ at, utils::{stream::TryTools, BoolExt}, - Err, Result, + Err, PduEvent, Result, }; use futures::TryStreamExt; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; @@ -39,10 +39,9 @@ pub(crate) async fn room_initial_sync_route( .rooms .state_accessor .room_state_full_pdus(room_id) - .await? 
- .into_iter() - .map(|pdu| pdu.to_state_event()) - .collect(); + .map_ok(PduEvent::into_state_event) + .try_collect() + .await?; let messages = PaginationChunk { start: events.last().map(at!(0)).as_ref().map(ToString::to_string), diff --git a/src/api/client/search.rs b/src/api/client/search.rs index e60bd26d..898dfc7f 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -7,7 +7,7 @@ use conduwuit::{ utils::{stream::ReadyExt, IterStream}, Err, PduEvent, Result, }; -use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt}; +use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ api::client::search::search_events::{ self, @@ -181,15 +181,15 @@ async fn category_room_events( } async fn procure_room_state(services: &Services, room_id: &RoomId) -> Result { - let state_map = services + let state = services .rooms .state_accessor - .room_state_full(room_id) + .room_state_full_pdus(room_id) + .map_ok(PduEvent::into_state_event) + .try_collect() .await?; - let state_events = state_map.values().map(PduEvent::to_state_event).collect(); - - Ok(state_events) + Ok(state) } async fn check_room_visible( diff --git a/src/api/client/state.rs b/src/api/client/state.rs index d00ee5e5..8555f88b 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,5 +1,6 @@ use axum::extract::State; use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, PduEvent, Result}; +use futures::TryStreamExt; use ruma::{ api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, events::{ @@ -82,11 +83,10 @@ pub(crate) async fn get_state_events_route( room_state: services .rooms .state_accessor - .room_state_full(&body.room_id) - .await? 
- .values() - .map(PduEvent::to_state_event) - .collect(), + .room_state_full_pdus(&body.room_id) + .map_ok(PduEvent::into_state_event) + .try_collect() + .await?, }) } @@ -133,7 +133,7 @@ pub(crate) async fn get_state_events_for_key_route( Ok(get_state_events_for_key::v3::Response { content: event_format.or(|| event.get_content_as_value()), - event: event_format.then(|| event.to_state_event_value()), + event: event_format.then(|| event.into_state_event_value()), }) } diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 7cca9616..cd4dfc90 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -28,7 +28,7 @@ use conduwuit_service::{ }; use futures::{ future::{join, join3, join4, join5, try_join, try_join4, OptionFuture}, - FutureExt, StreamExt, TryFutureExt, + FutureExt, StreamExt, TryFutureExt, TryStreamExt, }; use ruma::{ api::client::{ @@ -503,16 +503,20 @@ async fn handle_left_room( let mut left_state_events = Vec::new(); - let since_shortstatehash = services - .rooms - .user - .get_token_shortstatehash(room_id, since) - .await; + let since_shortstatehash = services.rooms.user.get_token_shortstatehash(room_id, since); - let since_state_ids = match since_shortstatehash { - | Ok(s) => services.rooms.state_accessor.state_full_ids(s).await?, - | Err(_) => HashMap::new(), - }; + let since_state_ids: HashMap<_, OwnedEventId> = since_shortstatehash + .map_ok(|since_shortstatehash| { + services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .map(Ok) + }) + .try_flatten_stream() + .try_collect() + .await + .unwrap_or_default(); let Ok(left_event_id): Result = services .rooms @@ -534,11 +538,12 @@ async fn handle_left_room( return Ok(None); }; - let mut left_state_ids = services + let mut left_state_ids: HashMap<_, _> = services .rooms .state_accessor .state_full_ids(left_shortstatehash) - .await?; + .collect() + .await; let leave_shortstatekey = services .rooms @@ -960,19 +965,18 @@ async fn 
calculate_state_initial( current_shortstatehash: ShortStateHash, witness: Option<&Witness>, ) -> Result { - let state_events = services + let (shortstatekeys, event_ids): (Vec<_>, Vec<_>) = services .rooms .state_accessor .state_full_ids(current_shortstatehash) - .await?; - - let shortstatekeys = state_events.keys().copied().stream(); + .unzip() + .await; let state_events = services .rooms .short - .multi_get_statekey_from_short(shortstatekeys) - .zip(state_events.values().cloned().stream()) + .multi_get_statekey_from_short(shortstatekeys.into_iter().stream()) + .zip(event_ids.into_iter().stream()) .ready_filter_map(|item| Some((item.0.ok()?, item.1))) .ready_filter_map(|((event_type, state_key), event_id)| { let lazy_load_enabled = filter.room.state.lazy_load_options.is_enabled() @@ -1036,17 +1040,19 @@ async fn calculate_state_incremental( let current_state_ids = services .rooms .state_accessor - .state_full_ids(current_shortstatehash); + .state_full_ids(current_shortstatehash) + .collect(); let since_state_ids = services .rooms .state_accessor - .state_full_ids(since_shortstatehash); + .state_full_ids(since_shortstatehash) + .collect(); let (current_state_ids, since_state_ids): ( HashMap<_, OwnedEventId>, HashMap<_, OwnedEventId>, - ) = try_join(current_state_ids, since_state_ids).await?; + ) = join(current_state_ids, since_state_ids).await; current_state_ids .iter() diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index a82e9309..b7967498 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -241,13 +241,15 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_accessor .state_full_ids(current_shortstatehash) - .await?; + .collect() + .await; - let since_state_ids = services + let since_state_ids: HashMap<_, _> = services .rooms .state_accessor .state_full_ids(since_shortstatehash) - .await?; + .collect() + .await; for (key, id) in current_state_ids { if since_state_ids.get(&key) != Some(&id) { diff --git 
a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 1c4f3504..66647f0e 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -748,13 +748,15 @@ async fn collect_e2ee<'a>( .rooms .state_accessor .state_full_ids(current_shortstatehash) - .await?; + .collect() + .await; - let since_state_ids = services + let since_state_ids: HashMap<_, _> = services .rooms .state_accessor .state_full_ids(since_shortstatehash) - .await?; + .collect() + .await; for (key, id) in current_state_ids { if since_state_ids.get(&key) != Some(&id) { diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index e62089b4..2b8a0eef 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -1,10 +1,10 @@ #![allow(deprecated)] -use std::{borrow::Borrow, collections::HashMap}; +use std::borrow::Borrow; use axum::extract::State; use conduwuit::{ - err, + at, err, pdu::gen_event_id_canonical_json, utils::stream::{IterStream, TryBroadbandExt}, warn, Err, Result, @@ -211,14 +211,16 @@ async fn create_join_event( drop(mutex_lock); - let state_ids: HashMap<_, OwnedEventId> = services + let state_ids: Vec = services .rooms .state_accessor .state_full_ids(shortstatehash) - .await?; + .map(at!(1)) + .collect() + .await; let state = state_ids - .values() + .iter() .try_stream() .broad_and_then(|event_id| services.rooms.timeline.get_pdu_json(event_id)) .broad_and_then(|pdu| { @@ -231,7 +233,7 @@ async fn create_join_event( .boxed() .await?; - let starting_events = state_ids.values().map(Borrow::borrow); + let starting_events = state_ids.iter().map(Borrow::borrow); let auth_chain = services .rooms .auth_chain diff --git a/src/api/server/state.rs b/src/api/server/state.rs index 42f7e538..eab1f138 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{err, result::LogErr, utils::IterStream, Result}; +use conduwuit::{at, err, utils::IterStream, 
Result}; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{api::federation::event::get_room_state, OwnedEventId}; @@ -35,11 +35,9 @@ pub(crate) async fn get_room_state_route( .rooms .state_accessor .state_full_ids(shortstatehash) - .await - .log_err() - .map_err(|_| err!(Request(NotFound("PDU state IDs not found."))))? - .into_values() - .collect(); + .map(at!(1)) + .collect() + .await; let pdus = state_ids .iter() diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 186ef399..4973dd3a 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{err, Result}; +use conduwuit::{at, err, Result}; use futures::StreamExt; use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; @@ -36,10 +36,9 @@ pub(crate) async fn get_room_state_ids_route( .rooms .state_accessor .state_full_ids(shortstatehash) - .await - .map_err(|_| err!(Request(NotFound("State ids not found"))))? 
- .into_values() - .collect(); + .map(at!(1)) + .collect() + .await; let auth_chain_ids = services .rooms diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 8e1045db..7d2fb1d6 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -116,7 +116,7 @@ pub fn to_message_like_event(&self) -> Raw { #[must_use] #[implement(super::Pdu)] -pub fn to_state_event_value(&self) -> JsonValue { +pub fn into_state_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -127,7 +127,7 @@ pub fn to_state_event_value(&self) -> JsonValue { "state_key": self.state_key, }); - if let Some(unsigned) = &self.unsigned { + if let Some(unsigned) = self.unsigned { json["unsigned"] = json!(unsigned); } @@ -136,8 +136,8 @@ pub fn to_state_event_value(&self) -> JsonValue { #[must_use] #[implement(super::Pdu)] -pub fn to_state_event(&self) -> Raw { - serde_json::from_value(self.to_state_event_value()).expect("Raw::from_value always works") +pub fn into_state_event(self) -> Raw { + serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") } #[must_use] @@ -188,7 +188,7 @@ pub fn to_stripped_spacechild_state_event(&self) -> Raw Raw> { +pub fn into_member_event(self) -> Raw> { let mut json = json!({ "content": self.content, "type": self.kind, @@ -200,7 +200,7 @@ pub fn to_member_event(&self) -> Raw> { "state_key": self.state_key, }); - if let Some(unsigned) = &self.unsigned { + if let Some(unsigned) = self.unsigned { json["unsigned"] = json!(unsigned); } diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 0526d31c..1fd91ac6 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -33,11 +33,12 @@ pub async fn resolve_state( .await .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}"))))?; - let current_state_ids = self + let current_state_ids: HashMap<_, 
_> = self .services .state_accessor .state_full_ids(current_sstatehash) - .await?; + .collect() + .await; let fork_states = [current_state_ids, incoming_state]; let auth_chain_sets: Vec> = fork_states diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 9e7f8d2a..7ef047ab 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -31,15 +31,12 @@ pub(super) async fn state_at_incoming_degree_one( return Ok(None); }; - let Ok(mut state) = self + let mut state: HashMap<_, _> = self .services .state_accessor .state_full_ids(prev_event_sstatehash) - .await - .log_err() - else { - return Ok(None); - }; + .collect() + .await; debug!("Using cached state"); let prev_pdu = self @@ -103,14 +100,12 @@ pub(super) async fn state_at_incoming_resolved( let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); for (sstatehash, prev_event) in extremity_sstatehashes { - let Ok(mut leaf_state) = self + let mut leaf_state: HashMap<_, _> = self .services .state_accessor .state_full_ids(sstatehash) - .await - else { - continue; - }; + .collect() + .await; if let Some(state_key) = &prev_event.state_key { let shortstatekey = self diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index d60c4c9e..d12a01ab 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -624,8 +624,8 @@ impl Service { .services .state_accessor .state_full_ids(current_shortstatehash) - .await - .map_err(|e| err!(Database("State in space not found: {e}")))?; + .collect() + .await; let mut children_pdus = Vec::with_capacity(state.len()); for (key, id) in state { diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 3d87534b..0f5520bb 100644 --- a/src/service/rooms/state_accessor/mod.rs 
+++ b/src/service/rooms/state_accessor/mod.rs @@ -1,6 +1,5 @@ use std::{ borrow::Borrow, - collections::HashMap, fmt::Write, sync::{Arc, Mutex as StdMutex, Mutex}, }; @@ -17,7 +16,7 @@ use conduwuit::{ Err, Error, PduEvent, Result, }; use database::{Deserialized, Map}; -use futures::{FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -143,83 +142,74 @@ impl crate::Service for Service { } impl Service { - pub async fn state_full( + pub fn state_full( &self, shortstatehash: ShortStateHash, - ) -> Result> { - let state = self - .state_full_pdus(shortstatehash) - .await? - .into_iter() - .filter_map(|pdu| Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu))) - .collect(); - - Ok(state) + ) -> impl Stream + Send + '_ { + self.state_full_pdus(shortstatehash) + .ready_filter_map(|pdu| { + Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) + }) } - pub async fn state_full_pdus(&self, shortstatehash: ShortStateHash) -> Result> { - let short_ids = self.state_full_shortids(shortstatehash).await?; + pub fn state_full_pdus( + &self, + shortstatehash: ShortStateHash, + ) -> impl Stream + Send + '_ { + let short_ids = self + .state_full_shortids(shortstatehash) + .map(|result| result.expect("missing shortstatehash")) + .map(Vec::into_iter) + .map(|iter| iter.map(at!(1))) + .map(IterStream::stream) + .flatten_stream() + .boxed(); - let full_pdus = self - .services + self.services .short - .multi_get_eventid_from_short(short_ids.into_iter().map(at!(1)).stream()) + .multi_get_eventid_from_short(short_ids) .ready_filter_map(Result::ok) - .broad_filter_map(|event_id: OwnedEventId| async move { + .broad_filter_map(move |event_id: OwnedEventId| async move { self.services.timeline.get_pdu(&event_id).await.ok() }) - .collect() - .await; - - Ok(full_pdus) } /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for 
the given state_hash. #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_full_ids( - &self, + pub fn state_full_ids<'a, Id>( + &'a self, shortstatehash: ShortStateHash, - ) -> Result> + ) -> impl Stream + Send + 'a where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned, + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, ::Owned: Borrow, { - let short_ids = self.state_full_shortids(shortstatehash).await?; - - let full_ids = self - .services - .short - .multi_get_eventid_from_short(short_ids.iter().map(at!(1)).stream()) - .zip(short_ids.iter().stream().map(at!(0))) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) - .collect() - .boxed() - .await; - - Ok(full_ids) - } - - #[inline] - pub async fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { let shortids = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database("Missing state IDs: {e}")))? 
- .pop() - .expect("there is always one layer") - .full_state - .iter() - .copied() - .map(parse_compressed_state_event) - .collect(); + .state_full_shortids(shortstatehash) + .map(|result| result.expect("missing shortstatehash")) + .map(|vec| vec.into_iter().unzip()) + .boxed() + .shared(); - Ok(shortids) + let shortstatekeys = shortids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = shortids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(shortstatekeys) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) } /// Returns a single EventId from `room_id` with key (`event_type`, @@ -264,6 +254,28 @@ impl Service { .await } + #[inline] + pub async fn state_full_shortids( + &self, + shortstatehash: ShortStateHash, + ) -> Result> { + let shortids = self + .services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .await + .map_err(|e| err!(Database("Missing state IDs: {e}")))? + .pop() + .expect("there is always one layer") + .full_state + .iter() + .copied() + .map(parse_compressed_state_event) + .collect(); + + Ok(shortids) + } + /// Returns a single PDU from `room_id` with key (`event_type`, /// `state_key`). pub async fn state_get( @@ -479,27 +491,30 @@ impl Service { /// Returns the full room state. 
#[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result> { + pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_full(shortstatehash)) - .map_err(|e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .await + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() } /// Returns the full room state pdus #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_full_pdus(&self, room_id: &RoomId) -> Result> { + pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_full_pdus(shortstatehash)) - .map_err(|e| err!(Database("Missing state pdus for {room_id:?}: {e:?}"))) - .await + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() } /// Returns a single EventId from `room_id` with key (`event_type`, From 329925c661d6b166dfd6b73a94f7f076cf1ed9bc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 26 Jan 2025 04:46:10 +0000 Subject: [PATCH 0617/1248] additional info level span adjustments Signed-off-by: Jason Volk --- src/api/server/publicrooms.rs | 4 ++-- src/api/server/send.rs | 8 +++++--- src/core/debug.rs | 7 +++++++ src/service/federation/execute.rs | 7 ++++--- src/service/rooms/event_handler/handle_incoming_pdu.rs | 4 ++-- src/service/rooms/event_handler/handle_prev_pdu.rs | 5 +++-- src/service/rooms/spaces/mod.rs | 2 +- 7 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index 
2c09385b..ff74574a 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -13,7 +13,7 @@ use crate::{Error, Result, Ruma}; /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[tracing::instrument(skip_all, fields(%client), name = "publicrooms")] +#[tracing::instrument(name = "publicrooms", level = "debug", skip_all, fields(%client))] pub(crate) async fn get_public_rooms_filtered_route( State(services): State, InsecureClientIp(client): InsecureClientIp, @@ -51,7 +51,7 @@ pub(crate) async fn get_public_rooms_filtered_route( /// # `GET /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[tracing::instrument(skip_all, fields(%client), "publicrooms")] +#[tracing::instrument(name = "publicrooms", level = "debug", skip_all, fields(%client))] pub(crate) async fn get_public_rooms_route( State(services): State, InsecureClientIp(client): InsecureClientIp, diff --git a/src/api/server/send.rs b/src/api/server/send.rs index f4903447..2e615a0c 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -3,7 +3,9 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, debug_warn, err, error, + debug, + debug::INFO_SPAN_LEVEL, + debug_warn, err, error, result::LogErr, trace, utils::{ @@ -49,8 +51,8 @@ type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); /// /// Push EDUs and PDUs to this server. #[tracing::instrument( - name = "send", - level = "debug", + name = "txn", + level = INFO_SPAN_LEVEL, skip_all, fields( %client, diff --git a/src/core/debug.rs b/src/core/debug.rs index aebfc833..ca0f2f2e 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -4,6 +4,7 @@ use std::{any::Any, panic}; // Export debug proc_macros pub use conduwuit_macros::recursion_depth; +use tracing::Level; // Export all of the ancillary tools from here as well. 
pub use crate::{result::DebugInspect, utils::debug::*}; @@ -51,6 +52,12 @@ macro_rules! debug_info { } } +pub const INFO_SPAN_LEVEL: Level = if cfg!(debug_assertions) { + Level::INFO +} else { + Level::DEBUG +}; + pub fn set_panic_trap() { let next = panic::take_hook(); panic::set_hook(Box::new(move |info| { diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 27d98968..3146bb8a 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -2,8 +2,8 @@ use std::{fmt::Debug, mem}; use bytes::Bytes; use conduwuit::{ - debug, debug_error, debug_warn, err, error::inspect_debug_log, implement, trace, - utils::string::EMPTY, Err, Error, Result, + debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, error::inspect_debug_log, + implement, trace, utils::string::EMPTY, Err, Error, Result, }; use http::{header::AUTHORIZATION, HeaderValue}; use ipaddress::IPAddress; @@ -48,7 +48,8 @@ where #[implement(super::Service)] #[tracing::instrument( - level = "debug" + name = "fed", + level = INFO_SPAN_LEVEL, skip(self, client, request), )] pub async fn execute_on( diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 7db71961..31c7762d 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -3,7 +3,7 @@ use std::{ time::Instant, }; -use conduwuit::{debug, err, implement, warn, Err, Result}; +use conduwuit::{debug, debug::INFO_SPAN_LEVEL, err, implement, warn, Err, Result}; use futures::{ future::{try_join5, OptionFuture}, FutureExt, @@ -42,7 +42,7 @@ use crate::rooms::timeline::RawPduId; #[implement(super::Service)] #[tracing::instrument( name = "pdu", - level = "debug", + level = INFO_SPAN_LEVEL, skip_all, fields(%room_id, %event_id), )] diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 
32ab505f..f911f1fd 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -5,7 +5,8 @@ use std::{ }; use conduwuit::{ - debug, implement, utils::continue_exponential_backoff_secs, Err, PduEvent, Result, + debug, debug::INFO_SPAN_LEVEL, implement, utils::continue_exponential_backoff_secs, Err, + PduEvent, Result, }; use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; @@ -14,7 +15,7 @@ use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; #[allow(clippy::too_many_arguments)] #[tracing::instrument( name = "prev", - level = "debug", + level = INFO_SPAN_LEVEL, skip_all, fields(%prev_id), )] diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index d12a01ab..1ee2727c 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -268,7 +268,7 @@ impl Service { } /// Gets the summary of a space using solely federation - #[tracing::instrument(skip(self))] + #[tracing::instrument(level = "debug", skip(self))] async fn get_summary_and_children_federation( &self, current_room: &OwnedRoomId, From 936161d89ece2474dcba5424adaa159fc4e97b03 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 01:49:10 +0000 Subject: [PATCH 0618/1248] reduce bottommost compression underrides Signed-off-by: Jason Volk --- src/database/engine/descriptor.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index c735f402..934ef831 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -83,7 +83,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { write_size: 1024 * 1024 * 32, cache_shards: 128, compression_level: -3, - bottommost_level: Some(4), + bottommost_level: Some(-1), compressed_index: true, ..BASE }; @@ -94,8 +94,8 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { 
level_size: 1024 * 1024 * 32, file_size: 1024 * 1024 * 2, cache_shards: 128, - compression_level: -1, - bottommost_level: Some(6), + compression_level: -2, + bottommost_level: Some(-1), compression_shape: [0, 0, 1, 1, 1, 1, 1], compressed_index: false, ..BASE @@ -111,7 +111,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { block_size: 512, cache_shards: 64, compression_level: -4, - bottommost_level: Some(1), + bottommost_level: Some(-1), compression_shape: [0, 0, 0, 0, 0, 1, 1], compressed_index: false, ..RANDOM @@ -126,8 +126,8 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { block_size: 512, cache_shards: 64, block_index_hashing: Some(false), - compression_level: -2, - bottommost_level: Some(4), + compression_level: -4, + bottommost_level: Some(-2), compression_shape: [0, 0, 0, 0, 1, 1, 1], compressed_index: false, ..SEQUENTIAL From eb7d893c8675f955fa770c8ae6f1c32a2394284c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 06:36:14 +0000 Subject: [PATCH 0619/1248] fix malloc_conf feature-awareness Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 6bdf8b33..57143e85 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -8,6 +8,7 @@ use std::{ }; use arrayvec::ArrayVec; +use const_str::concat_bytes; use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; @@ -20,18 +21,24 @@ use crate::{ #[cfg(feature = "jemalloc_conf")] #[unsafe(no_mangle)] -pub static malloc_conf: &[u8] = b"\ -metadata_thp:always\ -,percpu_arena:percpu\ -,background_thread:true\ -,max_background_threads:-1\ -,lg_extent_max_active_fit:4\ -,oversize_threshold:16777216\ -,tcache_max:2097152\ -,dirty_decay_ms:16000\ -,muzzy_decay_ms:144000\ -,prof_active:false\ -\0"; +pub static malloc_conf: &[u8] = concat_bytes!( + "lg_extent_max_active_fit:4", + 
",oversize_threshold:16777216", + ",tcache_max:2097152", + ",dirty_decay_ms:16000", + ",muzzy_decay_ms:144000", + ",percpu_arena:percpu", + ",metadata_thp:always", + ",background_thread:true", + ",max_background_threads:-1", + MALLOC_CONF_PROF, + 0 +); + +#[cfg(all(feature = "jemalloc_conf", feature = "jemalloc_prof"))] +const MALLOC_CONF_PROF: &str = ",prof_active:false"; +#[cfg(all(feature = "jemalloc_conf", not(feature = "jemalloc_prof")))] +const MALLOC_CONF_PROF: &str = ""; #[global_allocator] static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; From 50acfe783289e6b9b8deb20b3c34f32653f61f11 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 08:39:44 +0000 Subject: [PATCH 0620/1248] flatten auth chain iterations Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 7 +- src/api/server/event_auth.rs | 4 +- src/api/server/send_join.rs | 2 - src/api/server/state.rs | 2 - src/api/server/state_ids.rs | 8 +- src/service/rooms/auth_chain/mod.rs | 154 +++++++++--------- .../rooms/event_handler/resolve_state.rs | 15 +- .../rooms/event_handler/state_at_incoming.rs | 9 +- 8 files changed, 90 insertions(+), 111 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index cd892ded..4e0ce2e3 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,8 +6,9 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, PduId, - RawPduId, Result, + debug_error, err, info, trace, utils, + utils::{stream::ReadyExt, string::EMPTY}, + warn, Error, PduEvent, PduId, RawPduId, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ @@ -54,7 +55,7 @@ pub(super) async fn get_auth_chain( .rooms .auth_chain .event_ids_iter(room_id, once(event_id.as_ref())) - .await? 
+ .ready_filter_map(Result::ok) .count() .await; diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 93e867a0..49dcd718 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{Error, Result}; +use conduwuit::{utils::stream::ReadyExt, Error, Result}; use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, @@ -48,7 +48,7 @@ pub(crate) async fn get_event_authorization_route( .rooms .auth_chain .event_ids_iter(room_id, once(body.event_id.borrow())) - .await? + .ready_filter_map(Result::ok) .filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() }) .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) .collect() diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 2b8a0eef..e81d7672 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -238,8 +238,6 @@ async fn create_join_event( .rooms .auth_chain .event_ids_iter(room_id, starting_events) - .await? - .map(Ok) .broad_and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) diff --git a/src/api/server/state.rs b/src/api/server/state.rs index eab1f138..b16e61a0 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -56,8 +56,6 @@ pub(crate) async fn get_room_state_route( .rooms .auth_chain .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .await? 
- .map(Ok) .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await }) .and_then(|pdu| { services diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 4973dd3a..7d0440bf 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -2,7 +2,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; use conduwuit::{at, err, Result}; -use futures::StreamExt; +use futures::{StreamExt, TryStreamExt}; use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; use super::AccessCheck; @@ -44,10 +44,8 @@ pub(crate) async fn get_room_state_ids_route( .rooms .auth_chain .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .await? - .map(|id| (*id).to_owned()) - .collect() - .await; + .try_collect() + .await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids, pdu_ids }) } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index df2663b2..0ff96846 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -4,6 +4,7 @@ use std::{ collections::{BTreeSet, HashSet, VecDeque}, fmt::Debug, sync::Arc, + time::Instant, }; use conduwuit::{ @@ -14,7 +15,7 @@ use conduwuit::{ }, validated, warn, Err, Result, }; -use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; @@ -30,6 +31,8 @@ struct Services { timeline: Dep, } +type Bucket<'a> = BTreeSet<(u64, &'a EventId)>; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -45,42 +48,22 @@ impl crate::Service for Service { } #[implement(Service)] -pub async fn event_ids_iter<'a, I>( +pub fn event_ids_iter<'a, I>( &'a self, - room_id: &RoomId, + room_id: &'a RoomId, starting_events: I, -) -> Result + Send + '_> +) -> impl Stream> + Send + 'a where I: Iterator + Clone + Debug + 
ExactSizeIterator + Send + 'a, { - let stream = self - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .stream(); - - Ok(stream) -} - -#[implement(Service)] -pub async fn get_event_ids<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, -) -> Result> -where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, -{ - let chain = self.get_auth_chain(room_id, starting_events).await?; - let event_ids = self - .services - .short - .multi_get_eventid_from_short(chain.into_iter().stream()) - .ready_filter_map(Result::ok) - .collect() - .await; - - Ok(event_ids) + self.get_auth_chain(room_id, starting_events) + .map_ok(|chain| { + self.services + .short + .multi_get_eventid_from_short(chain.into_iter().stream()) + .ready_filter(Result::is_ok) + }) + .try_flatten_stream() } #[implement(Service)] @@ -94,9 +77,9 @@ where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? - const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); + const BUCKET: Bucket<'_> = BTreeSet::new(); - let started = std::time::Instant::now(); + let started = Instant::now(); let mut starting_ids = self .services .short @@ -120,53 +103,7 @@ where let full_auth_chain: Vec = buckets .into_iter() .try_stream() - .broad_and_then(|chunk| async move { - let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - - if chunk_key.is_empty() { - return Ok(Vec::new()); - } - - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - return Ok(cached.to_vec()); - } - - let chunk_cache: Vec<_> = chunk - .into_iter() - .try_stream() - .broad_and_then(|(shortid, event_id)| async move { - if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { - return Ok(cached.to_vec()); - } - - let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); - debug!( - ?event_id, - elapsed = 
?started.elapsed(), - "Cache missed event" - ); - - Ok(auth_chain) - }) - .try_collect() - .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) - .map_ok(|mut chunk_cache: Vec<_>| { - chunk_cache.sort_unstable(); - chunk_cache.dedup(); - chunk_cache - }) - .await?; - - self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); - debug!( - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed chunk", - ); - - Ok(chunk_cache) - }) + .broad_and_then(|chunk| self.get_auth_chain_outer(room_id, started, chunk)) .try_collect() .map_ok(|auth_chain: Vec<_>| auth_chain.into_iter().flatten().collect()) .map_ok(|mut full_auth_chain: Vec<_>| { @@ -174,6 +111,7 @@ where full_auth_chain.dedup(); full_auth_chain }) + .boxed() .await?; debug!( @@ -185,6 +123,60 @@ where Ok(full_auth_chain) } +#[implement(Service)] +async fn get_auth_chain_outer( + &self, + room_id: &RoomId, + started: Instant, + chunk: Bucket<'_>, +) -> Result> { + let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); + + if chunk_key.is_empty() { + return Ok(Vec::new()); + } + + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { + return Ok(cached.to_vec()); + } + + let chunk_cache: Vec<_> = chunk + .into_iter() + .try_stream() + .broad_and_then(|(shortid, event_id)| async move { + if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { + return Ok(cached.to_vec()); + } + + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; + self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); + debug!( + ?event_id, + elapsed = ?started.elapsed(), + "Cache missed event" + ); + + Ok(auth_chain) + }) + .try_collect() + .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) + .map_ok(|mut chunk_cache: Vec<_>| { + chunk_cache.sort_unstable(); + chunk_cache.dedup(); + chunk_cache + }) + .await?; + + self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); + debug!( + 
chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed chunk", + ); + + Ok(chunk_cache) +} + #[implement(Service)] #[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] async fn get_auth_chain_inner( diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 1fd91ac6..03f7e822 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -44,18 +44,11 @@ pub async fn resolve_state( let auth_chain_sets: Vec> = fork_states .iter() .try_stream() - .wide_and_then(|state| async move { - let starting_events = state.values().map(Borrow::borrow); - - let auth_chain = self - .services + .wide_and_then(|state| { + self.services .auth_chain - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .collect(); - - Ok(auth_chain) + .event_ids_iter(room_id, state.values().map(Borrow::borrow)) + .try_collect() }) .try_collect() .await?; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 7ef047ab..8730232a 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -10,7 +10,7 @@ use conduwuit::{ utils::stream::{BroadbandExt, IterStream}, PduEvent, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; // TODO: if we know the prev_events of the incoming event we can avoid the @@ -140,10 +140,9 @@ pub(super) async fn state_at_incoming_resolved( let auth_chain: HashSet = self .services .auth_chain - .get_event_ids(room_id, starting_events.into_iter()) - .await? 
- .into_iter() - .collect(); + .event_ids_iter(room_id, starting_events.into_iter()) + .try_collect() + .await?; auth_chain_sets.push(auth_chain); fork_states.push(state); From 3c8376d897e6a1b9b6b61f5ada05b2afec1ab937 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 23:07:12 +0000 Subject: [PATCH 0621/1248] parallelize state-res pre-gathering Signed-off-by: Jason Volk --- .../rooms/event_handler/resolve_state.rs | 63 +++---- .../rooms/event_handler/state_at_incoming.rs | 173 +++++++++--------- 2 files changed, 123 insertions(+), 113 deletions(-) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 03f7e822..c3de5f2f 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,11 +5,11 @@ use std::{ }; use conduwuit::{ - debug, err, implement, + err, implement, trace, utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, - Result, + Error, Result, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ state_res::{self, StateMap}, OwnedEventId, RoomId, RoomVersionId, @@ -25,13 +25,13 @@ pub async fn resolve_state( room_version_id: &RoomVersionId, incoming_state: HashMap, ) -> Result>> { - debug!("Loading current room state ids"); + trace!("Loading current room state ids"); let current_sstatehash = self .services .state .get_room_shortstatehash(room_id) - .await - .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}"))))?; + .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}")))) + .await?; let current_state_ids: HashMap<_, _> = self .services @@ -40,8 +40,9 @@ pub async fn resolve_state( .collect() .await; + trace!("Loading fork states"); let fork_states = [current_state_ids, incoming_state]; - let auth_chain_sets: Vec> = fork_states + let auth_chain_sets = fork_states 
.iter() .try_stream() .wide_and_then(|state| { @@ -50,36 +51,33 @@ pub async fn resolve_state( .event_ids_iter(room_id, state.values().map(Borrow::borrow)) .try_collect() }) - .try_collect() - .await?; + .try_collect::>>(); - debug!("Loading fork states"); - let fork_states: Vec> = fork_states - .into_iter() + let fork_states = fork_states + .iter() .stream() - .wide_then(|fork_state| async move { + .wide_then(|fork_state| { let shortstatekeys = fork_state.keys().copied().stream(); - - let event_ids = fork_state.values().cloned().stream().boxed(); - + let event_ids = fork_state.values().cloned().stream(); self.services .short .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) .collect() - .await }) - .collect() - .await; + .map(Ok::<_, Error>) + .try_collect::>>(); - debug!("Resolving state"); + let (fork_states, auth_chain_sets) = try_join(fork_states, auth_chain_sets).await?; + + trace!("Resolving state"); let state = self - .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) .boxed() .await?; - debug!("State resolution done."); + trace!("State resolution done."); let state_events: Vec<_> = state .iter() .stream() @@ -92,7 +90,7 @@ pub async fn resolve_state( .collect() .await; - debug!("Compressing state..."); + trace!("Compressing state..."); let new_room_state: HashSet<_> = self .services .state_compressor @@ -109,20 +107,23 @@ pub async fn resolve_state( #[implement(super::Service)] #[tracing::instrument(name = "ruma", level = "debug", skip_all)] -pub async fn state_resolution( - &self, - room_version: &RoomVersionId, - state_sets: &[StateMap], - auth_chain_sets: &[HashSet], -) -> Result> { +pub async fn state_resolution<'a, StateSets>( + &'a self, + room_version: &'a RoomVersionId, + state_sets: StateSets, + auth_chain_sets: &'a [HashSet], +) -> Result> +where + StateSets: Iterator> + Clone + Send, +{ 
state_res::resolve( room_version, - state_sets.iter(), + state_sets, auth_chain_sets, &|event_id| self.event_fetch(event_id), &|event_id| self.event_exists(event_id), automatic_width(), ) - .await .map_err(|e| err!(error!("State resolution failed: {e:?}"))) + .await } diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8730232a..8ae6354c 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -1,18 +1,20 @@ use std::{ borrow::Borrow, collections::{HashMap, HashSet}, + iter::Iterator, sync::Arc, }; use conduwuit::{ - debug, err, implement, - result::LogErr, - utils::stream::{BroadbandExt, IterStream}, + debug, err, implement, trace, + utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, PduEvent, Result, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; +use crate::rooms::short::ShortStateHash; + // TODO: if we know the prev_events of the incoming event we can avoid the #[implement(super::Service)] // request and build the state from a known point and resolve if > 1 prev_event @@ -70,86 +72,44 @@ pub(super) async fn state_at_incoming_resolved( room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result>> { - debug!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len()); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let Ok(prev_event) = self.services.timeline.get_pdu(prev_eventid).await else { - okay = false; - break; - }; - - let Ok(sstatehash) = self - .services - .state_accessor - .pdu_shortstatehash(prev_eventid) - .await - else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, 
prev_event); - } - - if !okay { + trace!("Calculating extremity statehashes..."); + let Ok(extremity_sstatehashes) = incoming_pdu + .prev_events + .iter() + .try_stream() + .broad_and_then(|prev_eventid| { + self.services + .timeline + .get_pdu(prev_eventid) + .map_ok(move |prev_event| (prev_eventid, prev_event)) + }) + .broad_and_then(|(prev_eventid, prev_event)| { + self.services + .state_accessor + .pdu_shortstatehash(prev_eventid) + .map_ok(move |sstatehash| (sstatehash, prev_event)) + }) + .try_collect::>() + .await + else { return Ok(None); - } + }; - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: HashMap<_, _> = self - .services - .state_accessor - .state_full_ids(sstatehash) - .collect() - .await; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) - .await; - - let event_id = &prev_event.event_id; - leaf_state.insert(shortstatekey, event_id.clone()); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); - for (k, id) in &leaf_state { - if let Ok((ty, st_key)) = self - .services - .short - .get_statekey_from_short(*k) - .await - .log_err() - { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } - - starting_events.push(id.borrow()); - } - - let auth_chain: HashSet = self - .services - .auth_chain - .event_ids_iter(room_id, starting_events.into_iter()) + trace!("Calculating fork states..."); + let (fork_states, auth_chain_sets): (Vec>, Vec>) = + extremity_sstatehashes + .into_iter() + .try_stream() + .wide_and_then(|(sstatehash, 
prev_event)| { + self.state_at_incoming_fork(room_id, sstatehash, prev_event) + }) .try_collect() + .map_ok(Vec::into_iter) + .map_ok(Iterator::unzip) .await?; - auth_chain_sets.push(auth_chain); - fork_states.push(state); - } - let Ok(new_state) = self - .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) .boxed() .await else { @@ -157,16 +117,65 @@ pub(super) async fn state_at_incoming_resolved( }; new_state - .iter() + .into_iter() .stream() - .broad_then(|((event_type, state_key), event_id)| { + .broad_then(|((event_type, state_key), event_id)| async move { self.services .short - .get_or_create_shortstatekey(event_type, state_key) - .map(move |shortstatekey| (shortstatekey, event_id.clone())) + .get_or_create_shortstatekey(&event_type, &state_key) + .map(move |shortstatekey| (shortstatekey, event_id)) + .await }) .collect() .map(Some) .map(Ok) .await } + +#[implement(super::Service)] +async fn state_at_incoming_fork( + &self, + room_id: &RoomId, + sstatehash: ShortStateHash, + prev_event: PduEvent, +) -> Result<(StateMap, HashSet)> { + let mut leaf_state: HashMap<_, _> = self + .services + .state_accessor + .state_full_ids(sstatehash) + .collect() + .await; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) + .await; + + let event_id = &prev_event.event_id; + leaf_state.insert(shortstatekey, event_id.clone()); + // Now it's the state after the pdu + } + + let auth_chain = self + .services + .auth_chain + .event_ids_iter(room_id, leaf_state.values().map(Borrow::borrow)) + .try_collect(); + + let fork_state = leaf_state + .iter() + .stream() + .broad_then(|(k, id)| { + self.services + .short + .get_statekey_from_short(*k) + .map_ok(|(ty, sk)| ((ty, sk), id.clone())) + }) + .ready_filter_map(Result::ok) + .collect() + .map(Ok); + + 
try_join(fork_state, auth_chain).await +} From 31c2968bb29e7447e56531333fb330da4ac08ede Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 21:10:33 +0000 Subject: [PATCH 0622/1248] move db files command w/ filter args; misc related cleanup Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 66 +++++++++++++++++++++++++++++------- src/admin/debug/mod.rs | 8 +++++ src/admin/server/commands.rs | 15 +++----- src/admin/server/mod.rs | 3 -- src/database/engine/files.rs | 35 +++++-------------- src/service/globals/data.rs | 3 -- 6 files changed, 75 insertions(+), 55 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 4e0ce2e3..dcf9879c 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -7,7 +7,10 @@ use std::{ use conduwuit::{ debug_error, err, info, trace, utils, - utils::{stream::ReadyExt, string::EMPTY}, + utils::{ + stream::{IterStream, ReadyExt}, + string::EMPTY, + }, warn, Error, PduEvent, PduId, RawPduId, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; @@ -640,6 +643,7 @@ pub(super) async fn force_set_room_state_from_server( room_id: room_id.clone().into(), event_id: first_pdu.event_id.clone(), }) + .boxed() .await?; for pdu in remote_state_response.pdus.clone() { @@ -648,6 +652,7 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .parse_incoming_pdu(&pdu) + .boxed() .await { | Ok(t) => t, @@ -711,6 +716,7 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .resolve_state(&room_id, &room_version, state) + .boxed() .await?; info!("Forcing new room state"); @@ -946,21 +952,57 @@ pub(super) async fn database_stats( property: Option, map: Option, ) -> Result { - let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); let map_name = map.as_ref().map_or(EMPTY, String::as_str); + let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); + self.services + .db + .iter() + .filter(|(&name, _)| 
map_name.is_empty() || map_name == name) + .try_stream() + .try_for_each(|(&name, map)| { + let res = map.property(&property).expect("invalid property"); + writeln!(self, "##### {name}:\n```\n{}\n```", res.trim()) + }) + .await?; - let mut out = String::new(); - for (&name, map) in self.services.db.iter() { - if !map_name.is_empty() && map_name != name { - continue; - } + Ok(RoomMessageEventContent::notice_plain("")) +} - let res = map.property(&property)?; - let res = res.trim(); - writeln!(out, "##### {name}:\n```\n{res}\n```")?; - } +#[admin_command] +pub(super) async fn database_files( + &self, + map: Option, + level: Option, +) -> Result { + let mut files: Vec<_> = self.services.db.db.file_list().collect::>()?; - Ok(RoomMessageEventContent::notice_markdown(out)) + files.sort_by_key(|f| f.name.clone()); + + writeln!(self, "| lev | sst | keys | dels | size | column |").await?; + writeln!(self, "| ---: | :--- | ---: | ---: | ---: | :--- |").await?; + files + .into_iter() + .filter(|file| { + map.as_deref() + .is_none_or(|map| map == file.column_family_name) + }) + .filter(|file| level.as_ref().is_none_or(|&level| level == file.level)) + .try_stream() + .try_for_each(|file| { + writeln!( + self, + "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", + file.level, + file.name, + file.num_entries, + file.num_deletions, + file.size, + file.column_family_name, + ) + }) + .await?; + + Ok(RoomMessageEventContent::notice_plain("")) } #[admin_command] diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 07f7296b..db04ccf4 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -226,6 +226,14 @@ pub(super) enum DebugCommand { /// - Trim memory usage TrimMemory, + /// - List database files + DatabaseFiles { + map: Option, + + #[arg(long)] + level: Option, + }, + /// - Developer test stubs #[command(subcommand)] #[allow(non_snake_case)] diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 910dce6e..d4cfa7d5 100644 --- 
a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -92,7 +92,7 @@ pub(super) async fn clear_caches(&self) -> Result { #[admin_command] pub(super) async fn list_backups(&self) -> Result { - let result = self.services.globals.db.backup_list()?; + let result = self.services.db.db.backup_list()?; if result.is_empty() { Ok(RoomMessageEventContent::text_plain("No backups found.")) @@ -103,31 +103,24 @@ pub(super) async fn list_backups(&self) -> Result { #[admin_command] pub(super) async fn backup_database(&self) -> Result { - let globals = Arc::clone(&self.services.globals); + let db = Arc::clone(&self.services.db); let mut result = self .services .server .runtime() - .spawn_blocking(move || match globals.db.backup() { + .spawn_blocking(move || match db.db.backup() { | Ok(()) => String::new(), | Err(e) => e.to_string(), }) .await?; if result.is_empty() { - result = self.services.globals.db.backup_list()?; + result = self.services.db.db.backup_list()?; } Ok(RoomMessageEventContent::notice_markdown(result)) } -#[admin_command] -pub(super) async fn list_database_files(&self) -> Result { - let result = self.services.globals.db.file_list()?; - - Ok(RoomMessageEventContent::notice_markdown(result)) -} - #[admin_command] pub(super) async fn admin_notice(&self, message: Vec) -> Result { let message = message.join(" "); diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 3f3d6c5e..60615365 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -46,9 +46,6 @@ pub(super) enum ServerCommand { /// - List database backups ListBackups, - /// - List database files - ListDatabaseFiles, - /// - Send a message to the admin room. 
AdminNotice { message: Vec, diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs index f603c57b..33d6fdc4 100644 --- a/src/database/engine/files.rs +++ b/src/database/engine/files.rs @@ -1,32 +1,15 @@ -use std::fmt::Write; - use conduwuit::{implement, Result}; +use rocksdb::LiveFile as SstFile; use super::Engine; +use crate::util::map_err; #[implement(Engine)] -pub fn file_list(&self) -> Result { - match self.db.live_files() { - | Err(e) => Ok(String::from(e)), - | Ok(mut files) => { - files.sort_by_key(|f| f.name.clone()); - let mut res = String::new(); - writeln!(res, "| lev | sst | keys | dels | size | column |")?; - writeln!(res, "| ---: | :--- | ---: | ---: | ---: | :--- |")?; - for file in files { - writeln!( - res, - "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", - file.level, - file.name, - file.num_entries, - file.num_deletions, - file.size, - file.column_family_name, - )?; - } - - Ok(res) - }, - } +pub fn file_list(&self) -> impl Iterator> + Send { + self.db + .live_files() + .map_err(map_err) + .into_iter() + .flat_map(Vec::into_iter) + .map(Ok) } diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 07b4ac2c..39cb9be1 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -79,7 +79,4 @@ impl Data { #[inline] pub fn backup_list(&self) -> Result { self.db.db.backup_list() } - - #[inline] - pub fn file_list(&self) -> Result { self.db.db.file_list() } } From 1a8482b3b4865a7f38c342929489ba925a98e05c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 04:39:24 +0000 Subject: [PATCH 0623/1248] refactor incoming extremities retention; broad filter, single pass Signed-off-by: Jason Volk --- src/api/client/membership.rs | 7 +- .../event_handler/upgrade_outlier_pdu.rs | 74 +++++++++---------- src/service/rooms/state/mod.rs | 17 +++-- src/service/rooms/timeline/mod.rs | 44 ++++++----- 4 files changed, 74 insertions(+), 68 deletions(-) diff --git a/src/api/client/membership.rs 
b/src/api/client/membership.rs index fccb9b53..d80aff0c 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,6 +1,7 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, + iter::once, net::IpAddr, sync::Arc, }; @@ -1216,7 +1217,7 @@ async fn join_room_by_id_helper_remote( .append_pdu( &parsed_join_pdu, join_event, - vec![(*parsed_join_pdu.event_id).to_owned()], + once(parsed_join_pdu.event_id.borrow()), &state_lock, ) .await?; @@ -2195,7 +2196,7 @@ async fn knock_room_helper_local( .append_pdu( &parsed_knock_pdu, knock_event, - vec![(*parsed_knock_pdu.event_id).to_owned()], + once(parsed_knock_pdu.event_id.borrow()), &state_lock, ) .await?; @@ -2394,7 +2395,7 @@ async fn knock_room_helper_remote( .append_pdu( &parsed_knock_pdu, knock_event, - vec![(*parsed_knock_pdu.event_id).to_owned()], + once(parsed_knock_pdu.event_id.borrow()), &state_lock, ) .await?; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index f0c8f0c5..ca351981 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,14 +1,18 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashSet}, + iter::once, sync::Arc, time::Instant, }; -use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; -use futures::{future::ready, StreamExt}; +use conduwuit::{ + debug, debug_info, err, implement, trace, + utils::stream::{BroadbandExt, ReadyExt}, + warn, Err, PduEvent, Result, +}; +use futures::{future::ready, FutureExt, StreamExt}; use ruma::{ - api::client::error::ErrorKind, events::{room::redaction::RoomRedactionEventContent, StateEventType, TimelineEventType}, state_res::{self, EventTypeExt}, CanonicalJsonValue, RoomId, RoomVersionId, ServerName, @@ -174,42 +178,34 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Now we calculate the set of extremities this room 
has after the incoming // event has been applied. We start with the previous extremities (aka leaves) trace!("Calculating extremities"); - let mut extremities: HashSet<_> = self + let extremities: Vec<_> = self .services .state .get_forward_extremities(room_id) .map(ToOwned::to_owned) + .ready_filter(|event_id| { + // Remove any that are referenced by this incoming event's prev_events + !incoming_pdu.prev_events.contains(event_id) + }) + .broad_filter_map(|event_id| async move { + // Only keep those extremities were not referenced yet + self.services + .pdu_metadata + .is_event_referenced(room_id, &event_id) + .await + .eq(&false) + .then_some(event_id) + }) .collect() .await; - // Remove any forward extremities that are referenced by this incoming event's - // prev_events - trace!( - "Calculated {} extremities; checking against {} prev_events", + debug!( + "Retained {} extremities checked against {} prev_events", extremities.len(), incoming_pdu.prev_events.len() ); - for prev_event in &incoming_pdu.prev_events { - extremities.remove(&(**prev_event)); - } - // Only keep those extremities were not referenced yet - let mut retained = HashSet::new(); - for id in &extremities { - if !self - .services - .pdu_metadata - .is_event_referenced(room_id, id) - .await - { - retained.insert(id.clone()); - } - } - - extremities.retain(|id| retained.contains(id)); - debug!("Retained {} extremities. Compressing state", extremities.len()); - - let state_ids_compressed: HashSet<_> = self + let state_ids_compressed: Arc> = self .services .state_compressor .compress_state_events( @@ -218,10 +214,9 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .map(|(ssk, eid)| (ssk, eid.borrow())), ) .collect() + .map(Arc::new) .await; - let state_ids_compressed = Arc::new(state_ids_compressed); - if incoming_pdu.state_key.is_some() { debug!("Event is a state-event. 
Deriving new room state"); @@ -260,12 +255,14 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // if not soft fail it if soft_fail { debug!("Soft failing event"); + let extremities = extremities.iter().map(Borrow::borrow); + self.services .timeline .append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(|e| (**e).to_owned()).collect(), + extremities, state_ids_compressed, soft_fail, &state_lock, @@ -273,27 +270,30 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .await?; // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {incoming_pdu:?}"); self.services .pdu_metadata .mark_event_soft_failed(&incoming_pdu.event_id); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + warn!("Event was soft failed: {incoming_pdu:?}"); + return Err!(Request(InvalidParam("Event has been soft failed"))); } - trace!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); - // Now that the event has passed all auth it is added into the timeline. // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. 
+ trace!("Appending pdu to timeline"); + let extremities = extremities + .iter() + .map(Borrow::borrow) + .chain(once(incoming_pdu.event_id.borrow())); + let pdu_id = self .services .timeline .append_incoming_pdu( &incoming_pdu, val, - extremities.into_iter().collect(), + extremities, state_ids_compressed, soft_fail, &state_lock, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index fd303667..8cb4e586 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -398,13 +398,14 @@ impl Service { .ignore_err() } - pub async fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: Vec, - _state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room - * state mutex */ - ) { + pub async fn set_forward_extremities<'a, I>( + &'a self, + room_id: &'a RoomId, + event_ids: I, + _state_lock: &'a RoomMutexGuard, + ) where + I: Iterator + Send + 'a, + { let prefix = (room_id, Interfix); self.db .roomid_pduleaves @@ -413,7 +414,7 @@ impl Service { .ready_for_each(|key| self.db.roomid_pduleaves.remove(key)) .await; - for event_id in &event_ids { + for event_id in event_ids { let key = (room_id, event_id); self.db.roomid_pduleaves.put_raw(key, event_id); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index bf585a6b..8b3b67a7 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,6 +1,7 @@ mod data; use std::{ + borrow::Borrow, cmp, collections::{BTreeMap, HashSet}, fmt::Write, @@ -260,14 +261,16 @@ impl Service { /// /// Returns pdu id #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_pdu( - &self, - pdu: &PduEvent, + pub async fn append_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: Vec, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ - ) -> Result { + leafs: Leafs, + state_lock: &'a RoomMutexGuard, + 
) -> Result + where + Leafs: Iterator + Send + 'a, + { // Coalesce database writes for the remainder of this scope. let _cork = self.db.db.cork_and_flush(); @@ -335,7 +338,7 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, leaves, state_lock) + .set_forward_extremities(&pdu.room_id, leafs, state_lock) .await; let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; @@ -819,8 +822,7 @@ impl Service { pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ + state_lock: &RoomMutexGuard, ) -> Result { let (pdu, pdu_json) = self .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) @@ -896,7 +898,7 @@ impl Service { pdu_json, // Since this PDU references all pdu_leaves we can update the leaves // of the room - vec![(*pdu.event_id).to_owned()], + once(pdu.event_id.borrow()), state_lock, ) .boxed() @@ -943,16 +945,18 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_incoming_pdu( - &self, - pdu: &PduEvent, + pub async fn append_incoming_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: Vec, + new_room_leafs: Leafs, state_ids_compressed: Arc>, soft_fail: bool, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ - ) -> Result> { + state_lock: &'a RoomMutexGuard, + ) -> Result> + where + Leafs: Iterator + Send + 'a, + { // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. 
@@ -968,14 +972,14 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) + .set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock) .await; return Ok(None); } let pdu_id = self - .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) + .append_pdu(pdu, pdu_json, new_room_leafs, state_lock) .await?; Ok(Some(pdu_id)) From ff8bbd4cfa6ad9426bd9efbe610547dd89030c85 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 05:14:45 +0000 Subject: [PATCH 0624/1248] untwist the redaction check stanza Signed-off-by: Jason Volk --- src/core/pdu/redact.rs | 18 +++++++ .../event_handler/upgrade_outlier_pdu.rs | 52 ++++--------------- 2 files changed, 28 insertions(+), 42 deletions(-) diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs index 5d33eeca..7c332719 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -90,3 +90,21 @@ pub fn copy_redacts(&self) -> (Option, Box) { (self.redacts.clone(), self.content.clone()) } + +#[implement(super::Pdu)] +#[must_use] +pub fn redacts_id(&self, room_version: &RoomVersionId) -> Option { + use RoomVersionId::*; + + if self.kind != TimelineEventType::RoomRedaction { + return None; + } + + match *room_version { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => self.redacts.clone(), + | _ => + self.get_content::() + .ok()? 
+ .redacts, + } +} diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index ca351981..03697558 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -13,9 +13,9 @@ use conduwuit::{ }; use futures::{future::ready, FutureExt, StreamExt}; use ruma::{ - events::{room::redaction::RoomRedactionEventContent, StateEventType, TimelineEventType}, + events::StateEventType, state_res::{self, EventTypeExt}, - CanonicalJsonValue, RoomId, RoomVersionId, ServerName, + CanonicalJsonValue, RoomId, ServerName, }; use super::{get_room_version_id, to_room_version}; @@ -127,46 +127,14 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Soft fail check before doing state res debug!("Performing soft-fail check"); - let soft_fail = { - use RoomVersionId::*; - - !auth_check - || incoming_pdu.kind == TimelineEventType::RoomRedaction - && match room_version_id { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = &incoming_pdu.redacts { - !self - .services - .state_accessor - .user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - ) - .await? - } else { - false - } - }, - | _ => { - let content: RoomRedactionEventContent = incoming_pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - !self - .services - .state_accessor - .user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - ) - .await? - } else { - false - } - }, - } + let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) { + | (false, _) => true, + | (true, None) => false, + | (true, Some(redact_id)) => + self.services + .state_accessor + .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .await?, }; // 13. 
Use state resolution to find new room state From 69837671bbc02b1cfba351e1c1321be506ef88b1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 09:28:34 +0000 Subject: [PATCH 0625/1248] simplify request handler task base Signed-off-by: Jason Volk --- src/core/metrics/mod.rs | 4 --- src/router/layers.rs | 33 ++++++++++++---------- src/router/request.rs | 59 +++++++-------------------------------- src/router/run.rs | 1 - src/router/serve/plain.rs | 7 ----- src/router/serve/unix.rs | 7 ++++- 6 files changed, 35 insertions(+), 76 deletions(-) diff --git a/src/core/metrics/mod.rs b/src/core/metrics/mod.rs index f2022166..8f7a5571 100644 --- a/src/core/metrics/mod.rs +++ b/src/core/metrics/mod.rs @@ -19,8 +19,6 @@ pub struct Metrics { runtime_intervals: std::sync::Mutex>, // TODO: move stats - pub requests_spawn_active: AtomicU32, - pub requests_spawn_finished: AtomicU32, pub requests_handle_active: AtomicU32, pub requests_handle_finished: AtomicU32, pub requests_panic: AtomicU32, @@ -48,8 +46,6 @@ impl Metrics { #[cfg(tokio_unstable)] runtime_intervals: std::sync::Mutex::new(runtime_intervals), - requests_spawn_active: AtomicU32::new(0), - requests_spawn_finished: AtomicU32::new(0), requests_handle_active: AtomicU32::new(0), requests_handle_finished: AtomicU32::new(0), requests_panic: AtomicU32::new(0), diff --git a/src/router/layers.rs b/src/router/layers.rs index 96bca4fd..c5227c22 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -5,7 +5,7 @@ use axum::{ Router, }; use axum_client_ip::SecureClientIpSource; -use conduwuit::{error, Result, Server}; +use conduwuit::{debug, error, Result, Server}; use conduwuit_api::router::state::Guard; use conduwuit_service::Services; use http::{ @@ -50,7 +50,6 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { let layers = layers .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) - .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::spawn)) .layer( 
TraceLayer::new_for_http() .make_span_with(tracing_span::<_>) @@ -196,20 +195,26 @@ fn catch_panic( } fn tracing_span(request: &http::Request) -> tracing::Span { - let path = request.extensions().get::().map_or_else( - || { - request - .uri() - .path_and_query() - .expect("all requests have a path") - .as_str() - }, - truncated_matched_path, - ); + let path = request + .extensions() + .get::() + .map_or_else(|| request_path_str(request), truncated_matched_path); - let method = request.method(); + tracing::span! { + parent: None, + debug::INFO_SPAN_LEVEL, + "router", + method = %request.method(), + %path, + } +} - tracing::debug_span!(parent: None, "router", %method, %path) +fn request_path_str(request: &http::Request) -> &str { + request + .uri() + .path_and_query() + .expect("all requests have a path") + .as_str() } fn truncated_matched_path(path: &MatchedPath) -> &str { diff --git a/src/router/request.rs b/src/router/request.rs index ca063338..f7b94417 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -8,48 +8,6 @@ use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; use http::{Method, StatusCode, Uri}; -#[tracing::instrument( - parent = None, - level = "trace", - skip_all, - fields( - handled = %services - .server - .metrics - .requests_spawn_finished - .fetch_add(1, Ordering::Relaxed), - active = %services - .server - .metrics - .requests_spawn_active - .fetch_add(1, Ordering::Relaxed), - ) -)] -pub(crate) async fn spawn( - State(services): State>, - req: http::Request, - next: axum::middleware::Next, -) -> Result { - let server = &services.server; - - #[cfg(debug_assertions)] - conduwuit::defer! 
{{ - _ = server - .metrics - .requests_spawn_active - .fetch_sub(1, Ordering::Relaxed); - }}; - - if !server.running() { - debug_warn!("unavailable pending shutdown"); - return Err(StatusCode::SERVICE_UNAVAILABLE); - } - - let fut = next.run(req); - let task = server.runtime().spawn(fut); - task.await.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) -} - #[tracing::instrument( level = "debug", skip_all, @@ -71,17 +29,15 @@ pub(crate) async fn handle( req: http::Request, next: axum::middleware::Next, ) -> Result { - let server = &services.server; - #[cfg(debug_assertions)] conduwuit::defer! {{ - _ = server + _ = services.server .metrics .requests_handle_active .fetch_sub(1, Ordering::Relaxed); }}; - if !server.running() { + if !services.server.running() { debug_warn!( method = %req.method(), uri = %req.uri(), @@ -91,10 +47,15 @@ pub(crate) async fn handle( return Err(StatusCode::SERVICE_UNAVAILABLE); } - let uri = req.uri().clone(); let method = req.method().clone(); - let result = next.run(req).await; - handle_result(&method, &uri, result) + let uri = req.uri().clone(); + services + .server + .runtime() + .spawn(next.run(req)) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) + .and_then(|result| handle_result(&method, &uri, result)) } fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { diff --git a/src/router/run.rs b/src/router/run.rs index ea8a7666..605168b8 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -125,7 +125,6 @@ async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_serve let timeout = Duration::from_secs(36); debug!( ?timeout, - spawn_active = ?server.metrics.requests_spawn_active.load(Ordering::Relaxed), handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed), "Notifying for graceful shutdown" ); diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 0e971f3c..535282b9 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -24,27 +24,20 @@ 
pub(super) async fn serve( info!("Listening on {addrs:?}"); while join_set.join_next().await.is_some() {} - let spawn_active = server.metrics.requests_spawn_active.load(Ordering::Relaxed); let handle_active = server .metrics .requests_handle_active .load(Ordering::Relaxed); debug_info!( - spawn_finished = server - .metrics - .requests_spawn_finished - .load(Ordering::Relaxed), handle_finished = server .metrics .requests_handle_finished .load(Ordering::Relaxed), panics = server.metrics.requests_panic.load(Ordering::Relaxed), - spawn_active, handle_active, "Stopped listening on {addrs:?}", ); - debug_assert!(spawn_active == 0, "active request tasks are not joined"); debug_assert!(handle_active == 0, "active request handles still pending"); Ok(()) diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index 6855b34c..6a030c30 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -159,7 +159,12 @@ async fn fini(server: &Arc, listener: UnixListener, mut tasks: JoinSet<( drop(listener); debug!("Waiting for requests to finish..."); - while server.metrics.requests_spawn_active.load(Ordering::Relaxed) > 0 { + while server + .metrics + .requests_handle_active + .load(Ordering::Relaxed) + .gt(&0) + { tokio::select! 
{ task = tasks.join_next() => if task.is_none() { break; }, () = sleep(FINI_POLL_INTERVAL) => {}, From f698254c412b5a142567f6b0ad710aa212c9b34d Mon Sep 17 00:00:00 2001 From: morguldir Date: Fri, 31 Jan 2025 02:36:14 +0100 Subject: [PATCH 0626/1248] make registration tokens reloadable, and allow configuring multiple Signed-off-by: morguldir --- conduwuit-example.toml | 5 +++-- src/admin/room/alias.rs | 15 +++++++++------ src/core/config/mod.rs | 5 +++-- src/service/uiaa/mod.rs | 35 ++++++++++++++++++++++++++--------- 4 files changed, 41 insertions(+), 19 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 4062ba99..3fd95044 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -406,8 +406,9 @@ # #registration_token = -# Path to a file on the system that gets read for the registration token. -# this config option takes precedence/priority over "registration_token". +# Path to a file on the system that gets read for additional registration +# tokens. Multiple tokens can be added if you separate them with +# whitespace # # conduwuit must be able to access the file, and it must not be empty # diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 9710cfc8..d3b956e1 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -72,7 +72,7 @@ pub(super) async fn reprocess( ))), }; match command { - | RoomAliasCommand::Set { force, room_id, .. } => + | RoomAliasCommand::Set { force, room_id, .. } => { match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) { | (true, Ok(id)) => { match services.rooms.alias.set_alias( @@ -106,8 +106,9 @@ pub(super) async fn reprocess( ))), } }, - }, - | RoomAliasCommand::Remove { .. } => + } + }, + | RoomAliasCommand::Remove { .. 
} => { match services.rooms.alias.resolve_local_alias(&room_alias).await { | Ok(id) => match services .rooms @@ -124,15 +125,17 @@ pub(super) async fn reprocess( }, | Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, - | RoomAliasCommand::Which { .. } => + } + }, + | RoomAliasCommand::Which { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { | Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( "Alias resolves to {id}" ))), | Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, + } + }, | RoomAliasCommand::List { .. } => unreachable!(), } }, diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 415c9ba9..ff038975 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -510,8 +510,9 @@ pub struct Config { /// display: sensitive pub registration_token: Option, - /// Path to a file on the system that gets read for the registration token. - /// this config option takes precedence/priority over "registration_token". + /// Path to a file on the system that gets read for additional registration + /// tokens. 
Multiple tokens can be added if you separate them with + /// whitespace /// /// conduwuit must be able to access the file, and it must not be empty /// diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index f7e55251..7084f32a 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, sync::{Arc, RwLock}, }; @@ -17,7 +17,7 @@ use ruma::{ CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; -use crate::{globals, users, Dep}; +use crate::{config, globals, users, Dep}; pub struct Service { userdevicesessionid_uiaarequest: RwLock, @@ -28,6 +28,7 @@ pub struct Service { struct Services { globals: Dep, users: Dep, + config: Dep, } struct Data { @@ -49,6 +50,7 @@ impl crate::Service for Service { services: Services { globals: args.depend::("globals"), users: args.depend::("users"), + config: args.depend::("config"), }, })) } @@ -56,6 +58,26 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +#[implement(Service)] +pub async fn read_tokens(&self) -> Result> { + let mut tokens = HashSet::new(); + if let Some(file) = &self.services.config.registration_token_file.as_ref() { + match std::fs::read_to_string(file) { + | Ok(text) => { + text.split_ascii_whitespace().for_each(|token| { + tokens.insert(token.to_owned()); + }); + }, + | Err(e) => error!("Failed to read the registration token file: {e}"), + } + }; + if let Some(token) = &self.services.config.registration_token { + tokens.insert(token.to_owned()); + } + + Ok(tokens) +} + /// Creates a new Uiaa session. Make sure the session token is unique. 
#[implement(Service)] pub fn create( @@ -152,13 +174,8 @@ pub async fn try_auth( uiaainfo.completed.push(AuthType::Password); }, | AuthData::RegistrationToken(t) => { - if self - .services - .globals - .registration_token - .as_ref() - .is_some_and(|reg_token| t.token.trim() == reg_token) - { + let tokens = self.read_tokens().await?; + if tokens.contains(t.token.trim()) { uiaainfo.completed.push(AuthType::RegistrationToken); } else { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { From e161e5dd61b006056ef35fbd034492130bffe150 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 00:54:00 +0000 Subject: [PATCH 0627/1248] add pair_of! macro Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 1a4b52da..c2d8ed45 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -84,6 +84,17 @@ macro_rules! apply { }; } +#[macro_export] +macro_rules! pair_of { + ($decl:ty) => { + ($decl, $decl) + }; + + ($init:expr) => { + ($init, $init) + }; +} + /// Functor for truthy #[macro_export] macro_rules! 
is_true { From 4ff1155bf0aefddd02e34ed9c709db25c0da3ecd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 01:23:27 +0000 Subject: [PATCH 0628/1248] reroll encrypted_room branch in incremental sync state Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 150 ++++++++++++++++++-------------------- 1 file changed, 69 insertions(+), 81 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index cd4dfc90..f5b612e4 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, + at, err, error, extract_variant, is_equal_to, pair_of, pdu::EventHash, result::FlatOk, utils::{ @@ -16,7 +16,7 @@ use conduwuit::{ stream::{BroadbandExt, Tools, WidebandExt}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, - Error, PduCount, PduEvent, Result, + PduCount, PduEvent, Result, }; use conduwuit_service::{ rooms::{ @@ -64,6 +64,8 @@ struct StateChanges { invited_member_count: Option, joined_since_last_sync: bool, state_events: Vec, + device_list_updates: HashSet, + left_encrypted_users: HashSet, } type PresenceUpdates = HashMap; @@ -325,18 +327,16 @@ pub(crate) async fn build_sync_events( // If the user doesn't share an encrypted room with the target anymore, we need // to tell them - let device_list_left = left_encrypted_users + let device_list_left: HashSet<_> = left_encrypted_users .into_iter() .stream() .broad_filter_map(|user_id| async move { - let no_shared_encrypted_room = - !share_encrypted_room(services, sender_user, &user_id, None).await; - no_shared_encrypted_room.then_some(user_id) - }) - .ready_fold(HashSet::new(), |mut device_list_left, user_id| { - device_list_left.insert(user_id); - device_list_left + share_encrypted_room(services, sender_user, &user_id, None) + .await + .eq(&false) + .then_some(user_id) }) + .collect() .await; let response = sync_events::v3::Response { @@ -730,14 +730,14 @@ async 
fn load_joined_room( .into(); let witness = witness.await; - let mut device_list_updates = HashSet::::new(); - let mut left_encrypted_users = HashSet::::new(); let StateChanges { heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events, + mut device_list_updates, + left_encrypted_users, } = if no_state_changes { StateChanges::default() } else { @@ -747,8 +747,6 @@ async fn load_joined_room( room_id, full_state, filter, - &mut device_list_updates, - &mut left_encrypted_users, since_shortstatehash, current_shortstatehash, joined_since_last_sync, @@ -919,8 +917,6 @@ async fn calculate_state_changes( room_id: &RoomId, full_state: bool, filter: &FilterDefinition, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, @@ -944,8 +940,6 @@ async fn calculate_state_changes( room_id, full_state, filter, - device_list_updates, - left_encrypted_users, since_shortstatehash, current_shortstatehash, joined_since_last_sync, @@ -1013,6 +1007,7 @@ async fn calculate_state_initial( invited_member_count, joined_since_last_sync: true, state_events, + ..Default::default() }) } @@ -1024,8 +1019,6 @@ async fn calculate_state_incremental( room_id: &RoomId, full_state: bool, _filter: &FilterDefinition, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, @@ -1063,79 +1056,72 @@ async fn calculate_state_incremental( .await; } - let encrypted_room = services - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); - let since_encryption = services .rooms .state_accessor .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") .is_ok(); - let (encrypted_room, since_encryption) = join(encrypted_room, since_encryption).await; + let encrypted_room = services + 
.rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .is_ok() + .await; - // Calculations: - let new_encrypted_room = encrypted_room && !since_encryption; + let (mut device_list_updates, left_encrypted_users) = delta_state_events + .iter() + .stream() + .ready_filter(|_| encrypted_room) + .ready_filter(|state_event| state_event.kind == RoomMember) + .ready_filter_map(|state_event| { + let content = state_event.get_content().ok()?; + let user_id = state_event.state_key.as_ref()?.parse().ok()?; + Some((content, user_id)) + }) + .ready_filter(|(_, user_id): &(RoomMemberEventContent, OwnedUserId)| { + user_id != sender_user + }) + .fold_default(|(mut dlu, mut leu): pair_of!(HashSet<_>), (content, user_id)| async move { + use MembershipState::*; + + let shares_encrypted_room = + |user_id| share_encrypted_room(services, sender_user, user_id, Some(room_id)); + + match content.membership { + | Join if !shares_encrypted_room(&user_id).await => dlu.insert(user_id), + | Leave => leu.insert(user_id), + | _ => false, + }; + + (dlu, leu) + }) + .await; + + // If the user is in a new encrypted room, give them all joined users + let new_encrypted_room = encrypted_room && !since_encryption.await; + if joined_since_last_sync && encrypted_room || new_encrypted_room { + services + .rooms + .state_cache + .room_members(room_id) + .ready_filter(|&user_id| sender_user != user_id) + .map(ToOwned::to_owned) + .broad_filter_map(|user_id| async move { + share_encrypted_room(services, sender_user, &user_id, Some(room_id)) + .await + .or_some(user_id) + }) + .ready_for_each(|user_id| { + device_list_updates.insert(user_id); + }) + .await; + } let send_member_count = delta_state_events .iter() .any(|event| event.kind == RoomMember); - if encrypted_room { - for state_event in &delta_state_events { - if state_event.kind != RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key) - 
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - } - - let content: RoomMemberEventContent = state_event.get_content()?; - - match content.membership { - | MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .await - { - device_list_updates.insert(user_id.into()); - } - }, - | MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id.into()); - }, - | _ => {}, - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - let updates: Vec = services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|user_id| sender_user != *user_id) - .filter_map(|user_id| { - share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .map(|res| res.or_some(user_id.to_owned())) - }) - .collect() - .await; - - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend(updates); - } - let (joined_member_count, invited_member_count, heroes) = if send_member_count { calculate_counts(services, room_id, sender_user).await? 
} else { @@ -1148,6 +1134,8 @@ async fn calculate_state_incremental( invited_member_count, joined_since_last_sync, state_events: delta_state_events, + device_list_updates, + left_encrypted_users, }) } From 4e0cedbe5122c478e63e26b3f5156475629ada3e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 05:05:32 +0000 Subject: [PATCH 0629/1248] simplify v3 sync presence collecting Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 55 +++++++-------------------------------- 1 file changed, 10 insertions(+), 45 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index f5b612e4..cd95fa42 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -1,6 +1,6 @@ use std::{ cmp::{self}, - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, time::Duration, }; @@ -45,7 +45,7 @@ use ruma::{ uiaa::UiaaResponse, }, events::{ - presence::PresenceEvent, + presence::{PresenceEvent, PresenceEventContent}, room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, @@ -68,7 +68,7 @@ struct StateChanges { left_encrypted_users: HashSet, } -type PresenceUpdates = HashMap; +type PresenceUpdates = HashMap; /// # `GET /_matrix/client/r0/sync` /// @@ -351,9 +351,11 @@ pub(crate) async fn build_sync_events( next_batch: next_batch.to_string(), presence: Presence { events: presence_updates - .unwrap_or_default() - .into_values() - .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) + .into_iter() + .flat_map(IntoIterator::into_iter) + .map(|(sender, content)| PresenceEvent { content, sender }) + .map(|ref event| Raw::new(event)) + .filter_map(Result::ok) .collect(), }, rooms: Rooms { @@ -390,45 +392,8 @@ async fn process_presence_updates( .map_ok(move |event| (user_id, event)) .ok() }) - .ready_fold(PresenceUpdates::new(), |mut updates, (user_id, event)| { - match 
updates.entry(user_id.into()) { - | Entry::Vacant(slot) => { - let mut new_event = event; - new_event.content.last_active_ago = match new_event.content.currently_active { - | Some(true) => None, - | _ => new_event.content.last_active_ago, - }; - - slot.insert(new_event); - }, - | Entry::Occupied(mut slot) => { - let curr_event = slot.get_mut(); - let curr_content = &mut curr_event.content; - let new_content = event.content; - - // Update existing presence event with more info - curr_content.presence = new_content.presence; - curr_content.status_msg = new_content - .status_msg - .or_else(|| curr_content.status_msg.take()); - curr_content.displayname = new_content - .displayname - .or_else(|| curr_content.displayname.take()); - curr_content.avatar_url = new_content - .avatar_url - .or_else(|| curr_content.avatar_url.take()); - curr_content.currently_active = new_content - .currently_active - .or(curr_content.currently_active); - curr_content.last_active_ago = match curr_content.currently_active { - | Some(true) => None, - | _ => new_content.last_active_ago.or(curr_content.last_active_ago), - }; - }, - }; - - updates - }) + .map(|(user_id, event)| (user_id.to_owned(), event.content)) + .collect() .await } From a4ef04cd1427ea2eeb474775e7c4c86937d063ab Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 08:31:58 +0000 Subject: [PATCH 0630/1248] fix room join completion taking wrong sync branch Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index cd95fa42..e3f559f5 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -887,7 +887,7 @@ async fn calculate_state_changes( joined_since_last_sync: bool, witness: Option<&Witness>, ) -> Result { - if since_shortstatehash.is_none() || joined_since_last_sync { + if since_shortstatehash.is_none() { calculate_state_initial( services, sender_user, From 
6983798487ec563be83a3ba8739afa9977d98741 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 08:34:32 +0000 Subject: [PATCH 0631/1248] implement lazy-loading for incremental sync Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 182 ++++++++++++++++++++------------------ 1 file changed, 98 insertions(+), 84 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index e3f559f5..49246514 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -55,7 +55,10 @@ use ruma::{ }; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma, RumaResponse}; +use crate::{ + client::{ignored_filter, lazy_loading_witness}, + Ruma, RumaResponse, +}; #[derive(Default)] struct StateChanges { @@ -633,10 +636,6 @@ async fn load_joined_room( }) .into(); - let no_state_changes = timeline_pdus.is_empty() - && (since_shortstatehash.is_none() - || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); - let since_sender_member: OptionFuture<_> = since_shortstatehash .map(|short| { services @@ -658,11 +657,7 @@ async fn load_joined_room( let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() || filter.room.timeline.lazy_load_options.is_enabled(); - let generate_witness = - lazy_loading_enabled && (since_shortstatehash.is_none() || joined_since_last_sync); - - let lazy_reset = lazy_loading_enabled && since_shortstatehash.is_none(); - + let lazy_reset = since_shortstatehash.is_none(); let lazy_loading_context = &lazy_loading::Context { user_id: sender_user, device_id: sender_device, @@ -677,24 +672,10 @@ async fn load_joined_room( .into(); lazy_load_reset.await; - let witness: Option = generate_witness.then(|| { - timeline_pdus - .iter() - .map(|(_, pdu)| pdu.sender.clone()) - .chain(receipt_events.keys().cloned()) - .collect() - }); - - let witness: OptionFuture<_> = witness - .map(|witness| { - services - .rooms - .lazy_loading - .witness_retain(witness, 
lazy_loading_context) - }) + let witness: OptionFuture<_> = lazy_loading_enabled + .then(|| lazy_loading_witness(services, lazy_loading_context, timeline_pdus.iter())) .into(); - let witness = witness.await; let StateChanges { heroes, joined_member_count, @@ -703,23 +684,19 @@ async fn load_joined_room( state_events, mut device_list_updates, left_encrypted_users, - } = if no_state_changes { - StateChanges::default() - } else { - calculate_state_changes( - services, - sender_user, - room_id, - full_state, - filter, - since_shortstatehash, - current_shortstatehash, - joined_since_last_sync, - witness.as_ref(), - ) - .boxed() - .await? - }; + } = calculate_state_changes( + services, + sender_user, + room_id, + full_state, + filter, + since_shortstatehash, + current_shortstatehash, + joined_since_last_sync, + witness.await.as_ref(), + ) + .boxed() + .await?; let account_data_events = services .account_data @@ -908,6 +885,7 @@ async fn calculate_state_changes( since_shortstatehash, current_shortstatehash, joined_since_last_sync, + witness, ) .await } @@ -920,7 +898,7 @@ async fn calculate_state_initial( sender_user: &UserId, room_id: &RoomId, full_state: bool, - filter: &FilterDefinition, + _filter: &FilterDefinition, current_shortstatehash: ShortStateHash, witness: Option<&Witness>, ) -> Result { @@ -938,20 +916,14 @@ async fn calculate_state_initial( .zip(event_ids.into_iter().stream()) .ready_filter_map(|item| Some((item.0.ok()?, item.1))) .ready_filter_map(|((event_type, state_key), event_id)| { - let lazy_load_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - if lazy_load_enabled + let lazy = !full_state && event_type == StateEventType::RoomMember - && !full_state && state_key.as_str().try_into().is_ok_and(|user_id: &UserId| { sender_user != user_id && witness.is_some_and(|witness| !witness.contains(user_id)) - }) { - return None; - } + }); - Some(event_id) + lazy.or_some(event_id) }) 
.broad_filter_map(|event_id: OwnedEventId| async move { services.rooms.timeline.get_pdu(&event_id).await.ok() @@ -978,7 +950,7 @@ async fn calculate_state_initial( #[tracing::instrument(name = "incremental", level = "trace", skip_all)] #[allow(clippy::too_many_arguments)] -async fn calculate_state_incremental( +async fn calculate_state_incremental<'a>( services: &Services, sender_user: &UserId, room_id: &RoomId, @@ -987,39 +959,80 @@ async fn calculate_state_incremental( since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, + witness: Option<&'a Witness>, ) -> Result { - // Incremental /sync - let since_shortstatehash = - since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); + let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash); - let mut delta_state_events = Vec::new(); + let state_changed = since_shortstatehash != current_shortstatehash; - if since_shortstatehash != current_shortstatehash { - let current_state_ids = services + let state_get_id = |user_id: &'a UserId| { + services .rooms .state_accessor - .state_full_ids(current_shortstatehash) - .collect(); + .state_get_id(current_shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .ok() + }; - let since_state_ids = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect(); + let lazy_state_ids: OptionFuture<_> = witness + .map(|witness| { + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_id(user_id)) + .collect::>() + }) + .into(); - let (current_state_ids, since_state_ids): ( - HashMap<_, OwnedEventId>, - HashMap<_, OwnedEventId>, - ) = join(current_state_ids, since_state_ids).await; + let current_state_ids: OptionFuture<_> = state_changed + .then(|| { + services + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .collect::>() + }) + .into(); - current_state_ids - .iter() - .stream() - .ready_filter(|(key, id)| full_state || 
since_state_ids.get(key) != Some(id)) - .wide_filter_map(|(_, id)| services.rooms.timeline.get_pdu(id).ok()) - .ready_for_each(|pdu| delta_state_events.push(pdu)) - .await; - } + let since_state_ids: OptionFuture<_> = (state_changed && !full_state) + .then(|| { + services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .collect::>() + }) + .into(); + + let lazy_state_ids = lazy_state_ids + .map(Option::into_iter) + .map(|iter| iter.flat_map(Vec::into_iter)) + .map(IterStream::stream) + .flatten_stream(); + + let ref since_state_ids = since_state_ids.shared(); + let delta_state_events = current_state_ids + .map(Option::into_iter) + .map(|iter| iter.flat_map(Vec::into_iter)) + .map(IterStream::stream) + .flatten_stream() + .filter_map(|(shortstatekey, event_id): (u64, OwnedEventId)| async move { + since_state_ids + .clone() + .await + .is_none_or(|since_state| since_state.get(&shortstatekey) != Some(&event_id)) + .then_some(event_id) + }) + .chain(lazy_state_ids) + .broad_filter_map(|event_id: OwnedEventId| async move { + services + .rooms + .timeline + .get_pdu(&event_id) + .await + .map(move |pdu| (event_id, pdu)) + .ok() + }) + .collect::>(); let since_encryption = services .rooms @@ -1031,11 +1044,12 @@ async fn calculate_state_incremental( .rooms .state_accessor .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok() - .await; + .is_ok(); + + let (delta_state_events, encrypted_room) = join(delta_state_events, encrypted_room).await; let (mut device_list_updates, left_encrypted_users) = delta_state_events - .iter() + .values() .stream() .ready_filter(|_| encrypted_room) .ready_filter(|state_event| state_event.kind == RoomMember) @@ -1084,7 +1098,7 @@ async fn calculate_state_incremental( } let send_member_count = delta_state_events - .iter() + .values() .any(|event| event.kind == RoomMember); let (joined_member_count, invited_member_count, heroes) = if send_member_count { @@ -1098,9 +1112,9 @@ async fn 
calculate_state_incremental( joined_member_count, invited_member_count, joined_since_last_sync, - state_events: delta_state_events, device_list_updates, left_encrypted_users, + state_events: delta_state_events.into_values().collect(), }) } From 09bc71caaba40321ec0f987574a94e788175c4f9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 09:08:13 +0000 Subject: [PATCH 0632/1248] fix missed concurrent fetch opportunities in sender (ffd0fd42424a) Signed-off-by: Jason Volk --- src/service/sending/sender.rs | 41 +++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 363bb994..f19b69da 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -13,7 +13,12 @@ use conduwuit::{ debug, err, error, result::LogErr, trace, - utils::{calculate_hash, continue_exponential_backoff_secs, stream::IterStream, ReadyExt}, + utils::{ + calculate_hash, continue_exponential_backoff_secs, + future::TryExtExt, + stream::{BroadbandExt, IterStream, WidebandExt}, + ReadyExt, + }, warn, Error, Result, }; use futures::{ @@ -474,20 +479,25 @@ impl Service { since: (u64, u64), max_edu_count: &AtomicU64, ) -> Option { - let server_rooms = self.services.state_cache.server_rooms(server_name); - - pin_mut!(server_rooms); let mut num = 0; - let mut receipts = BTreeMap::::new(); - while let Some(room_id) = server_rooms.next().await { - let receipt_map = self - .select_edus_receipts_room(room_id, since, max_edu_count, &mut num) - .await; + let receipts: BTreeMap = self + .services + .state_cache + .server_rooms(server_name) + .map(ToOwned::to_owned) + .broad_filter_map(|room_id| async move { + let receipt_map = self + .select_edus_receipts_room(&room_id, since, max_edu_count, &mut num) + .await; - if !receipt_map.read.is_empty() { - receipts.insert(room_id.into(), receipt_map); - } - } + receipt_map + .read + .is_empty() + .eq(&false) + .then_some((room_id, 
receipt_map)) + }) + .collect() + .await; if receipts.is_empty() { return None; @@ -820,9 +830,8 @@ impl Service { | _ => None, }) .stream() - .then(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id)) - .ready_filter_map(Result::ok) - .then(|pdu| self.convert_to_outgoing_federation_event(pdu)) + .wide_filter_map(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id).ok()) + .wide_then(|pdu| self.convert_to_outgoing_federation_event(pdu)) .collect() .await; From 2fa9621f3a358740917af7a55c5d0be1e1d79ae4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 11:54:06 +0000 Subject: [PATCH 0633/1248] flatten state_full_shortids Signed-off-by: Jason Volk --- src/service/rooms/state/mod.rs | 60 +++++++++++-------------- src/service/rooms/state_accessor/mod.rs | 47 +++++++++---------- 2 files changed, 48 insertions(+), 59 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 8cb4e586..1b0d0d58 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -429,60 +429,54 @@ impl Service { sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, - ) -> Result>> { + ) -> Result> { let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else { return Ok(HashMap::new()); }; - let mut sauthevents: HashMap<_, _> = - state_res::auth_types_for_event(kind, sender, state_key, content)? 
- .iter() - .stream() - .broad_filter_map(|(event_type, state_key)| { - self.services - .short - .get_shortstatekey(event_type, state_key) - .map_ok(move |ssk| (ssk, (event_type, state_key))) - .map(Result::ok) - }) - .map(|(ssk, (event_type, state_key))| { - (ssk, (event_type.to_owned(), state_key.to_owned())) - }) - .collect() - .await; + let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?; + + let sauthevents: HashMap<_, _> = auth_types + .iter() + .stream() + .broad_filter_map(|(event_type, state_key)| { + self.services + .short + .get_shortstatekey(event_type, state_key) + .map_ok(move |ssk| (ssk, (event_type, state_key))) + .map(Result::ok) + }) + .collect() + .await; let (state_keys, event_ids): (Vec<_>, Vec<_>) = self .services .state_accessor .state_full_shortids(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?room_id, ?shortstatehash, "{e:?}"))))? - .into_iter() - .filter_map(|(shortstatekey, shorteventid)| { + .ready_filter_map(Result::ok) + .ready_filter_map(|(shortstatekey, shorteventid)| { sauthevents - .remove(&shortstatekey) - .map(|(event_type, state_key)| ((event_type, state_key), shorteventid)) + .get(&shortstatekey) + .map(|(ty, sk)| ((ty, sk), shorteventid)) }) - .unzip(); + .unzip() + .await; - let auth_pdus = self - .services + self.services .short .multi_get_eventid_from_short(event_ids.into_iter().stream()) .zip(state_keys.into_iter().stream()) - .ready_filter_map(|(event_id, tsk)| Some((tsk, event_id.ok()?))) - .broad_filter_map(|(tsk, event_id): (_, OwnedEventId)| async move { + .ready_filter_map(|(event_id, (ty, sk))| Some(((ty, sk), event_id.ok()?))) + .broad_filter_map(|((ty, sk), event_id): (_, OwnedEventId)| async move { self.services .timeline .get_pdu(&event_id) .await - .map(Arc::new) - .map(move |pdu| (tsk, pdu)) + .map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu)) .ok() }) .collect() - .await; - - Ok(auth_pdus) + .map(Ok) + .await } } diff --git 
a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 0f5520bb..98aac138 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,6 +1,7 @@ use std::{ borrow::Borrow, fmt::Write, + ops::Deref, sync::{Arc, Mutex as StdMutex, Mutex}, }; @@ -10,8 +11,7 @@ use conduwuit::{ utils, utils::{ math::{usize_from_f64, Expected}, - stream::BroadbandExt, - IterStream, ReadyExt, + stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, Err, Error, PduEvent, Result, }; @@ -158,12 +158,8 @@ impl Service { ) -> impl Stream + Send + '_ { let short_ids = self .state_full_shortids(shortstatehash) - .map(|result| result.expect("missing shortstatehash")) - .map(Vec::into_iter) - .map(|iter| iter.map(at!(1))) - .map(IterStream::stream) - .flatten_stream() - .boxed(); + .expect_ok() + .map(at!(1)); self.services .short @@ -187,9 +183,8 @@ impl Service { { let shortids = self .state_full_shortids(shortstatehash) - .map(|result| result.expect("missing shortstatehash")) - .map(|vec| vec.into_iter().unzip()) - .boxed() + .expect_ok() + .unzip() .shared(); let shortstatekeys = shortids @@ -255,25 +250,25 @@ impl Service { } #[inline] - pub async fn state_full_shortids( + pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, - ) -> Result> { - let shortids = self - .services + ) -> impl Stream> + Send + '_ { + self.services .state_compressor .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database("Missing state IDs: {e}")))? 
- .pop() - .expect("there is always one layer") - .full_state - .iter() - .copied() - .map(parse_compressed_state_event) - .collect(); - - Ok(shortids) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| { + full_state + .deref() + .iter() + .copied() + .map(parse_compressed_state_event) + .collect() + }) + .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .try_flatten_stream() } /// Returns a single PDU from `room_id` with key (`event_type`, From ea49b60273c987cc673c3aad439c6fbb50bb795f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 22:28:09 +0000 Subject: [PATCH 0634/1248] add Option support to database deserializer Signed-off-by: Jason Volk --- src/database/de.rs | 23 ++++-- src/database/tests.rs | 159 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 176 insertions(+), 6 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index 7cc8f00a..8e914fcc 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -22,7 +22,7 @@ pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result where T: Deserialize<'a>, { - let mut deserializer = Deserializer { buf, pos: 0, seq: false }; + let mut deserializer = Deserializer { buf, pos: 0, rec: 0, seq: false }; T::deserialize(&mut deserializer).debug_inspect(|_| { deserializer @@ -35,6 +35,7 @@ where pub(crate) struct Deserializer<'de> { buf: &'de [u8], pos: usize, + rec: usize, seq: bool, } @@ -107,7 +108,7 @@ impl<'de> Deserializer<'de> { /// consumed None is returned instead. #[inline] fn record_peek_byte(&self) -> Option { - let started = self.pos != 0; + let started = self.pos != 0 || self.rec > 0; let buf = &self.buf[self.pos..]; debug_assert!( !started || buf[0] == Self::SEP, @@ -121,13 +122,14 @@ impl<'de> Deserializer<'de> { /// the start of the next record. 
(Case for some sequences) #[inline] fn record_start(&mut self) { - let started = self.pos != 0; + let started = self.pos != 0 || self.rec > 0; debug_assert!( !started || self.buf[self.pos] == Self::SEP, "Missing expected record separator at current position" ); self.inc_pos(started.into()); + self.inc_rec(1); } /// Consume all remaining bytes, which may include record separators, @@ -157,6 +159,9 @@ impl<'de> Deserializer<'de> { debug_assert!(self.pos <= self.buf.len(), "pos out of range"); } + #[inline] + fn inc_rec(&mut self, n: usize) { self.rec = self.rec.saturating_add(n); } + /// Unconsumed input bytes. #[inline] fn remaining(&self) -> Result { @@ -270,8 +275,16 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_option>(self, _visitor: V) -> Result { - unhandled!("deserialize Option not implemented") + fn deserialize_option>(self, visitor: V) -> Result { + if self + .buf + .get(self.pos) + .is_none_or(|b| *b == Deserializer::SEP) + { + visitor.visit_none() + } else { + visitor.visit_some(self) + } } #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] diff --git a/src/database/tests.rs b/src/database/tests.rs index 2f143698..e6c85983 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -3,7 +3,7 @@ use std::fmt::Debug; use arrayvec::ArrayVec; -use conduwuit::ruma::{serde::Raw, RoomId, UserId}; +use conduwuit::ruma::{serde::Raw, EventId, RoomId, UserId}; use serde::Serialize; use crate::{ @@ -389,3 +389,160 @@ fn de_complex() { assert_eq!(arr, key, "deserialization of serialization does not match"); } + +#[test] +fn serde_tuple_option_value_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + 
let bb: (&RoomId, Option<&UserId>) = (room_id, Some(user_id)); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (&RoomId, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.1, cc.1); + assert_eq!(cc.0, bb.0); +} + +#[test] +fn serde_tuple_option_value_none() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + + let bb: (&RoomId, Option<&UserId>) = (room_id, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (&RoomId, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.1); + assert_eq!(cc.0, bb.0); +} + +#[test] +fn serde_tuple_option_none_value() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, &UserId) = (None, user_id); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, &UserId) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.0); + assert_eq!(cc.1, bb.1); +} + +#[test] +fn serde_tuple_option_some_value() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, &UserId) = (Some(room_id), user_id); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, &UserId) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.0, cc.0); + assert_eq!(cc.1, bb.1); +} + 
+#[test] +fn serde_tuple_option_some_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, Option<&UserId>) = (Some(room_id), Some(user_id)); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(cc.0, bb.0); + assert_eq!(bb.1, cc.1); +} + +#[test] +fn serde_tuple_option_none_none() { + let aa = vec![0xFF]; + + let bb: (Option<&RoomId>, Option<&UserId>) = (None, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(cc.0, bb.0); + assert_eq!(None, cc.1); +} + +#[test] +fn serde_tuple_option_some_none_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + (Some(room_id), None, Some(user_id)); + + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.0, cc.0); + assert_eq!(None, cc.1); + assert_eq!(bb.1, cc.1); + assert_eq!(bb.2, cc.2); +} + +#[test] +fn serde_tuple_option_none_none_none() { + let aa = vec![0xFF, 0xFF]; + + let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = (None, None, 
None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.0); + assert_eq!(bb, cc); +} From 4add39d0fedcbe7946c6dfffac33d1e48111ea8b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 15:50:09 +0000 Subject: [PATCH 0635/1248] cache compressed state in a sorted structure for logarithmic queries with partial keys Signed-off-by: Jason Volk --- src/api/client/membership.rs | 9 +- .../rooms/event_handler/resolve_state.rs | 6 +- .../event_handler/upgrade_outlier_pdu.rs | 15 ++- src/service/rooms/state/mod.rs | 28 +++--- src/service/rooms/state_accessor/mod.rs | 99 ++++++++++++++----- src/service/rooms/state_compressor/mod.rs | 30 +++--- src/service/rooms/timeline/mod.rs | 4 +- 7 files changed, 118 insertions(+), 73 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d80aff0c..449d44d5 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -46,7 +46,10 @@ use ruma::{ use service::{ appservice::RegistrationInfo, pdu::gen_event_id, - rooms::{state::RoomMutexGuard, state_compressor::HashSetCompressStateEvent}, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, Services, }; @@ -1169,7 +1172,7 @@ async fn join_room_by_id_helper_remote( } info!("Compressing state from send_join"); - let compressed: HashSet<_> = services + let compressed: CompressedState = services .rooms .state_compressor .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) @@ -2340,7 +2343,7 @@ async fn knock_room_helper_remote( } info!("Compressing state from send_knock"); - let compressed: HashSet<_> = services + let compressed: CompressedState = services .rooms .state_compressor .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) diff 
--git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index c3de5f2f..4d99b088 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -15,7 +15,7 @@ use ruma::{ OwnedEventId, RoomId, RoomVersionId, }; -use crate::rooms::state_compressor::CompressedStateEvent; +use crate::rooms::state_compressor::CompressedState; #[implement(super::Service)] #[tracing::instrument(name = "resolve", level = "debug", skip_all)] @@ -24,7 +24,7 @@ pub async fn resolve_state( room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap, -) -> Result>> { +) -> Result> { trace!("Loading current room state ids"); let current_sstatehash = self .services @@ -91,7 +91,7 @@ pub async fn resolve_state( .await; trace!("Compressing state..."); - let new_room_state: HashSet<_> = self + let new_room_state: CompressedState = self .services .state_compressor .compress_state_events( diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 03697558..132daca7 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,10 +1,4 @@ -use std::{ - borrow::Borrow, - collections::{BTreeMap, HashSet}, - iter::once, - sync::Arc, - time::Instant, -}; +use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ debug, debug_info, err, implement, trace, @@ -19,7 +13,10 @@ use ruma::{ }; use super::{get_room_version_id, to_room_version}; -use crate::rooms::{state_compressor::HashSetCompressStateEvent, timeline::RawPduId}; +use crate::rooms::{ + state_compressor::{CompressedState, HashSetCompressStateEvent}, + timeline::RawPduId, +}; #[implement(super::Service)] pub(super) async fn upgrade_outlier_to_timeline_pdu( @@ -173,7 +170,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( 
incoming_pdu.prev_events.len() ); - let state_ids_compressed: Arc> = self + let state_ids_compressed: Arc = self .services .state_compressor .compress_state_events( diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 1b0d0d58..de90a89c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,9 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Write, - iter::once, - sync::Arc, -}; +use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ err, @@ -33,7 +28,7 @@ use crate::{ globals, rooms, rooms::{ short::{ShortEventId, ShortStateHash}, - state_compressor::{parse_compressed_state_event, CompressedStateEvent}, + state_compressor::{parse_compressed_state_event, CompressedState}, }, Dep, }; @@ -102,10 +97,9 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - statediffnew: Arc>, - _statediffremoved: Arc>, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ + statediffnew: Arc, + _statediffremoved: Arc, + state_lock: &RoomMutexGuard, ) -> Result { let event_ids = statediffnew .iter() @@ -176,7 +170,7 @@ impl Service { &self, event_id: &EventId, room_id: &RoomId, - state_ids_compressed: Arc>, + state_ids_compressed: Arc, ) -> Result { const KEY_LEN: usize = size_of::(); const VAL_LEN: usize = size_of::(); @@ -209,12 +203,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed + let statediffnew: CompressedState = state_ids_compressed .difference(&parent_stateinfo.full_state) .copied() .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo + let statediffremoved: CompressedState = parent_stateinfo .full_state .difference(&state_ids_compressed) .copied() @@ -222,7 +216,7 @@ impl Service { (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (state_ids_compressed, 
Arc::new(HashSet::new())) + (state_ids_compressed, Arc::new(CompressedState::new())) }; self.services.state_compressor.save_state_from_diff( shortstatehash, @@ -300,10 +294,10 @@ impl Service { // TODO: statehash with deterministic inputs let shortstatehash = self.services.globals.next_count()?; - let mut statediffnew = HashSet::new(); + let mut statediffnew = CompressedState::new(); statediffnew.insert(new); - let mut statediffremoved = HashSet::new(); + let mut statediffremoved = CompressedState::new(); if let Some(replaces) = replaces { statediffremoved.insert(*replaces); } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 98aac138..8b56c8b6 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -11,6 +11,7 @@ use conduwuit::{ utils, utils::{ math::{usize_from_f64, Expected}, + result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, Err, Error, PduEvent, Result, @@ -47,7 +48,7 @@ use crate::{ rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, state::RoomMutexGuard, - state_compressor::parse_compressed_state_event, + state_compressor::{compress_state_event, parse_compressed_state_event}, }, Dep, }; @@ -220,36 +221,88 @@ impl Service { Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) .await?; - let full_state = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? 
- .pop() - .expect("there is always one layer") - .full_state; - - let compressed = full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .ok_or(err!(Database("No shortstatekey in compressed state")))?; - - let (_, shorteventid) = parse_compressed_state_event(*compressed); - self.services .short .get_eventid_from_short(shorteventid) .await } - #[inline] + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| { + full_state + .range(start..end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? 
+ } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, + ) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| full_state.range(start..end).next().copied()) + .await + .flat_ok() + .is_some() + } + pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 532df360..3d68dff6 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeSet, HashMap}, fmt::{Debug, Write}, mem::size_of, sync::{Arc, Mutex}, @@ -63,8 +63,8 @@ type StateInfoLruCache = LruCache; type ShortStateInfoVec = Vec; type ParentStatesVec = Vec; -pub(crate) type CompressedState = HashSet; -pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; +pub type CompressedState = BTreeSet; +pub type CompressedStateEvent = [u8; 2 * size_of::()]; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -249,8 +249,8 @@ impl Service { pub fn save_state_from_diff( &self, shortstatehash: ShortStateHash, - statediffnew: Arc>, - statediffremoved: Arc>, + statediffnew: Arc, + 
statediffremoved: Arc, diff_to_sibling: usize, mut parent_states: ParentStatesVec, ) -> Result { @@ -363,7 +363,7 @@ impl Service { pub async fn save_state( &self, room_id: &RoomId, - new_state_ids_compressed: Arc>, + new_state_ids_compressed: Arc, ) -> Result { let previous_shortstatehash = self .services @@ -396,12 +396,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = new_state_ids_compressed + let statediffnew: CompressedState = new_state_ids_compressed .difference(&parent_stateinfo.full_state) .copied() .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo + let statediffremoved: CompressedState = parent_stateinfo .full_state .difference(&new_state_ids_compressed) .copied() @@ -409,7 +409,7 @@ impl Service { (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (new_state_ids_compressed, Arc::new(HashSet::new())) + (new_state_ids_compressed, Arc::new(CompressedState::new())) }; if !already_existed { @@ -448,11 +448,11 @@ impl Service { .take_if(|parent| *parent != 0); debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride"); - let num_values = value.len() / STRIDE; + let _num_values = value.len() / STRIDE; let mut add_mode = true; - let mut added = HashSet::with_capacity(num_values); - let mut removed = HashSet::with_capacity(num_values); + let mut added = CompressedState::new(); + let mut removed = CompressedState::new(); let mut i = STRIDE; while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { @@ -469,8 +469,6 @@ impl Service { i = expected!(i + 2 * STRIDE); } - added.shrink_to_fit(); - removed.shrink_to_fit(); Ok(StateDiff { parent, added: Arc::new(added), @@ -507,7 +505,7 @@ impl Service { #[inline] #[must_use] -fn compress_state_event( +pub(crate) fn compress_state_event( shortstatekey: ShortStateKey, shorteventid: ShortEventId, ) -> CompressedStateEvent { @@ -523,7 +521,7 @@ fn compress_state_event( #[inline] 
#[must_use] -pub fn parse_compressed_state_event( +pub(crate) fn parse_compressed_state_event( compressed_event: CompressedStateEvent, ) -> (ShortStateKey, ShortEventId) { use utils::u64_from_u8; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 8b3b67a7..a913034d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -49,7 +49,7 @@ use crate::{ account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, - rooms::{short::ShortRoomId, state_compressor::CompressedStateEvent}, + rooms::{short::ShortRoomId, state_compressor::CompressedState}, sending, server_keys, users, Dep, }; @@ -950,7 +950,7 @@ impl Service { pdu: &'a PduEvent, pdu_json: CanonicalJsonObject, new_room_leafs: Leafs, - state_ids_compressed: Arc>, + state_ids_compressed: Arc, soft_fail: bool, state_lock: &'a RoomMutexGuard, ) -> Result> From 7ce782ddf4cb6989caff7a3781cfc667183b9b63 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 01:17:28 +0000 Subject: [PATCH 0636/1248] fix jemalloc cfgs lacking msvc conditions Signed-off-by: Jason Volk --- src/core/config/check.rs | 2 +- src/database/pool.rs | 9 ++++++--- src/main/runtime.rs | 6 +++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 988d4143..5532c5a2 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -38,7 +38,7 @@ pub fn check(config: &Config) -> Result { )); } - if cfg!(all(feature = "hardened_malloc", feature = "jemalloc")) { + if cfg!(all(feature = "hardened_malloc", feature = "jemalloc", not(target_env = "msvc"))) { debug_warn!( "hardened_malloc and jemalloc compile-time features are both enabled, this causes \ jemalloc to be used." 
diff --git a/src/database/pool.rs b/src/database/pool.rs index 86516c31..c753855a 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -13,7 +13,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, err, error, implement, - result::{DebugInspect, LogDebugErr}, + result::DebugInspect, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, Error, Result, Server, @@ -290,9 +290,12 @@ fn worker_init(&self, id: usize) { // affinity is empty (no-op) if there's only one queue set_affinity(affinity.clone()); - #[cfg(feature = "jemalloc")] + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] if affinity.clone().count() == 1 && conduwuit::alloc::je::is_affine_arena() { - use conduwuit::alloc::je::this_thread::{arena_id, set_arena}; + use conduwuit::{ + alloc::je::this_thread::{arena_id, set_arena}, + result::LogDebugErr, + }; let id = affinity.clone().next().expect("at least one id"); diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 9f4f60f8..02b9931f 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -122,7 +122,7 @@ fn set_worker_affinity() { set_worker_mallctl(id); } -#[cfg(feature = "jemalloc")] +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] fn set_worker_mallctl(id: usize) { use conduwuit::alloc::je::{ is_affine_arena, @@ -143,7 +143,7 @@ fn set_worker_mallctl(id: usize) { } } -#[cfg(not(feature = "jemalloc"))] +#[cfg(any(not(feature = "jemalloc"), target_env = "msvc"))] fn set_worker_mallctl(_: usize) {} #[tracing::instrument( @@ -189,7 +189,7 @@ fn thread_park() { } fn gc_on_park() { - #[cfg(feature = "jemalloc")] + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] conduwuit::alloc::je::this_thread::decay() .log_debug_err() .ok(); From b4d22bd05e3cf81476669cc2e37eef60eeade07e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 23:41:05 +0000 Subject: [PATCH 0637/1248] remove unnecessary cf arc refcnt 
workaround log errors and panics propagating through the request task join Signed-off-by: Jason Volk --- src/database/engine.rs | 6 ++--- src/database/engine/open.rs | 6 ++--- src/database/map.rs | 12 ++++----- src/database/map/open.rs | 5 +--- src/router/request.rs | 52 ++++++++++++++++++++++++++++--------- 5 files changed, 53 insertions(+), 28 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index be3d62cf..22e2b9c8 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -30,13 +30,13 @@ use crate::{ }; pub struct Engine { + pub(crate) db: Db, + pub(crate) pool: Arc, + pub(crate) ctx: Arc, pub(super) read_only: bool, pub(super) secondary: bool, pub(crate) checksums: bool, corks: AtomicU32, - pub(crate) db: Db, - pub(crate) pool: Arc, - pub(crate) ctx: Arc, } pub(crate) type Db = DBWithThreadMode; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index ad724765..59dabce1 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -56,13 +56,13 @@ pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result, - cf: Arc, watchers: Watchers, - write_options: WriteOptions, + cf: Arc, + db: Arc, read_options: ReadOptions, cache_read_options: ReadOptions, + write_options: WriteOptions, } impl Map { pub(crate) fn open(db: &Arc, name: &'static str) -> Result> { Ok(Arc::new(Self { name, - db: db.clone(), - cf: open::open(db, name), watchers: Watchers::default(), - write_options: write_options_default(db), + cf: open::open(db, name), + db: db.clone(), read_options: read_options_default(db), cache_read_options: cache_read_options_default(db), + write_options: write_options_default(db), })) } diff --git a/src/database/map/open.rs b/src/database/map/open.rs index 6ecec044..07f7a0c6 100644 --- a/src/database/map/open.rs +++ b/src/database/map/open.rs @@ -30,8 +30,5 @@ pub(super) fn open(db: &Arc, name: &str) -> Arc { // lifetime parameter. 
We should not hold this handle, even in its Arc, after // closing the database (dropping `Engine`). Since `Arc` is a sibling // member along with this handle in `Map`, that is prevented. - unsafe { - Arc::increment_strong_count(cf_ptr); - Arc::from_raw(cf_ptr) - } + unsafe { Arc::from_raw(cf_ptr) } } diff --git a/src/router/request.rs b/src/router/request.rs index f7b94417..19cd751b 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,4 +1,7 @@ -use std::sync::{atomic::Ordering, Arc}; +use std::{ + fmt::Debug, + sync::{atomic::Ordering, Arc}, +}; use axum::{ extract::State, @@ -12,16 +15,16 @@ use http::{Method, StatusCode, Uri}; level = "debug", skip_all, fields( - handled = %services - .server - .metrics - .requests_handle_finished - .fetch_add(1, Ordering::Relaxed), active = %services .server .metrics .requests_handle_active .fetch_add(1, Ordering::Relaxed), + handled = %services + .server + .metrics + .requests_handle_finished + .load(Ordering::Relaxed), ) )] pub(crate) async fn handle( @@ -31,6 +34,10 @@ pub(crate) async fn handle( ) -> Result { #[cfg(debug_assertions)] conduwuit::defer! 
{{ + _ = services.server + .metrics + .requests_handle_finished + .fetch_add(1, Ordering::Relaxed); _ = services.server .metrics .requests_handle_active @@ -47,21 +54,35 @@ pub(crate) async fn handle( return Err(StatusCode::SERVICE_UNAVAILABLE); } - let method = req.method().clone(); let uri = req.uri().clone(); - services + let method = req.method().clone(); + let services_ = services.clone(); + let task = services .server .runtime() - .spawn(next.run(req)) - .await - .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) - .and_then(|result| handle_result(&method, &uri, result)) + .spawn(async move { execute(services_, req, next).await }); + + task.await + .map_err(unhandled) + .and_then(move |result| handle_result(&method, &uri, result)) +} + +async fn execute( + // we made a safety contract that Services will not go out of scope + // during the request; this ensures a reference is accounted for at + // the base frame of the task regardless of its detachment. + _services: Arc, + req: http::Request, + next: axum::middleware::Next, +) -> Response { + next.run(req).await } fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { let status = result.status(); let reason = status.canonical_reason().unwrap_or("Unknown Reason"); let code = status.as_u16(); + if status.is_server_error() { error!(method = ?method, uri = ?uri, "{code} {reason}"); } else if status.is_client_error() { @@ -78,3 +99,10 @@ fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result(e: Error) -> StatusCode { + error!("unhandled error or panic during request: {e:?}"); + + StatusCode::INTERNAL_SERVER_ERROR +} From bd6d4bc58f45251313b33e65947a4131ea9114e7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 10:07:00 +0000 Subject: [PATCH 0638/1248] enforce timeout on request layers Signed-off-by: Jason Volk --- Cargo.toml | 3 ++- conduwuit-example.toml | 12 ++++++++++++ src/core/config/mod.rs | 24 ++++++++++++++++++++++++ src/router/layers.rs | 4 ++++ 
src/router/request.rs | 23 ++++++++++++++++++----- 5 files changed, 60 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c4af4a7c..1cf787c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,12 +127,13 @@ version = "0.6.2" default-features = false features = [ "add-extension", + "catch-panic", "cors", "sensitive-headers", "set-header", + "timeout", "trace", "util", - "catch-panic", ] [workspace.dependencies.rustls] diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3fd95044..f4f42365 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -377,6 +377,18 @@ # #pusher_idle_timeout = 15 +# Maximum time to receive a request from a client (seconds). +# +#client_receive_timeout = 75 + +# Maximum time to process a request received from a client (seconds). +# +#client_request_timeout = 180 + +# Maximum time to transmit a response to a client (seconds) +# +#client_response_timeout = 120 + # Enables registration. If set to false, no users can register on this # server. # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index ff038975..b8cfd91b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -480,6 +480,24 @@ pub struct Config { #[serde(default = "default_pusher_idle_timeout")] pub pusher_idle_timeout: u64, + /// Maximum time to receive a request from a client (seconds). + /// + /// default: 75 + #[serde(default = "default_client_receive_timeout")] + pub client_receive_timeout: u64, + + /// Maximum time to process a request received from a client (seconds). + /// + /// default: 180 + #[serde(default = "default_client_request_timeout")] + pub client_request_timeout: u64, + + /// Maximum time to transmit a response to a client (seconds) + /// + /// default: 120 + #[serde(default = "default_client_response_timeout")] + pub client_response_timeout: u64, + /// Enables registration. If set to false, no users can register on this /// server. 
/// @@ -2170,3 +2188,9 @@ fn default_stream_width_default() -> usize { 32 } fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } + +fn default_client_receive_timeout() -> u64 { 75 } + +fn default_client_request_timeout() -> u64 { 180 } + +fn default_client_response_timeout() -> u64 { 120 } diff --git a/src/router/layers.rs b/src/router/layers.rs index c5227c22..e8a8b7e8 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -18,6 +18,7 @@ use tower_http::{ cors::{self, CorsLayer}, sensitive_headers::SetSensitiveHeadersLayer, set_header::SetResponseHeaderLayer, + timeout::{RequestBodyTimeoutLayer, ResponseBodyTimeoutLayer, TimeoutLayer}, trace::{DefaultOnFailure, DefaultOnRequest, DefaultOnResponse, TraceLayer}, }; use tracing::Level; @@ -59,6 +60,9 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ) .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::handle)) .layer(SecureClientIpSource::ConnectInfo.into_extension()) + .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs(server.config.client_response_timeout))) + .layer(RequestBodyTimeoutLayer::new(Duration::from_secs(server.config.client_receive_timeout))) + .layer(TimeoutLayer::new(Duration::from_secs(server.config.client_request_timeout))) .layer(SetResponseHeaderLayer::if_not_present( HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster HeaderValue::from_static("?1"), diff --git a/src/router/request.rs b/src/router/request.rs index 19cd751b..68ea742c 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -10,8 +10,10 @@ use axum::{ use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; use http::{Method, StatusCode, Uri}; +use tracing::Span; #[tracing::instrument( + name = "request", level = "debug", skip_all, fields( @@ -57,23 +59,34 @@ pub(crate) async fn handle( let uri 
= req.uri().clone(); let method = req.method().clone(); let services_ = services.clone(); - let task = services - .server - .runtime() - .spawn(async move { execute(services_, req, next).await }); + let parent = Span::current(); + let task = services.server.runtime().spawn(async move { + tokio::select! { + response = execute(&services_, req, next, parent) => response, + () = services_.server.until_shutdown() => + StatusCode::SERVICE_UNAVAILABLE.into_response(), + } + }); task.await .map_err(unhandled) .and_then(move |result| handle_result(&method, &uri, result)) } +#[tracing::instrument( + name = "handle", + level = "debug", + parent = parent, + skip_all, +)] async fn execute( // we made a safety contract that Services will not go out of scope // during the request; this ensures a reference is accounted for at // the base frame of the task regardless of its detachment. - _services: Arc, + _services: &Arc, req: http::Request, next: axum::middleware::Next, + parent: Span, ) -> Response { next.run(req).await } From ffe3b0faf2740faa53415a661466c19d4fe722ad Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 10:43:02 +0000 Subject: [PATCH 0639/1248] make shutdown grace periods configurable Signed-off-by: Jason Volk --- conduwuit-example.toml | 8 ++++++++ src/core/config/mod.rs | 16 ++++++++++++++++ src/router/request.rs | 13 +++++++++++-- src/router/run.rs | 3 ++- src/service/sending/sender.rs | 5 ++--- 5 files changed, 39 insertions(+), 6 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index f4f42365..3e64522c 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -389,6 +389,14 @@ # #client_response_timeout = 120 +# Grace period for clean shutdown of client requests (seconds). +# +#client_shutdown_timeout = 10 + +# Grace period for clean shutdown of federation requests (seconds). +# +#sender_shutdown_timeout = 5 + # Enables registration. If set to false, no users can register on this # server. 
# diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index b8cfd91b..ff80d1cf 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -498,6 +498,18 @@ pub struct Config { #[serde(default = "default_client_response_timeout")] pub client_response_timeout: u64, + /// Grace period for clean shutdown of client requests (seconds). + /// + /// default: 10 + #[serde(default = "default_client_shutdown_timeout")] + pub client_shutdown_timeout: u64, + + /// Grace period for clean shutdown of federation requests (seconds). + /// + /// default: 5 + #[serde(default = "default_sender_shutdown_timeout")] + pub sender_shutdown_timeout: u64, + /// Enables registration. If set to false, no users can register on this /// server. /// @@ -2194,3 +2206,7 @@ fn default_client_receive_timeout() -> u64 { 75 } fn default_client_request_timeout() -> u64 { 180 } fn default_client_response_timeout() -> u64 { 120 } + +fn default_client_shutdown_timeout() -> u64 { 15 } + +fn default_sender_shutdown_timeout() -> u64 { 5 } diff --git a/src/router/request.rs b/src/router/request.rs index 68ea742c..e0373646 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,6 +1,7 @@ use std::{ fmt::Debug, sync::{atomic::Ordering, Arc}, + time::Duration, }; use axum::{ @@ -9,7 +10,9 @@ use axum::{ }; use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; +use futures::FutureExt; use http::{Method, StatusCode, Uri}; +use tokio::time::sleep; use tracing::Span; #[tracing::instrument( @@ -63,8 +66,14 @@ pub(crate) async fn handle( let task = services.server.runtime().spawn(async move { tokio::select! 
{ response = execute(&services_, req, next, parent) => response, - () = services_.server.until_shutdown() => - StatusCode::SERVICE_UNAVAILABLE.into_response(), + response = services_.server.until_shutdown() + .then(|()| { + let timeout = services_.server.config.client_shutdown_timeout; + let timeout = Duration::from_secs(timeout); + sleep(timeout) + }) + .map(|()| StatusCode::SERVICE_UNAVAILABLE) + .map(IntoResponse::into_response) => response, } }); diff --git a/src/router/run.rs b/src/router/run.rs index 605168b8..26701735 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -122,7 +122,8 @@ async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_serve error!("failed sending shutdown transaction to channel: {e}"); } - let timeout = Duration::from_secs(36); + let timeout = server.config.client_shutdown_timeout; + let timeout = Duration::from_secs(timeout); debug!( ?timeout, handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed), diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index f19b69da..3e86de2d 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -67,8 +67,6 @@ type SendingFuture<'a> = BoxFuture<'a, SendingResult>; type SendingFutures<'a> = FuturesUnordered>; type CurTransactionStatus = HashMap; -const CLEANUP_TIMEOUT_MS: u64 = 3500; - const SELECT_PRESENCE_LIMIT: usize = 256; const SELECT_RECEIPT_LIMIT: usize = 256; const SELECT_EDU_LIMIT: usize = EDU_LIMIT - 2; @@ -216,8 +214,9 @@ impl Service { time::{sleep_until, Instant}, }; + let timeout = self.server.config.sender_shutdown_timeout; + let timeout = Duration::from_secs(timeout); let now = Instant::now(); - let timeout = Duration::from_millis(CLEANUP_TIMEOUT_MS); let deadline = now.checked_add(timeout).unwrap_or(now); loop { trace!("Waiting for {} requests to complete...", futures.len()); From a774afe8370bd6eed3deed6e663229e8457d73c7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 08:59:14 
+0000 Subject: [PATCH 0640/1248] modernize remove_to_device_events Signed-off-by: Jason Volk --- src/service/users/mod.rs | 43 ++++++++++++++++------------------------ 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b2d3a94a..e5caed47 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,12 +1,12 @@ -use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; +use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ debug_warn, err, trace, utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; -use database::{Database, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map}; +use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, @@ -28,7 +28,6 @@ pub struct Service { struct Services { server: Arc, - db: Arc, account_data: Dep, admin: Dep, globals: Dep, @@ -64,7 +63,6 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { server: args.server.clone(), - db: args.db.clone(), account_data: args.depend::("account_data"), admin: args.depend::("admin"), globals: args.depend::("globals"), @@ -801,35 +799,28 @@ impl Service { .map(|(_, val): (Ignore, Raw)| val) } - pub async fn remove_to_device_events( + pub async fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, - until: u64, - ) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); + until: Until, + ) where + Until: Into> + Send, + { + type Key<'a> = (&'a UserId, &'a DeviceId, u64); - let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); - - let _cork = 
self.services.db.cork_and_flush(); + let until = until.into().unwrap_or(u64::MAX); + let from = (user_id, device_id, until); self.db .todeviceid_events - .rev_raw_keys_from(&last) // this includes last + .rev_keys_from(&from) .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) - .map(|key| { - let len = key.len(); - let start = len.saturating_sub(size_of::()); - let count = utils::u64_from_u8(&key[start..len]); - (key, count) + .ready_take_while(move |(user_id_, device_id_, _): &Key<'_>| { + user_id == *user_id_ && device_id == *device_id_ + }) + .ready_for_each(|key: Key<'_>| { + self.db.todeviceid_events.del(key); }) - .ready_take_while(move |(_, count)| *count <= until) - .ready_for_each(|(key, _)| self.db.todeviceid_events.remove(&key)) - .boxed() .await; } From 5e59ce37c4799c24723997326e1ccc26bb3345b0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 13:51:39 +0000 Subject: [PATCH 0641/1248] snapshot sync results at next_batch upper-bound Signed-off-by: Jason Volk --- src/admin/query/account_data.rs | 2 +- src/admin/query/users.rs | 2 +- src/api/client/sync/v3.rs | 10 +++++----- src/api/client/sync/v4.rs | 13 +++++++++---- src/api/client/sync/v5.rs | 8 ++++---- src/service/account_data/mod.rs | 12 +++++++----- src/service/users/mod.rs | 18 ++++++++++++++---- 7 files changed, 41 insertions(+), 24 deletions(-) diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index b75d8234..bb8ddeff 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -41,7 +41,7 @@ async fn changes_since( let results: Vec<_> = self .services .account_data - .changes_since(room_id.as_deref(), &user_id, since) + .changes_since(room_id.as_deref(), &user_id, since, None) .collect() .await; let query_time = timer.elapsed(); diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 3715ac25..c517d9dd 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -413,7 +413,7 @@ 
async fn get_to_device_events( let result = self .services .users - .get_to_device_events(&user_id, &device_id) + .get_to_device_events(&user_id, &device_id, None, None) .collect::>() .await; let query_time = timer.elapsed(); diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 49246514..b548aa23 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -290,20 +290,20 @@ pub(crate) async fn build_sync_events( let account_data = services .account_data - .changes_since(None, sender_user, since) + .changes_since(None, sender_user, since, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect(); // Look for device list updates of this account let keys_changed = services .users - .keys_changed(sender_user, since, None) + .keys_changed(sender_user, since, Some(next_batch)) .map(ToOwned::to_owned) .collect::>(); let to_device_events = services .users - .get_to_device_events(sender_user, sender_device) + .get_to_device_events(sender_user, sender_device, Some(since), Some(next_batch)) .collect::>(); let device_one_time_keys_count = services @@ -700,14 +700,14 @@ async fn load_joined_room( let account_data_events = services .account_data - .changes_since(Some(room_id), sender_user, since) + .changes_since(Some(room_id), sender_user, since, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect(); // Look for device list updates in this room let device_updates = services .users - .room_keys_changed(room_id, since, None) + .room_keys_changed(room_id, since, Some(next_batch)) .map(|(user_id, _)| user_id) .map(ToOwned::to_owned) .collect::>(); diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index b7967498..66793ba1 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -153,7 +153,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.account_data.enabled.unwrap_or(false) { account_data.global = services 
.account_data - .changes_since(None, sender_user, globalsince) + .changes_since(None, sender_user, globalsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect() .await; @@ -164,7 +164,7 @@ pub(crate) async fn sync_events_v4_route( room.clone(), services .account_data - .changes_since(Some(&room), sender_user, globalsince) + .changes_since(Some(&room), sender_user, globalsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -531,7 +531,7 @@ pub(crate) async fn sync_events_v4_route( room_id.to_owned(), services .account_data - .changes_since(Some(room_id), sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -779,7 +779,12 @@ pub(crate) async fn sync_events_v4_route( Some(sync_events::v4::ToDevice { events: services .users - .get_to_device_events(sender_user, &sender_device) + .get_to_device_events( + sender_user, + &sender_device, + Some(globalsince), + Some(next_batch), + ) .collect() .await, next_batch: next_batch.to_string(), diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 66647f0e..e7b5fe74 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -390,7 +390,7 @@ async fn process_rooms( room_id.to_owned(), services .account_data - .changes_since(Some(room_id), sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -644,7 +644,7 @@ async fn collect_account_data( account_data.global = services .account_data - .changes_since(None, sender_user, globalsince) + .changes_since(None, sender_user, globalsince, None) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect() .await; @@ -655,7 +655,7 @@ async 
fn collect_account_data( room.clone(), services .account_data - .changes_since(Some(room), sender_user, globalsince) + .changes_since(Some(room), sender_user, globalsince, None) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -876,7 +876,7 @@ async fn collect_to_device( next_batch: next_batch.to_string(), events: services .users - .get_to_device_events(sender_user, sender_device) + .get_to_device_events(sender_user, sender_device, None, Some(next_batch)) .collect() .await, }) diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index ddbc15a4..5a943f88 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -5,7 +5,7 @@ use conduwuit::{ utils::{result::LogErr, stream::TryIgnore, ReadyExt}, Err, Result, }; -use database::{Deserialized, Handle, Interfix, Json, Map}; +use database::{Deserialized, Handle, Ignore, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ events::{ @@ -131,18 +131,20 @@ pub fn changes_since<'a>( room_id: Option<&'a RoomId>, user_id: &'a UserId, since: u64, + to: Option, ) -> impl Stream + Send + 'a { - let prefix = (room_id, user_id, Interfix); - let prefix = database::serialize_key(prefix).expect("failed to serialize prefix"); + type Key<'a> = (Option<&'a RoomId>, &'a UserId, u64, Ignore); // Skip the data that's exactly at since, because we sent that last time let first_possible = (room_id, user_id, since.saturating_add(1)); self.db .roomuserdataid_accountdata - .stream_from_raw(&first_possible) + .stream_from(&first_possible) .ignore_err() - .ready_take_while(move |(k, _)| k.starts_with(&prefix)) + .ready_take_while(move |((room_id_, user_id_, count, _), _): &(Key<'_>, _)| { + room_id == *room_id_ && user_id == *user_id_ && to.is_none_or(|to| *count <= to) + }) .map(move |(_, v)| { match room_id { | Some(_) => serde_json::from_slice::>(v) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 
e5caed47..68b87541 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - debug_warn, err, trace, + at, debug_warn, err, trace, utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; @@ -790,13 +790,23 @@ impl Service { &'a self, user_id: &'a UserId, device_id: &'a DeviceId, + since: Option, + to: Option, ) -> impl Stream> + Send + 'a { - let prefix = (user_id, device_id, Interfix); + type Key<'a> = (&'a UserId, &'a DeviceId, u64); + + let from = (user_id, device_id, since.map_or(0, |since| since.saturating_add(1))); + self.db .todeviceid_events - .stream_prefix(&prefix) + .stream_from(&from) .ignore_err() - .map(|(_, val): (Ignore, Raw)| val) + .ready_take_while(move |((user_id_, device_id_, count), _): &(Key<'_>, _)| { + user_id == *user_id_ + && device_id == *device_id_ + && to.is_none_or(|to| *count <= to) + }) + .map(at!(1)) } pub async fn remove_to_device_events( From 32f990fc72c6bfbf4a869dac9f5b2b88ee334684 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 23:19:35 +0000 Subject: [PATCH 0642/1248] fix the panic counter in the tower layer Signed-off-by: Jason Volk --- src/router/layers.rs | 18 +++++++------- src/router/request.rs | 56 ++++++++++++++++++++----------------------- 2 files changed, 35 insertions(+), 39 deletions(-) diff --git a/src/router/layers.rs b/src/router/layers.rs index e8a8b7e8..7ebec16e 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -49,6 +49,7 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ))] let layers = layers.layer(compression_layer(server)); + let services_ = services.clone(); let layers = layers .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) .layer( @@ -89,7 +90,7 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { )) .layer(cors_layer(server)) .layer(body_limit_layer(server)) - 
.layer(CatchPanicLayer::custom(catch_panic)); + .layer(CatchPanicLayer::custom(move |panic| catch_panic(panic, services_.clone()))); let (router, guard) = router::build(services); Ok((router.layer(layers), guard)) @@ -167,15 +168,14 @@ fn body_limit_layer(server: &Server) -> DefaultBodyLimit { #[allow(clippy::needless_pass_by_value)] fn catch_panic( err: Box, + services: Arc, ) -> http::Response> { - //TODO: XXX - /* - conduwuit_service::services() - .server - .metrics - .requests_panic - .fetch_add(1, std::sync::atomic::Ordering::Release); - */ + services + .server + .metrics + .requests_panic + .fetch_add(1, std::sync::atomic::Ordering::Release); + let details = if let Some(s) = err.downcast_ref::() { s.clone() } else if let Some(s) = err.downcast_ref::<&str>() { diff --git a/src/router/request.rs b/src/router/request.rs index e0373646..b6c22d45 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -15,40 +15,12 @@ use http::{Method, StatusCode, Uri}; use tokio::time::sleep; use tracing::Span; -#[tracing::instrument( - name = "request", - level = "debug", - skip_all, - fields( - active = %services - .server - .metrics - .requests_handle_active - .fetch_add(1, Ordering::Relaxed), - handled = %services - .server - .metrics - .requests_handle_finished - .load(Ordering::Relaxed), - ) -)] +#[tracing::instrument(name = "request", level = "debug", skip_all)] pub(crate) async fn handle( State(services): State>, req: http::Request, next: axum::middleware::Next, ) -> Result { - #[cfg(debug_assertions)] - conduwuit::defer! 
{{ - _ = services.server - .metrics - .requests_handle_finished - .fetch_add(1, Ordering::Relaxed); - _ = services.server - .metrics - .requests_handle_active - .fetch_sub(1, Ordering::Relaxed); - }}; - if !services.server.running() { debug_warn!( method = %req.method(), @@ -87,16 +59,40 @@ pub(crate) async fn handle( level = "debug", parent = parent, skip_all, + fields( + active = %services + .server + .metrics + .requests_handle_active + .fetch_add(1, Ordering::Relaxed), + handled = %services + .server + .metrics + .requests_handle_finished + .load(Ordering::Relaxed), + ) )] async fn execute( // we made a safety contract that Services will not go out of scope // during the request; this ensures a reference is accounted for at // the base frame of the task regardless of its detachment. - _services: &Arc, + services: &Arc, req: http::Request, next: axum::middleware::Next, parent: Span, ) -> Response { + #[cfg(debug_assertions)] + conduwuit::defer! {{ + _ = services.server + .metrics + .requests_handle_finished + .fetch_add(1, Ordering::Relaxed); + _ = services.server + .metrics + .requests_handle_active + .fetch_sub(1, Ordering::Relaxed); + }}; + next.run(req).await } From da4b94d80dc9939ad385860af764ed1a1837b84e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 22:13:27 +0000 Subject: [PATCH 0643/1248] trap panics when running in gdb Signed-off-by: Jason Volk --- src/core/debug.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/core/debug.rs b/src/core/debug.rs index ca0f2f2e..8a5eccfd 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -1,6 +1,6 @@ #![allow(clippy::disallowed_macros)] -use std::{any::Any, panic}; +use std::{any::Any, env, panic, sync::LazyLock}; // Export debug proc_macros pub use conduwuit_macros::recursion_depth; @@ -58,16 +58,26 @@ pub const INFO_SPAN_LEVEL: Level = if cfg!(debug_assertions) { Level::DEBUG }; -pub fn set_panic_trap() { +pub static DEBUGGER: LazyLock = + 
LazyLock::new(|| env::var("_").unwrap_or_default().ends_with("gdb")); + +#[cfg_attr(debug_assertions, crate::ctor)] +#[cfg_attr(not(debug_assertions), allow(dead_code))] +fn set_panic_trap() { + if !*DEBUGGER { + return; + } + let next = panic::take_hook(); panic::set_hook(Box::new(move |info| { panic_handler(info, &next); })); } -#[inline(always)] +#[cold] +#[inline(never)] #[allow(deprecated_in_future)] -fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { +pub fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { trap(); next(info); } From 106bcd30b75b6846be197fc5431063b0b82c4336 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 07:40:08 +0000 Subject: [PATCH 0644/1248] optimize incremental sync state diff Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 366 +++++++++-------- src/service/rooms/state_accessor/mod.rs | 523 +++++++++++++----------- 2 files changed, 474 insertions(+), 415 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index b548aa23..a97e4329 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -7,13 +7,13 @@ use std::{ use axum::extract::State; use conduwuit::{ at, err, error, extract_variant, is_equal_to, pair_of, - pdu::EventHash, + pdu::{Event, EventHash}, + ref_at, result::FlatOk, utils::{ self, - future::OptionExt, math::ruma_from_u64, - stream::{BroadbandExt, Tools, WidebandExt}, + stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, PduCount, PduEvent, Result, @@ -53,19 +53,16 @@ use ruma::{ serde::Raw, uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, }; +use service::rooms::short::{ShortEventId, ShortStateKey}; use super::{load_timeline, share_encrypted_room}; -use crate::{ - client::{ignored_filter, lazy_loading_witness}, - Ruma, RumaResponse, -}; +use crate::{client::ignored_filter, Ruma, 
RumaResponse}; #[derive(Default)] struct StateChanges { heroes: Option>, joined_member_count: Option, invited_member_count: Option, - joined_since_last_sync: bool, state_events: Vec, device_list_updates: HashSet, left_encrypted_users: HashSet, @@ -625,6 +622,40 @@ async fn load_joined_room( .await?; let (timeline_pdus, limited) = timeline; + let initial = since_shortstatehash.is_none(); + let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() + || filter.room.timeline.lazy_load_options.is_enabled(); + + let lazy_loading_context = &lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: Some(since), + options: Some(&filter.room.state.lazy_load_options), + }; + + // Reset lazy loading because this is an initial sync + let lazy_load_reset: OptionFuture<_> = initial + .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) + .into(); + + lazy_load_reset.await; + let witness: OptionFuture<_> = lazy_loading_enabled + .then(|| { + let witness: Witness = timeline_pdus + .iter() + .map(ref_at!(1)) + .map(Event::sender) + .map(Into::into) + .chain(receipt_events.keys().map(Into::into)) + .collect(); + + services + .rooms + .lazy_loading + .witness_retain(witness, lazy_loading_context) + }) + .into(); let last_notification_read: OptionFuture<_> = timeline_pdus .is_empty() @@ -646,41 +677,20 @@ async fn load_joined_room( }) .into(); + let (last_notification_read, since_sender_member, witness) = + join3(last_notification_read, since_sender_member, witness).await; + let joined_since_last_sync = since_sender_member - .await .flatten() .is_none_or(|content: RoomMemberEventContent| { content.membership != MembershipState::Join }); - let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - let lazy_reset = since_shortstatehash.is_none(); - let lazy_loading_context = &lazy_loading::Context { - user_id: sender_user, - device_id: 
sender_device, - room_id, - token: None, - options: Some(&filter.room.state.lazy_load_options), - }; - - // Reset lazy loading because this is an initial sync - let lazy_load_reset: OptionFuture<_> = lazy_reset - .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) - .into(); - - lazy_load_reset.await; - let witness: OptionFuture<_> = lazy_loading_enabled - .then(|| lazy_loading_witness(services, lazy_loading_context, timeline_pdus.iter())) - .into(); - let StateChanges { heroes, joined_member_count, invited_member_count, - joined_since_last_sync, state_events, mut device_list_updates, left_encrypted_users, @@ -693,7 +703,7 @@ async fn load_joined_room( since_shortstatehash, current_shortstatehash, joined_since_last_sync, - witness.await.as_ref(), + witness.as_ref(), ) .boxed() .await?; @@ -719,28 +729,7 @@ async fn load_joined_room( .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); - let typing_events = services - .rooms - .typing - .last_typing_update(room_id) - .and_then(|count| async move { - if count <= since { - return Ok(Vec::>::new()); - } - - let typings = services - .rooms - .typing - .typings_all(room_id, sender_user) - .await?; - - Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) - }) - .unwrap_or(Vec::new()); - - let send_notification_counts = last_notification_read - .is_none_or(|&count| count > since) - .await; + let send_notification_counts = last_notification_read.is_none_or(|count| count > since); let notification_count: OptionFuture<_> = send_notification_counts .then(|| { @@ -764,8 +753,27 @@ async fn load_joined_room( }) .into(); - let events = join3(room_events, account_data_events, typing_events); + let typing_events = services + .rooms + .typing + .last_typing_update(room_id) + .and_then(|count| async move { + if count <= since { + return Ok(Vec::>::new()); + } + + let typings = services + .rooms + .typing + .typings_all(room_id, sender_user) + .await?; + + 
Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) + }) + .unwrap_or(Vec::new()); + let unread_notifications = join(notification_count, highlight_count); + let events = join3(room_events, account_data_events, typing_events); let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) .boxed() @@ -942,7 +950,6 @@ async fn calculate_state_initial( heroes, joined_member_count, invited_member_count, - joined_since_last_sync: true, state_events, ..Default::default() }) @@ -952,7 +959,7 @@ async fn calculate_state_initial( #[allow(clippy::too_many_arguments)] async fn calculate_state_incremental<'a>( services: &Services, - sender_user: &UserId, + sender_user: &'a UserId, room_id: &RoomId, full_state: bool, _filter: &FilterDefinition, @@ -965,102 +972,130 @@ async fn calculate_state_incremental<'a>( let state_changed = since_shortstatehash != current_shortstatehash; - let state_get_id = |user_id: &'a UserId| { - services - .rooms - .state_accessor - .state_get_id(current_shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .ok() - }; - - let lazy_state_ids: OptionFuture<_> = witness - .map(|witness| { - witness - .iter() - .stream() - .broad_filter_map(|user_id| state_get_id(user_id)) - .collect::>() - }) - .into(); - - let current_state_ids: OptionFuture<_> = state_changed - .then(|| { - services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .collect::>() - }) - .into(); - - let since_state_ids: OptionFuture<_> = (state_changed && !full_state) - .then(|| { - services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect::>() - }) - .into(); - - let lazy_state_ids = lazy_state_ids - .map(Option::into_iter) - .map(|iter| iter.flat_map(Vec::into_iter)) - .map(IterStream::stream) - .flatten_stream(); - - let ref since_state_ids = since_state_ids.shared(); - let delta_state_events = current_state_ids - .map(Option::into_iter) - .map(|iter| 
iter.flat_map(Vec::into_iter)) - .map(IterStream::stream) - .flatten_stream() - .filter_map(|(shortstatekey, event_id): (u64, OwnedEventId)| async move { - since_state_ids - .clone() - .await - .is_none_or(|since_state| since_state.get(&shortstatekey) != Some(&event_id)) - .then_some(event_id) - }) - .chain(lazy_state_ids) - .broad_filter_map(|event_id: OwnedEventId| async move { - services - .rooms - .timeline - .get_pdu(&event_id) - .await - .map(move |pdu| (event_id, pdu)) - .ok() - }) - .collect::>(); - - let since_encryption = services - .rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); - let encrypted_room = services .rooms .state_accessor .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); + .is_ok() + .await; - let (delta_state_events, encrypted_room) = join(delta_state_events, encrypted_room).await; + let state_get_shorteventid = |user_id: &'a UserId| { + services + .rooms + .state_accessor + .state_get_shortid( + current_shortstatehash, + &StateEventType::RoomMember, + user_id.as_str(), + ) + .ok() + }; - let (mut device_list_updates, left_encrypted_users) = delta_state_events - .values() + let lazy_state_ids: OptionFuture<_> = witness + .filter(|_| !full_state && !encrypted_room) + .map(|witness| { + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_shorteventid(user_id)) + .into_future() + }) + .into(); + + let state_diff: OptionFuture<_> = (!full_state && state_changed) + .then(|| { + services + .rooms + .state_accessor + .state_added((since_shortstatehash, current_shortstatehash)) + .boxed() + .into_future() + }) + .into(); + + let current_state_ids: OptionFuture<_> = full_state + .then(|| { + services + .rooms + .state_accessor + .state_full_shortids(current_shortstatehash) + .expect_ok() + .boxed() + .into_future() + }) + .into(); + + let lazy_state_ids = lazy_state_ids + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter 
= Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten(); + + let state_diff_ids = state_diff + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter = Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten(); + + let state_events = current_state_ids + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter = Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten() + .chain(state_diff_ids) + .broad_filter_map(|(shortstatekey, shorteventid)| async move { + if witness.is_none() || encrypted_room { + return Some(shorteventid); + } + + lazy_filter(services, sender_user, shortstatekey, shorteventid).await + }) + .chain(lazy_state_ids) + .broad_filter_map(|shorteventid| { + services + .rooms + .short + .get_eventid_from_short(shorteventid) + .ok() + }) + .broad_filter_map(|event_id: OwnedEventId| async move { + services.rooms.timeline.get_pdu(&event_id).await.ok() + }) + .collect::>() + .await; + + let (device_list_updates, left_encrypted_users) = state_events + .iter() .stream() .ready_filter(|_| encrypted_room) .ready_filter(|state_event| state_event.kind == RoomMember) .ready_filter_map(|state_event| { - let content = state_event.get_content().ok()?; - let user_id = state_event.state_key.as_ref()?.parse().ok()?; + let content: RoomMemberEventContent = state_event.get_content().ok()?; + let user_id: OwnedUserId = state_event.state_key.as_ref()?.parse().ok()?; + Some((content, user_id)) }) - .ready_filter(|(_, user_id): &(RoomMemberEventContent, OwnedUserId)| { - user_id != sender_user - }) .fold_default(|(mut dlu, mut leu): pair_of!(HashSet<_>), (content, user_id)| async move { use MembershipState::*; @@ -1068,8 +1103,9 @@ async fn 
calculate_state_incremental<'a>( |user_id| share_encrypted_room(services, sender_user, user_id, Some(room_id)); match content.membership { - | Join if !shares_encrypted_room(&user_id).await => dlu.insert(user_id), | Leave => leu.insert(user_id), + | Join if joined_since_last_sync || !shares_encrypted_room(&user_id).await => + dlu.insert(user_id), | _ => false, }; @@ -1077,29 +1113,7 @@ async fn calculate_state_incremental<'a>( }) .await; - // If the user is in a new encrypted room, give them all joined users - let new_encrypted_room = encrypted_room && !since_encryption.await; - if joined_since_last_sync && encrypted_room || new_encrypted_room { - services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|&user_id| sender_user != user_id) - .map(ToOwned::to_owned) - .broad_filter_map(|user_id| async move { - share_encrypted_room(services, sender_user, &user_id, Some(room_id)) - .await - .or_some(user_id) - }) - .ready_for_each(|user_id| { - device_list_updates.insert(user_id); - }) - .await; - } - - let send_member_count = delta_state_events - .values() - .any(|event| event.kind == RoomMember); + let send_member_count = state_events.iter().any(|event| event.kind == RoomMember); let (joined_member_count, invited_member_count, heroes) = if send_member_count { calculate_counts(services, room_id, sender_user).await? 
@@ -1111,13 +1125,29 @@ async fn calculate_state_incremental<'a>( heroes, joined_member_count, invited_member_count, - joined_since_last_sync, + state_events, device_list_updates, left_encrypted_users, - state_events: delta_state_events.into_values().collect(), }) } +async fn lazy_filter( + services: &Services, + sender_user: &UserId, + shortstatekey: ShortStateKey, + shorteventid: ShortEventId, +) -> Option { + let (event_type, state_key) = services + .rooms + .short + .get_statekey_from_short(shortstatekey) + .await + .ok()?; + + (event_type != StateEventType::RoomMember || state_key == sender_user.as_str()) + .then_some(shorteventid) +} + async fn calculate_counts( services: &Services, room_id: &RoomId, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 8b56c8b6..bed8d210 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use conduwuit::{ - at, err, error, + at, err, error, pair_of, pdu::PduBuilder, utils, utils::{ @@ -17,7 +17,7 @@ use conduwuit::{ Err, Error, PduEvent, Result, }; use database::{Deserialized, Map}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -48,7 +48,7 @@ use crate::{ rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, state::RoomMutexGuard, - state_compressor::{compress_state_event, parse_compressed_state_event}, + state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, }, Dep, }; @@ -143,6 +143,256 @@ impl crate::Service for Service { } impl Service { + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
+ pub async fn room_state_get_content( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + T: for<'de> Deserialize<'de>, + { + self.room_state_get(room_id, event_type, state_key) + .await + .and_then(|event| event.get_content()) + } + + /// Returns the full room state. + #[tracing::instrument(skip(self), level = "debug")] + pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() + } + + /// Returns the full room state pdus + #[tracing::instrument(skip(self), level = "debug")] + pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self), level = "debug")] + pub async fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, + { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) + .await + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). 
+ #[tracing::instrument(skip(self), level = "debug")] + pub async fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await + } + + /// The user was a joined member at this state (potentially in the past) + #[inline] + async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id).await == MembershipState::Join + } + + /// The user was an invited or joined room member at this state (potentially + /// in the past) + #[inline] + async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + let s = self.user_membership(shortstatehash, user_id).await; + s == MembershipState::Join || s == MembershipState::Invite + } + + /// Get membership for given user in state + async fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, + ) -> MembershipState { + self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) + } + + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
+ pub async fn state_get_content( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + T: for<'de> Deserialize<'de>, + { + self.state_get(shortstatehash, event_type, state_key) + .await + .and_then(|event| event.get_content()) + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, + ) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.load_full_state(shortstatehash) + .map_ok(|full_state| full_state.range(start..end).next().copied()) + .await + .flat_ok() + .is_some() + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + pub async fn state_get( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) + .await + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). 
+ #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_id( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, + { + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) + .await?; + + self.services + .short + .get_eventid_from_short(shorteventid) + .await + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .range(start..end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? 
+ } + + /// Returns the state events removed between the interval (present in .0 but + /// not in .1) + #[inline] + pub fn state_removed( + &self, + shortstatehash: pair_of!(ShortStateHash), + ) -> impl Stream + Send + '_ { + self.state_added((shortstatehash.1, shortstatehash.0)) + } + + /// Returns the state events added between the interval (present in .1 but + /// not in .0) + #[tracing::instrument(skip(self), level = "debug")] + pub fn state_added<'a>( + &'a self, + shortstatehash: pair_of!(ShortStateHash), + ) -> impl Stream + Send + 'a { + let a = self.load_full_state(shortstatehash.0); + let b = self.load_full_state(shortstatehash.1); + try_join(a, b) + .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) + .map_ok(IterStream::try_stream) + .try_flatten_stream() + .expect_ok() + .map(parse_compressed_state_event) + } + pub fn state_full( &self, shortstatehash: ShortStateHash, @@ -208,110 +458,11 @@ impl Service { .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) } - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shorteventid = self - .state_get_shortid(shortstatehash, event_type, state_key) - .await?; - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). 
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_shortid( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .map_ok(|full_state| { - full_state - .range(start..end) - .next() - .copied() - .map(parse_compressed_state_event) - .map(at!(1)) - .ok_or(err!(Request(NotFound("Not found in room state")))) - }) - .await? - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> bool { - let Ok(shortstatekey) = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await - else { - return false; - }; - - self.state_contains_shortstatekey(shortstatehash, shortstatekey) - .await - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains_shortstatekey( - &self, - shortstatehash: ShortStateHash, - shortstatekey: ShortStateKey, - ) -> bool { - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .map_ok(|full_state| full_state.range(start..end).next().copied()) - .await - .flat_ok() - .is_some() - } - pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, ) -> impl Stream> + Send + '_ { - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_err(|e| err!(Database("Missing state IDs: 
{e}"))) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + self.load_full_state(shortstatehash) .map_ok(|full_state| { full_state .deref() @@ -324,59 +475,32 @@ impl Service { .try_flatten_stream() } - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub async fn state_get( + async fn load_full_state( &self, shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await + ) -> Result> { + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .await + } + + /// Returns the state hash for this pdu. + pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) }) .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub async fn state_get_content( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.state_get(shortstatehash, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - /// Get membership for given user in state - async fn user_membership( - &self, - shortstatehash: ShortStateHash, - user_id: &UserId, - ) -> MembershipState { - self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) - } - - /// The user was a joined member at this state (potentially in the past) - #[inline] - async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id).await == MembershipState::Join - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - #[inline] - async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - let s = self.user_membership(shortstatehash, user_id).await; - s == MembershipState::Join || s == MembershipState::Invite + .deserialized() } /// Whether a server is allowed to see an event through federation, based on @@ -521,101 +645,6 @@ impl Service { } } - /// Returns the state hash for this pdu. - pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.db - .shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Returns the full room state. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns the full room state pdus - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full_pdus<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub async fn room_state_get_content( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.room_state_get(room_id, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - pub async fn get_name(&self, room_id: &RoomId) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomName, "") .await From b3271e0d653de1c585b1b5db95447045b0453b06 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 17:27:39 +0000 Subject: [PATCH 0645/1248] split state_accessor Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/mod.rs | 634 +----------------- .../rooms/state_accessor/room_state.rs | 90 +++ .../rooms/state_accessor/server_can.rs | 73 ++ src/service/rooms/state_accessor/state.rs | 320 +++++++++ src/service/rooms/state_accessor/user_can.rs | 187 ++++++ 5 files changed, 684 insertions(+), 620 deletions(-) create mode 100644 src/service/rooms/state_accessor/room_state.rs create mode 100644 src/service/rooms/state_accessor/server_can.rs create mode 100644 src/service/rooms/state_accessor/state.rs create mode 100644 src/service/rooms/state_accessor/user_can.rs diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index bed8d210..b7952ce6 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,23 +1,19 @@ +mod room_state; +mod server_can; +mod state; +mod user_can; + use std::{ - borrow::Borrow, fmt::Write, - ops::Deref, sync::{Arc, Mutex as StdMutex, Mutex}, }; use conduwuit::{ - at, err, error, pair_of, - pdu::PduBuilder, - utils, - utils::{ - math::{usize_from_f64, Expected}, - result::FlatOk, - stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, - }, - Err, Error, PduEvent, Result, + err, utils, + utils::math::{usize_from_f64, Expected}, + Result, }; -use database::{Deserialized, Map}; -use futures::{future::try_join, FutureExt, Stream, 
StreamExt, TryFutureExt}; +use database::Map; use lru_cache::LruCache; use ruma::{ events::{ @@ -29,29 +25,19 @@ use ruma::{ guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, - member::{MembershipState, RoomMemberEventContent}, + member::RoomMemberEventContent, name::RoomNameEventContent, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, topic::RoomTopicEventContent, }, - StateEventType, TimelineEventType, + StateEventType, }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, EventId, JsOption, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, UserId, }; -use serde::Deserialize; -use crate::{ - rooms, - rooms::{ - short::{ShortEventId, ShortStateHash, ShortStateKey}, - state::RoomMutexGuard, - state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, - }, - Dep, -}; +use crate::{rooms, rooms::short::ShortStateHash, Dep}; pub struct Service { pub server_visibility_cache: Mutex>, @@ -143,508 +129,6 @@ impl crate::Service for Service { } impl Service { - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub async fn room_state_get_content( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.room_state_get(room_id, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - /// Returns the full room state. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns the full room state pdus - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full_pdus<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). 
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } - - /// The user was a joined member at this state (potentially in the past) - #[inline] - async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id).await == MembershipState::Join - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - #[inline] - async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - let s = self.user_membership(shortstatehash, user_id).await; - s == MembershipState::Join || s == MembershipState::Invite - } - - /// Get membership for given user in state - async fn user_membership( - &self, - shortstatehash: ShortStateHash, - user_id: &UserId, - ) -> MembershipState { - self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub async fn state_get_content( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.state_get(shortstatehash, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> bool { - let Ok(shortstatekey) = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await - else { - return false; - }; - - self.state_contains_shortstatekey(shortstatehash, shortstatekey) - .await - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains_shortstatekey( - &self, - shortstatehash: ShortStateHash, - shortstatekey: ShortStateKey, - ) -> bool { - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - - self.load_full_state(shortstatehash) - .map_ok(|full_state| full_state.range(start..end).next().copied()) - .await - .flat_ok() - .is_some() - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub async fn state_get( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await - }) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). 
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shorteventid = self - .state_get_shortid(shortstatehash, event_type, state_key) - .await?; - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_shortid( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - self.load_full_state(shortstatehash) - .map_ok(|full_state| { - full_state - .range(start..end) - .next() - .copied() - .map(parse_compressed_state_event) - .map(at!(1)) - .ok_or(err!(Request(NotFound("Not found in room state")))) - }) - .await? 
- } - - /// Returns the state events removed between the interval (present in .0 but - /// not in .1) - #[inline] - pub fn state_removed( - &self, - shortstatehash: pair_of!(ShortStateHash), - ) -> impl Stream + Send + '_ { - self.state_added((shortstatehash.1, shortstatehash.0)) - } - - /// Returns the state events added between the interval (present in .1 but - /// not in .0) - #[tracing::instrument(skip(self), level = "debug")] - pub fn state_added<'a>( - &'a self, - shortstatehash: pair_of!(ShortStateHash), - ) -> impl Stream + Send + 'a { - let a = self.load_full_state(shortstatehash.0); - let b = self.load_full_state(shortstatehash.1); - try_join(a, b) - .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) - .map_ok(IterStream::try_stream) - .try_flatten_stream() - .expect_ok() - .map(parse_compressed_state_event) - } - - pub fn state_full( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + '_ { - self.state_full_pdus(shortstatehash) - .ready_filter_map(|pdu| { - Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) - }) - } - - pub fn state_full_pdus( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + '_ { - let short_ids = self - .state_full_shortids(shortstatehash) - .expect_ok() - .map(at!(1)); - - self.services - .short - .multi_get_eventid_from_short(short_ids) - .ready_filter_map(Result::ok) - .broad_filter_map(move |event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await.ok() - }) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn state_full_ids<'a, Id>( - &'a self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + 'a - where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, - ::Owned: Borrow, - { - let shortids = self - .state_full_shortids(shortstatehash) - .expect_ok() - .unzip() - .shared(); - - let shortstatekeys = shortids - .clone() - .map(at!(0)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - let shorteventids = shortids - .map(at!(1)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - self.services - .short - .multi_get_eventid_from_short(shorteventids) - .zip(shortstatekeys) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) - } - - pub fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream> + Send + '_ { - self.load_full_state(shortstatehash) - .map_ok(|full_state| { - full_state - .deref() - .iter() - .copied() - .map(parse_compressed_state_event) - .collect() - }) - .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) - .try_flatten_stream() - } - - async fn load_full_state( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_err(|e| err!(Database("Missing state IDs: {e}"))) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .await - } - - /// Returns the state hash for this pdu. - pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.db - .shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Whether a server is allowed to see an event through federation, based on - /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip_all, level = "trace")] - pub async fn server_can_see_event( - &self, - origin: &ServerName, - room_id: &RoomId, - event_id: &EventId, - ) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - if let Some(visibility) = self - .server_visibility_cache - .lock() - .expect("locked") - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return *visibility; - } - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let current_server_members = self - .services - .state_cache - .room_members(room_id) - .ready_filter(|member| member.server_name() == origin); - - let visibility = match history_visibility { - | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - current_server_members - .any(|member| self.user_was_invited(shortstatehash, member)) - .await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - current_server_members - .any(|member| self.user_was_joined(shortstatehash, member)) - .await - }, - | _ => { - error!("Unknown history visibility {history_visibility}"); - false - }, - }; - - self.server_visibility_cache - .lock() - .expect("locked") - .insert((origin.to_owned(), shortstatehash), visibility); - - visibility - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_event( - &self, - user_id: &UserId, - room_id: &RoomId, - event_id: &EventId, - ) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - if let Some(visibility) = self - .user_visibility_cache - .lock() - .expect("locked") - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return *visibility; - } - - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let visibility = match history_visibility { - | HistoryVisibility::WorldReadable => true, - | HistoryVisibility::Shared => currently_member, - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, user_id).await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, user_id).await - }, - | _ => { - error!("Unknown history visibility {history_visibility}"); - false - }, - }; - - self.user_visibility_cache - .lock() - .expect("locked") - .insert((user_id.to_owned(), shortstatehash), visibility); - - visibility - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { - if self.services.state_cache.is_joined(user_id, room_id).await { - return true; - } - - let history_visibility = self - .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - match history_visibility { - | HistoryVisibility::Invited => - self.services.state_cache.is_invited(user_id, room_id).await, - | HistoryVisibility::WorldReadable => true, - | _ => false, - } - } - pub async fn get_name(&self, room_id: &RoomId) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomName, "") .await @@ -669,28 +153,6 @@ impl Service { .await } - pub async fn user_can_invite( - &self, - room_id: &RoomId, - sender: &UserId, - target_user: &UserId, - state_lock: &RoomMutexGuard, - ) -> bool { - self.services - .timeline - .create_hash_and_sign_event( - PduBuilder::state( - target_user.into(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - sender, - room_id, - state_lock, - ) - .await - .is_ok() - } - /// Checks if guests are able to view room content without joining pub async fn is_world_readable(&self, room_id: &RoomId) -> bool { self.room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") @@ -726,74 +188,6 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Checks if a given user can redact a given event - /// - /// If federation is true, it allows redaction events from any user of the - /// same server as the original event sender - pub async fn user_can_redact( - &self, - redacts: &EventId, - sender: &UserId, - room_id: &RoomId, - federation: bool, - ) -> Result { - let redacting_event = self.services.timeline.get_pdu(redacts).await; - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == 
TimelineEventType::RoomCreate) - { - return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); - } - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) - { - return Err!(Request(Forbidden( - "Redacting m.room.server_acl will result in the room being inaccessible for \ - everyone (empty allow key), forbidding." - ))); - } - - if let Ok(pl_event_content) = self - .room_state_get_content::( - room_id, - &StateEventType::RoomPowerLevels, - "", - ) - .await - { - let pl_event: RoomPowerLevels = pl_event_content.into(); - Ok(pl_event.user_can_redact_event_of_other(sender) - || pl_event.user_can_redact_own_event(sender) - && if let Ok(redacting_event) = redacting_event { - if federation { - redacting_event.sender.server_name() == sender.server_name() - } else { - redacting_event.sender == sender - } - } else { - false - }) - } else { - // Falling back on m.room.create to judge power level - if let Ok(room_create) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(room_create.sender == sender - || redacting_event - .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)) - } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } - } - } - /// Returns the join rule (`SpaceRoomJoinRule`) for a given room pub async fn get_join_rule( &self, diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs new file mode 100644 index 00000000..98a82cea --- /dev/null +++ b/src/service/rooms/state_accessor/room_state.rs @@ -0,0 +1,90 @@ +use std::borrow::Borrow; + +use conduwuit::{err, implement, PduEvent, Result}; +use futures::{Stream, StreamExt, TryFutureExt}; +use ruma::{events::StateEventType, EventId, RoomId}; +use serde::Deserialize; + +/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
+#[implement(super::Service)] +pub async fn room_state_get_content( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.room_state_get(room_id, event_type, state_key) + .await + .and_then(|event| event.get_content()) +} + +/// Returns the full room state. +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() +} + +/// Returns the full room state pdus +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, +{ + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) + .await +} + +/// Returns a single PDU from `room_id` with key (`event_type`, +/// `state_key`). 
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await +} diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs new file mode 100644 index 00000000..4d834227 --- /dev/null +++ b/src/service/rooms/state_accessor/server_can.rs @@ -0,0 +1,73 @@ +use conduwuit::{error, implement, utils::stream::ReadyExt}; +use futures::StreamExt; +use ruma::{ + events::{ + room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + StateEventType, + }, + EventId, RoomId, ServerName, +}; + +/// Whether a server is allowed to see an event through federation, based on +/// the room's history_visibility at that event's state. +#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn server_can_see_event( + &self, + origin: &ServerName, + room_id: &RoomId, + event_id: &EventId, +) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; + }; + + if let Some(visibility) = self + .server_visibility_cache + .lock() + .expect("locked") + .get_mut(&(origin.to_owned(), shortstatehash)) + { + return *visibility; + } + + let history_visibility = self + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + let current_server_members = self + .services + .state_cache + .room_members(room_id) + .ready_filter(|member| member.server_name() == origin); + + let visibility = match history_visibility { + | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, + | HistoryVisibility::Invited => { 
+ // Allow if any member on requesting server was AT LEAST invited, else deny + current_server_members + .any(|member| self.user_was_invited(shortstatehash, member)) + .await + }, + | HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + current_server_members + .any(|member| self.user_was_joined(shortstatehash, member)) + .await + }, + | _ => { + error!("Unknown history visibility {history_visibility}"); + false + }, + }; + + self.server_visibility_cache + .lock() + .expect("locked") + .insert((origin.to_owned(), shortstatehash), visibility); + + visibility +} diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs new file mode 100644 index 00000000..c47a5693 --- /dev/null +++ b/src/service/rooms/state_accessor/state.rs @@ -0,0 +1,320 @@ +use std::{borrow::Borrow, ops::Deref, sync::Arc}; + +use conduwuit::{ + at, err, implement, pair_of, + utils::{ + result::FlatOk, + stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, + }, + PduEvent, Result, +}; +use database::Deserialized; +use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; +use ruma::{ + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, + }, + EventId, OwnedEventId, UserId, +}; +use serde::Deserialize; + +use crate::rooms::{ + short::{ShortEventId, ShortStateHash, ShortStateKey}, + state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, +}; + +/// The user was a joined member at this state (potentially in the past) +#[implement(super::Service)] +#[inline] +pub async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id).await == MembershipState::Join +} + +/// The user was an invited or joined room member at this state (potentially +/// in the past) +#[implement(super::Service)] +#[inline] +pub async fn user_was_invited(&self, shortstatehash: 
ShortStateHash, user_id: &UserId) -> bool { + let s = self.user_membership(shortstatehash, user_id).await; + s == MembershipState::Join || s == MembershipState::Invite +} + +/// Get membership for given user in state +#[implement(super::Service)] +pub async fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, +) -> MembershipState { + self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) +} + +/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). +#[implement(super::Service)] +pub async fn state_get_content( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.state_get(shortstatehash, event_type, state_key) + .await + .and_then(|event| event.get_content()) +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, +) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.load_full_state(shortstatehash) + .map_ok(|full_state| full_state.range(start..=end).next().copied()) + .await + .flat_ok() + .is_some() +} + +/// Returns a single PDU from `room_id` with key (`event_type`, +/// `state_key`). 
+#[implement(super::Service)] +pub async fn state_get( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result { + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) + .await +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_get_id( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, +{ + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) + .await?; + + self.services + .short + .get_eventid_from_short(shorteventid) + .await +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .range(start..=end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? 
+} + +/// Returns the state events removed between the interval (present in .0 but +/// not in .1) +#[implement(super::Service)] +#[inline] +pub fn state_removed( + &self, + shortstatehash: pair_of!(ShortStateHash), +) -> impl Stream + Send + '_ { + self.state_added((shortstatehash.1, shortstatehash.0)) +} + +/// Returns the state events added between the interval (present in .1 but +/// not in .0) +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn state_added<'a>( + &'a self, + shortstatehash: pair_of!(ShortStateHash), +) -> impl Stream + Send + 'a { + let a = self.load_full_state(shortstatehash.0); + let b = self.load_full_state(shortstatehash.1); + try_join(a, b) + .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) + .map_ok(IterStream::try_stream) + .try_flatten_stream() + .expect_ok() + .map(parse_compressed_state_event) +} + +#[implement(super::Service)] +pub fn state_full( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + '_ { + self.state_full_pdus(shortstatehash) + .ready_filter_map(|pdu| { + Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) + }) +} + +#[implement(super::Service)] +pub fn state_full_pdus( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + '_ { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .map(at!(1)); + + self.services + .short + .multi_get_eventid_from_short(short_ids) + .ready_filter_map(Result::ok) + .broad_filter_map(move |event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await.ok() + }) +} + +/// Builds a StateMap by iterating over all keys that start +/// with state_hash, this gives the full state for the given state_hash. 
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn state_full_ids<'a, Id>( + &'a self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + 'a +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, + ::Owned: Borrow, +{ + let shortids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .unzip() + .shared(); + + let shortstatekeys = shortids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = shortids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(shortstatekeys) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) +} + +#[implement(super::Service)] +pub fn state_full_shortids( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream> + Send + '_ { + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .deref() + .iter() + .copied() + .map(parse_compressed_state_event) + .collect() + }) + .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .try_flatten_stream() +} + +#[implement(super::Service)] +async fn load_full_state(&self, shortstatehash: ShortStateHash) -> Result> { + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .await +} + +/// Returns the state hash for this pdu. 
+#[implement(super::Service)] +pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) + }) + .await + .deserialized() +} diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs new file mode 100644 index 00000000..725a4fba --- /dev/null +++ b/src/service/rooms/state_accessor/user_can.rs @@ -0,0 +1,187 @@ +use conduwuit::{error, implement, pdu::PduBuilder, Err, Error, Result}; +use ruma::{ + events::{ + room::{ + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + member::{MembershipState, RoomMemberEventContent}, + power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, + StateEventType, TimelineEventType, + }, + EventId, RoomId, UserId, +}; + +use crate::rooms::state::RoomMutexGuard; + +/// Checks if a given user can redact a given event +/// +/// If federation is true, it allows redaction events from any user of the +/// same server as the original event sender +#[implement(super::Service)] +pub async fn user_can_redact( + &self, + redacts: &EventId, + sender: &UserId, + room_id: &RoomId, + federation: bool, +) -> Result { + let redacting_event = self.services.timeline.get_pdu(redacts).await; + + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomCreate) + { + return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); + } + + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) + { + return Err!(Request(Forbidden( + "Redacting m.room.server_acl will result in the room being inaccessible for \ + everyone (empty allow key), forbidding." 
+ ))); + } + + if let Ok(pl_event_content) = self + .room_state_get_content::( + room_id, + &StateEventType::RoomPowerLevels, + "", + ) + .await + { + let pl_event: RoomPowerLevels = pl_event_content.into(); + Ok(pl_event.user_can_redact_event_of_other(sender) + || pl_event.user_can_redact_own_event(sender) + && if let Ok(redacting_event) = redacting_event { + if federation { + redacting_event.sender.server_name() == sender.server_name() + } else { + redacting_event.sender == sender + } + } else { + false + }) + } else { + // Falling back on m.room.create to judge power level + if let Ok(room_create) = self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + Ok(room_create.sender == sender + || redacting_event + .as_ref() + .is_ok_and(|redacting_event| redacting_event.sender == sender)) + } else { + Err(Error::bad_database( + "No m.room.power_levels or m.room.create events in database for room", + )) + } + } +} + +/// Whether a user is allowed to see an event, based on +/// the room's history_visibility at that event's state. 
+#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn user_can_see_event( + &self, + user_id: &UserId, + room_id: &RoomId, + event_id: &EventId, +) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; + }; + + if let Some(visibility) = self + .user_visibility_cache + .lock() + .expect("locked") + .get_mut(&(user_id.to_owned(), shortstatehash)) + { + return *visibility; + } + + let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; + + let history_visibility = self + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + let visibility = match history_visibility { + | HistoryVisibility::WorldReadable => true, + | HistoryVisibility::Shared => currently_member, + | HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + self.user_was_invited(shortstatehash, user_id).await + }, + | HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + self.user_was_joined(shortstatehash, user_id).await + }, + | _ => { + error!("Unknown history visibility {history_visibility}"); + false + }, + }; + + self.user_visibility_cache + .lock() + .expect("locked") + .insert((user_id.to_owned(), shortstatehash), visibility); + + visibility +} + +/// Whether a user is allowed to see an event, based on +/// the room's history_visibility at that event's state. 
+#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { + if self.services.state_cache.is_joined(user_id, room_id).await { + return true; + } + + let history_visibility = self + .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + match history_visibility { + | HistoryVisibility::Invited => + self.services.state_cache.is_invited(user_id, room_id).await, + | HistoryVisibility::WorldReadable => true, + | _ => false, + } +} + +#[implement(super::Service)] +pub async fn user_can_invite( + &self, + room_id: &RoomId, + sender: &UserId, + target_user: &UserId, + state_lock: &RoomMutexGuard, +) -> bool { + self.services + .timeline + .create_hash_and_sign_event( + PduBuilder::state( + target_user.into(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + sender, + room_id, + state_lock, + ) + .await + .is_ok() +} From d32534164c0092a30ac351337b7dd34aa8f5d456 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 4 Feb 2025 20:30:33 +0000 Subject: [PATCH 0646/1248] fix soft-failed redaction regression (ff8bbd4cfa) Signed-off-by: Jason Volk --- src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 132daca7..b33b0388 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -128,7 +128,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( | (false, _) => true, | (true, None) => false, | (true, Some(redact_id)) => - self.services + !self + .services .state_accessor .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) 
.await?, From 80277f6aa2629a8b9dc2b4e96a64d8e508d47270 Mon Sep 17 00:00:00 2001 From: Nineko Date: Tue, 4 Feb 2025 16:46:00 -0500 Subject: [PATCH 0647/1248] Adds .gitattributes to the projects to prevent LN and CLRF conflicts. (#681) --- .gitattributes | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..3dfaca65 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,87 @@ +# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Rust.gitattributes +# Auto detect text files and perform normalization +* text=auto + +*.rs text diff=rust +*.toml text diff=toml +Cargo.lock text + +# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Common.gitattributes +# Documents +*.bibtex text diff=bibtex +*.doc diff=astextplain +*.DOC diff=astextplain +*.docx diff=astextplain +*.DOCX diff=astextplain +*.dot diff=astextplain +*.DOT diff=astextplain +*.pdf diff=astextplain +*.PDF diff=astextplain +*.rtf diff=astextplain +*.RTF diff=astextplain +*.md text diff=markdown +*.mdx text diff=markdown +*.tex text diff=tex +*.adoc text +*.textile text +*.mustache text +*.csv text eol=crlf +*.tab text +*.tsv text +*.txt text +*.sql text +*.epub diff=astextplain + +# Graphics +*.png binary +*.jpg binary +*.jpeg binary +*.gif binary +*.tif binary +*.tiff binary +*.ico binary +# SVG treated as text by default. 
+*.svg text +*.eps binary + +# Scripts +*.bash text eol=lf +*.fish text eol=lf +*.ksh text eol=lf +*.sh text eol=lf +*.zsh text eol=lf +# These are explicitly windows files and should use crlf +*.bat text eol=crlf +*.cmd text eol=crlf +*.ps1 text eol=crlf + +# Serialisation +*.json text +*.toml text +*.xml text +*.yaml text +*.yml text + +# Archives +*.7z binary +*.bz binary +*.bz2 binary +*.bzip2 binary +*.gz binary +*.lz binary +*.lzma binary +*.rar binary +*.tar binary +*.taz binary +*.tbz binary +*.tbz2 binary +*.tgz binary +*.tlz binary +*.txz binary +*.xz binary +*.Z binary +*.zip binary +*.zst binary + +# Text files where line endings should be preserved +*.patch -text \ No newline at end of file From 62180897c02d9c306b2179f3685e60ffdc615c1f Mon Sep 17 00:00:00 2001 From: Niko Date: Sat, 1 Feb 2025 18:35:23 -0500 Subject: [PATCH 0648/1248] Added blurhash.rs to fascilitate blurhashing. Signed-off-by: Niko --- Cargo.lock | 373 +++++++++++++++++++++++++++++++++- Cargo.toml | 8 +- conduwuit-example.toml | 18 ++ src/api/Cargo.toml | 1 + src/api/client/media.rs | 21 ++ src/core/Cargo.toml | 1 + src/core/config/mod.rs | 40 +++- src/main/Cargo.toml | 1 + src/service/Cargo.toml | 3 + src/service/media/blurhash.rs | 159 +++++++++++++++ src/service/media/mod.rs | 3 +- 11 files changed, 621 insertions(+), 7 deletions(-) create mode 100644 src/service/media/blurhash.rs diff --git a/Cargo.lock b/Cargo.lock index e379aebb..b710d6fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,6 +26,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1" + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -53,12 +59,29 @@ version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +[[package]] +name = 
"arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" + [[package]] name = "arc-swap" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "arg_enum_proc_macro" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "argon2" version = "0.5.3" @@ -173,6 +196,29 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "av1-grain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6678909d8c5d46a42abcf571271e15fdbc0a225e3646cf23762cd415046c78bf" +dependencies = [ + "anyhow", + "arrayvec", + "log", + "nom", + "num-rational", + "v_frame", +] + +[[package]] +name = "avif-serialize" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e335041290c43101ca215eed6f43ec437eb5a42125573f600fc3fa42b9bddd62" +dependencies = [ + "arrayvec", +] + [[package]] name = "aws-lc-rs" version = "1.12.1" @@ -385,6 +431,12 @@ dependencies = [ "which", ] +[[package]] +name = "bit_field" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" + [[package]] name = "bitflags" version = "1.3.2" @@ -397,6 +449,12 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +[[package]] +name = "bitstream-io" +version = "2.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6099cdc01846bc367c4e7dd630dc5966dccf36b652fae7a74e17b640411a91b2" + [[package]] name = "blake2" version = "0.10.6" @@ -415,6 +473,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blurhash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79769241dcd44edf79a732545e8b5cec84c247ac060f5252cd51885d093a8fc" +dependencies = [ + "image", +] + [[package]] name = "brotli" version = "7.0.0" @@ -436,6 +503,12 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "built" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" + [[package]] name = "bumpalo" version = "3.16.0" @@ -513,6 +586,16 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-expr" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" +dependencies = [ + "smallvec", + "target-lexicon", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -822,6 +905,7 @@ dependencies = [ "arrayvec", "async-trait", "base64 0.22.1", + "blurhash", "bytes", "conduwuit_core", "conduwuit_database", @@ -1071,6 +1155,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-common" version = "0.1.6" @@ -1252,7 +1342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1275,6 +1365,21 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "exr" +version = "1.73.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83197f59927b46c04a183a619b7c29df34e63e63c7869320862268c0ef687e0" +dependencies = [ + "bit_field", + "half", + "lebe", + "miniz_oxide", + "rayon-core", + "smallvec", + "zune-inflate", +] + [[package]] name = "fdeflate" version = "0.3.7" @@ -1519,6 +1624,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hardened_malloc-rs" version = "0.1.2+12" @@ -1973,10 +2088,16 @@ dependencies = [ "bytemuck", "byteorder-lite", "color_quant", + "exr", "gif", "image-webp", "num-traits", "png", + "qoi", + "ravif", + "rayon", + "rgb", + "tiff", "zune-core", "zune-jpeg", ] @@ -1991,6 +2112,12 @@ dependencies = [ "quick-error 2.0.1", ] +[[package]] +name = "imgref" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0263a3d970d5c054ed9312c0057b4f3bde9c0b33836d3637361d4a9e6e7a408" + [[package]] name = "indexmap" version = "1.9.3" @@ -2024,6 +2151,17 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" +[[package]] +name = "interpolate_name" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "ipaddress" version = "0.1.3" @@ -2089,6 +2227,12 @@ dependencies = [ "libc", ] +[[package]] +name = "jpeg-decoder" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" + [[package]] name = "js-sys" version = "0.3.77" @@ -2172,12 
+2316,28 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +[[package]] +name = "lebe" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" + [[package]] name = "libc" version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +[[package]] +name = "libfuzzer-sys" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75" +dependencies = [ + "arbitrary", + "cc", +] + [[package]] name = "libloading" version = "0.8.6" @@ -2185,7 +2345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -2243,6 +2403,15 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "loop9" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fae87c125b03c1d2c0150c90365d7d6bcc53fb73a9acaef207d2d065860f062" +dependencies = [ + "imgref", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -2321,6 +2490,16 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "maybe-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +dependencies = [ + "cfg-if", + "rayon", +] + [[package]] name = "memchr" version = "2.7.4" @@ -2434,6 +2613,12 @@ version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "noop_proc_macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8" + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2483,6 +2668,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "num-integer" version = "0.1.46" @@ -2907,6 +3103,25 @@ dependencies = [ "yansi", ] +[[package]] +name = "profiling" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afbdc74edc00b6f6a218ca6a5364d6226a259d4b8ea1af4a0ea063f27e179f4d" +dependencies = [ + "profiling-procmacros", +] + +[[package]] +name = "profiling-procmacros" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" +dependencies = [ + "quote", + "syn 2.0.96", +] + [[package]] name = "prost" version = "0.13.4" @@ -2957,6 +3172,15 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "007d8adb5ddab6f8e3f491ac63566a7d5002cc7ed73901f72057943fa71ae1ae" +[[package]] +name = "qoi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001" +dependencies = [ + "bytemuck", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -3018,7 +3242,7 @@ dependencies = [ 
"once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3060,6 +3284,76 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rav1e" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd87ce80a7665b1cce111f8a16c1f3929f6547ce91ade6addf4ec86a8dda5ce9" +dependencies = [ + "arbitrary", + "arg_enum_proc_macro", + "arrayvec", + "av1-grain", + "bitstream-io", + "built", + "cfg-if", + "interpolate_name", + "itertools 0.12.1", + "libc", + "libfuzzer-sys", + "log", + "maybe-rayon", + "new_debug_unreachable", + "noop_proc_macro", + "num-derive", + "num-traits", + "once_cell", + "paste", + "profiling", + "rand", + "rand_chacha", + "simd_helpers", + "system-deps", + "thiserror 1.0.69", + "v_frame", + "wasm-bindgen", +] + +[[package]] +name = "ravif" +version = "0.11.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2413fd96bd0ea5cdeeb37eaf446a22e6ed7b981d792828721e74ded1980a45c6" +dependencies = [ + "avif-serialize", + "imgref", + "loop9", + "quick-error 2.0.1", + "rav1e", + "rayon", + "rgb", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + [[package]] name = "redox_syscall" version = "0.5.8" @@ -3172,6 +3466,12 @@ dependencies = [ "quick-error 1.2.3", ] +[[package]] +name = "rgb" +version = "0.8.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" + [[package]] name = "ring" version = "0.17.8" @@ 
-3479,7 +3779,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3945,6 +4245,15 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +[[package]] +name = "simd_helpers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6" +dependencies = [ + "quote", +] + [[package]] name = "siphasher" version = "0.3.11" @@ -4096,6 +4405,25 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "system-deps" +version = "6.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349" +dependencies = [ + "cfg-expr", + "heck", + "pkg-config", + "toml", + "version-compare", +] + +[[package]] +name = "target-lexicon" +version = "0.12.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" + [[package]] name = "tendril" version = "0.4.3" @@ -4205,6 +4533,17 @@ dependencies = [ "threadpool", ] +[[package]] +name = "tiff" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba1310fcea54c6a9a4fd1aad794ecc02c31682f6bfbecdf460bf19533eed1e3e" +dependencies = [ + "flate2", + "jpeg-decoder", + "weezl", +] + [[package]] name = "tikv-jemalloc-ctl" version = "0.6.0" @@ -4744,6 +5083,17 @@ dependencies = [ "serde", ] +[[package]] +name = "v_frame" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6f32aaa24bacd11e488aa9ba66369c7cd514885742c9fe08cfe85884db3e92b" +dependencies = [ + "aligned-vec", + "num-traits", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -4756,6 +5106,12 @@ version = "0.2.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "version-compare" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" + [[package]] name = "version_check" version = "0.9.5" @@ -5324,6 +5680,15 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" +[[package]] +name = "zune-inflate" +version = "0.2.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02" +dependencies = [ + "simd-adler32", +] + [[package]] name = "zune-jpeg" version = "0.4.14" diff --git a/Cargo.toml b/Cargo.toml index 1cf787c6..c580d22d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -179,7 +179,7 @@ version = "0.5.3" features = ["alloc", "rand"] default-features = false -# Used to generate thumbnails for images +# Used to generate thumbnails for images & blurhashes [workspace.dependencies.image] version = "0.25.5" default-features = false @@ -190,6 +190,12 @@ features = [ "webp", ] +[workspace.dependencies.blurhash] +version = "0.2.3" +default-features = false +features = [ + "fast-linear-to-srgb","image" +] # logging [workspace.dependencies.log] version = "0.4.22" diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3e64522c..f9da856d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1607,3 +1607,21 @@ # This item is undocumented. Please contribute documentation for it. 
# #support_mxid = + +[global.blurhashing] + +# blurhashing x component, 4 is recommended by https://blurha.sh/ +# +#components_x = 4 + +# blurhashing y component, 3 is recommended by https://blurha.sh/ +# +#components_y = 3 + +# Max raw size that the server will blurhash, this is the size of the +# image after converting it to raw data, it should be higher than the +# upload limit but not too high. The higher it is the higher the +# potential load will be for clients requesting blurhashes. The default +# is 33.55MB. Setting it to 0 disables blurhashing. +# +#blurhash_max_raw_size = 33554432 diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 385e786f..8a5ef3f0 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,6 +17,7 @@ crate-type = [ ] [features] +blurhashing=[] element_hacks = [] release_max_log_level = [ "tracing/max_level_trace", diff --git a/src/api/client/media.rs b/src/api/client/media.rs index afbc218a..115f2581 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -62,6 +62,27 @@ pub(crate) async fn create_content_route( media_id: &utils::random_string(MXC_LENGTH), }; + #[cfg(feature = "blurhashing")] + { + if body.generate_blurhash { + let (blurhash, create_media_result) = tokio::join!( + services + .media + .create_blurhash(&body.file, content_type, filename), + services.media.create( + &mxc, + Some(user), + Some(&content_disposition), + content_type, + &body.file + ) + ); + return create_media_result.map(|()| create_content::v3::Response { + content_uri: mxc.to_string().into(), + blurhash, + }); + } + } services .media .create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file) diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index ef2df4ff..5d46ec3b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -54,6 +54,7 @@ sentry_telemetry = [] conduwuit_mods = [ "dep:libloading" ] +blurhashing = [] [dependencies] argon2.workspace = true diff --git a/src/core/config/mod.rs 
b/src/core/config/mod.rs index ff80d1cf..9514f7a0 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -52,7 +52,7 @@ use crate::{err, error::Error, utils::sys, Result}; ### For more information, see: ### https://conduwuit.puppyirl.gay/configuration.html "#, - ignore = "catchall well_known tls" + ignore = "catchall well_known tls blurhashing" )] pub struct Config { /// The server_name is the pretty name of this server. It is used as a @@ -1789,6 +1789,9 @@ pub struct Config { #[serde(default = "true_fn")] pub config_reload_signal: bool, + // external structure; separate section + #[serde(default)] + pub blurhashing: BlurhashConfig, #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime @@ -1839,6 +1842,31 @@ pub struct WellKnownConfig { pub support_mxid: Option, } +#[derive(Clone, Copy, Debug, Deserialize, Default)] +#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] +#[config_example_generator(filename = "conduwuit-example.toml", section = "global.blurhashing")] +pub struct BlurhashConfig { + /// blurhashing x component, 4 is recommended by https://blurha.sh/ + /// + /// default: 4 + #[serde(default = "default_blurhash_x_component")] + pub components_x: u32, + /// blurhashing y component, 3 is recommended by https://blurha.sh/ + /// + /// default: 3 + #[serde(default = "default_blurhash_y_component")] + pub components_y: u32, + /// Max raw size that the server will blurhash, this is the size of the + /// image after converting it to raw data, it should be higher than the + /// upload limit but not too high. The higher it is the higher the + /// potential load will be for clients requesting blurhashes. The default + /// is 33.55MB. Setting it to 0 disables blurhashing. 
+ /// + /// default: 33554432 + #[serde(default = "default_blurhash_max_raw_size")] + pub blurhash_max_raw_size: u64, +} + #[derive(Deserialize, Clone, Debug)] #[serde(transparent)] struct ListeningPort { @@ -2210,3 +2238,13 @@ fn default_client_response_timeout() -> u64 { 120 } fn default_client_shutdown_timeout() -> u64 { 15 } fn default_sender_shutdown_timeout() -> u64 { 5 } + +// blurhashing defaults recommended by https://blurha.sh/ +// 2^25 +pub(super) fn default_blurhash_max_raw_size() -> u64 { 33_554_432 } + +pub(super) fn default_blurhash_x_component() -> u32 { 4 } + +pub(super) fn default_blurhash_y_component() -> u32 { 3 } + +// end recommended & blurhashing defaults diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index f774c37a..7e1cb86b 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -101,6 +101,7 @@ perf_measurements = [ "conduwuit-core/perf_measurements", "conduwuit-core/sentry_telemetry", ] +blurhashing =["conduwuit-service/blurhashing","conduwuit-core/blurhashing","conduwuit-api/blurhashing"] # increases performance, reduces build times, and reduces binary size by not compiling or # genreating code for log level filters that users will generally not use (debug and trace) release_max_log_level = [ diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index c4f75453..30183179 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -44,6 +44,7 @@ url_preview = [ zstd_compression = [ "reqwest/zstd", ] +blurhashing = ["dep:image","dep:blurhash"] [dependencies] arrayvec.workspace = true @@ -82,6 +83,8 @@ tracing.workspace = true url.workspace = true webpage.workspace = true webpage.optional = true +blurhash.workspace = true +blurhash.optional = true [lints] workspace = true diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs new file mode 100644 index 00000000..c470925c --- /dev/null +++ b/src/service/media/blurhash.rs @@ -0,0 +1,159 @@ +use std::{fmt::Display, io::Cursor, path::Path}; + +use 
blurhash::encode_image; +use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, debug_error, implement, trace}; +use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; + +use super::Service; +#[implement(Service)] +pub async fn create_blurhash( + &self, + file: &[u8], + content_type: Option<&str>, + file_name: Option<&str>, +) -> Option { + let config = BlurhashConfig::from(self.services.server.config.blurhashing); + if config.size_limit == 0 { + trace!("since 0 means disabled blurhashing, skipped blurhashing logic"); + return None; + } + let file_data = file.to_owned(); + let content_type = content_type.map(String::from); + let file_name = file_name.map(String::from); + + let blurhashing_result = tokio::task::spawn_blocking(move || { + get_blurhash_from_request(&file_data, content_type, file_name, config) + }) + .await + .expect("no join error"); + + match blurhashing_result { + | Ok(result) => Some(result), + | Err(e) => { + debug_error!("Error when blurhashing: {e}"); + None + }, + } +} + +/// Returns the blurhash or a blurhash error which implements Display. 
+fn get_blurhash_from_request( + data: &[u8], + mime: Option, + filename: Option, + config: BlurhashConfig, +) -> Result { + // Get format image is supposed to be in + let format = get_format_from_data_mime_and_filename(data, mime, filename)?; + // Get the image reader for said image format + let decoder = get_image_decoder_with_format_and_data(format, data)?; + // Check image size makes sense before unpacking whole image + if is_image_above_size_limit(&decoder, config) { + return Err(BlurhashingError::ImageTooLarge); + } + // decode the image finally + let image = DynamicImage::from_decoder(decoder)?; + + blurhash_an_image(&image, config) +} + +/// Gets the Image Format value from the data,mime, and filename +/// It first checks if the mime is a valid image format +/// Then it checks if the filename has a format, otherwise just guess based on +/// the binary data Assumes that mime and filename extension won't be for a +/// different file format than file. +fn get_format_from_data_mime_and_filename( + data: &[u8], + mime: Option, + filename: Option, +) -> Result { + let mut image_format = None; + if let Some(mime) = mime { + image_format = ImageFormat::from_mime_type(mime); + } + if let (Some(filename), None) = (filename, image_format) { + if let Some(extension) = Path::new(&filename).extension() { + image_format = ImageFormat::from_mime_type(extension.to_string_lossy()); + } + } + + if let Some(format) = image_format { + Ok(format) + } else { + image::guess_format(data).map_err(Into::into) + } +} + +fn get_image_decoder_with_format_and_data( + image_format: ImageFormat, + data: &[u8], +) -> Result, BlurhashingError> { + let mut image_reader = ImageReader::new(Cursor::new(data)); + image_reader.set_format(image_format); + Ok(Box::new(image_reader.into_decoder()?)) +} + +fn is_image_above_size_limit( + decoder: &T, + blurhash_config: BlurhashConfig, +) -> bool { + decoder.total_bytes() >= blurhash_config.size_limit +} +#[inline] +fn blurhash_an_image( + image: 
&DynamicImage, + blurhash_config: BlurhashConfig, +) -> Result { + Ok(encode_image( + blurhash_config.components_x, + blurhash_config.components_y, + &image.to_rgba8(), + )?) +} +#[derive(Clone, Copy)] +pub struct BlurhashConfig { + components_x: u32, + components_y: u32, + /// size limit in bytes + size_limit: u64, +} + +impl From for BlurhashConfig { + fn from(value: CoreBlurhashConfig) -> Self { + Self { + components_x: value.components_x, + components_y: value.components_y, + size_limit: value.blurhash_max_raw_size, + } + } +} + +#[derive(Debug)] +pub(crate) enum BlurhashingError { + ImageError(Box), + HashingLibError(Box), + ImageTooLarge, +} +impl From for BlurhashingError { + fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } +} + +impl From for BlurhashingError { + fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } +} + +impl Display for BlurhashingError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Blurhash Error:")?; + match &self { + | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?, + | Self::HashingLibError(e) => + write!(f, "There was an error with the blurhashing library => {e}")?, + + | Self::ImageError(e) => + write!(f, "There was an error with the image loading library => {e}")?, + }; + + Ok(()) + } +} diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 0d98853d..7775173a 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,10 +1,11 @@ +#[cfg(feature = "blurhashing")] +pub mod blurhash; mod data; pub(super) mod migrations; mod preview; mod remote; mod tests; mod thumbnail; - use std::{path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; From 442bb9889c45e5b17cdf5c7fd90e4751f7582400 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 4 Feb 2025 02:24:50 +0000 Subject: [PATCH 0649/1248] improvements on blurhashing feature Signed-off-by: Jason Volk --- Cargo.toml | 4 +- 
src/api/Cargo.toml | 1 - src/api/client/media.rs | 44 +++++-------- src/core/Cargo.toml | 1 - src/main/Cargo.toml | 4 +- src/service/media/blurhash.rs | 113 +++++++++++++++++++--------------- src/service/media/mod.rs | 1 - 7 files changed, 87 insertions(+), 81 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c580d22d..b25d9175 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -194,8 +194,10 @@ features = [ version = "0.2.3" default-features = false features = [ - "fast-linear-to-srgb","image" + "fast-linear-to-srgb", + "image", ] + # logging [workspace.dependencies.log] version = "0.4.22" diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 8a5ef3f0..385e786f 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,7 +17,6 @@ crate-type = [ ] [features] -blurhashing=[] element_hacks = [] release_max_log_level = [ "tracing/max_level_trace", diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 115f2581..0cff8185 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -57,40 +57,28 @@ pub(crate) async fn create_content_route( let filename = body.filename.as_deref(); let content_type = body.content_type.as_deref(); let content_disposition = make_content_disposition(None, content_type, filename); - let mxc = Mxc { + let ref mxc = Mxc { server_name: services.globals.server_name(), media_id: &utils::random_string(MXC_LENGTH), }; - #[cfg(feature = "blurhashing")] - { - if body.generate_blurhash { - let (blurhash, create_media_result) = tokio::join!( - services - .media - .create_blurhash(&body.file, content_type, filename), - services.media.create( - &mxc, - Some(user), - Some(&content_disposition), - content_type, - &body.file - ) - ); - return create_media_result.map(|()| create_content::v3::Response { - content_uri: mxc.to_string().into(), - blurhash, - }); - } - } services .media - .create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file) - .await - .map(|()| create_content::v3::Response { - content_uri: 
mxc.to_string().into(), - blurhash: None, - }) + .create(mxc, Some(user), Some(&content_disposition), content_type, &body.file) + .await?; + + let blurhash = body.generate_blurhash.then(|| { + services + .media + .create_blurhash(&body.file, content_type, filename) + .ok() + .flatten() + }); + + Ok(create_content::v3::Response { + content_uri: mxc.to_string().into(), + blurhash: blurhash.flatten(), + }) } /// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 5d46ec3b..ef2df4ff 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -54,7 +54,6 @@ sentry_telemetry = [] conduwuit_mods = [ "dep:libloading" ] -blurhashing = [] [dependencies] argon2.workspace = true diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 7e1cb86b..87ca48c8 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -49,6 +49,9 @@ default = [ "zstd_compression", ] +blurhashing = [ + "conduwuit-service/blurhashing", +] brotli_compression = [ "conduwuit-api/brotli_compression", "conduwuit-core/brotli_compression", @@ -101,7 +104,6 @@ perf_measurements = [ "conduwuit-core/perf_measurements", "conduwuit-core/sentry_telemetry", ] -blurhashing =["conduwuit-service/blurhashing","conduwuit-core/blurhashing","conduwuit-api/blurhashing"] # increases performance, reduces build times, and reduces binary size by not compiling or # genreating code for log level filters that users will generally not use (debug and trace) release_max_log_level = [ diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index c470925c..aa6685b2 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,56 +1,58 @@ -use std::{fmt::Display, io::Cursor, path::Path}; +use std::{error::Error, ffi::OsStr, fmt::Display, io::Cursor, path::Path}; -use blurhash::encode_image; -use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, debug_error, implement, trace}; +use 
conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, err, implement, Result}; use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; use super::Service; #[implement(Service)] -pub async fn create_blurhash( +pub fn create_blurhash( &self, file: &[u8], content_type: Option<&str>, file_name: Option<&str>, -) -> Option { +) -> Result> { + if !cfg!(feature = "blurhashing") { + return Ok(None); + } + let config = BlurhashConfig::from(self.services.server.config.blurhashing); + + // since 0 means disabled blurhashing, skipped blurhashing if config.size_limit == 0 { - trace!("since 0 means disabled blurhashing, skipped blurhashing logic"); - return None; + return Ok(None); } - let file_data = file.to_owned(); - let content_type = content_type.map(String::from); - let file_name = file_name.map(String::from); - let blurhashing_result = tokio::task::spawn_blocking(move || { - get_blurhash_from_request(&file_data, content_type, file_name, config) - }) - .await - .expect("no join error"); - - match blurhashing_result { - | Ok(result) => Some(result), - | Err(e) => { - debug_error!("Error when blurhashing: {e}"); - None - }, - } + get_blurhash_from_request(file, content_type, file_name, config) + .map_err(|e| err!(debug_error!("blurhashing error: {e}"))) + .map(Some) } /// Returns the blurhash or a blurhash error which implements Display. 
+#[tracing::instrument( + name = "blurhash", + level = "debug", + skip(data), + fields( + bytes = data.len(), + ), +)] fn get_blurhash_from_request( data: &[u8], - mime: Option, - filename: Option, + mime: Option<&str>, + filename: Option<&str>, config: BlurhashConfig, ) -> Result { // Get format image is supposed to be in let format = get_format_from_data_mime_and_filename(data, mime, filename)?; + // Get the image reader for said image format let decoder = get_image_decoder_with_format_and_data(format, data)?; + // Check image size makes sense before unpacking whole image if is_image_above_size_limit(&decoder, config) { return Err(BlurhashingError::ImageTooLarge); } + // decode the image finally let image = DynamicImage::from_decoder(decoder)?; @@ -64,24 +66,17 @@ fn get_blurhash_from_request( /// different file format than file. fn get_format_from_data_mime_and_filename( data: &[u8], - mime: Option, - filename: Option, + mime: Option<&str>, + filename: Option<&str>, ) -> Result { - let mut image_format = None; - if let Some(mime) = mime { - image_format = ImageFormat::from_mime_type(mime); - } - if let (Some(filename), None) = (filename, image_format) { - if let Some(extension) = Path::new(&filename).extension() { - image_format = ImageFormat::from_mime_type(extension.to_string_lossy()); - } - } + let extension = filename + .map(Path::new) + .and_then(Path::extension) + .map(OsStr::to_string_lossy); - if let Some(format) = image_format { - Ok(format) - } else { - image::guess_format(data).map_err(Into::into) - } + mime.or(extension.as_deref()) + .and_then(ImageFormat::from_mime_type) + .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok) } fn get_image_decoder_with_format_and_data( @@ -99,23 +94,37 @@ fn is_image_above_size_limit( ) -> bool { decoder.total_bytes() >= blurhash_config.size_limit } + +#[cfg(feature = "blurhashing")] +#[tracing::instrument(name = "encode", level = "debug", skip_all)] #[inline] fn blurhash_an_image( image: 
&DynamicImage, blurhash_config: BlurhashConfig, ) -> Result { - Ok(encode_image( + Ok(blurhash::encode_image( blurhash_config.components_x, blurhash_config.components_y, &image.to_rgba8(), )?) } -#[derive(Clone, Copy)] + +#[cfg(not(feature = "blurhashing"))] +#[inline] +fn blurhash_an_image( + _image: &DynamicImage, + _blurhash_config: BlurhashConfig, +) -> Result { + Err(BlurhashingError::Unavailable) +} + +#[derive(Clone, Copy, Debug)] pub struct BlurhashConfig { - components_x: u32, - components_y: u32, + pub components_x: u32, + pub components_y: u32, + /// size limit in bytes - size_limit: u64, + pub size_limit: u64, } impl From for BlurhashConfig { @@ -129,15 +138,20 @@ impl From for BlurhashConfig { } #[derive(Debug)] -pub(crate) enum BlurhashingError { +pub enum BlurhashingError { + HashingLibError(Box), ImageError(Box), - HashingLibError(Box), ImageTooLarge, + + #[cfg(not(feature = "blurhashing"))] + Unavailable, } + impl From for BlurhashingError { fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } } +#[cfg(feature = "blurhashing")] impl From for BlurhashingError { fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } } @@ -152,6 +166,9 @@ impl Display for BlurhashingError { | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, + + #[cfg(not(feature = "blurhashing"))] + | Self::Unavailable => write!(f, "Blurhashing is not supported")?, }; Ok(()) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 7775173a..f5913f43 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,4 +1,3 @@ -#[cfg(feature = "blurhashing")] pub mod blurhash; mod data; pub(super) mod migrations; From 04656a78865dfd60176965c5ae531d1939e0dd7d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 03:00:47 +0000 Subject: [PATCH 0650/1248] fix spaces pagination bug Signed-off-by: Jason Volk --- src/service/rooms/spaces/mod.rs | 24 
+++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1ee2727c..11794752 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -582,7 +582,7 @@ impl Service { parents.pop_front(); parents.push_back(room); - let short_room_ids: Vec<_> = parents + let next_short_room_ids: Vec<_> = parents .iter() .stream() .filter_map(|room_id| async move { @@ -591,16 +591,18 @@ impl Service { .collect() .await; - Some( - PaginationToken { - short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string(), + (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( + || { + PaginationToken { + short_room_ids: next_short_room_ids, + limit: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + suggested_only, + } + .to_string() + }, ) } else { None From 9158edfb7c98229af43b2124e972723b1ab4e75a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 05:10:30 +0000 Subject: [PATCH 0651/1248] fix empty join timeline bug Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 48 ++++++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a97e4329..1d1a91ba 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -691,7 +691,7 @@ async fn load_joined_room( heroes, joined_member_count, invited_member_count, - state_events, + mut state_events, mut device_list_updates, left_encrypted_users, } = calculate_state_changes( @@ -708,6 +708,39 @@ async fn load_joined_room( .boxed() .await?; + let 
is_sender_membership = |pdu: &PduEvent| { + pdu.kind == StateEventType::RoomMember.into() + && pdu + .state_key + .as_deref() + .is_some_and(is_equal_to!(sender_user.as_str())) + }; + + let joined_sender_member: Option<_> = (joined_since_last_sync && timeline_pdus.is_empty()) + .then(|| { + state_events + .iter() + .position(is_sender_membership) + .map(|pos| state_events.swap_remove(pos)) + }) + .flatten(); + + let prev_batch = timeline_pdus.first().map(at!(0)).or_else(|| { + joined_sender_member + .is_some() + .then_some(since) + .map(Into::into) + }); + + let room_events = timeline_pdus + .into_iter() + .stream() + .wide_filter_map(|item| ignored_filter(services, item, sender_user)) + .map(at!(1)) + .chain(joined_sender_member.into_iter().stream()) + .map(|pdu| pdu.to_sync_room_event()) + .collect::>(); + let account_data_events = services .account_data .changes_since(Some(room_id), sender_user, since, Some(next_batch)) @@ -722,13 +755,6 @@ async fn load_joined_room( .map(ToOwned::to_owned) .collect::>(); - let room_events = timeline_pdus - .iter() - .stream() - .wide_filter_map(|item| ignored_filter(services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - let send_notification_counts = last_notification_read.is_none_or(|count| count > since); let notification_count: OptionFuture<_> = send_notification_counts @@ -830,12 +856,8 @@ async fn load_joined_room( unread_notifications: UnreadNotificationsCount { highlight_count, notification_count }, timeline: Timeline { limited: limited || joined_since_last_sync, + prev_batch: prev_batch.as_ref().map(ToString::to_string), events: room_events, - prev_batch: timeline_pdus - .first() - .map(at!(0)) - .as_ref() - .map(ToString::to_string), }, state: RoomState { events: state_events From f80d85e1076f1f155a7484d5ad80acbb58a9b1ac Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:43:27 -0500 Subject: [PATCH 0652/1248] add SIGUSR1 systemctl reload config support to 
systemd units Signed-off-by: strawberry --- arch/conduwuit.service | 3 ++- debian/conduwuit.service | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 7c05c259..4b7853e3 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -7,7 +7,8 @@ RequiresMountsFor=/var/lib/private/conduwuit [Service] DynamicUser=yes -Type=notify +Type=notify-reload +ReloadSignal=SIGUSR1 AmbientCapabilities= CapabilityBoundingSet= diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 3c2ec49d..452544bf 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -8,7 +8,8 @@ Documentation=https://conduwuit.puppyirl.gay/ DynamicUser=yes User=conduwuit Group=conduwuit -Type=notify +Type=notify-reload +ReloadSignal=SIGUSR1 Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" From f6dfc9538f8b625c2ae28a462ecf4b7e3d208f85 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:44:49 -0500 Subject: [PATCH 0653/1248] bump ruwuma to stop erroring on duplicate yaml values on appservice EDUs (we dont implement this atm anyways) Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b710d6fc..926099b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3490,7 +3490,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "assign", "js_int", @@ -3512,7 +3512,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3524,7 +3524,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "assign", @@ -3547,7 +3547,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3603,7 +3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "bytes", "http", @@ -3621,7 +3621,7 
@@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3667,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "headers", "http", @@ -3680,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3696,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index b25d9175..ce483bbc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -342,7 +342,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "b560338b2a50dbf61ecfe80808b9b095ad4cec00" +rev = "517ac4572276a2e0ad587113776c544b51166f08" features = [ "compat", "rand", From fda8b3680986dc8e038d51b93f7d36bf5c991ef6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:45:21 -0500 Subject: [PATCH 0654/1248] add more systemd notify integration with stopping/reloading/ready states Signed-off-by: strawberry --- src/core/server.rs | 12 ++++++++++-- src/router/run.rs | 4 ---- src/service/config/mod.rs | 8 ++++++++ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index 45ba7420..80493c94 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -69,6 +69,10 @@ impl Server { return Err!("Reloading not 
enabled"); } + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) + .expect("failed to notify systemd of reloading state"); + if self.reloading.swap(true, Ordering::AcqRel) { return Err!("Reloading already in progress"); } @@ -83,7 +87,7 @@ impl Server { }) } - pub fn restart(&self) -> Result<()> { + pub fn restart(&self) -> Result { if self.restarting.swap(true, Ordering::AcqRel) { return Err!("Restart already in progress"); } @@ -93,7 +97,11 @@ impl Server { }) } - pub fn shutdown(&self) -> Result<()> { + pub fn shutdown(&self) -> Result { + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) + .expect("failed to notify systemd of stopping state"); + if self.stopping.swap(true, Ordering::AcqRel) { return Err!("Shutdown already in progress"); } diff --git a/src/router/run.rs b/src/router/run.rs index 26701735..024cb813 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -100,10 +100,6 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { ); } - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) - .expect("failed to notify systemd of stopping state"); - info!("Shutdown complete."); Ok(()) } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index 8bd09a52..c9ac37a3 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -43,7 +43,15 @@ impl Deref for Service { #[implement(Service)] fn handle_reload(&self) -> Result { if self.server.config.config_reload_signal { + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) + .expect("failed to notify systemd of reloading state"); + self.reload(iter::empty())?; + + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) + .expect("failed to notify systemd of ready 
state"); } Ok(()) From 62d80b97e65237539a103ded87f4e650ddafe4b8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 03:14:37 +0000 Subject: [PATCH 0655/1248] add systemd unit logging mode Signed-off-by: Jason Volk --- src/core/log/console.rs | 77 +++++++++++++++++++++++++++++++++--- src/core/log/mod.rs | 4 +- src/main/logging.rs | 4 +- src/service/admin/console.rs | 5 ++- 4 files changed, 78 insertions(+), 12 deletions(-) diff --git a/src/core/log/console.rs b/src/core/log/console.rs index 0bc44fa7..1f04ba26 100644 --- a/src/core/log/console.rs +++ b/src/core/log/console.rs @@ -1,3 +1,5 @@ +use std::{env, io, sync::LazyLock}; + use tracing::{ field::{Field, Visit}, Event, Level, Subscriber, @@ -7,12 +9,59 @@ use tracing_subscriber::{ fmt, fmt::{ format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, - FmtContext, FormatEvent, FormatFields, + FmtContext, FormatEvent, FormatFields, MakeWriter, }, registry::LookupSpan, }; -use crate::{Config, Result}; +use crate::{apply, Config, Result}; + +static SYSTEMD_MODE: LazyLock = + LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok()); + +pub struct ConsoleWriter { + stdout: io::Stdout, + stderr: io::Stderr, + _journal_stream: [u64; 2], + use_stderr: bool, +} + +impl ConsoleWriter { + #[must_use] + pub fn new(_config: &Config) -> Self { + let journal_stream = get_journal_stream(); + Self { + stdout: io::stdout(), + stderr: io::stderr(), + _journal_stream: journal_stream.into(), + use_stderr: journal_stream.0 != 0, + } + } +} + +impl<'a> MakeWriter<'a> for ConsoleWriter { + type Writer = &'a Self; + + fn make_writer(&'a self) -> Self::Writer { self } +} + +impl io::Write for &'_ ConsoleWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.use_stderr { + self.stderr.lock().write(buf) + } else { + self.stdout.lock().write(buf) + } + } + + fn flush(&mut self) -> io::Result<()> { + if self.use_stderr { + self.stderr.lock().flush() + } else { + 
self.stdout.lock().flush() + } + } +} pub struct ConsoleFormat { _compact: Format, @@ -20,10 +69,6 @@ pub struct ConsoleFormat { pretty: Format, } -struct ConsoleVisitor<'a> { - visitor: DefaultVisitor<'a>, -} - impl ConsoleFormat { #[must_use] pub fn new(config: &Config) -> Self { @@ -68,6 +113,10 @@ where } } +struct ConsoleVisitor<'a> { + visitor: DefaultVisitor<'a>, +} + impl<'writer> FormatFields<'writer> for ConsoleFormat { fn format_fields(&self, writer: Writer<'writer>, fields: R) -> Result<(), std::fmt::Error> where @@ -92,3 +141,19 @@ impl Visit for ConsoleVisitor<'_> { self.visitor.record_debug(field, value); } } + +#[must_use] +fn get_journal_stream() -> (u64, u64) { + is_systemd_mode() + .then(|| env::var("JOURNAL_STREAM").ok()) + .flatten() + .as_deref() + .and_then(|s| s.split_once(':')) + .map(apply!(2, str::parse)) + .map(apply!(2, Result::unwrap_or_default)) + .unwrap_or((0, 0)) +} + +#[inline] +#[must_use] +pub fn is_systemd_mode() -> bool { *SYSTEMD_MODE } diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 0c51a383..0c1840d0 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -2,14 +2,14 @@ pub mod capture; pub mod color; -mod console; +pub mod console; pub mod fmt; pub mod fmt_span; mod reload; mod suppress; pub use capture::Capture; -pub use console::ConsoleFormat; +pub use console::{is_systemd_mode, ConsoleFormat, ConsoleWriter}; pub use reload::{LogLevelReloadHandles, ReloadHandle}; pub use suppress::Suppress; pub use tracing::Level; diff --git a/src/main/logging.rs b/src/main/logging.rs index 85945e8a..35e482de 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use conduwuit::{ config::Config, debug_warn, err, - log::{capture, fmt_span, ConsoleFormat, LogLevelReloadHandles}, + log::{capture, fmt_span, ConsoleFormat, ConsoleWriter, LogLevelReloadHandles}, result::UnwrapOrErr, Result, }; @@ -30,7 +30,7 @@ pub(crate) fn init( .with_span_events(console_span_events) 
.event_format(ConsoleFormat::new(config)) .fmt_fields(ConsoleFormat::new(config)) - .map_writer(|w| w); + .with_writer(ConsoleWriter::new(config)); let (console_reload_filter, console_reload_handle) = reload::Layer::new(console_filter.clone()); diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index de201f4b..59b9a31b 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -1,10 +1,11 @@ #![cfg(feature = "console")] + use std::{ collections::VecDeque, sync::{Arc, Mutex}, }; -use conduwuit::{debug, defer, error, log, Server}; +use conduwuit::{debug, defer, error, log, log::is_systemd_mode, Server}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; @@ -123,7 +124,7 @@ impl Console { } async fn readline(self: &Arc) -> Result { - let _suppression = log::Suppress::new(&self.server); + let _suppression = (!is_systemd_mode()).then(|| log::Suppress::new(&self.server)); let (mut readline, _writer) = Readline::new(PROMPT.to_owned())?; let self_ = Arc::clone(self); From 16b07ae3ecf6dee591b79dd6198cb3e5a99410be Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:47:10 -0500 Subject: [PATCH 0656/1248] add default systemd support for a TTY to use console mode from Signed-off-by: strawberry --- arch/conduwuit.service | 12 ++++++++++++ debian/conduwuit.service | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 4b7853e3..fa3616d8 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -10,6 +10,18 @@ DynamicUser=yes Type=notify-reload ReloadSignal=SIGUSR1 +TTYPath=/dev/tty25 +DeviceAllow=char-tty +StandardInput=tty-force +StandardOutput=tty +StandardError=journal+console +TTYReset=yes +# uncomment to allow buffer to be cleared every restart +TTYVTDisallocate=no + +TTYColumns=120 +TTYRows=40 + AmbientCapabilities= 
CapabilityBoundingSet= diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 452544bf..4d6f4eef 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -11,6 +11,18 @@ Group=conduwuit Type=notify-reload ReloadSignal=SIGUSR1 +TTYPath=/dev/tty25 +DeviceAllow=char-tty +StandardInput=tty-force +StandardOutput=tty +StandardError=journal+console +TTYReset=yes +# uncomment to allow buffer to be cleared every restart +TTYVTDisallocate=no + +TTYColumns=120 +TTYRows=40 + Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" ExecStart=/usr/sbin/conduwuit From f761d4d5c9e347699725bff0437a8df3b1b3db59 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:48:19 -0500 Subject: [PATCH 0657/1248] bump db version to 17, cleanup, rerun old migrations for users who downgraded Signed-off-by: strawberry --- src/service/globals/data.rs | 3 +-- src/service/migrations.rs | 32 +++++++++++++------------------- 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 39cb9be1..26a18607 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -69,9 +69,8 @@ impl Data { } #[inline] - pub fn bump_database_version(&self, new_version: u64) -> Result<()> { + pub fn bump_database_version(&self, new_version: u64) { self.global.raw_put(b"version", new_version); - Ok(()) } #[inline] diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 27b4ab5a..9c3ea293 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -27,15 +27,7 @@ use crate::{media, Services}; /// - If database is opened at lesser version we apply migrations up to this. /// Note that named-feature migrations may also be performed when opening at /// equal or lesser version. These are expected to be backward-compatible. -pub(crate) const DATABASE_VERSION: u64 = 13; - -/// Conduit's database version. 
-/// -/// Conduit bumped the database version to 16, but did not introduce any -/// breaking changes. Their database migrations are extremely fragile and risky, -/// and also do not really apply to us, so just to retain Conduit -> conduwuit -/// compatibility we'll check for both versions. -pub(crate) const CONDUIT_DATABASE_VERSION: u64 = 16; +pub(crate) const DATABASE_VERSION: u64 = 17; pub(crate) async fn migrations(services: &Services) -> Result<()> { let users_count = services.users.count().await; @@ -63,10 +55,7 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { async fn fresh(services: &Services) -> Result<()> { let db = &services.db; - services - .globals - .db - .bump_database_version(DATABASE_VERSION)?; + services.globals.db.bump_database_version(DATABASE_VERSION); db["global"].insert(b"feat_sha256_media", []); db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); @@ -130,6 +119,7 @@ async fn migrate(services: &Services) -> Result<()> { .get(b"fix_referencedevents_missing_sep") .await .is_not_found() + || services.globals.db.database_version().await < 17 { fix_referencedevents_missing_sep(services).await?; } @@ -138,15 +128,19 @@ async fn migrate(services: &Services) -> Result<()> { .get(b"fix_readreceiptid_readreceipt_duplicates") .await .is_not_found() + || services.globals.db.database_version().await < 17 { fix_readreceiptid_readreceipt_duplicates(services).await?; } - let version_match = services.globals.db.database_version().await == DATABASE_VERSION - || services.globals.db.database_version().await == CONDUIT_DATABASE_VERSION; + if services.globals.db.database_version().await < 17 { + services.globals.db.bump_database_version(17); + info!("Migration: Bumped database version to 17"); + } - assert!( - version_match, + assert_eq!( + services.globals.db.database_version().await, + DATABASE_VERSION, "Failed asserting local database version {} is equal to known latest conduwuit database \ version {}", 
services.globals.db.database_version().await, @@ -290,7 +284,7 @@ async fn db_lt_12(services: &Services) -> Result<()> { .await?; } - services.globals.db.bump_database_version(12)?; + services.globals.db.bump_database_version(12); info!("Migration: 11 -> 12 finished"); Ok(()) } @@ -335,7 +329,7 @@ async fn db_lt_13(services: &Services) -> Result<()> { .await?; } - services.globals.db.bump_database_version(13)?; + services.globals.db.bump_database_version(13); info!("Migration: 12 -> 13 finished"); Ok(()) } From ef2d307c15dba1731dc6b4d67e758f27590640c6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:48:48 -0500 Subject: [PATCH 0658/1248] fix warnings and errors when building with no features Signed-off-by: strawberry --- src/main/runtime.rs | 11 ++--- src/service/media/blurhash.rs | 87 +++++++++++++++++---------------- src/service/media/migrations.rs | 8 +-- 3 files changed, 51 insertions(+), 55 deletions(-) diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 02b9931f..474b373b 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -8,13 +8,11 @@ use std::{ time::Duration, }; +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] +use conduwuit::result::LogDebugErr; use conduwuit::{ is_true, - result::LogDebugErr, - utils::{ - available_parallelism, - sys::compute::{nth_core_available, set_affinity}, - }, + utils::sys::compute::{nth_core_available, set_affinity}, Result, }; use tokio::runtime::Builder; @@ -25,6 +23,7 @@ const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] const DISABLE_MUZZY_THRESHOLD: usize = 4; static WORKER_AFFINITY: OnceLock = OnceLock::new(); @@ -137,7 +136,7 @@ fn set_worker_mallctl(id: usize) { .get() .expect("GC_MUZZY initialized by runtime::new()"); - let muzzy_auto_disable = available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + let 
muzzy_auto_disable = conduwuit::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { set_muzzy_decay(-1).log_debug_err().ok(); } diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index aa6685b2..60ade723 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,20 +1,30 @@ -use std::{error::Error, ffi::OsStr, fmt::Display, io::Cursor, path::Path}; - -use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, err, implement, Result}; -use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; +#[cfg(feature = "blurhashing")] +use conduwuit::config::BlurhashConfig as CoreBlurhashConfig; +use conduwuit::{implement, Result}; use super::Service; + #[implement(Service)] +#[cfg(not(feature = "blurhashing"))] +pub fn create_blurhash( + &self, + _file: &[u8], + _content_type: Option<&str>, + _file_name: Option<&str>, +) -> Result> { + conduwuit::debug_warn!("blurhashing on upload support was not compiled"); + + Ok(None) +} + +#[implement(Service)] +#[cfg(feature = "blurhashing")] pub fn create_blurhash( &self, file: &[u8], content_type: Option<&str>, file_name: Option<&str>, ) -> Result> { - if !cfg!(feature = "blurhashing") { - return Ok(None); - } - let config = BlurhashConfig::from(self.services.server.config.blurhashing); // since 0 means disabled blurhashing, skipped blurhashing @@ -23,7 +33,7 @@ pub fn create_blurhash( } get_blurhash_from_request(file, content_type, file_name, config) - .map_err(|e| err!(debug_error!("blurhashing error: {e}"))) + .map_err(|e| conduwuit::err!(debug_error!("blurhashing error: {e}"))) .map(Some) } @@ -36,6 +46,7 @@ pub fn create_blurhash( bytes = data.len(), ), )] +#[cfg(feature = "blurhashing")] fn get_blurhash_from_request( data: &[u8], mime: Option<&str>, @@ -53,8 +64,7 @@ fn get_blurhash_from_request( return Err(BlurhashingError::ImageTooLarge); } - // decode the image finally - let 
image = DynamicImage::from_decoder(decoder)?; + let image = image::DynamicImage::from_decoder(decoder)?; blurhash_an_image(&image, config) } @@ -64,31 +74,34 @@ fn get_blurhash_from_request( /// Then it checks if the filename has a format, otherwise just guess based on /// the binary data Assumes that mime and filename extension won't be for a /// different file format than file. +#[cfg(feature = "blurhashing")] fn get_format_from_data_mime_and_filename( data: &[u8], mime: Option<&str>, filename: Option<&str>, -) -> Result { +) -> Result { let extension = filename - .map(Path::new) - .and_then(Path::extension) - .map(OsStr::to_string_lossy); + .map(std::path::Path::new) + .and_then(std::path::Path::extension) + .map(std::ffi::OsStr::to_string_lossy); mime.or(extension.as_deref()) - .and_then(ImageFormat::from_mime_type) + .and_then(image::ImageFormat::from_mime_type) .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok) } +#[cfg(feature = "blurhashing")] fn get_image_decoder_with_format_and_data( - image_format: ImageFormat, + image_format: image::ImageFormat, data: &[u8], -) -> Result, BlurhashingError> { - let mut image_reader = ImageReader::new(Cursor::new(data)); +) -> Result, BlurhashingError> { + let mut image_reader = image::ImageReader::new(std::io::Cursor::new(data)); image_reader.set_format(image_format); Ok(Box::new(image_reader.into_decoder()?)) } -fn is_image_above_size_limit( +#[cfg(feature = "blurhashing")] +fn is_image_above_size_limit( decoder: &T, blurhash_config: BlurhashConfig, ) -> bool { @@ -99,7 +112,7 @@ fn is_image_above_size_limit( #[tracing::instrument(name = "encode", level = "debug", skip_all)] #[inline] fn blurhash_an_image( - image: &DynamicImage, + image: &image::DynamicImage, blurhash_config: BlurhashConfig, ) -> Result { Ok(blurhash::encode_image( @@ -109,15 +122,6 @@ fn blurhash_an_image( )?) 
} -#[cfg(not(feature = "blurhashing"))] -#[inline] -fn blurhash_an_image( - _image: &DynamicImage, - _blurhash_config: BlurhashConfig, -) -> Result { - Err(BlurhashingError::Unavailable) -} - #[derive(Clone, Copy, Debug)] pub struct BlurhashConfig { pub components_x: u32, @@ -127,6 +131,7 @@ pub struct BlurhashConfig { pub size_limit: u64, } +#[cfg(feature = "blurhashing")] impl From for BlurhashConfig { fn from(value: CoreBlurhashConfig) -> Self { Self { @@ -138,17 +143,17 @@ impl From for BlurhashConfig { } #[derive(Debug)] +#[cfg(feature = "blurhashing")] pub enum BlurhashingError { - HashingLibError(Box), - ImageError(Box), + HashingLibError(Box), + #[cfg(feature = "blurhashing")] + ImageError(Box), ImageTooLarge, - - #[cfg(not(feature = "blurhashing"))] - Unavailable, } -impl From for BlurhashingError { - fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } +#[cfg(feature = "blurhashing")] +impl From for BlurhashingError { + fn from(value: image::ImageError) -> Self { Self::ImageError(Box::new(value)) } } #[cfg(feature = "blurhashing")] @@ -156,19 +161,17 @@ impl From for BlurhashingError { fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } } -impl Display for BlurhashingError { +#[cfg(feature = "blurhashing")] +impl std::fmt::Display for BlurhashingError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Blurhash Error:")?; match &self { | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?, | Self::HashingLibError(e) => write!(f, "There was an error with the blurhashing library => {e}")?, - + #[cfg(feature = "blurhashing")] | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, - - #[cfg(not(feature = "blurhashing"))] - | Self::Unavailable => write!(f, "Blurhashing is not supported")?, }; Ok(()) diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 9555edd7..8526ffcd 100644 --- 
a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -13,7 +13,7 @@ use conduwuit::{ warn, Config, Result, }; -use crate::{migrations, Services}; +use crate::Services; /// Migrates a media directory from legacy base64 file names to sha2 file names. /// All errors are fatal. Upon success the database is keyed to not perform this @@ -48,12 +48,6 @@ pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { } } - // Apply fix from when sha256_media was backward-incompat and bumped the schema - // version from 13 to 14. For users satisfying these conditions we can go back. - if services.globals.db.database_version().await == 14 && migrations::DATABASE_VERSION == 13 { - services.globals.db.bump_database_version(13)?; - } - db["global"].insert(b"feat_sha256_media", []); info!("Finished applying sha256_media"); Ok(()) From c7c9f0e4a60ffd4b497bb8e426ffc34c5e118913 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:57:30 -0500 Subject: [PATCH 0659/1248] catch clippy lints for --no-default-features builds Signed-off-by: strawberry --- engage.toml | 50 +++++++++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/engage.toml b/engage.toml index 1d6a5475..279e999c 100644 --- a/engage.toml +++ b/engage.toml @@ -101,7 +101,6 @@ direnv exec . \ cargo clippy \ --workspace \ --profile test \ - --all-targets \ --color=always \ -- \ -D warnings @@ -116,13 +115,27 @@ env DIRENV_DEVSHELL=all-features \ cargo clippy \ --workspace \ --profile test \ - --all-targets \ --all-features \ --color=always \ -- \ -D warnings """ +[[task]] +name = "clippy/no-features" +group = "lints" +script = """ +env DIRENV_DEVSHELL=no-features \ + direnv exec . \ + cargo clippy \ + --workspace \ + --profile test \ + --no-default-features \ + --color=always \ + -- \ + -D warnings +""" + [[task]] name = "clippy/jemalloc" group = "lints" @@ -131,26 +144,12 @@ direnv exec . 
\ cargo clippy \ --workspace \ --profile test \ - --features jemalloc \ - --all-targets \ + --features=jemalloc \ --color=always \ -- \ -D warnings """ -#[[task]] -#name = "clippy/hardened_malloc" -#group = "lints" -#script = """ -#cargo clippy \ -# --workspace \ -# --features hardened_malloc \ -# --all-targets \ -# --color=always \ -# -- \ -# -D warnings -#""" - [[task]] name = "lychee" group = "lints" @@ -170,7 +169,6 @@ env DIRENV_DEVSHELL=all-features \ cargo test \ --workspace \ --profile test \ - --all-targets \ --all-features \ --color=always \ -- \ @@ -186,7 +184,21 @@ env DIRENV_DEVSHELL=default \ cargo test \ --workspace \ --profile test \ - --all-targets \ + --color=always \ + -- \ + --color=always +""" + +[[task]] +name = "cargo/no-features" +group = "tests" +script = """ +env DIRENV_DEVSHELL=no-features \ + direnv exec . \ + cargo test \ + --workspace \ + --profile test \ + --no-default-features \ --color=always \ -- \ --color=always From 43e6c27bb772461722409e9c56146a106d6c6343 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:07:49 -0500 Subject: [PATCH 0660/1248] misc nix tweaks to maybe speedup ci Signed-off-by: strawberry --- bin/complement | 3 +- flake.nix | 14 +------- nix/pkgs/complement/config.toml | 21 +++++++++--- nix/pkgs/complement/default.nix | 6 ---- nix/pkgs/main/default.nix | 58 +++++++++++++++++---------------- src/router/serve/tls.rs | 9 +++-- 6 files changed, 54 insertions(+), 57 deletions(-) diff --git a/bin/complement b/bin/complement index a1db4b32..a4c62856 100755 --- a/bin/complement +++ b/bin/complement @@ -34,7 +34,8 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null -bin/nix-build-and-cache just .#linux-complement +#bin/nix-build-and-cache just .#linux-complement +bin/nix-build-and-cache just .#complement docker load < result popd > /dev/null diff --git a/flake.nix b/flake.nix index 920d3d14..3cef1af5 100644 --- a/flake.nix +++ b/flake.nix @@ -169,21 +169,9 @@ # used for rust 
caching in CI to speed it up sccache - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo ] # liburing is Linux-exclusive - ++ lib.optional stdenv.hostPlatform.isLinux liburing - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]) + ++ lib.optional stdenv.hostPlatform.isLinux liburing) ++ scope.main.buildInputs ++ scope.main.propagatedBuildInputs ++ scope.main.nativeBuildInputs; diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index f20abee2..99c151c5 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -17,19 +17,32 @@ ip_range_denylist = [] url_preview_domain_contains_allowlist = ["*"] url_preview_domain_explicit_denylist = ["*"] media_compat_file_link = false -media_startup_check = false -prune_missing_media = false +media_startup_check = true +prune_missing_media = true log_colors = false admin_room_notices = false allow_check_for_updates = false -allow_unstable_room_versions = true +intentionally_unknown_config_option_for_testing = true rocksdb_log_level = "debug" rocksdb_max_log_files = 1 rocksdb_recovery_mode = 0 rocksdb_paranoid_file_checks = true log_guest_registrations = false allow_legacy_media = true -startup_netburst = false +startup_netburst = true +startup_netburst_keep = -1 + +# valgrind makes things so slow +dns_timeout = 60 +dns_attempts = 20 +request_conn_timeout = 60 +request_timeout = 120 +well_known_conn_timeout = 60 +well_known_timeout = 60 +federation_idle_timeout = 300 +sender_timeout = 300 +sender_idle_timeout = 300 +sender_retry_backoff_limit = 300 
[global.tls] certs = "/certificate.crt" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index e35cbf04..d9af0779 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -18,18 +18,12 @@ let all_features = true; disable_release_max_log_level = true; disable_features = [ - # no reason to use jemalloc for complement, just has compatibility/build issues - "jemalloc" - "jemalloc_stats" - "jemalloc_prof" # console/CLI stuff isn't used or relevant for complement "console" "tokio_console" # sentry telemetry isn't useful for complement, disabled by default anyways "sentry_telemetry" "perf_measurements" - # the containers don't use or need systemd signal support - "systemd" # this is non-functional on nix for some reason "hardened_malloc" # dont include experimental features diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index d7424d11..4150b389 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -82,7 +82,7 @@ rust-jemalloc-sys' = (rust-jemalloc-sys.override { buildDepsOnlyEnv = let rocksdb' = (rocksdb.override { - jemalloc = rust-jemalloc-sys'; + jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'; # rocksdb fails to build with prefixed jemalloc, which is required on # darwin due to [1]. In this case, fall back to building rocksdb with # libc malloc. 
This should not cause conflicts, because all of the @@ -103,6 +103,12 @@ buildDepsOnlyEnv = ++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ]) ) ++ old.cmakeFlags; + + # outputs has "tools" which we dont need or use + outputs = [ "out" ]; + + # preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use + preInstall = ""; }); in { @@ -156,6 +162,19 @@ commonAttrs = { ]; }; + # This is redundant with CI + doCheck = false; + + cargoTestCommand = "cargo test --locked "; + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + cargoTestExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + dontStrip = profile == "dev" || profile == "test"; dontPatchELF = profile == "dev" || profile == "test"; @@ -181,27 +200,7 @@ commonAttrs = { # differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious # rebuilds of bindgen and its depedents. 
jq - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo - ] - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]; - - # for some reason gcc and other weird deps are added to OCI images and bloats it up - # - # - postInstall = with pkgsBuildHost; '' - find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${llvm} -t ${rustc.unwrapped} -t ${rustc} '{}' + - ''; + ]; }; in @@ -210,15 +209,18 @@ craneLib.buildPackage ( commonAttrs // { env = buildDepsOnlyEnv; }); - cargoExtraArgs = "--no-default-features " + # This is redundant with CI + doCheck = false; + + cargoTestCommand = "cargo test --locked "; + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + cargoTestExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - - # This is redundant with CI - cargoTestCommand = ""; - cargoCheckCommand = ""; - doCheck = false; env = buildPackageEnv; diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index 9d3fbd3b..ab1a9371 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -17,14 +17,13 @@ pub(super) async fn serve( addrs: Vec, ) -> Result { let tls = &server.config.tls; - let certs = tls - .certs - .as_ref() - .ok_or(err!(Config("tls.certs", "Missing required value in tls config section")))?; + let certs = tls.certs.as_ref().ok_or_else(|| { + err!(Config("tls.certs", "Missing required value in tls config 
section")) + })?; let key = tls .key .as_ref() - .ok_or(err!(Config("tls.key", "Missing required value in tls config section")))?; + .ok_or_else(|| err!(Config("tls.key", "Missing required value in tls config section")))?; // we use ring for ruma and hashing state, but aws-lc-rs is the new default. // without this, TLS mode will panic. From add2e0e9eefc2cfcc154b1e4877988f15ca682a7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:20:02 -0500 Subject: [PATCH 0661/1248] bump rust-rocksdb Signed-off-by: strawberry --- Cargo.lock | 4 ++-- deps/rust-rocksdb/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 926099b5..82962421 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3728,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index ba8259a3..c6af428d 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = 
"1f032427d3a0e7b0f13c04b4e34712bd8610291b" +rev = "7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" #branch = "master" default-features = false From 8345ea2cd31d26bcf5c5eb61bbda5cd9958c11c5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:47:54 -0500 Subject: [PATCH 0662/1248] add --locked and --no-fail-fast to cargo test, add other feature test Signed-off-by: strawberry --- engage.toml | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/engage.toml b/engage.toml index 279e999c..c1a2be1f 100644 --- a/engage.toml +++ b/engage.toml @@ -86,6 +86,7 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo doc \ --workspace \ + --locked \ --profile test \ --all-features \ --no-deps \ @@ -100,6 +101,7 @@ script = """ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --color=always \ -- \ @@ -114,6 +116,7 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --all-features \ --color=always \ @@ -129,6 +132,7 @@ env DIRENV_DEVSHELL=no-features \ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --no-default-features \ --color=always \ @@ -137,14 +141,16 @@ env DIRENV_DEVSHELL=no-features \ """ [[task]] -name = "clippy/jemalloc" +name = "clippy/other-features" group = "lints" script = """ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ - --features=jemalloc \ + --no-default-features \ + --features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \ --color=always \ -- \ -D warnings @@ -168,7 +174,10 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --all-features \ --color=always \ -- \ @@ -183,7 +192,10 @@ env DIRENV_DEVSHELL=default \ direnv exec . 
\ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --color=always \ -- \ --color=always @@ -197,7 +209,10 @@ env DIRENV_DEVSHELL=no-features \ direnv exec . \ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --no-default-features \ --color=always \ -- \ From 88e7e50daff94ef8e3fe3d67e72214f002fdb22b Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Feb 2025 11:49:00 -0500 Subject: [PATCH 0663/1248] add missing source OCI image label metadata Signed-off-by: strawberry --- nix/pkgs/oci-image/default.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 5520c920..1650053d 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -36,6 +36,7 @@ dockerTools.buildLayeredImage { "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; "org.opencontainers.image.licenses" = "Apache-2.0"; "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; + "org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit"; "org.opencontainers.image.title" = main.pname; "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; "org.opencontainers.image.vendor" = "girlbossceo"; From cfcd6eb1a6a117db94e6f9e631a0d881a62d3299 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Feb 2025 18:00:58 -0500 Subject: [PATCH 0664/1248] bump ruwuma to stop erroring on empty push response body Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82962421..caef5859 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3490,7 +3490,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "assign", "js_int", @@ -3512,7 +3512,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3524,7 +3524,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "assign", @@ -3547,7 +3547,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3603,7 
+3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "bytes", "http", @@ -3621,7 +3621,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3667,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "headers", "http", @@ -3680,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3696,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index ce483bbc..38654be3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -342,7 +342,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "517ac4572276a2e0ad587113776c544b51166f08" +rev = "f5667c6292adb43fbe4725d31d6b5127a0cf60ce" features = [ "compat", "rand", From b6e9dc3d98704c56027219d3775336910a0136c6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 9 Feb 2025 10:17:28 -0500 Subject: [PATCH 0665/1248] comment out borked ci thing for now 
Signed-off-by: strawberry --- .github/workflows/ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 35d60aa1..24f2db45 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -128,7 +128,7 @@ jobs: - name: Restore and cache Nix store # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -191,14 +191,14 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -323,7 +323,7 @@ jobs: - name: Restore and cache Nix store # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -379,14 +379,14 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && 
!startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -679,7 +679,7 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache From e3b81f7b6488b5c483e8b13e3959fe591bf4cb92 Mon Sep 17 00:00:00 2001 From: Dzming Li Date: Mon, 10 Feb 2025 22:45:57 +0800 Subject: [PATCH 0666/1248] Fix in caddyfile guide If the reverse_proxy directive is omitted before 127.0.0.1:6167 in your Caddyfile, enabling the service with systemctl enable will result in an error. --- docs/deploying/generic.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index cc50544e..8ca2f387 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -216,7 +216,7 @@ your server name). 
```caddyfile your.server.name, your.server.name:8448 { # TCP reverse_proxy - 127.0.0.1:6167 + reverse_proxy 127.0.0.1:6167 # UNIX socket #reverse_proxy unix//run/conduwuit/conduwuit.sock } From 3ec43be95965488d720403264c4edc6170c67c02 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 04:30:17 +0000 Subject: [PATCH 0667/1248] join initial fetches in get_relations() skip recursion for max_depth=0 Signed-off-by: Jason Volk --- src/service/rooms/pdu_metadata/mod.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 4cb14ebc..ba289f9b 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,7 +2,7 @@ mod data; use std::sync::Arc; use conduwuit::{PduCount, Result}; -use futures::StreamExt; +use futures::{future::try_join, StreamExt}; use ruma::{api::Direction, EventId, RoomId, UserId}; use self::data::{Data, PdusIterItem}; @@ -54,10 +54,16 @@ impl Service { max_depth: u8, dir: Direction, ) -> Vec { - let room_id = self.services.short.get_or_create_shortroomid(room_id).await; + let room_id = self.services.short.get_shortroomid(room_id); - let target = match self.services.timeline.get_pdu_count(target).await { - | Ok(PduCount::Normal(c)) => c, + let target = self.services.timeline.get_pdu_count(target); + + let Ok((room_id, target)) = try_join(room_id, target).await else { + return Vec::new(); + }; + + let target = match target { + | PduCount::Normal(c) => c, // TODO: Support backfilled relations | _ => 0, // This will result in an empty iterator }; @@ -68,7 +74,11 @@ impl Service { .collect() .await; - let mut stack: Vec<_> = pdus.iter().map(|pdu| (pdu.clone(), 1)).collect(); + let mut stack: Vec<_> = pdus + .iter() + .filter(|_| max_depth > 0) + .map(|pdu| (pdu.clone(), 1)) + .collect(); 'limit: while let Some(stack_pdu) = stack.pop() { let target = match stack_pdu.0 .0 { From 
2d71d5590a81cd26f22181131d2e5a6439fe391d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 09:53:53 +0000 Subject: [PATCH 0668/1248] fix pdu add_relation() helper Signed-off-by: Jason Volk --- src/core/pdu/unsigned.rs | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index fa305d71..fe4d6a1c 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -46,23 +46,26 @@ pub fn add_age(&mut self) -> Result { } #[implement(Pdu)] -pub fn add_relation(&mut self, name: &str, pdu: &Pdu) -> Result { - let mut unsigned: BTreeMap = self +pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { + use serde_json::Map; + + let mut unsigned: Map = self .unsigned .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .map_or_else(|| Ok(Map::new()), |u| serde_json::from_str(u.get())) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - let relations: &mut JsonValue = unsigned.entry("m.relations".into()).or_default(); - if relations.as_object_mut().is_none() { - let mut object = serde_json::Map::::new(); - _ = relations.as_object_mut().insert(&mut object); - } + let pdu = pdu + .map(serde_json::to_value) + .transpose()? 
+ .unwrap_or_else(|| JsonValue::Object(Map::new())); - relations + unsigned + .entry("m.relations") + .or_insert(JsonValue::Object(Map::new())) .as_object_mut() - .expect("we just created it") - .insert(name.to_owned(), serde_json::to_value(pdu)?); + .unwrap() + .insert(name.to_owned(), pdu); self.unsigned = to_raw_value(&unsigned) .map(Some) From 565837ad753bbd6d346157c5b52a6a0275984e50 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 04:21:39 +0000 Subject: [PATCH 0669/1248] request auth media first Signed-off-by: Jason Volk --- src/service/media/remote.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index ca73c3ef..72f1184e 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -32,12 +32,12 @@ pub async fn fetch_remote_thumbnail( self.check_fetch_authorized(mxc)?; let result = self - .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) + .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) .await; if let Err(Error::Request(NotFound, ..)) = &result { return self - .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) + .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) .await; } @@ -55,12 +55,12 @@ pub async fn fetch_remote_content( self.check_fetch_authorized(mxc)?; let result = self - .fetch_content_unauthenticated(mxc, user, server, timeout_ms) + .fetch_content_authenticated(mxc, user, server, timeout_ms) .await; if let Err(Error::Request(NotFound, ..)) = &result { return self - .fetch_content_authenticated(mxc, user, server, timeout_ms) + .fetch_content_unauthenticated(mxc, user, server, timeout_ms) .await; } From 31ab84e9284ce7d5b6ec9fb212970b1a9e18fe7f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 10:23:17 +0000 Subject: [PATCH 0670/1248] simplify client event endpoint Signed-off-by: Jason Volk --- src/api/client/message.rs | 53 
++++++++++++++++++++++++++---------- src/api/client/room/event.rs | 40 +++++++++++---------------- src/core/pdu/unsigned.rs | 31 +++++++++++---------- 3 files changed, 71 insertions(+), 53 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 321d8013..bb4e72dd 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - at, is_equal_to, + at, utils::{ result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, @@ -30,7 +30,7 @@ use service::{ use crate::Ruma; /// list of safe and common non-state events to ignore if the user is ignored -const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 17] = &[ +const IGNORED_MESSAGE_TYPES: &[TimelineEventType] = &[ Audio, CallInvite, Emote, @@ -225,34 +225,50 @@ async fn get_member_event( .ok() } +#[inline] pub(crate) async fn ignored_filter( services: &Services, item: PdusIterItem, user_id: &UserId, ) -> Option { - let (_, pdu) = &item; + let (_, ref pdu) = item; + is_ignored_pdu(services, pdu, user_id) + .await + .eq(&false) + .then_some(item) +} + +#[inline] +pub(crate) async fn is_ignored_pdu( + services: &Services, + pdu: &PduEvent, + user_id: &UserId, +) -> bool { // exclude Synapse's dummy events from bloating up response bodies. clients // don't need to see this. 
if pdu.kind.to_cow_str() == "org.matrix.dummy_event" { - return None; + return true; } - if IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok() - && (services.users.user_is_ignored(&pdu.sender, user_id).await - || services - .server - .config - .forbidden_remote_server_names - .iter() - .any(is_equal_to!(pdu.sender().server_name()))) + let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); + + let ignored_server = services + .server + .config + .forbidden_remote_server_names + .contains(pdu.sender().server_name()); + + if ignored_type + && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) { - return None; + return true; } - Some(item) + false } +#[inline] pub(crate) async fn visibility_filter( services: &Services, item: PdusIterItem, @@ -268,7 +284,16 @@ pub(crate) async fn visibility_filter( .then_some(item) } +#[inline] pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { let (_, pdu) = &item; pdu.matches(filter).then_some(item) } + +#[cfg_attr(debug_assertions, conduwuit::ctor)] +fn _is_sorted() { + debug_assert!( + IGNORED_MESSAGE_TYPES.is_sorted(), + "IGNORED_MESSAGE_TYPES must be sorted by the developer" + ); +} diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index bc5ec0d7..f0ae64dd 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,52 +1,44 @@ use axum::extract::State; use conduwuit::{err, Err, Event, Result}; -use futures::{try_join, FutureExt, TryFutureExt}; +use futures::{future::try_join, FutureExt, TryFutureExt}; use ruma::api::client::room::get_room_event; -use crate::{client::ignored_filter, Ruma}; +use crate::{client::is_ignored_pdu, Ruma}; /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` /// /// Gets a single event. 
pub(crate) async fn get_room_event_route( - State(services): State, + State(ref services): State, ref body: Ruma, ) -> Result { + let event_id = &body.event_id; + let room_id = &body.room_id; + let event = services .rooms .timeline - .get_pdu(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))); - - let token = services - .rooms - .timeline - .get_pdu_count(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event not found.")))); + .get_pdu(event_id) + .map_err(|_| err!(Request(NotFound("Event {} not found.", event_id)))); let visible = services .rooms .state_accessor - .user_can_see_event(body.sender_user(), &body.room_id, &body.event_id) + .user_can_see_event(body.sender_user(), room_id, event_id) .map(Ok); - let (token, mut event, visible) = try_join!(token, event, visible)?; + let (mut event, visible) = try_join(event, visible).await?; - if !visible - || ignored_filter(&services, (token, event.clone()), body.sender_user()) - .await - .is_none() - { + if !visible || is_ignored_pdu(services, &event, body.sender_user()).await { return Err!(Request(Forbidden("You don't have permission to view this event."))); } - if event.event_id() != &body.event_id || event.room_id() != body.room_id { - return Err!(Request(NotFound("Event not found"))); - } + debug_assert!( + event.event_id() == event_id && event.room_id() == room_id, + "Fetched PDU must match requested" + ); event.add_age().ok(); - let event = event.to_room_event(); - - Ok(get_room_event::v3::Response { event }) + Ok(get_room_event::v3::Response { event: event.to_room_event() }) } diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index fe4d6a1c..8482a48a 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -9,11 +9,13 @@ use crate::{err, implement, is_true, Result}; #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { + use BTreeMap as Map; + let Some(unsigned) = &self.unsigned else { return Ok(()); }; - let mut 
unsigned: BTreeMap> = serde_json::from_str(unsigned.get()) + let mut unsigned: Map<&str, Box> = serde_json::from_str(unsigned.get()) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; unsigned.remove("transaction_id"); @@ -26,10 +28,13 @@ pub fn remove_transaction_id(&mut self) -> Result { #[implement(Pdu)] pub fn add_age(&mut self) -> Result { - let mut unsigned: BTreeMap> = self + use BTreeMap as Map; + + let mut unsigned: Map<&str, Box> = self .unsigned - .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; // deliberately allowing for the possibility of negative age @@ -37,10 +42,8 @@ pub fn add_age(&mut self) -> Result { let then: i128 = self.origin_server_ts.into(); let this_age = now.saturating_sub(then); - unsigned.insert("age".to_owned(), to_raw_value(&this_age).expect("age is valid")); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); + unsigned.insert("age", to_raw_value(&this_age)?); + self.unsigned = Some(to_raw_value(&unsigned)?); Ok(()) } @@ -51,8 +54,9 @@ pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { let mut unsigned: Map = self .unsigned - .as_ref() - .map_or_else(|| Ok(Map::new()), |u| serde_json::from_str(u.get())) + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; let pdu = pdu @@ -64,12 +68,9 @@ pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { .entry("m.relations") .or_insert(JsonValue::Object(Map::new())) .as_object_mut() - .unwrap() - .insert(name.to_owned(), pdu); + .map(|object| object.insert(name.to_owned(), pdu)); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); + self.unsigned = 
Some(to_raw_value(&unsigned)?); Ok(()) } From d8e94ee965d961fd7c8a042b0ed32d7a38190668 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 20:08:00 +0000 Subject: [PATCH 0671/1248] split spaces service Signed-off-by: Jason Volk --- src/api/client/space.rs | 183 ++++++++++- src/api/server/hierarchy.rs | 70 ++++- src/service/rooms/spaces/mod.rs | 311 +------------------ src/service/rooms/spaces/pagination_token.rs | 76 +++++ 4 files changed, 318 insertions(+), 322 deletions(-) create mode 100644 src/service/rooms/spaces/pagination_token.rs diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 409c9083..8f54de2a 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -1,9 +1,15 @@ -use std::str::FromStr; +use std::{collections::VecDeque, str::FromStr}; use axum::extract::State; +use conduwuit::{checked, pdu::ShortRoomId, utils::stream::IterStream}; +use futures::{StreamExt, TryFutureExt}; use ruma::{ api::client::{error::ErrorKind, space::get_hierarchy}, - UInt, + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, +}; +use service::{ + rooms::spaces::{get_parent_children_via, summary_to_chunk, SummaryAccessibility}, + Services, }; use crate::{service::rooms::spaces::PaginationToken, Error, Result, Ruma}; @@ -16,8 +22,6 @@ pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = body .limit .unwrap_or_else(|| UInt::from(10_u32)) @@ -43,16 +47,163 @@ pub(crate) async fn get_hierarchy_route( } } - services - .rooms - .spaces - .get_client_hierarchy( - sender_user, - &body.room_id, - limit.try_into().unwrap_or(10), - key.map_or(vec![], |token| token.short_room_ids), - max_depth.into(), - body.suggested_only, - ) - .await + get_client_hierarchy( + &services, + body.sender_user(), + &body.room_id, + limit.try_into().unwrap_or(10), + key.map_or(vec![], |token| token.short_room_ids), + max_depth.into(), + 
body.suggested_only, + ) + .await +} + +async fn get_client_hierarchy( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + limit: usize, + short_room_ids: Vec, + max_depth: u64, + suggested_only: bool, +) -> Result { + let mut parents = VecDeque::new(); + + // Don't start populating the results if we have to start at a specific room. + let mut populate_results = short_room_ids.is_empty(); + + let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { + | Some(server_name) => vec![server_name.into()], + | None => vec![], + })]]; + + let mut results = Vec::with_capacity(limit); + + while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { + if results.len() >= limit { + break; + } + + match ( + services + .rooms + .spaces + .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) + .await?, + current_room == room_id, + ) { + | (Some(SummaryAccessibility::Accessible(summary)), _) => { + let mut children: Vec<(OwnedRoomId, Vec)> = + get_parent_children_via(&summary, suggested_only) + .into_iter() + .filter(|(room, _)| parents.iter().all(|parent| parent != room)) + .rev() + .collect(); + + if populate_results { + results.push(summary_to_chunk(*summary.clone())); + } else { + children = children + .iter() + .rev() + .stream() + .skip_while(|(room, _)| { + services + .rooms + .short + .get_shortroomid(room) + .map_ok(|short| Some(&short) != short_room_ids.get(parents.len())) + .unwrap_or_else(|_| false) + }) + .map(Clone::clone) + .collect::)>>() + .await + .into_iter() + .rev() + .collect(); + + if children.is_empty() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room IDs in token were not found.", + )); + } + + // We have reached the room after where we last left off + let parents_len = parents.len(); + if checked!(parents_len + 1)? 
== short_room_ids.len() { + populate_results = true; + } + } + + let parents_len: u64 = parents.len().try_into()?; + if !children.is_empty() && parents_len < max_depth { + parents.push_back(current_room.clone()); + stack.push(children); + } + // Root room in the space hierarchy, we return an error + // if this one fails. + }, + | (Some(SummaryAccessibility::Inaccessible), true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room is inaccessible", + )); + }, + | (None, true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room was not found", + )); + }, + // Just ignore other unavailable rooms + | (None | Some(SummaryAccessibility::Inaccessible), false) => (), + } + } + + Ok(get_hierarchy::v1::Response { + next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { + parents.pop_front(); + parents.push_back(room); + + let next_short_room_ids: Vec<_> = parents + .iter() + .stream() + .filter_map(|room_id| async move { + services.rooms.short.get_shortroomid(room_id).await.ok() + }) + .collect() + .await; + + (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( + || { + PaginationToken { + short_room_ids: next_short_room_ids, + limit: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + suggested_only, + } + .to_string() + }, + ) + } else { + None + }, + rooms: results, + }) +} + +fn next_room_to_traverse( + stack: &mut Vec)>>, + parents: &mut VecDeque, +) -> Option<(OwnedRoomId, Vec)> { + while stack.last().is_some_and(Vec::is_empty) { + stack.pop(); + parents.pop_back(); + } + + stack.last_mut().and_then(Vec::pop) } diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index a10df6ac..bcf2f7bc 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,7 +1,12 @@ use 
axum::extract::State; -use ruma::api::{client::error::ErrorKind, federation::space::get_hierarchy}; +use conduwuit::{Err, Result}; +use ruma::{api::federation::space::get_hierarchy, RoomId, ServerName}; +use service::{ + rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}, + Services, +}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/hierarchy/{roomId}` /// @@ -11,13 +16,58 @@ pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - if services.rooms.metadata.exists(&body.room_id).await { - services - .rooms - .spaces - .get_federation_hierarchy(&body.room_id, body.origin(), body.suggested_only) - .await - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Room does not exist.")) + if !services.rooms.metadata.exists(&body.room_id).await { + return Err!(Request(NotFound("Room does not exist."))); + } + + get_hierarchy(&services, &body.room_id, body.origin(), body.suggested_only).await +} + +/// Gets the response for the space hierarchy over federation request +/// +/// Errors if the room does not exist, so a check if the room exists should +/// be done +async fn get_hierarchy( + services: &Services, + room_id: &RoomId, + server_name: &ServerName, + suggested_only: bool, +) -> Result { + match services + .rooms + .spaces + .get_summary_and_children_local(&room_id.to_owned(), Identifier::ServerName(server_name)) + .await? + { + | Some(SummaryAccessibility::Accessible(room)) => { + let mut children = Vec::new(); + let mut inaccessible_children = Vec::new(); + + for (child, _via) in get_parent_children_via(&room, suggested_only) { + match services + .rooms + .spaces + .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) + .await? 
+ { + | Some(SummaryAccessibility::Accessible(summary)) => { + children.push((*summary).into()); + }, + | Some(SummaryAccessibility::Inaccessible) => { + inaccessible_children.push(child); + }, + | None => (), + } + } + + Ok(get_hierarchy::v1::Response { + room: *room, + children, + inaccessible_children, + }) + }, + | Some(SummaryAccessibility::Inaccessible) => + Err!(Request(NotFound("The requested room is inaccessible"))), + | None => Err!(Request(NotFound("The requested room was not found"))), } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 11794752..1e2b0a9f 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,22 +1,14 @@ +mod pagination_token; mod tests; -use std::{ - collections::{HashMap, VecDeque}, - fmt::{Display, Formatter}, - str::FromStr, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; -use conduwuit::{ - checked, debug_info, err, - utils::{math::usize_from_f64, IterStream}, - Error, Result, -}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{debug_info, err, utils::math::usize_from_f64, Error, Result}; +use futures::StreamExt; use lru_cache::LruCache; use ruma::{ api::{ - client::{self, error::ErrorKind, space::SpaceHierarchyRoomsChunk}, + client::{error::ErrorKind, space::SpaceHierarchyRoomsChunk}, federation::{ self, space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary}, @@ -29,11 +21,12 @@ use ruma::{ }, serde::Raw, space::SpaceRoomJoinRule, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt, UserId, + OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; use tokio::sync::Mutex; -use crate::{rooms, rooms::short::ShortRoomId, sending, Dep}; +pub use self::pagination_token::PaginationToken; +use crate::{rooms, sending, Dep}; pub struct CachedSpaceHierarchySummary { summary: SpaceHierarchyParentSummary, @@ -44,81 +37,10 @@ pub enum SummaryAccessibility { Inaccessible, } -// TODO: perhaps use some better form of token rather than 
just room count -#[derive(Debug, Eq, PartialEq)] -pub struct PaginationToken { - /// Path down the hierarchy of the room to start the response at, - /// excluding the root space. - pub short_room_ids: Vec, - pub limit: UInt, - pub max_depth: UInt, - pub suggested_only: bool, -} - -impl FromStr for PaginationToken { - type Err = Error; - - fn from_str(value: &str) -> Result { - let mut values = value.split('_'); - - let mut pag_tok = || { - let rooms = values - .next()? - .split(',') - .filter_map(|room_s| u64::from_str(room_s).ok()) - .collect(); - - Some(Self { - short_room_ids: rooms, - limit: UInt::from_str(values.next()?).ok()?, - max_depth: UInt::from_str(values.next()?).ok()?, - suggested_only: { - let slice = values.next()?; - - if values.next().is_none() { - if slice == "true" { - true - } else if slice == "false" { - false - } else { - None? - } - } else { - None? - } - }, - }) - }; - - if let Some(token) = pag_tok() { - Ok(token) - } else { - Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) - } - } -} - -impl Display for PaginationToken { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}_{}_{}_{}", - self.short_room_ids - .iter() - .map(ToString::to_string) - .collect::>() - .join(","), - self.limit, - self.max_depth, - self.suggested_only - ) - } -} - /// Identifier used to check if rooms are accessible /// /// None is used if you want to return the room, no matter if accessible or not -enum Identifier<'a> { +pub enum Identifier<'a> { UserId(&'a UserId), ServerName(&'a ServerName), } @@ -164,60 +86,8 @@ impl crate::Service for Service { } impl Service { - /// Gets the response for the space hierarchy over federation request - /// - /// Errors if the room does not exist, so a check if the room exists should - /// be done - pub async fn get_federation_hierarchy( - &self, - room_id: &RoomId, - server_name: &ServerName, - suggested_only: bool, - ) -> Result { - match self - .get_summary_and_children_local( - 
&room_id.to_owned(), - Identifier::ServerName(server_name), - ) - .await? - { - | Some(SummaryAccessibility::Accessible(room)) => { - let mut children = Vec::new(); - let mut inaccessible_children = Vec::new(); - - for (child, _via) in get_parent_children_via(&room, suggested_only) { - match self - .get_summary_and_children_local( - &child, - Identifier::ServerName(server_name), - ) - .await? - { - | Some(SummaryAccessibility::Accessible(summary)) => { - children.push((*summary).into()); - }, - | Some(SummaryAccessibility::Inaccessible) => { - inaccessible_children.push(child); - }, - | None => (), - } - } - - Ok(federation::space::get_hierarchy::v1::Response { - room: *room, - children, - inaccessible_children, - }) - }, - | Some(SummaryAccessibility::Inaccessible) => - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room is inaccessible")), - | None => - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room was not found")), - } - } - /// Gets the summary of a space using solely local information - async fn get_summary_and_children_local( + pub async fn get_summary_and_children_local( &self, current_room: &OwnedRoomId, identifier: Identifier<'_>, @@ -366,7 +236,7 @@ impl Service { /// Gets the summary of a space using either local or remote (federation) /// sources - async fn get_summary_and_children_client( + pub async fn get_summary_and_children_client( &self, current_room: &OwnedRoomId, suggested_only: bool, @@ -470,147 +340,6 @@ impl Service { }) } - pub async fn get_client_hierarchy( - &self, - sender_user: &UserId, - room_id: &RoomId, - limit: usize, - short_room_ids: Vec, - max_depth: u64, - suggested_only: bool, - ) -> Result { - let mut parents = VecDeque::new(); - - // Don't start populating the results if we have to start at a specific room. 
- let mut populate_results = short_room_ids.is_empty(); - - let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { - | Some(server_name) => vec![server_name.into()], - | None => vec![], - })]]; - - let mut results = Vec::with_capacity(limit); - - while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } - { - if results.len() >= limit { - break; - } - - match ( - self.get_summary_and_children_client( - ¤t_room, - suggested_only, - sender_user, - &via, - ) - .await?, - current_room == room_id, - ) { - | (Some(SummaryAccessibility::Accessible(summary)), _) => { - let mut children: Vec<(OwnedRoomId, Vec)> = - get_parent_children_via(&summary, suggested_only) - .into_iter() - .filter(|(room, _)| parents.iter().all(|parent| parent != room)) - .rev() - .collect(); - - if populate_results { - results.push(summary_to_chunk(*summary.clone())); - } else { - children = children - .iter() - .rev() - .stream() - .skip_while(|(room, _)| { - self.services - .short - .get_shortroomid(room) - .map_ok(|short| { - Some(&short) != short_room_ids.get(parents.len()) - }) - .unwrap_or_else(|_| false) - }) - .map(Clone::clone) - .collect::)>>() - .await - .into_iter() - .rev() - .collect(); - - if children.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room IDs in token were not found.", - )); - } - - // We have reached the room after where we last left off - let parents_len = parents.len(); - if checked!(parents_len + 1)? == short_room_ids.len() { - populate_results = true; - } - } - - let parents_len: u64 = parents.len().try_into()?; - if !children.is_empty() && parents_len < max_depth { - parents.push_back(current_room.clone()); - stack.push(children); - } - // Root room in the space hierarchy, we return an error - // if this one fails. 
- }, - | (Some(SummaryAccessibility::Inaccessible), true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room is inaccessible", - )); - }, - | (None, true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room was not found", - )); - }, - // Just ignore other unavailable rooms - | (None | Some(SummaryAccessibility::Inaccessible), false) => (), - } - } - - Ok(client::space::get_hierarchy::v1::Response { - next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { - parents.pop_front(); - parents.push_back(room); - - let next_short_room_ids: Vec<_> = parents - .iter() - .stream() - .filter_map(|room_id| async move { - self.services.short.get_shortroomid(room_id).await.ok() - }) - .collect() - .await; - - (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( - || { - PaginationToken { - short_room_ids: next_short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string() - }, - ) - } else { - None - }, - rooms: results, - }) - } - /// Simply returns the stripped m.space.child events of a room async fn get_stripped_space_child_events( &self, @@ -757,7 +486,8 @@ impl From for SpaceHierarchyRoomsChunk { /// Here because cannot implement `From` across ruma-federation-api and /// ruma-client-api types -fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { +#[must_use] +pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { let SpaceHierarchyParentSummary { canonical_alias, name, @@ -790,7 +520,8 @@ fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRooms /// Returns the children of a SpaceHierarchyParentSummary, making use of the /// children_state field -fn get_parent_children_via( 
+#[must_use] +pub fn get_parent_children_via( parent: &SpaceHierarchyParentSummary, suggested_only: bool, ) -> Vec<(OwnedRoomId, Vec)> { @@ -808,15 +539,3 @@ fn get_parent_children_via( }) .collect() } - -fn next_room_to_traverse( - stack: &mut Vec)>>, - parents: &mut VecDeque, -) -> Option<(OwnedRoomId, Vec)> { - while stack.last().is_some_and(Vec::is_empty) { - stack.pop(); - parents.pop_back(); - } - - stack.last_mut().and_then(Vec::pop) -} diff --git a/src/service/rooms/spaces/pagination_token.rs b/src/service/rooms/spaces/pagination_token.rs new file mode 100644 index 00000000..8f019e8d --- /dev/null +++ b/src/service/rooms/spaces/pagination_token.rs @@ -0,0 +1,76 @@ +use std::{ + fmt::{Display, Formatter}, + str::FromStr, +}; + +use conduwuit::{Error, Result}; +use ruma::{api::client::error::ErrorKind, UInt}; + +use crate::rooms::short::ShortRoomId; + +// TODO: perhaps use some better form of token rather than just room count +#[derive(Debug, Eq, PartialEq)] +pub struct PaginationToken { + /// Path down the hierarchy of the room to start the response at, + /// excluding the root space. + pub short_room_ids: Vec, + pub limit: UInt, + pub max_depth: UInt, + pub suggested_only: bool, +} + +impl FromStr for PaginationToken { + type Err = Error; + + fn from_str(value: &str) -> Result { + let mut values = value.split('_'); + let mut pag_tok = || { + let short_room_ids = values + .next()? + .split(',') + .filter_map(|room_s| u64::from_str(room_s).ok()) + .collect(); + + let limit = UInt::from_str(values.next()?).ok()?; + let max_depth = UInt::from_str(values.next()?).ok()?; + let slice = values.next()?; + let suggested_only = if values.next().is_none() { + if slice == "true" { + true + } else if slice == "false" { + false + } else { + None? + } + } else { + None? 
+ }; + + Some(Self { + short_room_ids, + limit, + max_depth, + suggested_only, + }) + }; + + if let Some(token) = pag_tok() { + Ok(token) + } else { + Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) + } + } +} + +impl Display for PaginationToken { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let short_room_ids = self + .short_room_ids + .iter() + .map(ToString::to_string) + .collect::>() + .join(","); + + write!(f, "{short_room_ids}_{}_{}_{}", self.limit, self.max_depth, self.suggested_only) + } +} From 5428526120cf49efda7b129d48b5a35ea1d87dde Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 23:03:24 +0000 Subject: [PATCH 0672/1248] add tail-efficient logic extension Signed-off-by: Jason Volk --- src/core/utils/future/bool_ext.rs | 82 +++++++++++++++++++++++++++++++ src/core/utils/future/mod.rs | 2 + 2 files changed, 84 insertions(+) create mode 100644 src/core/utils/future/bool_ext.rs diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs new file mode 100644 index 00000000..6cb2f1fe --- /dev/null +++ b/src/core/utils/future/bool_ext.rs @@ -0,0 +1,82 @@ +//! 
Extended external extensions to futures::FutureExt + +use std::marker::Unpin; + +use futures::{ + future::{select_ok, try_join, try_join_all, try_select}, + Future, FutureExt, +}; + +pub trait BoolExt +where + Self: Future + Send, +{ + fn and(self, b: B) -> impl Future + Send + where + B: Future + Send, + Self: Sized; + + fn or(self, b: B) -> impl Future + Send + where + B: Future + Send + Unpin, + Self: Sized + Unpin; +} + +pub async fn and(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + try_join_all(args).map(|result| result.is_ok()) +} + +pub async fn or(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send + Unpin, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + select_ok(args).map(|result| result.is_ok()) +} + +impl BoolExt for Fut +where + Fut: Future + Send, +{ + #[inline] + fn and(self, b: B) -> impl Future + Send + where + B: Future + Send, + Self: Sized, + { + type Result = crate::Result<(), ()>; + + let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); + + let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); + + try_join(a, b).map(|result| result.is_ok()) + } + + #[inline] + fn or(self, b: B) -> impl Future + Send + where + B: Future + Send + Unpin, + Self: Sized + Unpin, + { + type Result = crate::Result<(), ()>; + + let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); + + let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); + + try_select(a, b).map(|result| result.is_ok()) + } +} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 153dcfe1..2198a84f 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -1,7 +1,9 @@ +mod bool_ext; mod ext_ext; mod option_ext; mod try_ext_ext; +pub use bool_ext::{and, or, BoolExt}; 
pub use ext_ext::ExtExt; pub use option_ext::OptionExt; pub use try_ext_ext::TryExtExt; From 59c073d0d86ca8a6b9606037e2278890b5b84821 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 23:58:45 +0000 Subject: [PATCH 0673/1248] add unconstrained feature to service worker Signed-off-by: Jason Volk --- src/service/manager.rs | 9 +++++++-- src/service/sending/mod.rs | 13 +++++++++++-- src/service/service.rs | 5 +++++ 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/service/manager.rs b/src/service/manager.rs index ea33d285..e0d885c2 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,7 +1,7 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; use conduwuit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; -use futures::FutureExt; +use futures::{FutureExt, TryFutureExt}; use tokio::{ sync::{Mutex, MutexGuard}, task::{JoinHandle, JoinSet}, @@ -183,9 +183,14 @@ async fn worker(service: Arc) -> WorkerResult { let service_ = Arc::clone(&service); let result = AssertUnwindSafe(service_.worker()) .catch_unwind() - .await .map_err(Error::from_panic); + let result = if service.unconstrained() { + tokio::task::unconstrained(result).await + } else { + result.await + }; + // flattens JoinError for panic into worker's Error (service, result.unwrap_or_else(Err)) } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b146ad49..86b219f7 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -22,7 +22,7 @@ use ruma::{ RoomId, ServerName, UserId, }; use smallvec::SmallVec; -use tokio::task::JoinSet; +use tokio::{task, task::JoinSet}; use self::data::Data; pub use self::{ @@ -111,8 +111,15 @@ impl crate::Service for Service { .enumerate() .fold(JoinSet::new(), |mut joinset, (id, _)| { let self_ = self.clone(); + let worker = self_.sender(id); + let worker = if self.unconstrained() { + task::unconstrained(worker).boxed() + } else { + worker.boxed() + 
}; + let runtime = self.server.runtime(); - let _abort = joinset.spawn_on(self_.sender(id).boxed(), runtime); + let _abort = joinset.spawn_on(worker, runtime); joinset }); @@ -139,6 +146,8 @@ impl crate::Service for Service { } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + + fn unconstrained(&self) -> bool { true } } impl Service { diff --git a/src/service/service.rs b/src/service/service.rs index 7adb189e..cad01437 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -39,6 +39,11 @@ pub(crate) trait Service: Any + Send + Sync { /// Return the name of the service. /// i.e. `crate::service::make_name(std::module_path!())` fn name(&self) -> &str; + + /// Return true if the service worker opts out of the tokio cooperative + /// budgeting. This can reduce tail latency at the risk of event loop + /// starvation. + fn unconstrained(&self) -> bool { false } } /// Args are passed to `Service::build` when a service is constructed. This From e123a5b660a21ae444e154ac60812468c878ec58 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 01:16:46 +0000 Subject: [PATCH 0674/1248] add state accessories for iterating state_keys of a type Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/state.rs | 124 ++++++++++++++++++++-- 1 file changed, 114 insertions(+), 10 deletions(-) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index c47a5693..3cf168c1 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -9,7 +9,7 @@ use conduwuit::{ PduEvent, Result, }; use database::Deserialized; -use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; use ruma::{ events::{ room::member::{MembershipState, RoomMemberEventContent}, @@ -69,7 +69,6 @@ where } #[implement(super::Service)] -#[tracing::instrument(skip(self), level = 
"debug")] pub async fn state_contains( &self, shortstatehash: ShortStateHash, @@ -90,7 +89,18 @@ pub async fn state_contains( } #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains_type( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, +) -> bool { + let state_keys = self.state_keys(shortstatehash, event_type); + + pin_mut!(state_keys); + state_keys.next().await.is_some() +} + +#[implement(super::Service)] pub async fn state_contains_shortstatekey( &self, shortstatehash: ShortStateHash, @@ -125,7 +135,6 @@ pub async fn state_get( /// Returns a single EventId from `room_id` with key (`event_type`, /// `state_key`). #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub async fn state_get_id( &self, shortstatehash: ShortStateHash, @@ -149,7 +158,6 @@ where /// Returns a single EventId from `room_id` with key (`event_type`, /// `state_key`). #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub async fn state_get_shortid( &self, shortstatehash: ShortStateHash, @@ -177,6 +185,103 @@ pub async fn state_get_shortid( .await? } +/// Iterates the state_keys for an event_type in the state; current state +/// event_id included. 
+#[implement(super::Service)] +pub fn state_keys_with_ids<'a, Id>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, + ::Owned: Borrow, +{ + let state_keys_with_short_ids = self + .state_keys_with_shortids(shortstatehash, event_type) + .unzip() + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .shared(); + + let state_keys = state_keys_with_short_ids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = state_keys_with_short_ids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(state_keys) + .ready_filter_map(|(eid, sk)| eid.map(move |eid| (sk, eid)).ok()) +} + +/// Iterates the state_keys for an event_type in the state; current state +/// event_id included. +#[implement(super::Service)] +pub fn state_keys_with_shortids<'a>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .unzip() + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .shared(); + + let shortstatekeys = short_ids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = short_ids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(shorteventids) + .ready_filter_map(|(res, id)| res.map(|res| (res, id)).ok()) + .ready_filter_map(move |((event_type_, state_key), event_id)| { + event_type_.eq(event_type).then_some((state_key, event_id)) + }) +} + +/// Iterates the state_keys for an event_type in the state +#[implement(super::Service)] +pub fn state_keys<'a>( + &'a self, + 
shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .map(at!(0)); + + self.services + .short + .multi_get_statekey_from_short(short_ids) + .ready_filter_map(Result::ok) + .ready_filter_map(move |(event_type_, state_key)| { + event_type_.eq(event_type).then_some(state_key) + }) +} + /// Returns the state events removed between the interval (present in .0 but /// not in .1) #[implement(super::Service)] @@ -191,11 +296,10 @@ pub fn state_removed( /// Returns the state events added between the interval (present in .1 but /// not in .0) #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn state_added<'a>( - &'a self, +pub fn state_added( + &self, shortstatehash: pair_of!(ShortStateHash), -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + '_ { let a = self.load_full_state(shortstatehash.0); let b = self.load_full_state(shortstatehash.1); try_join(a, b) @@ -239,7 +343,6 @@ pub fn state_full_pdus( /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
#[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub fn state_full_ids<'a, Id>( &'a self, shortstatehash: ShortStateHash, @@ -293,6 +396,7 @@ pub fn state_full_shortids( } #[implement(super::Service)] +#[tracing::instrument(name = "load", level = "debug", skip(self))] async fn load_full_state(&self, shortstatehash: ShortStateHash) -> Result> { self.services .state_compressor From ecc9099127cc6779cd74723ae6169f7a22276ab7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 23:18:02 +0000 Subject: [PATCH 0675/1248] add conf item to re-disable atomic flush Signed-off-by: Jason Volk --- conduwuit-example.toml | 7 +++++++ src/core/config/mod.rs | 7 +++++++ src/database/engine/db_opts.rs | 4 ++-- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index f9da856d..9b6f6ce0 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -925,6 +925,13 @@ # #rocksdb_checksums = true +# Enables the "atomic flush" mode in rocksdb. This option is not intended +# for users. It may be removed or ignored in future versions. Atomic flush +# may be enabled by the paranoid to possibly improve database integrity at +# the cost of performance. +# +#rocksdb_atomic_flush = false + # Database repair mode (for RocksDB SST corruption). # # Use this option when the server reports corruption while running or diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 9514f7a0..e66532ee 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1089,6 +1089,13 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_checksums: bool, + /// Enables the "atomic flush" mode in rocksdb. This option is not intended + /// for users. It may be removed or ignored in future versions. Atomic flush + /// may be enabled by the paranoid to possibly improve database integrity at + /// the cost of performance. 
+ #[serde(default)] + pub rocksdb_atomic_flush: bool, + /// Database repair mode (for RocksDB SST corruption). /// /// Use this option when the server reports corruption while running or diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 01847257..6abeb4b0 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -29,9 +29,9 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul opts.set_max_file_opening_threads(0); // IO - opts.set_atomic_flush(true); opts.set_manual_wal_flush(true); - opts.set_enable_pipelined_write(false); + opts.set_atomic_flush(config.rocksdb_atomic_flush); + opts.set_enable_pipelined_write(!config.rocksdb_atomic_flush); if config.rocksdb_direct_io { opts.set_use_direct_reads(true); opts.set_use_direct_io_for_flush_and_compaction(true); From b872f8e593afaee437331edd429a2d801f069aab Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 00:16:37 +0000 Subject: [PATCH 0676/1248] optimize with SmallString; consolidate related re-exports Signed-off-by: Jason Volk --- Cargo.lock | 16 ++++++++++++---- Cargo.toml | 4 ++++ src/api/client/room/create.rs | 8 ++++---- src/api/client/room/upgrade.rs | 12 ++++++------ src/api/client/state.rs | 2 +- src/api/client/sync/v3.rs | 2 +- src/api/client/sync/v4.rs | 17 +++++++---------- src/api/client/sync/v5.rs | 18 +++++++----------- src/core/Cargo.toml | 2 ++ src/core/mod.rs | 5 ++++- src/core/pdu/builder.rs | 9 ++++++--- src/core/pdu/mod.rs | 8 +++++--- src/core/pdu/state_key.rs | 8 ++++++++ src/database/Cargo.toml | 2 -- src/database/de.rs | 5 +++-- src/database/keyval.rs | 3 +-- src/database/map/contains.rs | 2 +- src/database/map/insert.rs | 3 +-- src/database/map/qry.rs | 3 +-- src/database/map/remove.rs | 3 +-- src/database/pool.rs | 2 +- src/database/tests.rs | 6 ++++-- src/service/Cargo.toml | 2 -- src/service/migrations.rs | 4 +++- src/service/resolver/cache.rs | 2 +- src/service/resolver/fed.rs | 3 +-- 
src/service/resolver/mod.rs | 3 +-- .../rooms/event_handler/handle_outlier_pdu.rs | 10 ++++------ .../rooms/event_handler/resolve_state.rs | 1 + .../rooms/event_handler/state_at_incoming.rs | 1 + src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/search/mod.rs | 2 +- src/service/rooms/short/mod.rs | 9 ++++----- src/service/rooms/state_accessor/room_state.rs | 4 ++-- src/service/rooms/state_accessor/state.rs | 12 ++++++------ src/service/rooms/state_accessor/user_can.rs | 2 +- src/service/rooms/state_compressor/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 8 ++++---- src/service/sending/mod.rs | 2 +- 39 files changed, 113 insertions(+), 96 deletions(-) create mode 100644 src/core/pdu/state_key.rs diff --git a/Cargo.lock b/Cargo.lock index caef5859..5981a2a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -822,6 +822,8 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", + "smallstr", + "smallvec", "thiserror 2.0.11", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", @@ -839,7 +841,6 @@ dependencies = [ name = "conduwuit_database" version = "0.5.0" dependencies = [ - "arrayvec", "async-channel", "conduwuit_core", "const-str", @@ -850,7 +851,6 @@ dependencies = [ "rust-rocksdb-uwu", "serde", "serde_json", - "smallvec", "tokio", "tracing", ] @@ -902,7 +902,6 @@ dependencies = [ name = "conduwuit_service" version = "0.5.0" dependencies = [ - "arrayvec", "async-trait", "base64 0.22.1", "blurhash", @@ -929,7 +928,6 @@ dependencies = [ "serde_json", "serde_yaml", "sha2", - "smallvec", "termimad", "tokio", "tracing", @@ -4275,6 +4273,16 @@ dependencies = [ "autocfg", ] +[[package]] +name = "smallstr" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63b1aefdf380735ff8ded0b15f31aab05daf1f70216c01c02a12926badd1df9d" +dependencies = [ + "serde", + "smallvec", +] + [[package]] name = "smallvec" version = "1.13.2" diff --git a/Cargo.toml b/Cargo.toml index 38654be3..b93877bd 100644 --- a/Cargo.toml +++ b/Cargo.toml 
@@ -40,6 +40,10 @@ features = [ "write", ] +[workspace.dependencies.smallstr] +version = "0.3" +features = ["ffi", "std", "union"] + [workspace.dependencies.const-str] version = "0.5.7" diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index a401b63d..e362b3b3 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, + debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, StateKey, }; use futures::FutureExt; use ruma::{ @@ -198,7 +198,7 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomCreate, content: to_raw_value(&create_content) .expect("create event content serialization"), - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -267,7 +267,7 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("serialized power_levels event content"), - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -371,7 +371,7 @@ pub(crate) async fn create_room_route( } // Implicit state key defaults to "" - pdu_builder.state_key.get_or_insert_with(String::new); + pdu_builder.state_key.get_or_insert_with(StateKey::new); // Silently skip encryption events if they are not allowed if pdu_builder.event_type == TimelineEventType::RoomEncryption diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 2f9706f4..a624f95f 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,7 +1,7 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{err, info, pdu::PduBuilder, Error, Result}; +use conduwuit::{err, info, pdu::PduBuilder, Error, Result, StateKey}; use 
futures::StreamExt; use ruma::{ api::client::{error::ErrorKind, room::upgrade_room}, @@ -77,7 +77,7 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomTombstoneEventContent { + PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), }), @@ -159,7 +159,7 @@ pub(crate) async fn upgrade_room_route( content: to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(String::new()), + state_key: Some(StateKey::new()), redacts: None, timestamp: None, }, @@ -188,7 +188,7 @@ pub(crate) async fn upgrade_room_route( }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_user.to_string()), + state_key: Some(sender_user.as_str().into()), redacts: None, timestamp: None, }, @@ -217,7 +217,7 @@ pub(crate) async fn upgrade_room_route( PduBuilder { event_type: event_type.to_string().into(), content: event_content, - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -272,7 +272,7 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { + PduBuilder::state(StateKey::new(), &RoomPowerLevelsEventContent { events_default: new_level, invite: new_level, ..power_levels_event_content diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 8555f88b..f73ffa46 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -172,7 +172,7 @@ async fn send_state_event_for_key_helper( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get())?, - state_key: Some(String::from(state_key)), + state_key: Some(state_key.into()), timestamp, ..Default::default() }, diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs 
index 1d1a91ba..f9dcd5ec 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -441,7 +441,7 @@ async fn handle_left_room( kind: RoomMember, content: serde_json::from_str(r#"{"membership":"leave"}"#) .expect("this is valid JSON"), - state_key: Some(sender_user.to_string()), + state_key: Some(sender_user.as_str().into()), unsigned: None, // The following keys are dropped on conversion room_id: room_id.clone(), diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 66793ba1..4e474ef3 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -29,7 +29,7 @@ use ruma::{ TimelineEventType::*, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, + uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::rooms::read_receipt::pack_receipts; @@ -258,12 +258,9 @@ pub(crate) async fn sync_events_v4_route( continue; }; if pdu.kind == RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - OwnedUserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - + if let Some(Ok(user_id)) = + pdu.state_key.as_deref().map(UserId::parse) + { if user_id == *sender_user { continue; } @@ -275,18 +272,18 @@ pub(crate) async fn sync_events_v4_route( if !share_encrypted_room( &services, sender_user, - &user_id, + user_id, Some(room_id), ) .await { - device_list_changes.insert(user_id); + device_list_changes.insert(user_id.to_owned()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we // are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.to_owned()); }, | _ => {}, } diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index e7b5fe74..f8ee1047 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -25,7 +25,7 @@ use ruma::{ }, serde::Raw, state_res::TypeStateKey, - uint, 
DeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, + uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::{rooms::read_receipt::pack_receipts, PduCount}; @@ -765,13 +765,9 @@ async fn collect_e2ee<'a>( continue; }; if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - OwnedUserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == *sender_user { + if let Some(Ok(user_id)) = pdu.state_key.as_deref().map(UserId::parse) + { + if user_id == sender_user { continue; } @@ -782,18 +778,18 @@ async fn collect_e2ee<'a>( if !share_encrypted_room( &services, sender_user, - &user_id, + user_id, Some(room_id), ) .await { - device_list_changes.insert(user_id); + device_list_changes.insert(user_id.to_owned()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we // are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.to_owned()); }, | _ => {}, } diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index ef2df4ff..d4b0c83b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -92,6 +92,8 @@ serde_json.workspace = true serde_regex.workspace = true serde_yaml.workspace = true serde.workspace = true +smallvec.workspace = true +smallstr.workspace = true thiserror.workspace = true tikv-jemallocator.optional = true tikv-jemallocator.workspace = true diff --git a/src/core/mod.rs b/src/core/mod.rs index 1416ed9e..ee128628 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -10,14 +10,17 @@ pub mod pdu; pub mod server; pub mod utils; +pub use ::arrayvec; pub use ::http; pub use ::ruma; +pub use ::smallstr; +pub use ::smallvec; pub use ::toml; pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId}; 
+pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index b25d4e9e..0efee128 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -7,6 +7,8 @@ use ruma::{ use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use super::StateKey; + /// Build the start of a PDU in order to add it to the Database. #[derive(Debug, Deserialize)] pub struct Builder { @@ -17,7 +19,7 @@ pub struct Builder { pub unsigned: Option, - pub state_key: Option, + pub state_key: Option, pub redacts: Option, @@ -29,15 +31,16 @@ pub struct Builder { type Unsigned = BTreeMap; impl Builder { - pub fn state(state_key: String, content: &T) -> Self + pub fn state(state_key: S, content: &T) -> Self where T: EventContent, + S: Into, { Self { event_type: content.event_type().into(), content: to_raw_value(content) .expect("Builder failed to serialize state event content to RawValue"), - state_key: Some(state_key), + state_key: Some(state_key.into()), ..Self::default() } } diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 1a8f6a70..9cb42239 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -8,6 +8,7 @@ mod id; mod raw_id; mod redact; mod relation; +mod state_key; mod strip; #[cfg(test)] mod tests; @@ -17,7 +18,7 @@ use std::cmp::Ordering; use ruma::{ events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, - OwnedRoomId, OwnedUserId, UInt, + OwnedRoomId, OwnedServerName, OwnedUserId, UInt, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; @@ -29,6 +30,7 @@ pub use self::{ event_id::*, id::*, raw_id::*, + state_key::{ShortStateKey, StateKey}, Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, }; use crate::Result; @@ -40,13 +42,13 @@ pub struct Pdu { pub room_id: 
OwnedRoomId, pub sender: OwnedUserId, #[serde(skip_serializing_if = "Option::is_none")] - pub origin: Option, + pub origin: Option, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: TimelineEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] - pub state_key: Option, + pub state_key: Option, pub prev_events: Vec, pub depth: UInt, pub auth_events: Vec, diff --git a/src/core/pdu/state_key.rs b/src/core/pdu/state_key.rs new file mode 100644 index 00000000..4af4fcf7 --- /dev/null +++ b/src/core/pdu/state_key.rs @@ -0,0 +1,8 @@ +use smallstr::SmallString; + +use super::ShortId; + +pub type StateKey = SmallString<[u8; INLINE_SIZE]>; +pub type ShortStateKey = ShortId; + +const INLINE_SIZE: usize = 48; diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 557c9a3e..067c6f5f 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -34,7 +34,6 @@ zstd_compression = [ ] [dependencies] -arrayvec.workspace = true async-channel.workspace = true conduwuit-core.workspace = true const-str.workspace = true @@ -45,7 +44,6 @@ minicbor-serde.workspace = true rust-rocksdb.workspace = true serde.workspace = true serde_json.workspace = true -smallvec.workspace = true tokio.workspace = true tracing.workspace = true diff --git a/src/database/de.rs b/src/database/de.rs index 8e914fcc..441bb4ec 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,5 +1,6 @@ -use arrayvec::ArrayVec; -use conduwuit::{checked, debug::DebugInspect, err, utils::string, Error, Result}; +use conduwuit::{ + arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, Error, Result, +}; use serde::{ de, de::{DeserializeSeed, Visitor}, diff --git a/src/database/keyval.rs b/src/database/keyval.rs index 056e53d1..f572d15f 100644 --- a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,6 +1,5 @@ -use conduwuit::Result; +use conduwuit::{smallvec::SmallVec, Result}; use serde::{Deserialize, Serialize}; -use smallvec::SmallVec; use 
crate::{de, ser}; diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index 424f8970..7a09b358 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,7 +1,7 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, err, implement, utils::{future::TryExtExt, result::FlatOk}, Result, diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 68c305af..6f010097 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -5,8 +5,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; -use arrayvec::ArrayVec; -use conduwuit::implement; +use conduwuit::{arrayvec::ArrayVec, implement}; use rocksdb::WriteBatchWithTransaction; use serde::Serialize; diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs index 401eba43..178f4a61 100644 --- a/src/database/map/qry.rs +++ b/src/database/map/qry.rs @@ -1,7 +1,6 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; -use arrayvec::ArrayVec; -use conduwuit::{implement, Result}; +use conduwuit::{arrayvec::ArrayVec, implement, Result}; use futures::Future; use serde::Serialize; diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index ec37bbfe..a7ae9133 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -1,7 +1,6 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; -use arrayvec::ArrayVec; -use conduwuit::implement; +use conduwuit::{arrayvec::ArrayVec, implement}; use serde::Serialize; use crate::{keyval::KeyBuf, ser, util::or_else}; diff --git a/src/database/pool.rs b/src/database/pool.rs index c753855a..7636ff5e 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -14,6 +14,7 @@ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, err, error, implement, result::DebugInspect, + smallvec::SmallVec, trace, utils::sys::compute::{get_affinity, 
nth_core_available, set_affinity}, Error, Result, Server, @@ -21,7 +22,6 @@ use conduwuit::{ use futures::{channel::oneshot, TryFutureExt}; use oneshot::Sender as ResultSender; use rocksdb::Direction; -use smallvec::SmallVec; use self::configure::configure; use crate::{keyval::KeyBuf, stream, Handle, Map}; diff --git a/src/database/tests.rs b/src/database/tests.rs index e6c85983..594170e8 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -2,8 +2,10 @@ use std::fmt::Debug; -use arrayvec::ArrayVec; -use conduwuit::ruma::{serde::Raw, EventId, RoomId, UserId}; +use conduwuit::{ + arrayvec::ArrayVec, + ruma::{serde::Raw, EventId, RoomId, UserId}, +}; use serde::Serialize; use crate::{ diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 30183179..caeea318 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -47,7 +47,6 @@ zstd_compression = [ blurhashing = ["dep:image","dep:blurhash"] [dependencies] -arrayvec.workspace = true async-trait.workspace = true base64.workspace = true bytes.workspace = true @@ -75,7 +74,6 @@ serde_json.workspace = true serde.workspace = true serde_yaml.workspace = true sha2.workspace = true -smallvec.workspace = true termimad.workspace = true termimad.optional = true tokio.workspace = true diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 9c3ea293..69b1be4e 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -507,8 +507,10 @@ async fn fix_referencedevents_missing_sep(services: &Services) -> Result { } async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result { + use conduwuit::arrayvec::ArrayString; use ruma::identifiers_validation::MAX_BYTES; - type ArrayId = arrayvec::ArrayString; + + type ArrayId = ArrayString; type Key<'a> = (&'a RoomId, u64, &'a UserId); warn!("Fixing undeleted entries in readreceiptid_readreceipt..."); diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 22a92865..7b4f104d 100644 
--- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,7 +1,7 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, Result, diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index bfe100e7..e5bee9ac 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -4,8 +4,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use arrayvec::ArrayString; -use conduwuit::utils::math::Expected; +use conduwuit::{arrayvec::ArrayString, utils::math::Expected}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 090e562d..6be9d42d 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,8 +6,7 @@ mod tests; use std::sync::Arc; -use arrayvec::ArrayString; -use conduwuit::{utils::MutexMap, Result, Server}; +use conduwuit::{arrayvec::ArrayString, utils::MutexMap, Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index a35aabe0..b7c38313 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -6,10 +6,8 @@ use std::{ use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; use futures::{future::ready, TryFutureExt}; use ruma::{ - api::client::error::ErrorKind, - events::StateEventType, - state_res::{self, EventTypeExt}, - CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, + api::client::error::ErrorKind, events::StateEventType, state_res, CanonicalJsonObject, + CanonicalJsonValue, EventId, RoomId, ServerName, }; use super::{check_room_id, get_room_version_id, 
to_room_version}; @@ -123,7 +121,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if !matches!( auth_events - .get(&(StateEventType::RoomCreate, String::new())) + .get(&(StateEventType::RoomCreate, String::new().into())) .map(AsRef::as_ref), Some(_) | None ) { @@ -134,7 +132,7 @@ pub(super) async fn handle_outlier_pdu<'a>( } let state_fetch = |ty: &'static StateEventType, sk: &str| { - let key = ty.with_state_key(sk); + let key = (ty.to_owned(), sk.into()); ready(auth_events.get(&key)) }; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 4d99b088..eb9ca01f 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -64,6 +64,7 @@ pub async fn resolve_state( .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) + .map(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) .collect() }) .map(Ok::<_, Error>) diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8ae6354c..7bf3b8f8 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -172,6 +172,7 @@ async fn state_at_incoming_fork( .short .get_statekey_from_short(*k) .map_ok(|(ty, sk)| ((ty, sk), id.clone())) + .map_ok(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) }) .ready_filter_map(Result::ok) .collect() diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 2e6ecbb5..26e11ded 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,7 +1,7 @@ use std::{mem::size_of, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, result::LogErr, utils::{ stream::{TryIgnore, WidebandExt}, diff --git 
a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 35cfd444..cc015237 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, implement, utils::{ set, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index dd586d02..8728325a 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; -pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId}; -use conduwuit::{err, implement, utils, utils::IterStream, Result}; +pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; +use conduwuit::{err, implement, utils, utils::IterStream, Result, StateKey}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; @@ -28,7 +28,6 @@ struct Services { } pub type ShortStateHash = ShortId; -pub type ShortStateKey = ShortId; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -181,7 +180,7 @@ where pub async fn get_statekey_from_short( &self, shortstatekey: ShortStateKey, -) -> Result<(StateEventType, String)> { +) -> Result<(StateEventType, StateKey)> { const BUFSIZE: usize = size_of::(); self.db @@ -200,7 +199,7 @@ pub async fn get_statekey_from_short( pub fn multi_get_statekey_from_short<'a, S>( &'a self, shortstatekey: S, -) -> impl Stream> + Send + 'a +) -> impl Stream> + Send + 'a where S: Stream + Send + 'a, { diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index 98a82cea..e3ec55fe 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,6 +1,6 @@ use std::borrow::Borrow; -use conduwuit::{err, implement, PduEvent, Result}; +use conduwuit::{err, implement, 
PduEvent, Result, StateKey}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{events::StateEventType, EventId, RoomId}; use serde::Deserialize; @@ -27,7 +27,7 @@ where pub fn room_state_full<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { +) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 3cf168c1..da1500cb 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -6,7 +6,7 @@ use conduwuit::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, - PduEvent, Result, + PduEvent, Result, StateKey, }; use database::Deserialized; use futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; @@ -192,7 +192,7 @@ pub fn state_keys_with_ids<'a, Id>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a +) -> impl Stream + Send + 'a where Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, ::Owned: Borrow, @@ -200,7 +200,7 @@ where let state_keys_with_short_ids = self .state_keys_with_shortids(shortstatehash, event_type) .unzip() - .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) .shared(); let state_keys = state_keys_with_short_ids @@ -230,7 +230,7 @@ pub fn state_keys_with_shortids<'a>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) .expect_ok() @@ -267,7 +267,7 @@ pub fn state_keys<'a>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) .expect_ok() @@ -314,7 +314,7 @@ pub fn state_added( pub fn state_full( &self, 
shortstatehash: ShortStateHash, -) -> impl Stream + Send + '_ { +) -> impl Stream + Send + '_ { self.state_full_pdus(shortstatehash) .ready_filter_map(|pdu| { Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 725a4fba..0332c227 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -175,7 +175,7 @@ pub async fn user_can_invite( .timeline .create_hash_and_sign_event( PduBuilder::state( - target_user.into(), + target_user.as_str(), &RoomMemberEventContent::new(MembershipState::Invite), ), sender, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 3d68dff6..18731809 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -5,8 +5,8 @@ use std::{ sync::{Arc, Mutex}, }; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, Result, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a913034d..a7edd4a4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -38,7 +38,7 @@ use ruma::{ push::{Action, Ruleset, Tweak}, state_res::{self, Event, RoomVersion}, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -387,10 +387,10 @@ impl Service { if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { - let target_user_id = OwnedUserId::parse(state_key)?; + let target_user_id = UserId::parse(state_key)?; - if 
self.services.users.is_active_local(&target_user_id).await { - push_target.insert(target_user_id); + if self.services.users.is_active_local(target_user_id).await { + push_target.insert(target_user_id.to_owned()); } } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 86b219f7..b46ce7a8 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -13,6 +13,7 @@ use std::{ use async_trait::async_trait; use conduwuit::{ debug, debug_warn, err, error, + smallvec::SmallVec, utils::{available_parallelism, math::usize_from_u64_truncated, ReadyExt, TryReadyExt}, warn, Result, Server, }; @@ -21,7 +22,6 @@ use ruma::{ api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, }; -use smallvec::SmallVec; use tokio::{task, task::JoinSet}; use self::data::Data; From 0a9a9b3c92852cae269aaf2cb3894658b5e35a54 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 12:22:22 +0000 Subject: [PATCH 0677/1248] larcen state-res from ruma --- Cargo.toml | 1 - src/api/client/membership.rs | 6 +- src/api/client/sync/v5.rs | 3 +- src/core/error/mod.rs | 2 +- src/core/mod.rs | 2 + src/core/pdu/event.rs | 2 +- src/core/state_res/LICENSE | 17 + src/core/state_res/error.rs | 23 + src/core/state_res/event_auth.rs | 1418 ++++++++++++++ src/core/state_res/mod.rs | 1644 +++++++++++++++++ src/core/state_res/outcomes.txt | 104 ++ src/core/state_res/power_levels.rs | 256 +++ src/core/state_res/room_version.rs | 149 ++ src/core/state_res/state_event.rs | 102 + src/core/state_res/state_res_bench.rs | 648 +++++++ src/core/state_res/test_utils.rs | 688 +++++++ src/service/rooms/event_handler/fetch_prev.rs | 11 +- .../rooms/event_handler/handle_outlier_pdu.rs | 6 +- src/service/rooms/event_handler/mod.rs | 6 +- .../rooms/event_handler/resolve_state.rs | 9 +- .../rooms/event_handler/state_at_incoming.rs | 4 +- .../event_handler/upgrade_outlier_pdu.rs | 10 +- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- 24 
files changed, 5082 insertions(+), 33 deletions(-) create mode 100644 src/core/state_res/LICENSE create mode 100644 src/core/state_res/error.rs create mode 100644 src/core/state_res/event_auth.rs create mode 100644 src/core/state_res/mod.rs create mode 100644 src/core/state_res/outcomes.txt create mode 100644 src/core/state_res/power_levels.rs create mode 100644 src/core/state_res/room_version.rs create mode 100644 src/core/state_res/state_event.rs create mode 100644 src/core/state_res/state_res_bench.rs create mode 100644 src/core/state_res/test_utils.rs diff --git a/Cargo.toml b/Cargo.toml index b93877bd..d8f34544 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -355,7 +355,6 @@ features = [ "federation-api", "markdown", "push-gateway-api-c", - "state-res", "server-util", "unstable-exhaustive-types", "ring-compat", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 449d44d5..1045b014 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -12,7 +12,7 @@ use conduwuit::{ at, debug, debug_info, debug_warn, err, info, pdu::{gen_event_id_canonical_json, PduBuilder}, result::FlatOk, - trace, + state_res, trace, utils::{self, shuffle, IterStream, ReadyExt}, warn, Err, PduEvent, Result, }; @@ -40,8 +40,8 @@ use ruma::{ }, StateEventType, }, - state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ appservice::RegistrationInfo, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index f8ee1047..63731688 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -11,7 +11,7 @@ use conduwuit::{ math::{ruma_from_usize, usize_from_ruma}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, - warn, Error, Result, + warn, Error, Result, 
TypeStateKey, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ @@ -24,7 +24,6 @@ use ruma::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, }, serde::Raw, - state_res::TypeStateKey, uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::{rooms::read_receipt::pack_receipts, PduCount}; diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 88ac6d09..16613b7e 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -121,7 +121,7 @@ pub enum Error { #[error(transparent)] Signatures(#[from] ruma::signatures::Error), #[error(transparent)] - StateRes(#[from] ruma::state_res::Error), + StateRes(#[from] crate::state_res::Error), #[error("uiaa")] Uiaa(ruma::api::client::uiaa::UiaaInfo), diff --git a/src/core/mod.rs b/src/core/mod.rs index ee128628..cd56774a 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -8,6 +8,7 @@ pub mod metrics; pub mod mods; pub mod pdu; pub mod server; +pub mod state_res; pub mod utils; pub use ::arrayvec; @@ -22,6 +23,7 @@ pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; pub use server::Server; +pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; pub use utils::{ctor, dtor, implement, result, result::Result}; pub use crate as conduwuit_core; diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index 6a92afe8..d5c0561e 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -1,8 +1,8 @@ -pub use ruma::state_res::Event; use ruma::{events::TimelineEventType, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; use serde_json::value::RawValue as RawJsonValue; use super::Pdu; +pub use crate::state_res::Event; impl Event for Pdu { type Id = OwnedEventId; diff --git a/src/core/state_res/LICENSE b/src/core/state_res/LICENSE new file mode 100644 index 00000000..c103a044 --- /dev/null +++ 
b/src/core/state_res/LICENSE @@ -0,0 +1,17 @@ +//! Permission is hereby granted, free of charge, to any person obtaining a copy +//! of this software and associated documentation files (the "Software"), to +//! deal in the Software without restriction, including without limitation the +//! rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +//! sell copies of the Software, and to permit persons to whom the Software is +//! furnished to do so, subject to the following conditions: + +//! The above copyright notice and this permission notice shall be included in +//! all copies or substantial portions of the Software. + +//! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +//! FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +//! IN THE SOFTWARE. diff --git a/src/core/state_res/error.rs b/src/core/state_res/error.rs new file mode 100644 index 00000000..7711d878 --- /dev/null +++ b/src/core/state_res/error.rs @@ -0,0 +1,23 @@ +use serde_json::Error as JsonError; +use thiserror::Error; + +/// Represents the various errors that arise when resolving state. +#[derive(Error, Debug)] +#[non_exhaustive] +pub enum Error { + /// A deserialization error. + #[error(transparent)] + SerdeJson(#[from] JsonError), + + /// The given option or version is unsupported. + #[error("Unsupported room version: {0}")] + Unsupported(String), + + /// The given event was not found. + #[error("Not found error: {0}")] + NotFound(String), + + /// Invalid fields in the given PDU. 
+ #[error("Invalid PDU: {0}")] + InvalidPdu(String), +} diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs new file mode 100644 index 00000000..72a0216c --- /dev/null +++ b/src/core/state_res/event_auth.rs @@ -0,0 +1,1418 @@ +use std::{borrow::Borrow, collections::BTreeSet}; + +use futures::{ + future::{join3, OptionFuture}, + Future, +}; +use ruma::{ + events::room::{ + create::RoomCreateEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, ThirdPartyInvite}, + power_levels::RoomPowerLevelsEventContent, + third_party_invite::RoomThirdPartyInviteEventContent, + }, + int, + serde::{Base64, Raw}, + Int, OwnedUserId, RoomVersionId, UserId, +}; +use serde::{ + de::{Error as _, IgnoredAny}, + Deserialize, +}; +use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; +use tracing::{debug, error, instrument, trace, warn}; + +use super::{ + power_levels::{ + deserialize_power_levels, deserialize_power_levels_content_fields, + deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, + }, + room_version::RoomVersion, + Error, Event, Result, StateEventType, TimelineEventType, +}; + +// FIXME: field extracting could be bundled for `content` +#[derive(Deserialize)] +struct GetMembership { + membership: MembershipState, +} + +#[derive(Deserialize)] +struct RoomMemberContentFields { + membership: Option>, + join_authorised_via_users_server: Option>, +} + +/// For the given event `kind` what are the relevant auth events that are needed +/// to authenticate this `content`. +/// +/// # Errors +/// +/// This function will return an error if the supplied `content` is not a JSON +/// object. 
+pub fn auth_types_for_event( + kind: &TimelineEventType, + sender: &UserId, + state_key: Option<&str>, + content: &RawJsonValue, +) -> serde_json::Result> { + if kind == &TimelineEventType::RoomCreate { + return Ok(vec![]); + } + + let mut auth_types = vec![ + (StateEventType::RoomPowerLevels, String::new()), + (StateEventType::RoomMember, sender.to_string()), + (StateEventType::RoomCreate, String::new()), + ]; + + if kind == &TimelineEventType::RoomMember { + #[derive(Deserialize)] + struct RoomMemberContentFields { + membership: Option>, + third_party_invite: Option>, + join_authorised_via_users_server: Option>, + } + + if let Some(state_key) = state_key { + let content: RoomMemberContentFields = from_json_str(content.get())?; + + if let Some(Ok(membership)) = content.membership.map(|m| m.deserialize()) { + if [MembershipState::Join, MembershipState::Invite, MembershipState::Knock] + .contains(&membership) + { + let key = (StateEventType::RoomJoinRules, String::new()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + + if let Some(Ok(u)) = content + .join_authorised_via_users_server + .map(|m| m.deserialize()) + { + let key = (StateEventType::RoomMember, u.to_string()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + } + } + + let key = (StateEventType::RoomMember, state_key.to_owned()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + + if membership == MembershipState::Invite { + if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) { + let key = (StateEventType::RoomThirdPartyInvite, t_id.signed.token); + if !auth_types.contains(&key) { + auth_types.push(key); + } + } + } + } + } + } + + Ok(auth_types) +} + +/// Authenticate the incoming `event`. 
+/// +/// The steps of authentication are: +/// +/// * check that the event is being authenticated for the correct room +/// * then there are checks for specific event types +/// +/// The `fetch_state` closure should gather state from a state snapshot. We need +/// to know if the event passes auth against some state not a recursive +/// collection of auth_events fields. +#[instrument(level = "debug", skip_all, fields(event_id = incoming_event.event_id().borrow().as_str()))] +pub async fn auth_check( + room_version: &RoomVersion, + incoming_event: &Incoming, + current_third_party_invite: Option<&Incoming>, + fetch_state: F, +) -> Result +where + F: Fn(&'static StateEventType, &str) -> Fut, + Fut: Future> + Send, + Fetched: Event + Send, + Incoming: Event + Send, +{ + debug!( + "auth_check beginning for {} ({})", + incoming_event.event_id(), + incoming_event.event_type() + ); + + // [synapse] check that all the events are in the same room as `incoming_event` + + // [synapse] do_sig_check check the event has valid signatures for member events + + // TODO do_size_check is false when called by `iterative_auth_check` + // do_size_check is also mostly accomplished by ruma with the exception of + // checking event_type, state_key, and json are below a certain size (255 and + // 65_536 respectively) + + let sender = incoming_event.sender(); + + // Implementation of https://spec.matrix.org/latest/rooms/v1/#authorization-rules + // + // 1. 
If type is m.room.create: + if *incoming_event.event_type() == TimelineEventType::RoomCreate { + #[derive(Deserialize)] + struct RoomCreateContentFields { + room_version: Option>, + creator: Option>, + } + + debug!("start m.room.create check"); + + // If it has any previous events, reject + if incoming_event.prev_events().next().is_some() { + warn!("the room creation event had previous events"); + return Ok(false); + } + + // If the domain of the room_id does not match the domain of the sender, reject + let Some(room_id_server_name) = incoming_event.room_id().server_name() else { + warn!("room ID has no servername"); + return Ok(false); + }; + + if room_id_server_name != sender.server_name() { + warn!("servername of room ID does not match servername of sender"); + return Ok(false); + } + + // If content.room_version is present and is not a recognized version, reject + let content: RoomCreateContentFields = from_json_str(incoming_event.content().get())?; + if content + .room_version + .is_some_and(|v| v.deserialize().is_err()) + { + warn!("invalid room version found in m.room.create event"); + return Ok(false); + } + + if !room_version.use_room_create_sender { + // If content has no creator field, reject + if content.creator.is_none() { + warn!("no creator field found in m.room.create content"); + return Ok(false); + } + } + + debug!("m.room.create event was allowed"); + return Ok(true); + } + + /* + // TODO: In the past this code caused problems federating with synapse, maybe this has been + // resolved already. Needs testing. + // + // 2. Reject if auth_events + // a. auth_events cannot have duplicate keys since it's a BTree + // b. 
All entries are valid auth events according to spec + let expected_auth = auth_types_for_event( + incoming_event.kind, + sender, + incoming_event.state_key, + incoming_event.content().clone(), + ); + + dbg!(&expected_auth); + + for ev_key in auth_events.keys() { + // (b) + if !expected_auth.contains(ev_key) { + warn!("auth_events contained invalid auth event"); + return Ok(false); + } + } + */ + + let (room_create_event, power_levels_event, sender_member_event) = join3( + fetch_state(&StateEventType::RoomCreate, ""), + fetch_state(&StateEventType::RoomPowerLevels, ""), + fetch_state(&StateEventType::RoomMember, sender.as_str()), + ) + .await; + + let room_create_event = match room_create_event { + | None => { + warn!("no m.room.create event in auth chain"); + return Ok(false); + }, + | Some(e) => e, + }; + + // 3. If event does not have m.room.create in auth_events reject + if !incoming_event + .auth_events() + .any(|id| id.borrow() == room_create_event.event_id().borrow()) + { + warn!("no m.room.create event in auth events"); + return Ok(false); + } + + // If the create event content has the field m.federate set to false and the + // sender domain of the event does not match the sender domain of the create + // event, reject. + #[derive(Deserialize)] + struct RoomCreateContentFederate { + #[serde(rename = "m.federate", default = "ruma::serde::default_true")] + federate: bool, + } + let room_create_content: RoomCreateContentFederate = + from_json_str(room_create_event.content().get())?; + if !room_create_content.federate + && room_create_event.sender().server_name() != incoming_event.sender().server_name() + { + warn!( + "room is not federated and event's sender domain does not match create event's \ + sender domain" + ); + return Ok(false); + } + + // Only in some room versions 6 and below + if room_version.special_case_aliases_auth { + // 4. 
If type is m.room.aliases + if *incoming_event.event_type() == TimelineEventType::RoomAliases { + debug!("starting m.room.aliases check"); + + // If sender's domain doesn't matches state_key, reject + if incoming_event.state_key() != Some(sender.server_name().as_str()) { + warn!("state_key does not match sender"); + return Ok(false); + } + + debug!("m.room.aliases event was allowed"); + return Ok(true); + } + } + + // If type is m.room.member + if *incoming_event.event_type() == TimelineEventType::RoomMember { + debug!("starting m.room.member check"); + let state_key = match incoming_event.state_key() { + | None => { + warn!("no statekey in member event"); + return Ok(false); + }, + | Some(s) => s, + }; + + let content: RoomMemberContentFields = from_json_str(incoming_event.content().get())?; + if content + .membership + .as_ref() + .and_then(|m| m.deserialize().ok()) + .is_none() + { + warn!("no valid membership field found for m.room.member event content"); + return Ok(false); + } + + let target_user = + <&UserId>::try_from(state_key).map_err(|e| Error::InvalidPdu(format!("{e}")))?; + + let user_for_join_auth = content + .join_authorised_via_users_server + .as_ref() + .and_then(|u| u.deserialize().ok()); + + let user_for_join_auth_event: OptionFuture<_> = user_for_join_auth + .as_ref() + .map(|auth_user| fetch_state(&StateEventType::RoomMember, auth_user.as_str())) + .into(); + + let target_user_member_event = + fetch_state(&StateEventType::RoomMember, target_user.as_str()); + + let join_rules_event = fetch_state(&StateEventType::RoomJoinRules, ""); + + let (join_rules_event, target_user_member_event, user_for_join_auth_event) = + join3(join_rules_event, target_user_member_event, user_for_join_auth_event).await; + + let user_for_join_auth_membership = user_for_join_auth_event + .and_then(|mem| from_json_str::(mem?.content().get()).ok()) + .map_or(MembershipState::Leave, |mem| mem.membership); + + if !valid_membership_change( + room_version, + target_user, + 
target_user_member_event.as_ref(), + sender, + sender_member_event.as_ref(), + incoming_event, + current_third_party_invite, + power_levels_event.as_ref(), + join_rules_event.as_ref(), + user_for_join_auth.as_deref(), + &user_for_join_auth_membership, + room_create_event, + )? { + return Ok(false); + } + + debug!("m.room.member event was allowed"); + return Ok(true); + } + + // If the sender's current membership state is not join, reject + let sender_member_event = match sender_member_event { + | Some(mem) => mem, + | None => { + warn!("sender not found in room"); + return Ok(false); + }, + }; + + let sender_membership_event_content: RoomMemberContentFields = + from_json_str(sender_member_event.content().get())?; + let membership_state = sender_membership_event_content + .membership + .expect("we should test before that this field exists") + .deserialize()?; + + if !matches!(membership_state, MembershipState::Join) { + warn!("sender's membership is not join"); + return Ok(false); + } + + // If type is m.room.third_party_invite + let sender_power_level = if let Some(pl) = &power_levels_event { + let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + if let Some(level) = content.get_user_power(sender) { + *level + } else { + content.users_default + } + } else { + // If no power level event found the creator gets 100 everyone else gets 0 + let is_creator = if room_version.use_room_create_sender { + room_create_event.sender() == sender + } else { + #[allow(deprecated)] + from_json_str::(room_create_event.content().get()) + .is_ok_and(|create| create.creator.unwrap() == *sender) + }; + + if is_creator { + int!(100) + } else { + int!(0) + } + }; + + // Allow if and only if sender's current power level is greater than + // or equal to the invite level + if *incoming_event.event_type() == TimelineEventType::RoomThirdPartyInvite { + let invite_level = match &power_levels_event { + | Some(power_levels) => + 
deserialize_power_levels_content_invite( + power_levels.content().get(), + room_version, + )? + .invite, + | None => int!(0), + }; + + if sender_power_level < invite_level { + warn!("sender's cannot send invites in this room"); + return Ok(false); + } + + debug!("m.room.third_party_invite event was allowed"); + return Ok(true); + } + + // If the event type's required power level is greater than the sender's power + // level, reject If the event has a state_key that starts with an @ and does + // not match the sender, reject. + if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) { + warn!("user cannot send event"); + return Ok(false); + } + + // If type is m.room.power_levels + if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels { + debug!("starting m.room.power_levels check"); + + if let Some(required_pwr_lvl) = check_power_levels( + room_version, + incoming_event, + power_levels_event.as_ref(), + sender_power_level, + ) { + if !required_pwr_lvl { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + } + } else { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + } + debug!("m.room.power_levels event allowed"); + } + + // Room version 3: Redaction events are always accepted (provided the event is + // allowed by `events` and `events_default` in the power levels). However, + // servers should not apply or send redaction's to clients until both the + // redaction event and original event have been seen, and are valid. Servers + // should only apply redaction's to events where the sender's domains match, or + // the sender of the redaction has the appropriate permissions per the + // power levels. 
+ + if room_version.extra_redaction_checks + && *incoming_event.event_type() == TimelineEventType::RoomRedaction + { + let redact_level = match power_levels_event { + | Some(pl) => + deserialize_power_levels_content_redact(pl.content().get(), room_version)?.redact, + | None => int!(50), + }; + + if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? { + return Ok(false); + } + } + + debug!("allowing event passed all checks"); + Ok(true) +} + +// TODO deserializing the member, power, join_rules event contents is done in +// conduit just before this is called. Could they be passed in? +/// Does the user who sent this member event have required power levels to do +/// so. +/// +/// * `user` - Information about the membership event and user making the +/// request. +/// * `auth_events` - The set of auth events that relate to a membership event. +/// +/// This is generated by calling `auth_types_for_event` with the membership +/// event and the current State. +#[allow(clippy::too_many_arguments)] +fn valid_membership_change( + room_version: &RoomVersion, + target_user: &UserId, + target_user_membership_event: Option, + sender: &UserId, + sender_membership_event: Option, + current_event: impl Event, + current_third_party_invite: Option, + power_levels_event: Option, + join_rules_event: Option, + user_for_join_auth: Option<&UserId>, + user_for_join_auth_membership: &MembershipState, + create_room: impl Event, +) -> Result { + #[derive(Deserialize)] + struct GetThirdPartyInvite { + third_party_invite: Option>, + } + let content = current_event.content(); + + let target_membership = from_json_str::(content.get())?.membership; + let third_party_invite = + from_json_str::(content.get())?.third_party_invite; + + let sender_membership = match &sender_membership_event { + | Some(pdu) => from_json_str::(pdu.content().get())?.membership, + | None => MembershipState::Leave, + }; + let sender_is_joined = sender_membership == MembershipState::Join; + + 
let target_user_current_membership = match &target_user_membership_event { + | Some(pdu) => from_json_str::(pdu.content().get())?.membership, + | None => MembershipState::Leave, + }; + + let power_levels: RoomPowerLevelsEventContent = match &power_levels_event { + | Some(ev) => from_json_str(ev.content().get())?, + | None => RoomPowerLevelsEventContent::default(), + }; + + let sender_power = power_levels + .users + .get(sender) + .or_else(|| sender_is_joined.then_some(&power_levels.users_default)); + + let target_power = power_levels.users.get(target_user).or_else(|| { + (target_membership == MembershipState::Join).then_some(&power_levels.users_default) + }); + + let mut join_rules = JoinRule::Invite; + if let Some(jr) = &join_rules_event { + join_rules = from_json_str::(jr.content().get())?.join_rule; + } + + let power_levels_event_id = power_levels_event.as_ref().map(Event::event_id); + let sender_membership_event_id = sender_membership_event.as_ref().map(Event::event_id); + let target_user_membership_event_id = + target_user_membership_event.as_ref().map(Event::event_id); + + let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth { + // Is the authorised user allowed to invite users into this room + let (auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event { + // TODO Refactor all powerlevel parsing + let invite = + deserialize_power_levels_content_invite(pl.content().get(), room_version)?.invite; + + let content = + deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + let user_pl = if let Some(level) = content.get_user_power(user_for_join_auth) { + *level + } else { + content.users_default + }; + + (user_pl, invite) + } else { + (int!(0), int!(0)) + }; + (user_for_join_auth_membership == &MembershipState::Join) + && (auth_user_pl >= invite_level) + } else { + // No auth user was given + false + }; + + Ok(match target_membership { + | MembershipState::Join => { + // 1. 
If the only previous event is an m.room.create and the state_key is the + // creator, + // allow + let mut prev_events = current_event.prev_events(); + + let prev_event_is_create_event = prev_events + .next() + .is_some_and(|event_id| event_id.borrow() == create_room.event_id().borrow()); + let no_more_prev_events = prev_events.next().is_none(); + + if prev_event_is_create_event && no_more_prev_events { + let is_creator = if room_version.use_room_create_sender { + let creator = create_room.sender(); + + creator == sender && creator == target_user + } else { + #[allow(deprecated)] + let creator = from_json_str::(create_room.content().get())? + .creator + .ok_or_else(|| serde_json::Error::missing_field("creator"))?; + + creator == sender && creator == target_user + }; + + if is_creator { + return Ok(true); + } + } + + if sender != target_user { + // If the sender does not match state_key, reject. + warn!("Can't make other user join"); + false + } else if target_user_current_membership == MembershipState::Ban { + // If the sender is banned, reject. + warn!(?target_user_membership_event_id, "Banned user can't join"); + false + } else if (join_rules == JoinRule::Invite + || room_version.allow_knocking && join_rules == JoinRule::Knock) + // If the join_rule is invite then allow if membership state is invite or join + && (target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Invite) + { + true + } else if room_version.restricted_join_rules + && matches!(join_rules, JoinRule::Restricted(_)) + || room_version.knock_restricted_join_rule + && matches!(join_rules, JoinRule::KnockRestricted(_)) + { + // If the join_rule is restricted or knock_restricted + if matches!( + target_user_current_membership, + MembershipState::Invite | MembershipState::Join + ) { + // If membership state is join or invite, allow. 
+ true + } else { + // If the join_authorised_via_users_server key in content is not a user with + // sufficient permission to invite other users, reject. + // Otherwise, allow. + user_for_join_auth_is_valid + } + } else { + // If the join_rule is public, allow. + // Otherwise, reject. + join_rules == JoinRule::Public + } + }, + | MembershipState::Invite => { + // If content has third_party_invite key + if let Some(tp_id) = third_party_invite.and_then(|i| i.deserialize().ok()) { + if target_user_current_membership == MembershipState::Ban { + warn!(?target_user_membership_event_id, "Can't invite banned user"); + false + } else { + let allow = verify_third_party_invite( + Some(target_user), + sender, + &tp_id, + current_third_party_invite, + ); + if !allow { + warn!("Third party invite invalid"); + } + allow + } + } else if !sender_is_joined + || target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Ban + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't invite user if sender not joined or the user is currently joined or \ + banned", + ); + false + } else { + let allow = sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some(); + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to invite", + ); + } + allow + } + }, + | MembershipState::Leave => + if sender == target_user { + let allow = target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Invite + || target_user_current_membership == MembershipState::Knock; + if !allow { + warn!( + ?target_user_membership_event_id, + ?target_user_current_membership, + "Can't leave if sender is not already invited, knocked, or joined" + ); + } + allow + } else if !sender_is_joined + || target_user_current_membership == MembershipState::Ban + && sender_power.filter(|&p| p < &power_levels.ban).is_some() + { 
+ warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't kick if sender not joined or user is already banned", + ); + false + } else { + let allow = sender_power.filter(|&p| p >= &power_levels.kick).is_some() + && target_power < sender_power; + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to kick", + ); + } + allow + }, + | MembershipState::Ban => + if !sender_is_joined { + warn!(?sender_membership_event_id, "Can't ban user if sender is not joined"); + false + } else { + let allow = sender_power.filter(|&p| p >= &power_levels.ban).is_some() + && target_power < sender_power; + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to ban", + ); + } + allow + }, + | MembershipState::Knock if room_version.allow_knocking => { + // 1. If the `join_rule` is anything other than `knock` or `knock_restricted`, + // reject. + if !matches!(join_rules, JoinRule::KnockRestricted(_) | JoinRule::Knock) { + warn!( + "Join rule is not set to knock or knock_restricted, knocking is not allowed" + ); + false + } else if matches!(join_rules, JoinRule::KnockRestricted(_)) + && !room_version.knock_restricted_join_rule + { + // 2. If the `join_rule` is `knock_restricted`, but the room does not support + // `knock_restricted`, reject. + warn!( + "Join rule is set to knock_restricted but room version does not support \ + knock_restricted, knocking is not allowed" + ); + false + } else if sender != target_user { + // 3. If `sender` does not match `state_key`, reject. + warn!( + ?sender, + ?target_user, + "Can't make another user knock, sender did not match target" + ); + false + } else if matches!( + sender_membership, + MembershipState::Ban | MembershipState::Invite | MembershipState::Join + ) { + // 4. If the `sender`'s current membership is not `ban`, `invite`, or `join`, + // allow. + // 5. Otherwise, reject. 
+ warn!( + ?target_user_membership_event_id, + "Knocking with a membership state of ban, invite or join is invalid", + ); + false + } else { + true + } + }, + | _ => { + warn!("Unknown membership transition"); + false + }, + }) +} + +/// Is the user allowed to send a specific event based on the rooms power +/// levels. +/// +/// Does the event have the correct userId as its state_key if it's not the "" +/// state_key. +fn can_send_event(event: impl Event, ple: Option, user_level: Int) -> bool { + let event_type_power_level = get_send_level(event.event_type(), event.state_key(), ple); + + debug!( + required_level = i64::from(event_type_power_level), + user_level = i64::from(user_level), + state_key = ?event.state_key(), + "permissions factors", + ); + + if user_level < event_type_power_level { + return false; + } + + if event.state_key().is_some_and(|k| k.starts_with('@')) + && event.state_key() != Some(event.sender().as_str()) + { + return false; // permission required to post in this room + } + + true +} + +/// Confirm that the event sender has the required power levels. +fn check_power_levels( + room_version: &RoomVersion, + power_event: impl Event, + previous_power_event: Option, + user_level: Int, +) -> Option { + match power_event.state_key() { + | Some("") => {}, + | Some(key) => { + error!(state_key = key, "m.room.power_levels event has non-empty state key"); + return None; + }, + | None => { + error!("check_power_levels requires an m.room.power_levels *state* event argument"); + return None; + }, + } + + // - If any of the keys users_default, events_default, state_default, ban, + // redact, kick, or invite in content are present and not an integer, reject. + // - If either of the keys events or notifications in content are present and + // not a dictionary with values that are integers, reject. + // - If users key in content is not a dictionary with keys that are valid user + // IDs with values that are integers, reject. 
+ let user_content: RoomPowerLevelsEventContent = + deserialize_power_levels(power_event.content().get(), room_version)?; + + // Validation of users is done in Ruma, synapse for loops validating user_ids + // and integers here + debug!("validation of power event finished"); + + let current_state = match previous_power_event { + | Some(current_state) => current_state, + // If there is no previous m.room.power_levels event in the room, allow + | None => return Some(true), + }; + + let current_content: RoomPowerLevelsEventContent = + deserialize_power_levels(current_state.content().get(), room_version)?; + + let mut user_levels_to_check = BTreeSet::new(); + let old_list = ¤t_content.users; + let user_list = &user_content.users; + for user in old_list.keys().chain(user_list.keys()) { + let user: &UserId = user; + user_levels_to_check.insert(user); + } + + trace!(set = ?user_levels_to_check, "user levels to check"); + + let mut event_levels_to_check = BTreeSet::new(); + let old_list = ¤t_content.events; + let new_list = &user_content.events; + for ev_id in old_list.keys().chain(new_list.keys()) { + event_levels_to_check.insert(ev_id); + } + + trace!(set = ?event_levels_to_check, "event levels to check"); + + let old_state = ¤t_content; + let new_state = &user_content; + + // synapse does not have to split up these checks since we can't combine UserIds + // and EventTypes we do 2 loops + + // UserId loop + for user in user_levels_to_check { + let old_level = old_state.users.get(user); + let new_level = new_state.users.get(user); + if old_level.is_some() && new_level.is_some() && old_level == new_level { + continue; + } + + // If the current value is equal to the sender's current power level, reject + if user != power_event.sender() && old_level == Some(&user_level) { + warn!("m.room.power_level cannot remove ops == to own"); + return Some(false); // cannot remove ops level == to own + } + + // If the current value is higher than the sender's current power level, reject + 
// If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > Some(&user_level); + let new_level_too_big = new_level > Some(&user_level); + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + + // EventType loop + for ev_type in event_levels_to_check { + let old_level = old_state.events.get(ev_type); + let new_level = new_state.events.get(ev_type); + if old_level.is_some() && new_level.is_some() && old_level == new_level { + continue; + } + + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > Some(&user_level); + let new_level_too_big = new_level > Some(&user_level); + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + + // Notifications, currently there is only @room + if room_version.limit_notifications_power_levels { + let old_level = old_state.notifications.room; + let new_level = new_state.notifications.room; + if old_level != new_level { + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > user_level; + let new_level_too_big = new_level > user_level; + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + } + + let levels = [ + "users_default", + "events_default", + "state_default", + "ban", + "redact", + "kick", + "invite", + ]; + let old_state = serde_json::to_value(old_state).unwrap(); + let new_state = serde_json::to_value(new_state).unwrap(); + for lvl_name in &levels { 
+ if let Some((old_lvl, new_lvl)) = get_deserialize_levels(&old_state, &new_state, lvl_name) + { + let old_level_too_big = old_lvl > user_level; + let new_level_too_big = new_lvl > user_level; + + if old_level_too_big || new_level_too_big { + warn!("cannot add ops > than own"); + return Some(false); + } + } + } + + Some(true) +} + +fn get_deserialize_levels( + old: &serde_json::Value, + new: &serde_json::Value, + name: &str, +) -> Option<(Int, Int)> { + Some(( + serde_json::from_value(old.get(name)?.clone()).ok()?, + serde_json::from_value(new.get(name)?.clone()).ok()?, + )) +} + +/// Does the event redacting come from a user with enough power to redact the +/// given event. +fn check_redaction( + _room_version: &RoomVersion, + redaction_event: impl Event, + user_level: Int, + redact_level: Int, +) -> Result { + if user_level >= redact_level { + debug!("redaction allowed via power levels"); + return Ok(true); + } + + // If the domain of the event_id of the event being redacted is the same as the + // domain of the event_id of the m.room.redaction, allow + if redaction_event.event_id().borrow().server_name() + == redaction_event + .redacts() + .as_ref() + .and_then(|&id| id.borrow().server_name()) + { + debug!("redaction event allowed via room version 1 rules"); + return Ok(true); + } + + Ok(false) +} + +/// Helper function to fetch the power level needed to send an event of type +/// `e_type` based on the rooms "m.room.power_level" event. 
+fn get_send_level( + e_type: &TimelineEventType, + state_key: Option<&str>, + power_lvl: Option, +) -> Int { + power_lvl + .and_then(|ple| { + from_json_str::(ple.content().get()) + .map(|content| { + content.events.get(e_type).copied().unwrap_or_else(|| { + if state_key.is_some() { + content.state_default + } else { + content.events_default + } + }) + }) + .ok() + }) + .unwrap_or_else(|| if state_key.is_some() { int!(50) } else { int!(0) }) +} + +fn verify_third_party_invite( + target_user: Option<&UserId>, + sender: &UserId, + tp_id: &ThirdPartyInvite, + current_third_party_invite: Option, +) -> bool { + // 1. Check for user being banned happens before this is called + // checking for mxid and token keys is done by ruma when deserializing + + // The state key must match the invitee + if target_user != Some(&tp_id.signed.mxid) { + return false; + } + + // If there is no m.room.third_party_invite event in the current room state with + // state_key matching token, reject + let current_tpid = match current_third_party_invite { + | Some(id) => id, + | None => return false, + }; + + if current_tpid.state_key() != Some(&tp_id.signed.token) { + return false; + } + + if sender != current_tpid.sender() { + return false; + } + + // If any signature in signed matches any public key in the + // m.room.third_party_invite event, allow + let tpid_ev = + match from_json_str::(current_tpid.content().get()) { + | Ok(ev) => ev, + | Err(_) => return false, + }; + + let decoded_invite_token = match Base64::parse(&tp_id.signed.token) { + | Ok(tok) => tok, + // FIXME: Log a warning? 
+ | Err(_) => return false, + }; + + // A list of public keys in the public_keys field + for key in tpid_ev.public_keys.unwrap_or_default() { + if key.public_key == decoded_invite_token { + return true; + } + } + + // A single public key in the public_key field + tpid_ev.public_key == decoded_invite_token +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use ruma_events::{ + room::{ + join_rules::{ + AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, + }, + member::{MembershipState, RoomMemberEventContent}, + }, + StateEventType, TimelineEventType, + }; + use serde_json::value::to_raw_value as to_raw_json_value; + + use crate::{ + event_auth::valid_membership_change, + test_utils::{ + alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, + to_pdu_event, PduEvent, INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, + }, + Event, EventTypeExt, RoomVersion, StateMap, + }; + + #[test] + fn test_ban_pass() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + alice(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_ban(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = charlie(); + let sender = alice(); + + assert!(valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, 
"".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_join_non_creator() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS_CREATE_ROOM(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = charlie(); + let sender = charlie(); + + assert!(!valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_join_creator() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS_CREATE_ROOM(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = alice(); + let sender = alice(); + + assert!(valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + 
fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_ban_fail() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + charlie(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_ban(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = alice(); + let sender = charlie(); + + assert!(!valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_restricted_join_rule() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let mut events = INITIAL_EVENTS(); + *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Restricted( + Restricted::new(vec![AllowRule::RoomMembership(RoomMembership::new( + room_id().to_owned(), + ))]), + ))) + .unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ); + + 
let mut member = RoomMemberEventContent::new(MembershipState::Join); + member.join_authorized_via_users_server = Some(alice().to_owned()); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap(), + &["CREATE", "IJR", "IPOWER", "new"], + &["new"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = ella(); + let sender = ella(); + + assert!(valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + Some(alice()), + &MembershipState::Join, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + + assert!(!valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + Some(ella()), + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_knock() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let mut events = INITIAL_EVENTS(); + *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + 
to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Knock)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Knock)).unwrap(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = ella(); + let sender = ella(); + + assert!(valid_membership_change( + &RoomVersion::V7, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } +} diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs new file mode 100644 index 00000000..e4054377 --- /dev/null +++ b/src/core/state_res/mod.rs @@ -0,0 +1,1644 @@ +pub(crate) mod error; +pub mod event_auth; +mod power_levels; +mod room_version; +mod state_event; + +#[cfg(test)] +mod test_utils; + +use std::{ + borrow::Borrow, + cmp::{Ordering, Reverse}, + collections::{BinaryHeap, HashMap, HashSet}, + fmt::Debug, + hash::Hash, +}; + +use futures::{future, stream, Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use ruma::{ + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, TimelineEventType, + }, + int, EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, +}; +use serde_json::from_str as from_json_str; + +pub(crate) use self::error::Error; +use self::power_levels::PowerLevelsContentFields; +pub use self::{ + 
event_auth::{auth_check, auth_types_for_event}, + room_version::RoomVersion, + state_event::Event, +}; +use crate::{debug, trace, warn}; + +/// A mapping of event type and state_key to some value `T`, usually an +/// `EventId`. +pub type StateMap = HashMap; +pub type StateMapItem = (TypeStateKey, T); +pub type TypeStateKey = (StateEventType, String); + +type Result = crate::Result; + +/// Resolve sets of state events as they come in. +/// +/// Internally `StateResolution` builds a graph and an auth chain to allow for +/// state conflict resolution. +/// +/// ## Arguments +/// +/// * `state_sets` - The incoming state to resolve. Each `StateMap` represents a +/// possible fork in the state of a room. +/// +/// * `auth_chain_sets` - The full recursive set of `auth_events` for each event +/// in the `state_sets`. +/// +/// * `event_fetch` - Any event not found in the `event_map` will defer to this +/// closure to find the event. +/// +/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight +/// for any given operation. +/// +/// ## Invariants +/// +/// The caller of `resolve` must ensure that all the events are from the same +/// room. Although this function takes a `RoomId` it does not check that each +/// event is part of the same room. 
+//#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, +//#[tracing::instrument(level event_fetch))] +pub async fn resolve<'a, E, SetIter, Fetch, FetchFut, Exists, ExistsFut>( + room_version: &RoomVersionId, + state_sets: impl IntoIterator + Send, + auth_chain_sets: &'a [HashSet], + event_fetch: &Fetch, + event_exists: &Exists, + parallel_fetches: usize, +) -> Result> +where + Fetch: Fn(E::Id) -> FetchFut + Sync, + FetchFut: Future> + Send, + Exists: Fn(E::Id) -> ExistsFut + Sync, + ExistsFut: Future + Send, + SetIter: Iterator> + Clone + Send, + E: Event + Clone + Send + Sync, + E::Id: Borrow + Send + Sync, + for<'b> &'b E: Send, +{ + debug!("State resolution starting"); + + // Split non-conflicting and conflicting state + let (clean, conflicting) = separate(state_sets.into_iter()); + + debug!(count = clean.len(), "non-conflicting events"); + trace!(map = ?clean, "non-conflicting events"); + + if conflicting.is_empty() { + debug!("no conflicting state found"); + return Ok(clean); + } + + debug!(count = conflicting.len(), "conflicting events"); + trace!(map = ?conflicting, "conflicting events"); + + let auth_chain_diff = + get_auth_chain_diff(auth_chain_sets).chain(conflicting.into_values().flatten()); + + // `all_conflicted` contains unique items + // synapse says `full_set = {eid for eid in full_conflicted_set if eid in + // event_map}` + let all_conflicted: HashSet<_> = stream::iter(auth_chain_diff) + // Don't honor events we cannot "verify" + .map(|id| event_exists(id.clone()).map(move |exists| (id, exists))) + .buffer_unordered(parallel_fetches) + .filter_map(|(id, exists)| future::ready(exists.then_some(id))) + .collect() + .boxed() + .await; + + debug!(count = all_conflicted.len(), "full conflicted set"); + trace!(set = ?all_conflicted, "full conflicted set"); + + // We used to check that all events are events from the correct room + // this is now a check the caller of `resolve` must make. 
+ + // Get only the control events with a state_key: "" or ban/kick event (sender != + // state_key) + let control_events: Vec<_> = stream::iter(all_conflicted.iter()) + .map(|id| is_power_event_id(id, &event_fetch).map(move |is| (id, is))) + .buffer_unordered(parallel_fetches) + .filter_map(|(id, is)| future::ready(is.then_some(id.clone()))) + .collect() + .boxed() + .await; + + // Sort the control events based on power_level/clock/event_id and + // outgoing/incoming edges + let sorted_control_levels = reverse_topological_power_sort( + control_events, + &all_conflicted, + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + debug!(count = sorted_control_levels.len(), "power events"); + trace!(list = ?sorted_control_levels, "sorted power events"); + + let room_version = RoomVersion::new(room_version)?; + // Sequentially auth check each control event. + let resolved_control = iterative_auth_check( + &room_version, + sorted_control_levels.iter(), + clean.clone(), + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + debug!(count = resolved_control.len(), "resolved power events"); + trace!(map = ?resolved_control, "resolved power events"); + + // At this point the control_events have been resolved we now have to + // sort the remaining events using the mainline of the resolved power level. 
+ let deduped_power_ev = sorted_control_levels.into_iter().collect::>(); + + // This removes the control events that passed auth and more importantly those + // that failed auth + let events_to_resolve = all_conflicted + .iter() + .filter(|&id| !deduped_power_ev.contains(id.borrow())) + .cloned() + .collect::>(); + + debug!(count = events_to_resolve.len(), "events left to resolve"); + trace!(list = ?events_to_resolve, "events left to resolve"); + + // This "epochs" power level event + let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, String::new())); + + debug!(event_id = ?power_event, "power event"); + + let sorted_left_events = + mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) + .boxed() + .await?; + + trace!(list = ?sorted_left_events, "events left, sorted"); + + let mut resolved_state = iterative_auth_check( + &room_version, + sorted_left_events.iter(), + resolved_control, // The control events are added to the final resolved state + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + // Add unconflicted state to the resolved state + // We priorities the unconflicting state + resolved_state.extend(clean); + + debug!("state resolution finished"); + + Ok(resolved_state) +} + +/// Split the events that have no conflicts from those that are conflicting. +/// +/// The return tuple looks like `(unconflicted, conflicted)`. +/// +/// State is determined to be conflicting if for the given key (StateEventType, +/// StateKey) there is not exactly one event ID. This includes missing events, +/// if one state_set includes an event that none of the other have this is a +/// conflicting event. 
+fn separate<'a, Id>( + state_sets_iter: impl Iterator>, +) -> (StateMap, StateMap>) +where + Id: Clone + Eq + Hash + 'a, +{ + let mut state_set_count = 0_usize; + let mut occurrences = HashMap::<_, HashMap<_, _>>::new(); + + let state_sets_iter = state_sets_iter.inspect(|_| state_set_count += 1); + for (k, v) in state_sets_iter.flatten() { + occurrences + .entry(k) + .or_default() + .entry(v) + .and_modify(|x| *x += 1) + .or_insert(1); + } + + let mut unconflicted_state = StateMap::new(); + let mut conflicted_state = StateMap::new(); + + for (k, v) in occurrences { + for (id, occurrence_count) in v { + if occurrence_count == state_set_count { + unconflicted_state.insert((k.0.clone(), k.1.clone()), id.clone()); + } else { + conflicted_state + .entry((k.0.clone(), k.1.clone())) + .and_modify(|x: &mut Vec<_>| x.push(id.clone())) + .or_insert(vec![id.clone()]); + } + } + } + + (unconflicted_state, conflicted_state) +} + +/// Returns a Vec of deduped EventIds that appear in some chains but not others. +fn get_auth_chain_diff(auth_chain_sets: &[HashSet]) -> impl Iterator + Send +where + Id: Clone + Eq + Hash + Send, +{ + let num_sets = auth_chain_sets.len(); + let mut id_counts: HashMap = HashMap::new(); + for id in auth_chain_sets.iter().flatten() { + *id_counts.entry(id.clone()).or_default() += 1; + } + + id_counts + .into_iter() + .filter_map(move |(id, count)| (count < num_sets).then_some(id)) +} + +/// Events are sorted from "earliest" to "latest". +/// +/// They are compared using the negative power level (reverse topological +/// ordering), the origin server timestamp and in case of a tie the `EventId`s +/// are compared lexicographically. +/// +/// The power level is negative because a higher power level is equated to an +/// earlier (further back in time) origin server timestamp. 
#[tracing::instrument(level = "debug", skip_all)]
async fn reverse_topological_power_sort<E, F, Fut>(
    events_to_sort: Vec<E::Id>,
    auth_diff: &HashSet<E::Id>,
    fetch_event: &F,
    parallel_fetches: usize,
) -> Result<Vec<E::Id>>
where
    F: Fn(E::Id) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Send,
    E::Id: Borrow<EventId> + Send + Sync,
{
    debug!("reverse topological sort of power events");

    // Build the DAG: every event to sort plus the part of its auth chain that
    // lies within `auth_diff` (see `add_event_and_auth_chain_to_graph`).
    let mut graph = HashMap::new();
    for event_id in events_to_sort {
        add_event_and_auth_chain_to_graph(&mut graph, event_id, auth_diff, fetch_event).await;
    }

    // This is used in the `key_fn` passed to the lexico_topo_sort fn.
    // Look up each node's sender power level concurrently (bounded by
    // `parallel_fetches`) and fold the results into one map; any lookup
    // error aborts via `try_fold`.
    let event_to_pl = stream::iter(graph.keys())
        .map(|event_id| {
            get_power_level_for_sender(event_id.clone(), fetch_event, parallel_fetches)
                .map(move |res| res.map(|pl| (event_id, pl)))
        })
        .buffer_unordered(parallel_fetches)
        .try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| {
            debug!(
                event_id = event_id.borrow().as_str(),
                power_level = i64::from(pl),
                "found the power level of an event's sender",
            );

            event_to_pl.insert(event_id.clone(), pl);
            future::ok(event_to_pl)
        })
        .boxed()
        .await?;

    // Re-bind as a reference so the `fetcher` closure captures a borrow of
    // the map rather than moving it.
    let event_to_pl = &event_to_pl;
    // Tie-breaking key for the sort: (sender power level, origin_server_ts).
    // Both lookups must succeed; a miss is surfaced as `Error::NotFound`.
    let fetcher = |event_id: E::Id| async move {
        let pl = *event_to_pl
            .get(event_id.borrow())
            .ok_or_else(|| Error::NotFound(String::new()))?;
        let ev = fetch_event(event_id)
            .await
            .ok_or_else(|| Error::NotFound(String::new()))?;
        Ok((pl, ev.origin_server_ts()))
    };

    lexicographical_topological_sort(&graph, &fetcher).await
}

/// Sorts the event graph based on number of outgoing/incoming edges.
///
/// `key_fn` is used to obtain the power level and age of an event for
/// breaking ties (together with the event ID).
#[tracing::instrument(level = "debug", skip_all)]
pub async fn lexicographical_topological_sort<Id, F, Fut>(
    graph: &HashMap<Id, HashSet<Id>>,
    key_fn: &F,
) -> Result<Vec<Id>>
where
    F: Fn(Id) -> Fut + Sync,
    Fut: Future<Output = Result<(Int, MilliSecondsSinceUnixEpoch)>> + Send,
    Id: Borrow<EventId> + Clone + Eq + Hash + Ord + Send,
{
    // Composite sort key used to break ties between nodes that become ready
    // at the same time; ordering is implemented below.
    #[derive(PartialEq, Eq)]
    struct TieBreaker<'a, Id> {
        power_level: Int,
        origin_server_ts: MilliSecondsSinceUnixEpoch,
        event_id: &'a Id,
    }

    impl<Id> Ord for TieBreaker<'_, Id>
    where
        Id: Ord,
    {
        fn cmp(&self, other: &Self) -> Ordering {
            // NOTE: the power level comparison is "backwards" intentionally.
            // See the "Mainline ordering" section of the Matrix specification
            // around where it says the following:
            //
            // > for events `x` and `y`, `x < y` if [...]
            //
            // <https://spec.matrix.org/v1.12/rooms/v11/#definitions>
            other
                .power_level
                .cmp(&self.power_level)
                .then(self.origin_server_ts.cmp(&other.origin_server_ts))
                .then(self.event_id.cmp(other.event_id))
        }
    }

    impl<Id> PartialOrd for TieBreaker<'_, Id>
    where
        Id: Ord,
    {
        // Delegate to the total order above so both orderings agree.
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
    }

    debug!("starting lexicographical topological sort");

    // NOTE: an event that has no incoming edges happened most recently,
    // and an event that has no outgoing edges happened least recently.

    // NOTE: this is basically Kahn's algorithm except we look at nodes with no
    // outgoing edges, c.f.
    // https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm

    // outdegree_map is an event referring to the events before it, the
    // more outdegree's the more recent the event.
    let mut outdegree_map = graph.clone();

    // The number of events that depend on the given event (the EventId key)
    // How many events reference this event in the DAG as a parent
    let mut reverse_graph: HashMap<_, HashSet<_>> = HashMap::new();

    // Vec of nodes that have zero out degree, least recent events.
    let mut zero_outdegree = Vec::new();

    for (node, edges) in graph {
        if edges.is_empty() {
            let (power_level, origin_server_ts) = key_fn(node.clone()).await?;
            // The `Reverse` is because rusts `BinaryHeap` sorts largest -> smallest we need
            // smallest -> largest
            zero_outdegree.push(Reverse(TieBreaker {
                power_level,
                origin_server_ts,
                event_id: node,
            }));
        }

        // Ensure every node appears in reverse_graph even if nothing points at it.
        reverse_graph.entry(node).or_default();
        for edge in edges {
            reverse_graph.entry(edge).or_default().insert(node);
        }
    }

    let mut heap = BinaryHeap::from(zero_outdegree);

    // We remove the oldest node (most incoming edges) and check against all other
    let mut sorted = vec![];
    // Destructure the `Reverse` and take the smallest `node` each time
    while let Some(Reverse(item)) = heap.pop() {
        let node = item.event_id;

        for &parent in reverse_graph
            .get(node)
            .expect("EventId in heap is also in reverse_graph")
        {
            // The number of outgoing edges this node has
            let out = outdegree_map
                .get_mut(parent.borrow())
                .expect("outdegree_map knows of all referenced EventIds");

            // Only push on the heap once older events have been cleared
            out.remove(node.borrow());
            if out.is_empty() {
                let (power_level, origin_server_ts) = key_fn(parent.clone()).await?;
                heap.push(Reverse(TieBreaker {
                    power_level,
                    origin_server_ts,
                    event_id: parent,
                }));
            }
        }

        // synapse yields we push then return the vec
        sorted.push(node.clone());
    }

    Ok(sorted)
}

/// Find the power level for the sender of `event_id` or return a default value
/// of zero.
///
/// Do NOT use this anywhere but topological sort; we find the power level for
/// the eventId at the eventId's generation (we walk backwards to the
/// `EventId`'s most recent previous power level event).
async fn get_power_level_for_sender<E, F, Fut>(
    event_id: E::Id,
    fetch_event: &F,
    parallel_fetches: usize,
) -> serde_json::Result<Int>
where
    F: Fn(E::Id) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Send,
    E::Id: Borrow<EventId> + Send,
{
    debug!("fetch event ({event_id}) senders power level");

    let event = fetch_event(event_id.clone()).await;

    // If the event itself could not be fetched this iterator is empty and we
    // fall through to the defaults below.
    let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten();

    // Look for the power-levels event within this event's own auth chain
    // (fetched with at most 5 concurrent requests).
    let pl = stream::iter(auth_events)
        .map(|aid| fetch_event(aid.clone()))
        .buffer_unordered(parallel_fetches.min(5))
        .filter_map(future::ready)
        .collect::<Vec<_>>()
        .boxed()
        .await
        .into_iter()
        .find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, ""));

    let content: PowerLevelsContentFields = match pl {
        // No power-levels event in the auth chain: everyone is at level 0.
        | None => return Ok(int!(0)),
        | Some(ev) => from_json_str(ev.content().get())?,
    };

    if let Some(ev) = event {
        if let Some(&user_level) = content.get_user_power(ev.sender()) {
            debug!("found {} at power_level {user_level}", ev.sender());
            return Ok(user_level);
        }
    }

    // Sender has no explicit entry in `users`; fall back to the default.
    Ok(content.users_default)
}

/// Check that each event is authenticated based on the events before it.
///
/// ## Returns
///
/// The `unconflicted_state` combined with the newly auth'ed events. So any
/// event that fails the `event_auth::auth_check` will be excluded from the
/// returned state map.
///
/// For each `events_to_check` event we gather the events needed to auth it
/// from the `fetch_event` closure and verify each event using the
/// `event_auth::auth_check` function.
async fn iterative_auth_check<'a, E, F, Fut, I>(
    room_version: &RoomVersion,
    events_to_check: I,
    unconflicted_state: StateMap<E::Id>,
    fetch_event: &F,
    parallel_fetches: usize,
) -> Result<StateMap<E::Id>>
where
    F: Fn(E::Id) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E::Id: Borrow<EventId> + Clone + Eq + Ord + Send + Sync + 'a,
    I: Iterator<Item = &'a E::Id> + Debug + Send + 'a,
    E: Event + Clone + Send + Sync,
{
    debug!("starting iterative auth check");
    trace!(
        list = ?events_to_check,
        "events to check"
    );

    // Resolve every event ID to its full event up-front; a missing event is a
    // hard error here (try_buffer_unordered short-circuits on the first Err).
    let events_to_check: Vec<_> = stream::iter(events_to_check)
        .map(Result::Ok)
        .map_ok(|event_id| {
            fetch_event(event_id.clone()).map(move |result| {
                result.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}")))
            })
        })
        .try_buffer_unordered(parallel_fetches)
        .try_collect()
        .boxed()
        .await?;

    // Union of all auth-event IDs referenced by the events being checked.
    let auth_event_ids: HashSet<E::Id> = events_to_check
        .iter()
        .flat_map(|event: &E| event.auth_events().map(Clone::clone))
        .collect();

    // Prefetch those auth events; ones that cannot be fetched are simply
    // absent from the map and produce the "missing auth event" warning below.
    let auth_events: HashMap<E::Id, E> = stream::iter(auth_event_ids.into_iter())
        .map(fetch_event)
        .buffer_unordered(parallel_fetches)
        .filter_map(future::ready)
        .map(|auth_event| (auth_event.event_id().clone(), auth_event))
        .collect()
        .boxed()
        .await;

    let auth_events = &auth_events;
    let mut resolved_state = unconflicted_state;
    // Sequentially auth-check each event against the state resolved so far.
    for event in &events_to_check {
        let event_id = event.event_id();
        let state_key = event
            .state_key()
            .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?;

        let auth_types = auth_types_for_event(
            event.event_type(),
            event.sender(),
            Some(state_key),
            event.content(),
        )?;

        // Start from the event's own auth_events...
        let mut auth_state = StateMap::new();
        for aid in event.auth_events() {
            if let Some(ev) = auth_events.get(aid.borrow()) {
                //TODO: synapse checks "rejected_reason" which is most likely related to
                // soft-failing
                auth_state.insert(
                    ev.event_type()
                        .with_state_key(ev.state_key().ok_or_else(|| {
                            Error::InvalidPdu("State event had no state key".to_owned())
                        })?),
                    ev.clone(),
                );
            } else {
                warn!(event_id = aid.borrow().as_str(), "missing auth event");
            }
        }

        // ...then overlay the currently-resolved state for each auth type,
        // preferring the prefetched map and falling back to fetch_event.
        stream::iter(
            auth_types
                .iter()
                .filter_map(|key| Some((key, resolved_state.get(key)?))),
        )
        .filter_map(|(key, ev_id)| async move {
            if let Some(event) = auth_events.get(ev_id.borrow()) {
                Some((key, event.clone()))
            } else {
                Some((key, fetch_event(ev_id.clone()).await?))
            }
        })
        .for_each(|(key, event)| {
            //TODO: synapse checks "rejected_reason" is None here
            auth_state.insert(key.to_owned(), event);
            future::ready(())
        })
        .await;

        debug!("event to check {:?}", event.event_id());

        // The key for this is (eventType + a state_key of the signed token not sender)
        // so search for it
        let current_third_party = auth_state.iter().find_map(|(_, pdu)| {
            (*pdu.event_type() == TimelineEventType::RoomThirdPartyInvite).then_some(pdu)
        });

        let fetch_state = |ty: &StateEventType, key: &str| {
            future::ready(auth_state.get(&ty.with_state_key(key)))
        };

        if auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await? {
            // add event to resolved state map
            resolved_state.insert(event.event_type().with_state_key(state_key), event_id.clone());
        } else {
            // synapse passes here on AuthError. We do not add this event to resolved_state.
            warn!("event {event_id} failed the authentication check");
        }
    }

    Ok(resolved_state)
}

/// Returns the sorted `to_sort` list of `EventId`s based on a mainline sort
/// using the depth of `resolved_power_level`, the server timestamp, and the
/// eventId.
///
/// The depth of the given event is calculated based on the depth of its
/// closest "parent" power_level event. If there have been two power events,
/// the events after the most recent one are depth 0; the events before (with
/// the first power level as a parent) will be marked as depth 1. Depth 1 is
/// "older" than depth 0.
async fn mainline_sort<E, F, Fut>(
    to_sort: &[E::Id],
    resolved_power_level: Option<E::Id>,
    fetch_event: &F,
    parallel_fetches: usize,
) -> Result<Vec<E::Id>>
where
    F: Fn(E::Id) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Clone + Send + Sync,
    E::Id: Borrow<EventId> + Clone + Send + Sync,
{
    debug!("mainline sort of events");

    // There are no EventId's to sort, bail.
    if to_sort.is_empty() {
        return Ok(vec![]);
    }

    // Walk the power-level auth chain backwards from the resolved power event
    // to build the mainline (most recent first; reversed below).
    let mut mainline = vec![];
    let mut pl = resolved_power_level;
    while let Some(p) = pl {
        mainline.push(p.clone());

        let event = fetch_event(p.clone())
            .await
            .ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?;
        pl = None;
        for aid in event.auth_events() {
            let ev = fetch_event(aid.clone())
                .await
                .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;
            if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") {
                pl = Some(aid.to_owned());
                break;
            }
        }
    }

    // Map each mainline event ID to its depth; index 0 is the oldest.
    let mainline_map = mainline
        .iter()
        .rev()
        .enumerate()
        .map(|(idx, eid)| ((*eid).clone(), idx))
        .collect::<HashMap<_, _>>();

    // Compute (mainline depth, origin_server_ts, event_id) for each event.
    // NOTE(review): `.map(Result::ok)` + `filter_map` silently drops any
    // event whose mainline-depth lookup failed (e.g. a missing auth event),
    // so such events vanish from the sorted output — confirm this
    // best-effort behavior is intended rather than propagating the error.
    let order_map = stream::iter(to_sort.iter())
        .map(|ev_id| {
            fetch_event(ev_id.clone()).map(move |event| event.map(|event| (event, ev_id)))
        })
        .buffer_unordered(parallel_fetches)
        .filter_map(future::ready)
        .map(|(event, ev_id)| {
            get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event)
                .map_ok(move |depth| (depth, event, ev_id))
                .map(Result::ok)
        })
        .buffer_unordered(parallel_fetches)
        .filter_map(future::ready)
        .fold(HashMap::new(), |mut order_map, (depth, event, ev_id)| {
            order_map.insert(ev_id, (depth, event.origin_server_ts(), ev_id));
            future::ready(order_map)
        })
        .boxed()
        .await;

    // Sort the event_ids by their depth, timestamp and EventId
    // unwrap is OK order map and sort_event_ids are from to_sort (the same Vec)
    let mut sort_event_ids = order_map.keys().map(|&k| k.clone()).collect::<Vec<_>>();
    sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]);

    Ok(sort_event_ids)
}
+ +/// Get the mainline depth from the `mainline_map` or finds a power_level event +/// that has an associated mainline depth. +async fn get_mainline_depth( + mut event: Option, + mainline_map: &HashMap, + fetch_event: &F, +) -> Result +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send, +{ + while let Some(sort_ev) = event { + debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); + let id = sort_ev.event_id(); + if let Some(depth) = mainline_map.get(id.borrow()) { + return Ok(*depth); + } + + event = None; + for aid in sort_ev.auth_events() { + let aev = fetch_event(aid.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") { + event = Some(aev); + break; + } + } + } + // Did not find a power level event so we default to zero + Ok(0) +} + +async fn add_event_and_auth_chain_to_graph( + graph: &mut HashMap>, + event_id: E::Id, + auth_diff: &HashSet, + fetch_event: &F, +) where + F: Fn(E::Id) -> Fut, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Clone + Send, +{ + let mut state = vec![event_id]; + while let Some(eid) = state.pop() { + graph.entry(eid.clone()).or_default(); + let event = fetch_event(eid.clone()).await; + let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + + // Prefer the store to event as the store filters dedups the events + for aid in auth_events { + if auth_diff.contains(aid.borrow()) { + if !graph.contains_key(aid.borrow()) { + state.push(aid.to_owned()); + } + + // We just inserted this at the start of the while loop + graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned()); + } + } + } +} + +async fn is_power_event_id(event_id: &E::Id, fetch: &F) -> bool +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send, +{ + match fetch(event_id.clone()).await.as_ref() { + | Some(state) => 
is_power_event(state), + | _ => false, + } +} + +fn is_type_and_key(ev: impl Event, ev_type: &TimelineEventType, state_key: &str) -> bool { + ev.event_type() == ev_type && ev.state_key() == Some(state_key) +} + +fn is_power_event(event: impl Event) -> bool { + match event.event_type() { + | TimelineEventType::RoomPowerLevels + | TimelineEventType::RoomJoinRules + | TimelineEventType::RoomCreate => event.state_key() == Some(""), + | TimelineEventType::RoomMember => { + if let Ok(content) = from_json_str::(event.content().get()) { + if [MembershipState::Leave, MembershipState::Ban].contains(&content.membership) { + return Some(event.sender().as_str()) != event.state_key(); + } + } + + false + }, + | _ => false, + } +} + +/// Convenience trait for adding event type plus state key to state maps. +pub trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for StateEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self, state_key.into()) + } +} + +impl EventTypeExt for TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +impl EventTypeExt for &T +where + T: EventTypeExt + Clone, +{ + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + self.to_owned().with_state_key(state_key) + } +} + +#[cfg(test)] +mod tests { + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + }; + + use maplit::{hashmap, hashset}; + use rand::seq::SliceRandom; + use ruma::{ + events::{ + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, + StateEventType, TimelineEventType, + }, + int, uint, + }; + use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId}; + use serde_json::{json, value::to_raw_value as to_raw_json_value}; + use tracing::debug; + + use crate::{ + is_power_event, + room_version::RoomVersion, + test_utils::{ + alice, 
bob, charlie, do_check, ella, event_id, member_content_ban, + member_content_join, room_id, to_init_pdu_event, to_pdu_event, zara, PduEvent, + TestStore, INITIAL_EVENTS, + }, + Event, EventTypeExt, StateMap, + }; + + async fn test_event_sort() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let event_map = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let auth_chain: HashSet = HashSet::new(); + + let power_events = event_map + .values() + .filter(|&pdu| is_power_event(&**pdu)) + .map(|pdu| pdu.event_id.clone()) + .collect::>(); + + let fetcher = |id| ready(events.get(&id).cloned()); + let sorted_power_events = + crate::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + .await + .unwrap(); + + let resolved_power = crate::iterative_auth_check( + &RoomVersion::V6, + sorted_power_events.iter(), + HashMap::new(), // unconflicted events + &fetcher, + 1, + ) + .await + .expect("iterative auth check failed on resolved events"); + + // don't remove any events so we know it sorts them all correctly + let mut events_to_sort = events.keys().cloned().collect::>(); + + events_to_sort.shuffle(&mut rand::thread_rng()); + + let power_level = resolved_power + .get(&(StateEventType::RoomPowerLevels, "".to_owned())) + .cloned(); + + let sorted_event_ids = crate::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + .await + .unwrap(); + + assert_eq!( + vec![ + "$CREATE:foo", + "$IMA:foo", + "$IPOWER:foo", + "$IJR:foo", + "$IMB:foo", + "$IMC:foo", + "$START:foo", + "$END:foo" + ], + sorted_event_ids + .iter() + .map(|id| id.to_string()) + .collect::>() + ); + } + + #[tokio::test] + async fn test_sort() { + for _ in 0..20 { + // since we shuffle the eventIds before we sort them introducing randomness + // seems like we should test this a few times + 
test_event_sort().await; + } + } + + #[tokio::test] + async fn ban_vs_power_level() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "MA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + ), + to_init_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_ban(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + ]; + + let edges = vec![vec!["END", "MB", "MA", "PA", "START"], vec!["END", "PA", "PB"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA", "MA", "MB"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_basic() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA1", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA2", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + 
bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T3", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + ]; + + let edges = + vec![vec!["END", "PA2", "T2", "PA1", "T1", "START"], vec!["END", "T3", "PB", "PA1"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA2", "T2"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_reset() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_ban(), + ), + ]; + + let edges = vec![vec!["END", "MB", "T2", "PA", "T1", "START"], vec!["END", "T1"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["T1", "MB", "PA"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn join_rule_evasion() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "JR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + 
to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Private)).unwrap(), + ), + to_init_pdu_event( + "ME", + ella(), + TimelineEventType::RoomMember, + Some(ella().to_string().as_str()), + member_content_join(), + ), + ]; + + let edges = vec![vec!["END", "JR", "START"], vec!["END", "ME", "START"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec![event_id("JR")]; + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn offtopic_power_level() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value( + &json!({ "users": { alice(): 100, bob(): 50, charlie(): 50 } }), + ) + .unwrap(), + ), + to_init_pdu_event( + "PC", + charlie(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50, charlie(): 0 } })) + .unwrap(), + ), + ]; + + let edges = vec![vec!["END", "PC", "PB", "PA", "START"], vec!["END", "PA"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PC"].into_iter().map(event_id).collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_setting() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA1", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ 
"users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA2", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T3", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "MZ1", + zara(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "T4", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + ]; + + let edges = vec![vec!["END", "T4", "MZ1", "PA2", "T2", "PA1", "T1", "START"], vec![ + "END", "MZ1", "T3", "PB", "PA1", + ]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["T4", "PA2"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn test_event_map_none() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let mut store = TestStore::(hashmap! 
{}); + + // build up the DAG + let (state_at_bob, state_at_charlie, expected) = store.set_up(); + + let ev_map = store.0.clone(); + let fetcher = |id| ready(ev_map.get(&id).cloned()); + + let exists = |id: ::Id| ready(ev_map.get(&*id).is_some()); + + let state_sets = [state_at_bob, state_at_charlie]; + let auth_chain: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let resolved = match crate::resolve( + &RoomVersionId::V2, + &state_sets, + &auth_chain, + &fetcher, + &exists, + 1, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + + assert_eq!(expected, resolved); + } + + #[tokio::test] + async fn test_lexicographical_sort() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let graph = hashmap! { + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + + let res = crate::lexicographical_topological_sort(&graph, &|_id| async { + Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }) + .await + .unwrap(); + + assert_eq!( + vec!["o", "l", "n", "m", "p"], + res.iter() + .map(ToString::to_string) + .map(|s| s.replace('$', "").replace(":foo", "")) + .collect::>() + ); + } + + #[tokio::test] + async fn ban_with_auth_chains() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let ban = BAN_STATE_SET(); + + let edges = vec![vec!["END", "MB", "PA", "START"], vec!["END", "IME", "MB"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA", "MB"] + .into_iter() + .map(event_id) + .collect::>(); + + 
do_check(&ban.values().cloned().collect::>(), edges, expected_state_ids).await; + } + + #[tokio::test] + async fn ban_with_auth_chains2() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let init = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + let mut inner = init.clone(); + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) + .collect::>(); + + let ev_map = &store.0; + let state_sets = [state_set_a, state_set_b]; + let auth_chain: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); + let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); + let resolved = match crate::resolve( + &RoomVersionId::V6, + &state_sets, + &auth_chain, + &fetcher, + &exists, + 1, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + + debug!( + resolved = ?resolved + .iter() + .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) + .collect::>(), + "resolved state", + ); + + let 
expected = [ + "$CREATE:foo", + "$IJR:foo", + "$PA:foo", + "$IMA:foo", + "$IMB:foo", + "$IMC:foo", + "$MB:foo", + ]; + + for id in expected.iter().map(|i| event_id(i)) { + // make sure our resolved events are equal to the expected list + assert!(resolved.values().any(|eid| eid == &id) || init.contains_key(&id), "{id}"); + } + assert_eq!(expected.len(), resolved.len()); + } + + #[tokio::test] + async fn join_rule_with_auth_chain() { + let join_rule = JOIN_RULE(); + + let edges = vec![vec!["END", "JR", "START"], vec!["END", "IMZ", "START"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["JR"].into_iter().map(event_id).collect::>(); + + do_check(&join_rule.values().cloned().collect::>(), edges, expected_state_ids) + .await; + } + + #[allow(non_snake_case)] + fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id.clone(), ev)) + .collect() + } + + #[allow(non_snake_case)] + fn JOIN_RULE() -> HashMap> { + vec![ + to_pdu_event( + "JR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&json!({ "join_rule": "invite" })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + 
&["START"], + ), + to_pdu_event( + "IMZ", + zara(), + TimelineEventType::RoomPowerLevels, + Some(zara().as_str()), + member_content_join(), + &["CREATE", "JR", "IPOWER"], + &["START"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id.clone(), ev)) + .collect() + } + + macro_rules! state_set { + ($($kind:expr => $key:expr => $id:expr),* $(,)?) => {{ + #[allow(unused_mut)] + let mut x = StateMap::new(); + $( + x.insert(($kind, $key.to_owned()), $id); + )* + x + }}; + } + + #[test] + fn separate_unique_conflicted() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@b:hs1" => 1], + state_set![StateEventType::RoomMember => "@c:hs1" => 2], + ] + .iter(), + ); + + assert_eq!(unconflicted, StateMap::new()); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => vec![0], + StateEventType::RoomMember => "@b:hs1" => vec![1], + StateEventType::RoomMember => "@c:hs1" => vec![2], + ],); + } + + #[test] + fn separate_conflicted() { + let (unconflicted, mut conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 1], + state_set![StateEventType::RoomMember => "@a:hs1" => 2], + ] + .iter(), + ); + + // HashMap iteration order is random, so sort this before asserting on it + for v in conflicted.values_mut() { + v.sort_unstable(); + } + + assert_eq!(unconflicted, StateMap::new()); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => vec![0, 1, 2], + ],); + } + + #[test] + fn separate_unconflicted() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + ] + .iter(), + ); + + assert_eq!(unconflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => 
0, + ],); + assert_eq!(conflicted, StateMap::new()); + } + + #[test] + fn separate_mixed() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + StateEventType::RoomMember => "@b:hs1" => 1, + ], + state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + StateEventType::RoomMember => "@c:hs1" => 2, + ], + ] + .iter(), + ); + + assert_eq!(unconflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + ],); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@b:hs1" => vec![1], + StateEventType::RoomMember => "@c:hs1" => vec![2], + ],); + } +} diff --git a/src/core/state_res/outcomes.txt b/src/core/state_res/outcomes.txt new file mode 100644 index 00000000..0fa1c734 --- /dev/null +++ b/src/core/state_res/outcomes.txt @@ -0,0 +1,104 @@ +11/29/2020 BRANCH: timo-spec-comp REV: d2a85669cc6056679ce6ca0fde4658a879ad2b08 +lexicographical topological sort + time: [1.7123 us 1.7157 us 1.7199 us] + change: [-1.7584% -1.5433% -1.3205%] (p = 0.00 < 0.05) + Performance has improved. +Found 8 outliers among 100 measurements (8.00%) + 2 (2.00%) low mild + 5 (5.00%) high mild + 1 (1.00%) high severe + +resolve state of 5 events one fork + time: [10.981 us 10.998 us 11.020 us] +Found 3 outliers among 100 measurements (3.00%) + 3 (3.00%) high mild + +resolve state of 10 events 3 conflicting + time: [26.858 us 26.946 us 27.037 us] + +11/29/2020 BRANCH: event-trait REV: f0eb1310efd49d722979f57f20bd1ac3592b0479 +lexicographical topological sort + time: [1.7686 us 1.7738 us 1.7810 us] + change: [-3.2752% -2.4634% -1.7635%] (p = 0.00 < 0.05) + Performance has improved. +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe + +resolve state of 5 events one fork + time: [10.643 us 10.656 us 10.669 us] + change: [-4.9990% -3.8078% -2.8319%] (p = 0.00 < 0.05) + Performance has improved. 
+Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe + +resolve state of 10 events 3 conflicting + time: [29.149 us 29.252 us 29.375 us] + change: [-0.8433% -0.3270% +0.2656%] (p = 0.25 > 0.05) + No change in performance detected. +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high mild + +4/26/2020 BRANCH: fix-test-serde REV: +lexicographical topological sort + time: [1.6793 us 1.6823 us 1.6857 us] +Found 9 outliers among 100 measurements (9.00%) + 1 (1.00%) low mild + 4 (4.00%) high mild + 4 (4.00%) high severe + +resolve state of 5 events one fork + time: [9.9993 us 10.062 us 10.159 us] +Found 9 outliers among 100 measurements (9.00%) + 7 (7.00%) high mild + 2 (2.00%) high severe + +resolve state of 10 events 3 conflicting + time: [26.004 us 26.092 us 26.195 us] +Found 16 outliers among 100 measurements (16.00%) + 11 (11.00%) high mild + 5 (5.00%) high severe + +6/30/2021 BRANCH: state-closure REV: 174c3e2a72232ad75b3fb14b3551f5f746f4fe84 +lexicographical topological sort + time: [1.5496 us 1.5536 us 1.5586 us] +Found 9 outliers among 100 measurements (9.00%) + 1 (1.00%) low mild + 1 (1.00%) high mild + 7 (7.00%) high severe + +resolve state of 5 events one fork + time: [10.319 us 10.333 us 10.347 us] +Found 2 outliers among 100 measurements (2.00%) + 2 (2.00%) high severe + +resolve state of 10 events 3 conflicting + time: [25.770 us 25.805 us 25.839 us] +Found 7 outliers among 100 measurements (7.00%) + 5 (5.00%) high mild + 2 (2.00%) high severe + +7/20/2021 BRANCH stateres-result REV: +This marks the switch to HashSet/Map +lexicographical topological sort + time: [1.8122 us 1.8177 us 1.8233 us] + change: [+15.205% +15.919% +16.502%] (p = 0.00 < 0.05) + Performance has regressed. 
+Found 7 outliers among 100 measurements (7.00%) + 5 (5.00%) high mild + 2 (2.00%) high severe + +resolve state of 5 events one fork + time: [11.966 us 12.010 us 12.059 us] + change: [+16.089% +16.730% +17.469%] (p = 0.00 < 0.05) + Performance has regressed. +Found 7 outliers among 100 measurements (7.00%) + 3 (3.00%) high mild + 4 (4.00%) high severe + +resolve state of 10 events 3 conflicting + time: [29.092 us 29.201 us 29.311 us] + change: [+12.447% +12.847% +13.280%] (p = 0.00 < 0.05) + Performance has regressed. +Found 9 outliers among 100 measurements (9.00%) + 6 (6.00%) high mild + 3 (3.00%) high severe diff --git a/src/core/state_res/power_levels.rs b/src/core/state_res/power_levels.rs new file mode 100644 index 00000000..e1768574 --- /dev/null +++ b/src/core/state_res/power_levels.rs @@ -0,0 +1,256 @@ +use std::collections::BTreeMap; + +use ruma::{ + events::{room::power_levels::RoomPowerLevelsEventContent, TimelineEventType}, + power_levels::{default_power_level, NotificationPowerLevels}, + serde::{ + deserialize_v1_powerlevel, vec_deserialize_int_powerlevel_values, + vec_deserialize_v1_powerlevel_values, + }, + Int, OwnedUserId, UserId, +}; +use serde::Deserialize; +use serde_json::{from_str as from_json_str, Error}; +use tracing::error; + +use super::{Result, RoomVersion}; + +#[derive(Deserialize)] +struct IntRoomPowerLevelsEventContent { + #[serde(default = "default_power_level")] + ban: Int, + + #[serde(default)] + events: BTreeMap, + + #[serde(default)] + events_default: Int, + + #[serde(default)] + invite: Int, + + #[serde(default = "default_power_level")] + kick: Int, + + #[serde(default = "default_power_level")] + redact: Int, + + #[serde(default = "default_power_level")] + state_default: Int, + + #[serde(default)] + users: BTreeMap, + + #[serde(default)] + users_default: Int, + + #[serde(default)] + notifications: IntNotificationPowerLevels, +} + +impl From for RoomPowerLevelsEventContent { + fn from(int_pl: IntRoomPowerLevelsEventContent) -> 
Self { + let IntRoomPowerLevelsEventContent { + ban, + events, + events_default, + invite, + kick, + redact, + state_default, + users, + users_default, + notifications, + } = int_pl; + + let mut pl = Self::new(); + pl.ban = ban; + pl.events = events; + pl.events_default = events_default; + pl.invite = invite; + pl.kick = kick; + pl.redact = redact; + pl.state_default = state_default; + pl.users = users; + pl.users_default = users_default; + pl.notifications = notifications.into(); + + pl + } +} + +#[derive(Deserialize)] +struct IntNotificationPowerLevels { + #[serde(default = "default_power_level")] + room: Int, +} + +impl Default for IntNotificationPowerLevels { + fn default() -> Self { Self { room: default_power_level() } } +} + +impl From for NotificationPowerLevels { + fn from(int_notif: IntNotificationPowerLevels) -> Self { + let mut notif = Self::new(); + notif.room = int_notif.room; + + notif + } +} + +#[inline] +pub(crate) fn deserialize_power_levels( + content: &str, + room_version: &RoomVersion, +) -> Option { + if room_version.integer_power_levels { + deserialize_integer_power_levels(content) + } else { + deserialize_legacy_power_levels(content) + } +} + +fn deserialize_integer_power_levels(content: &str) -> Option { + match from_json_str::(content) { + | Ok(content) => Some(content.into()), + | Err(_) => { + error!("m.room.power_levels event is not valid with integer values"); + None + }, + } +} + +fn deserialize_legacy_power_levels(content: &str) -> Option { + match from_json_str(content) { + | Ok(content) => Some(content), + | Err(_) => { + error!( + "m.room.power_levels event is not valid with integer or string integer values" + ); + None + }, + } +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentFields { + #[serde(default, deserialize_with = "vec_deserialize_v1_powerlevel_values")] + pub(crate) users: Vec<(OwnedUserId, Int)>, + + #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) users_default: Int, +} + 
+impl PowerLevelsContentFields { + pub(crate) fn get_user_power(&self, user_id: &UserId) -> Option<&Int> { + let comparator = |item: &(OwnedUserId, Int)| { + let item: &UserId = &item.0; + item.cmp(user_id) + }; + + self.users + .binary_search_by(comparator) + .ok() + .and_then(|idx| self.users.get(idx).map(|item| &item.1)) + } +} + +#[derive(Deserialize)] +struct IntPowerLevelsContentFields { + #[serde(default, deserialize_with = "vec_deserialize_int_powerlevel_values")] + users: Vec<(OwnedUserId, Int)>, + + #[serde(default)] + users_default: Int, +} + +impl From for PowerLevelsContentFields { + fn from(pl: IntPowerLevelsContentFields) -> Self { + let IntPowerLevelsContentFields { users, users_default } = pl; + Self { users, users_default } + } +} + +#[inline] +pub(crate) fn deserialize_power_levels_content_fields( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + deserialize_integer_power_levels_content_fields(content) + } else { + deserialize_legacy_power_levels_content_fields(content) + } +} + +fn deserialize_integer_power_levels_content_fields( + content: &str, +) -> Result { + from_json_str::(content).map(Into::into) +} + +fn deserialize_legacy_power_levels_content_fields( + content: &str, +) -> Result { + from_json_str(content) +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentInvite { + #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) invite: Int, +} + +#[derive(Deserialize)] +struct IntPowerLevelsContentInvite { + #[serde(default)] + invite: Int, +} + +impl From for PowerLevelsContentInvite { + fn from(pl: IntPowerLevelsContentInvite) -> Self { + let IntPowerLevelsContentInvite { invite } = pl; + Self { invite } + } +} + +pub(crate) fn deserialize_power_levels_content_invite( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + from_json_str::(content).map(Into::into) + } else { + from_json_str(content) + 
} +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentRedact { + #[serde(default = "default_power_level", deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) redact: Int, +} + +#[derive(Deserialize)] +pub(crate) struct IntPowerLevelsContentRedact { + #[serde(default = "default_power_level")] + redact: Int, +} + +impl From for PowerLevelsContentRedact { + fn from(pl: IntPowerLevelsContentRedact) -> Self { + let IntPowerLevelsContentRedact { redact } = pl; + Self { redact } + } +} + +pub(crate) fn deserialize_power_levels_content_redact( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + from_json_str::(content).map(Into::into) + } else { + from_json_str(content) + } +} diff --git a/src/core/state_res/room_version.rs b/src/core/state_res/room_version.rs new file mode 100644 index 00000000..e1b0afe1 --- /dev/null +++ b/src/core/state_res/room_version.rs @@ -0,0 +1,149 @@ +use ruma::RoomVersionId; + +use super::{Error, Result}; + +#[derive(Debug)] +#[allow(clippy::exhaustive_enums)] +pub enum RoomDisposition { + /// A room version that has a stable specification. + Stable, + /// A room version that is not yet fully specified. + Unstable, +} + +#[derive(Debug)] +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub enum EventFormatVersion { + /// $id:server event id format + V1, + /// MSC1659-style $hash event id format: introduced for room v3 + V2, + /// MSC1884-style $hash format: introduced for room v4 + V3, +} + +#[derive(Debug)] +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub enum StateResolutionVersion { + /// State resolution for rooms at version 1. + V1, + /// State resolution for room at version 2 or later. + V2, +} + +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub struct RoomVersion { + /// The stability of this room. + pub disposition: RoomDisposition, + /// The format of the EventId. 
+ pub event_format: EventFormatVersion, + /// Which state resolution algorithm is used. + pub state_res: StateResolutionVersion, + // FIXME: not sure what this one means? + pub enforce_key_validity: bool, + + /// `m.room.aliases` had special auth rules and redaction rules + /// before room version 6. + /// + /// before MSC2261/MSC2432, + pub special_case_aliases_auth: bool, + /// Strictly enforce canonical json, do not allow: + /// * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] + /// * Floats + /// * NaN, Infinity, -Infinity + pub strict_canonicaljson: bool, + /// Verify notifications key while checking m.room.power_levels. + /// + /// bool: MSC2209: Check 'notifications' + pub limit_notifications_power_levels: bool, + /// Extra rules when verifying redaction events. + pub extra_redaction_checks: bool, + /// Allow knocking in event authentication. + /// + /// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/) for more information. + pub allow_knocking: bool, + /// Adds support for the restricted join rule. + /// + /// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289) for more information. + pub restricted_join_rules: bool, + /// Adds support for the knock_restricted join rule. + /// + /// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) for more information. + pub knock_restricted_join_rule: bool, + /// Enforces integer power levels. + /// + /// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667) for more information. + pub integer_power_levels: bool, + /// Determine the room creator using the `m.room.create` event's `sender`, + /// instead of the event content's `creator` field. + /// + /// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175) for more information. 
+ pub use_room_create_sender: bool, +} + +impl RoomVersion { + pub const V1: Self = Self { + disposition: RoomDisposition::Stable, + event_format: EventFormatVersion::V1, + state_res: StateResolutionVersion::V1, + enforce_key_validity: false, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + extra_redaction_checks: true, + allow_knocking: false, + restricted_join_rules: false, + knock_restricted_join_rule: false, + integer_power_levels: false, + use_room_create_sender: false, + }; + pub const V10: Self = Self { + knock_restricted_join_rule: true, + integer_power_levels: true, + ..Self::V9 + }; + pub const V11: Self = Self { + use_room_create_sender: true, + ..Self::V10 + }; + pub const V2: Self = Self { + state_res: StateResolutionVersion::V2, + ..Self::V1 + }; + pub const V3: Self = Self { + event_format: EventFormatVersion::V2, + extra_redaction_checks: false, + ..Self::V2 + }; + pub const V4: Self = Self { + event_format: EventFormatVersion::V3, + ..Self::V3 + }; + pub const V5: Self = Self { enforce_key_validity: true, ..Self::V4 }; + pub const V6: Self = Self { + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + ..Self::V5 + }; + pub const V7: Self = Self { allow_knocking: true, ..Self::V6 }; + pub const V8: Self = Self { restricted_join_rules: true, ..Self::V7 }; + pub const V9: Self = Self::V8; + + pub fn new(version: &RoomVersionId) -> Result { + Ok(match version { + | RoomVersionId::V1 => Self::V1, + | RoomVersionId::V2 => Self::V2, + | RoomVersionId::V3 => Self::V3, + | RoomVersionId::V4 => Self::V4, + | RoomVersionId::V5 => Self::V5, + | RoomVersionId::V6 => Self::V6, + | RoomVersionId::V7 => Self::V7, + | RoomVersionId::V8 => Self::V8, + | RoomVersionId::V9 => Self::V9, + | RoomVersionId::V10 => Self::V10, + | RoomVersionId::V11 => Self::V11, + | ver => return Err(Error::Unsupported(format!("found version `{ver}`"))), + }) + } +} diff 
--git a/src/core/state_res/state_event.rs b/src/core/state_res/state_event.rs new file mode 100644 index 00000000..2c038cfe --- /dev/null +++ b/src/core/state_res/state_event.rs @@ -0,0 +1,102 @@ +use std::{ + borrow::Borrow, + fmt::{Debug, Display}, + hash::Hash, + sync::Arc, +}; + +use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +use serde_json::value::RawValue as RawJsonValue; + +/// Abstraction of a PDU so users can have their own PDU types. +pub trait Event { + type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow; + + /// The `EventId` of this event. + fn event_id(&self) -> &Self::Id; + + /// The `RoomId` of this event. + fn room_id(&self) -> &RoomId; + + /// The `UserId` of this event. + fn sender(&self) -> &UserId; + + /// The time of creation on the originating server. + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch; + + /// The event type. + fn event_type(&self) -> &TimelineEventType; + + /// The event's content. + fn content(&self) -> &RawJsonValue; + + /// The state key for this event. + fn state_key(&self) -> Option<&str>; + + /// The events before this event. + // Requires GATs to avoid boxing (and TAIT for making it convenient). + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// All the authenticating events for this event. + // Requires GATs to avoid boxing (and TAIT for making it convenient). + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// If this event is a redaction event this is the event it redacts. 
+ fn redacts(&self) -> Option<&Self::Id>; +} + +impl Event for &T { + type Id = T::Id; + + fn event_id(&self) -> &Self::Id { (*self).event_id() } + + fn room_id(&self) -> &RoomId { (*self).room_id() } + + fn sender(&self) -> &UserId { (*self).sender() } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (*self).origin_server_ts() } + + fn event_type(&self) -> &TimelineEventType { (*self).event_type() } + + fn content(&self) -> &RawJsonValue { (*self).content() } + + fn state_key(&self) -> Option<&str> { (*self).state_key() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (*self).prev_events() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (*self).auth_events() + } + + fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } +} + +impl Event for Arc { + type Id = T::Id; + + fn event_id(&self) -> &Self::Id { (**self).event_id() } + + fn room_id(&self) -> &RoomId { (**self).room_id() } + + fn sender(&self) -> &UserId { (**self).sender() } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (**self).origin_server_ts() } + + fn event_type(&self) -> &TimelineEventType { (**self).event_type() } + + fn content(&self) -> &RawJsonValue { (**self).content() } + + fn state_key(&self) -> Option<&str> { (**self).state_key() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (**self).prev_events() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (**self).auth_events() + } + + fn redacts(&self) -> Option<&Self::Id> { (**self).redacts() } +} diff --git a/src/core/state_res/state_res_bench.rs b/src/core/state_res/state_res_bench.rs new file mode 100644 index 00000000..a2bd2c23 --- /dev/null +++ b/src/core/state_res/state_res_bench.rs @@ -0,0 +1,648 @@ +// Because of criterion `cargo bench` works, +// but if you use `cargo bench -- --save-baseline ` +// or pass any other args to it, it fails with the error +// `cargo bench unknown option --save-baseline`. 
+// To pass args to criterion, use this form +// `cargo bench --bench -- --save-baseline `. + +#![allow(clippy::exhaustive_structs)] + +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::{ + atomic::{AtomicU64, Ordering::SeqCst}, + Arc, + }, +}; + +use criterion::{criterion_group, criterion_main, Criterion}; +use event::PduEvent; +use futures::{future, future::ready}; +use ruma::{int, uint}; +use maplit::{btreemap, hashmap, hashset}; +use ruma::{ + room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, + Signatures, UserId, +}; +use ruma::events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + StateEventType, TimelineEventType, +}; +use conduwuit::state_res::{self as state_res, Error, Event, Result, StateMap}; +use serde_json::{ + json, + value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, +}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +fn lexico_topo_sort(c: &mut Criterion) { + c.bench_function("lexicographical topological sort", |b| { + let graph = hashmap! { + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + b.iter(|| { + let _ = state_res::lexicographical_topological_sort(&graph, &|_| { + future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }); + }); + }); +} + +fn resolution_shallow_auth_chain(c: &mut Criterion) { + c.bench_function("resolve state of 5 events one fork", |b| { + let mut store = TestStore(hashmap! 
{}); + + // build up the DAG + let (state_at_bob, state_at_charlie, _) = store.set_up(); + + b.iter(|| async { + let ev_map = store.0.clone(); + let state_sets = [&state_at_bob, &state_at_charlie]; + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let auth_chain_sets = state_sets + .iter() + .map(|map| { + store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() + }) + .collect(); + + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + ) + .await + { + Ok(state) => state, + Err(e) => panic!("{e}"), + }; + }); + }); +} + +fn resolve_deeper_event_set(c: &mut Criterion) { + c.bench_function("resolve state of 10 events 3 conflicting", |b| { + let mut inner = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) + }) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) + }) + .collect::>(); + + b.iter(|| async { + let state_sets = [&state_set_a, &state_set_b]; + let auth_chain_sets = state_sets + .iter() + .map(|map| { + 
store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() + }) + .collect(); + + let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + ) + .await + { + Ok(state) => state, + Err(_) => panic!("resolution failed during benchmarking"), + }; + }); + }); +} + +criterion_group!( + benches, + lexico_topo_sort, + resolution_shallow_auth_chain, + resolve_deeper_event_set +); + +criterion_main!(benches); + +//*///////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION DETAILS AHEAD +// +/////////////////////////////////////////////////////////////////////*/ +struct TestStore(HashMap>); + +#[allow(unused)] +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + self.0 + .get(event_id) + .map(Arc::clone) + .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) + } + + /// Returns the events that correspond to the `event_ids` sorted in the same order. + fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + let mut events = vec![]; + for id in event_ids { + events.push(self.get_event(room_id, id)?); + } + Ok(events) + } + + /// Returns a Vec of the related auth events to the given `event`. + fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } + + /// Returns a vector representing the difference in auth chains of the given `events`. 
+ fn auth_chain_diff(&self, room_id: &RoomId, event_ids: Vec>) -> Result> { + let mut auth_chain_sets = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self.auth_event_ids(room_id, ids)?.into_iter().collect::>(); + auth_chain_sets.push(chain); + } + + if let Some(first) = auth_chain_sets.first().cloned() { + let common = auth_chain_sets + .iter() + .skip(1) + .fold(first, |a, b| a.intersection(b).cloned().collect::>()); + + Ok(auth_chain_sets + .into_iter() + .flatten() + .filter(|id| !common.contains(id.borrow())) + .collect()) + } else { + Ok(vec![]) + } + } +} + +impl TestStore { + #[allow(clippy::type_complexity)] + fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0.insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + 
member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0.insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0.insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + format!("${}:foo", id).try_into().unwrap() +} + +fn alice() -> &'static UserId { + user_id!("@alice:foo") +} + +fn bob() -> &'static UserId { + user_id!("@bob:foo") +} + +fn charlie() -> &'static UserId { + user_id!("@charlie:foo") +} + +fn ella() -> &'static UserId { + user_id!("@ella:foo") +} + +fn room_id() -> &'static RoomId { + room_id!("!test:foo") +} + +fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + 
content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + // We don't care if the addition happens in order just that it is atomic + // (each event has its own value) + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { id.to_owned() } else { format!("${}:foo", id) }; + let auth_events = auth_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); + let prev_events = prev_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: btreemap! {}, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new(String::new()), + signatures: Signatures::new(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + 
&["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +/// Convenience trait for adding event type plus state key to state maps. 
+trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for &TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +mod event { + use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; + use ruma_events::{pdu::Pdu, TimelineEventType}; + use ruma_state_res::Event; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { + &self.event_id + } + + fn room_id(&self) -> &RoomId { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.room_id, + Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.sender, + Pdu::RoomV3Pdu(ev) => &ev.sender, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.kind, + Pdu::RoomV3Pdu(ev) => &ev.kind, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.content, + Pdu::RoomV3Pdu(ev) => &ev.content, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + Pdu::RoomV3Pdu(ev) 
=> ev.state_key.as_deref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs new file mode 100644 index 00000000..7954b28d --- /dev/null +++ b/src/core/state_res/test_utils.rs @@ -0,0 +1,688 @@ +use std::{ + borrow::Borrow, + collections::{BTreeMap, HashMap, HashSet}, + sync::{ + atomic::{AtomicU64, Ordering::SeqCst}, + Arc, + }, +}; + +use futures_util::future::ready; +use js_int::{int, uint}; +use ruma_common::{ + event_id, room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, + RoomVersionId, ServerSignatures, UserId, +}; +use ruma_events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + TimelineEventType, +}; +use serde_json::{ + json, + value::{to_raw_value as to_raw_json_value, 
RawValue as RawJsonValue}, +}; +use tracing::info; + +pub(crate) use self::event::PduEvent; +use crate::{auth_types_for_event, Error, Event, EventTypeExt, Result, StateMap}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +pub(crate) async fn do_check( + events: &[Arc], + edges: Vec>, + expected_state_ids: Vec, +) { + // To activate logging use `RUST_LOG=debug cargo t` + + let init_events = INITIAL_EVENTS(); + + let mut store = TestStore( + init_events + .values() + .chain(events) + .map(|ev| (ev.event_id().to_owned(), ev.clone())) + .collect(), + ); + + // This will be lexi_topo_sorted for resolution + let mut graph = HashMap::new(); + // This is the same as in `resolve` event_id -> OriginalStateEvent + let mut fake_event_map = HashMap::new(); + + // Create the DB of events that led up to this point + // TODO maybe clean up some of these clones it is just tests but... + for ev in init_events.values().chain(events) { + graph.insert(ev.event_id().to_owned(), HashSet::new()); + fake_event_map.insert(ev.event_id().to_owned(), ev.clone()); + } + + for pair in INITIAL_EDGES().windows(2) { + if let [a, b] = &pair { + graph + .entry(a.to_owned()) + .or_insert_with(HashSet::new) + .insert(b.clone()); + } + } + + for edge_list in edges { + for pair in edge_list.windows(2) { + if let [a, b] = &pair { + graph + .entry(a.to_owned()) + .or_insert_with(HashSet::new) + .insert(b.clone()); + } + } + } + + // event_id -> PduEvent + let mut event_map: HashMap> = HashMap::new(); + // event_id -> StateMap + let mut state_at_event: HashMap> = HashMap::new(); + + // Resolve the current state and add it to the state_at_event map then continue + // on in "time" + for node in crate::lexicographical_topological_sort(&graph, &|_id| async { + Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }) + .await + .unwrap() + { + let fake_event = fake_event_map.get(&node).unwrap(); + let event_id = fake_event.event_id().to_owned(); + + let prev_events = graph.get(&node).unwrap(); + + 
let state_before: StateMap = if prev_events.is_empty() { + HashMap::new() + } else if prev_events.len() == 1 { + state_at_event + .get(prev_events.iter().next().unwrap()) + .unwrap() + .clone() + } else { + let state_sets = prev_events + .iter() + .filter_map(|k| state_at_event.get(k)) + .collect::>(); + + info!( + "{:#?}", + state_sets + .iter() + .map(|map| map + .iter() + .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) + .collect::>()) + .collect::>() + ); + + let auth_chain_sets: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let event_map = &event_map; + let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); + let exists = |id: ::Id| ready(event_map.get(&id).is_some()); + let resolved = crate::resolve( + &RoomVersionId::V6, + state_sets, + &auth_chain_sets, + &fetch, + &exists, + 1, + ) + .await; + + match resolved { + | Ok(state) => state, + | Err(e) => panic!("resolution for {node} failed: {e}"), + } + }; + + let mut state_after = state_before.clone(); + + let ty = fake_event.event_type(); + let key = fake_event.state_key().unwrap(); + state_after.insert(ty.with_state_key(key), event_id.to_owned()); + + let auth_types = auth_types_for_event( + fake_event.event_type(), + fake_event.sender(), + fake_event.state_key(), + fake_event.content(), + ) + .unwrap(); + + let mut auth_events = vec![]; + for key in auth_types { + if state_before.contains_key(&key) { + auth_events.push(state_before[&key].clone()); + } + } + + // TODO The event is just remade, adding the auth_events and prev_events here + // the `to_pdu_event` was split into `init` and the fn below, could be better + let e = fake_event; + let ev_id = e.event_id(); + let event = to_pdu_event( + e.event_id().as_str(), + e.sender(), + e.event_type().clone(), + e.state_key(), + e.content().to_owned(), + &auth_events, + &prev_events.iter().cloned().collect::>(), + ); + + // We have to update our 
store, an actual user of this lib would + // be giving us state from a DB. + store.0.insert(ev_id.to_owned(), event.clone()); + + state_at_event.insert(node, state_after); + event_map.insert(event_id.to_owned(), Arc::clone(store.0.get(ev_id).unwrap())); + } + + let mut expected_state = StateMap::new(); + for node in expected_state_ids { + let ev = event_map.get(&node).unwrap_or_else(|| { + panic!( + "{node} not found in {:?}", + event_map + .keys() + .map(ToString::to_string) + .collect::>() + ) + }); + + let key = ev.event_type().with_state_key(ev.state_key().unwrap()); + + expected_state.insert(key, node); + } + + let start_state = state_at_event.get(event_id!("$START:foo")).unwrap(); + + let end_state = state_at_event + .get(event_id!("$END:foo")) + .unwrap() + .iter() + .filter(|(k, v)| { + expected_state.contains_key(k) + || start_state.get(k) != Some(*v) + // Filter out the dummy messages events. + // These act as points in time where there should be a known state to + // test against. + && **k != ("m.room.message".into(), "dummy".to_owned()) + }) + .map(|(k, v)| (k.clone(), v.clone())) + .collect::>(); + + assert_eq!(expected_state, end_state); +} + +#[allow(clippy::exhaustive_structs)] +pub(crate) struct TestStore(pub(crate) HashMap>); + +impl TestStore { + pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result> { + self.0 + .get(event_id) + .cloned() + .ok_or_else(|| Error::NotFound(format!("{event_id} not found"))) + } + + /// Returns a Vec of the related auth events to the given `event`. 
+ pub(crate) fn auth_event_ids( + &self, + room_id: &RoomId, + event_ids: Vec, + ) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while let Some(ev_id) = stack.pop() { + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } +} + +// A StateStore implementation for testing +#[allow(clippy::type_complexity)] +impl TestStore { + pub(crate) fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0 + .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0 + .insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = 
to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +pub(crate) fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + + format!("${id}:foo").try_into().unwrap() +} + +pub(crate) fn alice() -> &'static UserId { user_id!("@alice:foo") } + +pub(crate) fn bob() -> &'static UserId { user_id!("@bob:foo") } + +pub(crate) fn charlie() -> &'static UserId { user_id!("@charlie:foo") } + +pub(crate) fn ella() -> &'static UserId { user_id!("@ella:foo") } + +pub(crate) fn zara() -> &'static UserId { user_id!("@zara:foo") } + +pub(crate) fn room_id() -> &'static RoomId { room_id!("!test:foo") } + +pub(crate) fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +pub(crate) fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +pub(crate) fn to_init_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, +) -> Arc { + let ts = 
SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${id}:foo") + }; + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: BTreeMap::new(), + auth_events: vec![], + prev_events: vec![], + depth: uint!(0), + hashes: EventHash::new("".to_owned()), + signatures: ServerSignatures::default(), + }), + }) +} + +pub(crate) fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${id}:foo") + }; + let auth_events = auth_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + let prev_events = prev_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: BTreeMap::new(), + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new("".to_owned()), + signatures: ServerSignatures::default(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() 
})).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomMessage, + Some("dummy"), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomMessage, + Some("dummy"), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap> { + vec![to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + )] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EDGES() -> Vec { + vec!["START", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] + .into_iter() + .map(event_id) + .collect::>() +} + +pub(crate) mod event { + use ruma_common::{MilliSecondsSinceUnixEpoch, 
OwnedEventId, RoomId, UserId}; + use ruma_events::{pdu::Pdu, TimelineEventType}; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + use crate::Event; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.room_id, + | Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.sender, + | Pdu::RoomV3Pdu(ev) => &ev.sender, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.kind, + | Pdu::RoomV3Pdu(ev) => &ev.kind, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.content, + | Pdu::RoomV3Pdu(ev) => &ev.content, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + #[allow(refining_impl_trait)] + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU 
version"), + } + } + + #[allow(refining_impl_trait)] + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + #[allow(clippy::exhaustive_structs)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index aea70739..5a38f7fe 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -3,12 +3,15 @@ use std::{ sync::Arc, }; -use conduwuit::{debug_warn, err, implement, PduEvent, Result}; +use conduwuit::{ + debug_warn, err, implement, + state_res::{self}, + PduEvent, Result, +}; use futures::{future, FutureExt}; use ruma::{ - int, - state_res::{self}, - uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, + int, uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, + UInt, }; use super::check_room_id; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index b7c38313..3cc15fc4 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -3,10 +3,12 @@ use std::{ sync::Arc, }; -use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; +use conduwuit::{ + debug, debug_info, err, 
implement, state_res, trace, warn, Err, Error, PduEvent, Result, +}; use futures::{future::ready, TryFutureExt}; use ruma::{ - api::client::error::ErrorKind, events::StateEventType, state_res, CanonicalJsonObject, + api::client::error::ErrorKind, events::StateEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, }; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 8bcbc48b..5960c734 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -19,12 +19,12 @@ use std::{ use conduwuit::{ utils::{MutexMap, TryFutureExtExt}, - Err, PduEvent, Result, Server, + Err, PduEvent, Result, RoomVersion, Server, }; use futures::TryFutureExt; use ruma::{ - events::room::create::RoomCreateEventContent, state_res::RoomVersion, OwnedEventId, - OwnedRoomId, RoomId, RoomVersionId, + events::room::create::RoomCreateEventContent, OwnedEventId, OwnedRoomId, RoomId, + RoomVersionId, }; use crate::{globals, rooms, sending, server_keys, Dep}; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index eb9ca01f..28011a1b 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,15 +5,14 @@ use std::{ }; use conduwuit::{ - err, implement, trace, + err, implement, + state_res::{self, StateMap}, + trace, utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, Error, Result, }; use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; -use ruma::{ - state_res::{self, StateMap}, - OwnedEventId, RoomId, RoomVersionId, -}; +use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::state_compressor::CompressedState; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 7bf3b8f8..843b2af9 100644 --- 
a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -8,10 +8,10 @@ use std::{ use conduwuit::{ debug, err, implement, trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, - PduEvent, Result, + PduEvent, Result, StateMap, }; use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; -use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; +use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::short::ShortStateHash; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index b33b0388..f319ba48 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,16 +1,12 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - debug, debug_info, err, implement, trace, + debug, debug_info, err, implement, state_res, trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, PduEvent, Result, + warn, Err, EventTypeExt, PduEvent, Result, }; use futures::{future::ready, FutureExt, StreamExt}; -use ruma::{ - events::StateEventType, - state_res::{self, EventTypeExt}, - CanonicalJsonValue, RoomId, ServerName, -}; +use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; use super::{get_room_version_id, to_room_version}; use crate::rooms::{ diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index de90a89c..d538de3c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ err, result::FlatOk, + state_res::{self, StateMap}, utils::{ calculate_hash, stream::{BroadbandExt, TryIgnore}, @@ -20,7 +21,6 @@ use ruma::{ AnyStrippedStateEvent, StateEventType, TimelineEventType, 
}, serde::Raw, - state_res::{self, StateMap}, EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a7edd4a4..d6154121 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -12,6 +12,7 @@ use std::{ use conduwuit::{ at, debug, debug_warn, err, error, implement, info, pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, + state_res::{self, Event, RoomVersion}, utils::{ self, future::TryExtExt, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt, }, @@ -36,7 +37,6 @@ use ruma::{ GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - state_res::{self, Event, RoomVersion}, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; From f2ca670c3b0858675312be60dcfb971384ce1244 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 01:58:13 +0000 Subject: [PATCH 0678/1248] optimize further into state-res with SmallString triage and de-lints for state-res. 
Signed-off-by: Jason Volk --- Cargo.lock | 1 + Cargo.toml | 4 + src/api/client/membership.rs | 8 +- src/api/client/sync/v4.rs | 15 +- src/api/client/sync/v5.rs | 15 +- src/core/Cargo.toml | 3 + src/core/state_res/event_auth.rs | 142 ++++++++++-------- src/core/state_res/mod.rs | 89 ++++++----- src/core/state_res/room_version.rs | 1 + src/core/state_res/test_utils.rs | 43 +++--- .../rooms/event_handler/handle_outlier_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 1 - .../rooms/event_handler/state_at_incoming.rs | 1 - .../event_handler/upgrade_outlier_pdu.rs | 10 +- src/service/rooms/timeline/mod.rs | 2 +- 15 files changed, 192 insertions(+), 145 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5981a2a6..4441779e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -810,6 +810,7 @@ dependencies = [ "libc", "libloading", "log", + "maplit", "nix", "num-traits", "rand", diff --git a/Cargo.toml b/Cargo.toml index d8f34544..a17aa4d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -379,6 +379,7 @@ features = [ "unstable-msc4203", # sending to-device events to appservices "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", + "unstable-pdu", ] [workspace.dependencies.rust-rocksdb] @@ -527,6 +528,9 @@ features = ["std"] version = "0.3.2" features = ["std"] +[workspace.dependencies.maplit] +version = "1.0.2" + # # Patches # diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1045b014..6c970665 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -14,7 +14,7 @@ use conduwuit::{ result::FlatOk, state_res, trace, utils::{self, shuffle, IterStream, ReadyExt}, - warn, Err, PduEvent, Result, + warn, Err, PduEvent, Result, StateKey, }; use futures::{join, FutureExt, StreamExt, TryFutureExt}; use ruma::{ @@ -1151,8 +1151,8 @@ async fn join_room_by_id_helper_remote( debug!("Running send_join auth check"); let fetch_state = &state; - let state_fetch = |k: &'static StateEventType, s: String| async move { - 
let shortstatekey = services.rooms.short.get_shortstatekey(k, &s).await.ok()?; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?; let event_id = fetch_state.get(&shortstatekey)?; services.rooms.timeline.get_pdu(event_id).await.ok() @@ -1162,7 +1162,7 @@ async fn join_room_by_id_helper_remote( &state_res::RoomVersion::new(&room_version_id)?, &parsed_join_pdu, None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), + |k, s| state_fetch(k.clone(), s.into()), ) .await .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 4e474ef3..13f832b2 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -395,9 +395,12 @@ pub(crate) async fn sync_events_v4_route( .map_or(10, usize_from_u64_truncated) .min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date @@ -449,7 +452,11 @@ pub(crate) async fn sync_events_v4_route( .map_or(10, usize_from_u64_truncated) .min(100); - todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.0.extend( + room.required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date todo_room.2 = todo_room.2.min( diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 63731688..cda6c041 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -223,7 +223,11 @@ async fn fetch_subscriptions( let limit: UInt = room.timeline_limit; - todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.0.extend( + room.required_state + .iter() + 
.map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(usize_from_ruma(limit)); // 0 means unknown because it got out of date todo_room.2 = todo_room.2.min( @@ -303,9 +307,12 @@ async fn handle_lists<'a>( let limit: usize = usize_from_ruma(list.room_details.timeline_limit).min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index d4b0c83b..b40dd3ad 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -116,5 +116,8 @@ nix.workspace = true hardened_malloc-rs.workspace = true hardened_malloc-rs.optional = true +[dev-dependencies] +maplit.workspace = true + [lints] workspace = true diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index 72a0216c..df2f8b36 100644 --- a/src/core/state_res/event_auth.rs +++ b/src/core/state_res/event_auth.rs @@ -21,7 +21,6 @@ use serde::{ Deserialize, }; use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; -use tracing::{debug, error, instrument, trace, warn}; use super::{ power_levels::{ @@ -29,8 +28,9 @@ use super::{ deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, }, room_version::RoomVersion, - Error, Event, Result, StateEventType, TimelineEventType, + Error, Event, Result, StateEventType, StateKey, TimelineEventType, }; +use crate::{debug, error, trace, warn}; // FIXME: field extracting could be bundled for `content` #[derive(Deserialize)] @@ -56,15 +56,15 @@ pub fn auth_types_for_event( sender: &UserId, state_key: Option<&str>, content: &RawJsonValue, -) -> serde_json::Result> { +) -> serde_json::Result> { if kind == &TimelineEventType::RoomCreate { return Ok(vec![]); } let mut auth_types = vec![ - 
(StateEventType::RoomPowerLevels, String::new()), - (StateEventType::RoomMember, sender.to_string()), - (StateEventType::RoomCreate, String::new()), + (StateEventType::RoomPowerLevels, StateKey::new()), + (StateEventType::RoomMember, sender.as_str().into()), + (StateEventType::RoomCreate, StateKey::new()), ]; if kind == &TimelineEventType::RoomMember { @@ -82,7 +82,7 @@ pub fn auth_types_for_event( if [MembershipState::Join, MembershipState::Invite, MembershipState::Knock] .contains(&membership) { - let key = (StateEventType::RoomJoinRules, String::new()); + let key = (StateEventType::RoomJoinRules, StateKey::new()); if !auth_types.contains(&key) { auth_types.push(key); } @@ -91,21 +91,22 @@ pub fn auth_types_for_event( .join_authorised_via_users_server .map(|m| m.deserialize()) { - let key = (StateEventType::RoomMember, u.to_string()); + let key = (StateEventType::RoomMember, u.as_str().into()); if !auth_types.contains(&key) { auth_types.push(key); } } } - let key = (StateEventType::RoomMember, state_key.to_owned()); + let key = (StateEventType::RoomMember, state_key.into()); if !auth_types.contains(&key) { auth_types.push(key); } if membership == MembershipState::Invite { if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) { - let key = (StateEventType::RoomThirdPartyInvite, t_id.signed.token); + let key = + (StateEventType::RoomThirdPartyInvite, t_id.signed.token.into()); if !auth_types.contains(&key) { auth_types.push(key); } @@ -128,7 +129,13 @@ pub fn auth_types_for_event( /// The `fetch_state` closure should gather state from a state snapshot. We need /// to know if the event passes auth against some state not a recursive /// collection of auth_events fields. 
-#[instrument(level = "debug", skip_all, fields(event_id = incoming_event.event_id().borrow().as_str()))] +#[tracing::instrument( + level = "debug", + skip_all, + fields( + event_id = incoming_event.event_id().borrow().as_str() + ) +)] pub async fn auth_check( room_version: &RoomVersion, incoming_event: &Incoming, @@ -136,10 +143,10 @@ pub async fn auth_check( fetch_state: F, ) -> Result where - F: Fn(&'static StateEventType, &str) -> Fut, + F: Fn(&StateEventType, &str) -> Fut + Send, Fut: Future> + Send, Fetched: Event + Send, - Incoming: Event + Send, + Incoming: Event + Send + Sync, { debug!( "auth_check beginning for {} ({})", @@ -262,6 +269,7 @@ where // sender domain of the event does not match the sender domain of the create // event, reject. #[derive(Deserialize)] + #[allow(clippy::items_after_statements)] struct RoomCreateContentFederate { #[serde(rename = "m.federate", default = "ruma::serde::default_true")] federate: bool, @@ -354,7 +362,7 @@ where join_rules_event.as_ref(), user_for_join_auth.as_deref(), &user_for_join_auth_membership, - room_create_event, + &room_create_event, )? { return Ok(false); } @@ -364,6 +372,7 @@ where } // If the sender's current membership state is not join, reject + #[allow(clippy::manual_let_else)] let sender_member_event = match sender_member_event { | Some(mem) => mem, | None => { @@ -498,19 +507,20 @@ where /// This is generated by calling `auth_types_for_event` with the membership /// event and the current State. 
#[allow(clippy::too_many_arguments)] +#[allow(clippy::cognitive_complexity)] fn valid_membership_change( room_version: &RoomVersion, target_user: &UserId, - target_user_membership_event: Option, + target_user_membership_event: Option<&impl Event>, sender: &UserId, - sender_membership_event: Option, + sender_membership_event: Option<&impl Event>, current_event: impl Event, - current_third_party_invite: Option, - power_levels_event: Option, - join_rules_event: Option, + current_third_party_invite: Option<&impl Event>, + power_levels_event: Option<&impl Event>, + join_rules_event: Option<&impl Event>, user_for_join_auth: Option<&UserId>, user_for_join_auth_membership: &MembershipState, - create_room: impl Event, + create_room: &impl Event, ) -> Result { #[derive(Deserialize)] struct GetThirdPartyInvite { @@ -856,6 +866,7 @@ fn check_power_levels( // and integers here debug!("validation of power event finished"); + #[allow(clippy::manual_let_else)] let current_state = match previous_power_event { | Some(current_state) => current_state, // If there is no previous m.room.power_levels event in the room, allow @@ -1054,6 +1065,7 @@ fn verify_third_party_invite( // If there is no m.room.third_party_invite event in the current room state with // state_key matching token, reject + #[allow(clippy::manual_let_else)] let current_tpid = match current_third_party_invite { | Some(id) => id, | None => return false, @@ -1069,12 +1081,14 @@ fn verify_third_party_invite( // If any signature in signed matches any public key in the // m.room.third_party_invite event, allow + #[allow(clippy::manual_let_else)] let tpid_ev = match from_json_str::(current_tpid.content().get()) { | Ok(ev) => ev, | Err(_) => return false, }; + #[allow(clippy::manual_let_else)] let decoded_invite_token = match Base64::parse(&tp_id.signed.token) { | Ok(tok) => tok, // FIXME: Log a warning? 
@@ -1096,7 +1110,7 @@ fn verify_third_party_invite( mod tests { use std::sync::Arc; - use ruma_events::{ + use ruma::events::{ room::{ join_rules::{ AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, @@ -1107,7 +1121,7 @@ mod tests { }; use serde_json::value::to_raw_value as to_raw_json_value; - use crate::{ + use crate::state_res::{ event_auth::valid_membership_change, test_utils::{ alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, @@ -1145,16 +1159,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1188,16 +1202,16 @@ mod tests { assert!(!valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, 
"".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1231,16 +1245,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1274,16 +1288,16 @@ mod tests { assert!(!valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), 
) .unwrap()); } @@ -1334,32 +1348,32 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V9, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), Some(alice()), &MembershipState::Join, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); assert!(!valid_membership_change( &RoomVersion::V9, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), Some(ella()), &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1402,16 +1416,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V7, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), 
sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index e4054377..19ea3cc0 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -1,3 +1,5 @@ +#![cfg_attr(test, allow(warnings))] + pub(crate) mod error; pub mod event_auth; mod power_levels; @@ -12,7 +14,7 @@ use std::{ cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap, HashSet}, fmt::Debug, - hash::Hash, + hash::{BuildHasher, Hash}, }; use futures::{future, stream, Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; @@ -32,13 +34,13 @@ pub use self::{ room_version::RoomVersion, state_event::Event, }; -use crate::{debug, trace, warn}; +use crate::{debug, pdu::StateKey, trace, warn}; /// A mapping of event type and state_key to some value `T`, usually an /// `EventId`. pub type StateMap = HashMap; pub type StateMapItem = (TypeStateKey, T); -pub type TypeStateKey = (StateEventType, String); +pub type TypeStateKey = (StateEventType, StateKey); type Result = crate::Result; @@ -68,10 +70,10 @@ type Result = crate::Result; /// event is part of the same room. 
//#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, //#[tracing::instrument(level event_fetch))] -pub async fn resolve<'a, E, SetIter, Fetch, FetchFut, Exists, ExistsFut>( +pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( room_version: &RoomVersionId, - state_sets: impl IntoIterator + Send, - auth_chain_sets: &'a [HashSet], + state_sets: Sets, + auth_chain_sets: &'a [HashSet], event_fetch: &Fetch, event_exists: &Exists, parallel_fetches: usize, @@ -81,7 +83,9 @@ where FetchFut: Future> + Send, Exists: Fn(E::Id) -> ExistsFut + Sync, ExistsFut: Future + Send, + Sets: IntoIterator + Send, SetIter: Iterator> + Clone + Send, + Hasher: BuildHasher + Send + Sync, E: Event + Clone + Send + Sync, E::Id: Borrow + Send + Sync, for<'b> &'b E: Send, @@ -178,7 +182,7 @@ where trace!(list = ?events_to_resolve, "events left to resolve"); // This "epochs" power level event - let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, String::new())); + let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, StateKey::new())); debug!(event_id = ?power_event, "power event"); @@ -222,16 +226,17 @@ fn separate<'a, Id>( where Id: Clone + Eq + Hash + 'a, { - let mut state_set_count = 0_usize; + let mut state_set_count: usize = 0; let mut occurrences = HashMap::<_, HashMap<_, _>>::new(); - let state_sets_iter = state_sets_iter.inspect(|_| state_set_count += 1); + let state_sets_iter = + state_sets_iter.inspect(|_| state_set_count = state_set_count.saturating_add(1)); for (k, v) in state_sets_iter.flatten() { occurrences .entry(k) .or_default() .entry(v) - .and_modify(|x| *x += 1) + .and_modify(|x: &mut usize| *x = x.saturating_add(1)) .or_insert(1); } @@ -246,7 +251,7 @@ where conflicted_state .entry((k.0.clone(), k.1.clone())) .and_modify(|x: &mut Vec<_>| x.push(id.clone())) - .or_insert(vec![id.clone()]); + .or_insert_with(|| vec![id.clone()]); } } } @@ -255,9 +260,13 @@ where } /// Returns a 
Vec of deduped EventIds that appear in some chains but not others. -fn get_auth_chain_diff(auth_chain_sets: &[HashSet]) -> impl Iterator + Send +#[allow(clippy::arithmetic_side_effects)] +fn get_auth_chain_diff( + auth_chain_sets: &[HashSet], +) -> impl Iterator + Send where Id: Clone + Eq + Hash + Send, + Hasher: BuildHasher + Send + Sync, { let num_sets = auth_chain_sets.len(); let mut id_counts: HashMap = HashMap::new(); @@ -288,7 +297,7 @@ async fn reverse_topological_power_sort( where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, + E: Event + Send + Sync, E::Id: Borrow + Send + Sync, { debug!("reverse topological sort of power events"); @@ -337,14 +346,15 @@ where /// `key_fn` is used as to obtain the power level and age of an event for /// breaking ties (together with the event ID). #[tracing::instrument(level = "debug", skip_all)] -pub async fn lexicographical_topological_sort( - graph: &HashMap>, +pub async fn lexicographical_topological_sort( + graph: &HashMap>, key_fn: &F, ) -> Result> where F: Fn(Id) -> Fut + Sync, Fut: Future> + Send, - Id: Borrow + Clone + Eq + Hash + Ord + Send, + Id: Borrow + Clone + Eq + Hash + Ord + Send + Sync, + Hasher: BuildHasher + Default + Clone + Send + Sync, { #[derive(PartialEq, Eq)] struct TieBreaker<'a, Id> { @@ -395,7 +405,7 @@ where // The number of events that depend on the given event (the EventId key) // How many events reference this event in the DAG as a parent - let mut reverse_graph: HashMap<_, HashSet<_>> = HashMap::new(); + let mut reverse_graph: HashMap<_, HashSet<_, Hasher>> = HashMap::new(); // Vec of nodes that have zero out degree, least recent events. 
let mut zero_outdegree = Vec::new(); @@ -727,8 +737,8 @@ async fn get_mainline_depth( where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, - E::Id: Borrow + Send, + E: Event + Send + Sync, + E::Id: Borrow + Send + Sync, { while let Some(sort_ev) = event { debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); @@ -758,10 +768,10 @@ async fn add_event_and_auth_chain_to_graph( auth_diff: &HashSet, fetch_event: &F, ) where - F: Fn(E::Id) -> Fut, + F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, - E::Id: Borrow + Clone + Send, + E: Event + Send + Sync, + E::Id: Borrow + Clone + Send + Sync, { let mut state = vec![event_id]; while let Some(eid) = state.pop() { @@ -788,7 +798,7 @@ where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, E: Event + Send, - E::Id: Borrow + Send, + E::Id: Borrow + Send + Sync, { match fetch(event_id.clone()).await.as_ref() { | Some(state) => is_power_event(state), @@ -820,18 +830,18 @@ fn is_power_event(event: impl Event) -> bool { /// Convenience trait for adding event type plus state key to state maps. 
pub trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey); } impl EventTypeExt for StateEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { (self, state_key.into()) } } impl EventTypeExt for TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { - (self.to_string().into(), state_key.into()) + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self.into(), state_key.into()) } } @@ -839,7 +849,7 @@ impl EventTypeExt for &T where T: EventTypeExt + Clone, { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { self.to_owned().with_state_key(state_key) } } @@ -858,13 +868,11 @@ mod tests { room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, TimelineEventType, }, - int, uint, + int, uint, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, }; - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId}; use serde_json::{json, value::to_raw_value as to_raw_json_value}; - use tracing::debug; - use crate::{ + use super::{ is_power_event, room_version::RoomVersion, test_utils::{ @@ -874,6 +882,7 @@ mod tests { }, Event, EventTypeExt, StateMap, }; + use crate::debug; async fn test_event_sort() { use futures::future::ready; @@ -898,11 +907,11 @@ mod tests { let fetcher = |id| ready(events.get(&id).cloned()); let sorted_power_events = - crate::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) .await .unwrap(); - let resolved_power = crate::iterative_auth_check( + let resolved_power = super::iterative_auth_check( 
&RoomVersion::V6, sorted_power_events.iter(), HashMap::new(), // unconflicted events @@ -918,10 +927,10 @@ mod tests { events_to_sort.shuffle(&mut rand::thread_rng()); let power_level = resolved_power - .get(&(StateEventType::RoomPowerLevels, "".to_owned())) + .get(&(StateEventType::RoomPowerLevels, "".into())) .cloned(); - let sorted_event_ids = crate::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher, 1) .await .unwrap(); @@ -1302,7 +1311,7 @@ mod tests { }) .collect(); - let resolved = match crate::resolve( + let resolved = match super::resolve( &RoomVersionId::V2, &state_sets, &auth_chain, @@ -1333,7 +1342,7 @@ mod tests { event_id("p") => hashset![event_id("o")], }; - let res = crate::lexicographical_topological_sort(&graph, &|_id| async { + let res = super::lexicographical_topological_sort(&graph, &|_id| async { Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) }) .await @@ -1421,7 +1430,7 @@ mod tests { let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); - let resolved = match crate::resolve( + let resolved = match super::resolve( &RoomVersionId::V6, &state_sets, &auth_chain, @@ -1552,7 +1561,7 @@ mod tests { #[allow(unused_mut)] let mut x = StateMap::new(); $( - x.insert(($kind, $key.to_owned()), $id); + x.insert(($kind, $key.into()), $id); )* x }}; diff --git a/src/core/state_res/room_version.rs b/src/core/state_res/room_version.rs index e1b0afe1..8dfd6cde 100644 --- a/src/core/state_res/room_version.rs +++ b/src/core/state_res/room_version.rs @@ -32,6 +32,7 @@ pub enum StateResolutionVersion { } #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +#[allow(clippy::struct_excessive_bools)] pub struct RoomVersion { /// The stability of this room. 
pub disposition: RoomDisposition, diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs index 7954b28d..9c2b151f 100644 --- a/src/core/state_res/test_utils.rs +++ b/src/core/state_res/test_utils.rs @@ -7,28 +7,28 @@ use std::{ }, }; -use futures_util::future::ready; -use js_int::{int, uint}; -use ruma_common::{ - event_id, room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, - RoomVersionId, ServerSignatures, UserId, -}; -use ruma_events::{ - pdu::{EventHash, Pdu, RoomV3Pdu}, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, +use futures::future::ready; +use ruma::{ + event_id, + events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + TimelineEventType, }, - TimelineEventType, + int, room_id, uint, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, + RoomVersionId, ServerSignatures, UserId, }; use serde_json::{ json, value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, }; -use tracing::info; pub(crate) use self::event::PduEvent; -use crate::{auth_types_for_event, Error, Event, EventTypeExt, Result, StateMap}; +use super::auth_types_for_event; +use crate::{info, Event, EventTypeExt, Result, StateMap}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); @@ -88,7 +88,7 @@ pub(crate) async fn do_check( // Resolve the current state and add it to the state_at_event map then continue // on in "time" - for node in crate::lexicographical_topological_sort(&graph, &|_id| async { + for node in super::lexicographical_topological_sort(&graph, &|_id| async { Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) }) .await @@ -135,7 +135,7 @@ pub(crate) async fn do_check( let event_map = &event_map; let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); let exists = |id: ::Id| ready(event_map.get(&id).is_some()); - 
let resolved = crate::resolve( + let resolved = super::resolve( &RoomVersionId::V6, state_sets, &auth_chain_sets, @@ -223,7 +223,7 @@ pub(crate) async fn do_check( // Filter out the dummy messages events. // These act as points in time where there should be a known state to // test against. - && **k != ("m.room.message".into(), "dummy".to_owned()) + && **k != ("m.room.message".into(), "dummy".into()) }) .map(|(k, v)| (k.clone(), v.clone())) .collect::>(); @@ -239,7 +239,8 @@ impl TestStore { self.0 .get(event_id) .cloned() - .ok_or_else(|| Error::NotFound(format!("{event_id} not found"))) + .ok_or_else(|| super::Error::NotFound(format!("{event_id} not found"))) + .map_err(Into::into) } /// Returns a Vec of the related auth events to the given `event`. @@ -582,8 +583,10 @@ pub(crate) fn INITIAL_EDGES() -> Vec { } pub(crate) mod event { - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; - use ruma_events::{pdu::Pdu, TimelineEventType}; + use ruma::{ + events::{pdu::Pdu, TimelineEventType}, + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 3cc15fc4..e628c77a 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -133,7 +133,7 @@ pub(super) async fn handle_outlier_pdu<'a>( )); } - let state_fetch = |ty: &'static StateEventType, sk: &str| { + let state_fetch = |ty: &StateEventType, sk: &str| { let key = (ty.to_owned(), sk.into()); ready(auth_events.get(&key)) }; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 28011a1b..37d47d47 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -63,7 +63,6 @@ pub async fn 
resolve_state( .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) - .map(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) .collect() }) .map(Ok::<_, Error>) diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 843b2af9..2eb6013a 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -172,7 +172,6 @@ async fn state_at_incoming_fork( .short .get_statekey_from_short(*k) .map_ok(|(ty, sk)| ((ty, sk), id.clone())) - .map_ok(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) }) .ready_filter_map(Result::ok) .collect() diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index f319ba48..385d2142 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -3,7 +3,7 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::In use conduwuit::{ debug, debug_info, err, implement, state_res, trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, EventTypeExt, PduEvent, Result, + warn, Err, EventTypeExt, PduEvent, Result, StateKey, }; use futures::{future::ready, FutureExt, StreamExt}; use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; @@ -71,8 +71,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( debug!("Performing auth check"); // 11. 
Check the auth of the event passes based on the state of the event let state_fetch_state = &state_at_incoming_event; - let state_fetch = |k: &'static StateEventType, s: String| async move { - let shortstatekey = self.services.short.get_shortstatekey(k, &s).await.ok()?; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = self.services.short.get_shortstatekey(&k, &s).await.ok()?; let event_id = state_fetch_state.get(&shortstatekey)?; self.services.timeline.get_pdu(event_id).await.ok() @@ -82,7 +82,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( &room_version, &incoming_pdu, None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), + |ty, sk| state_fetch(ty.clone(), sk.into()), ) .await .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; @@ -104,7 +104,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( ) .await?; - let state_fetch = |k: &'static StateEventType, s: &str| { + let state_fetch = |k: &StateEventType, s: &str| { let key = k.with_state_key(s); ready(auth_events.get(&key).cloned()) }; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index d6154121..9d6ee982 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -747,7 +747,7 @@ impl Service { }; let auth_fetch = |k: &StateEventType, s: &str| { - let key = (k.clone(), s.to_owned()); + let key = (k.clone(), s.into()); ready(auth_events.get(&key)) }; From 4de0dafdf11acb71d28e6891c9b740b66d448934 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 06:24:25 +0000 Subject: [PATCH 0679/1248] bump ruma Signed-off-by: Jason Volk --- Cargo.lock | 360 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 2 +- 2 files changed, 183 insertions(+), 179 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4441779e..efba2e07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,7 +79,7 @@ checksum = 
"0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -161,18 +161,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -221,9 +221,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ea835662a0af02443aa1396d39be523bbf8f11ee6fad20329607c480bea48c3" +checksum = "4c2b7ddaa2c56a367ad27a094ad8ef4faacf8a617c2575acb2ba88949df999ca" dependencies = [ "aws-lc-sys", "paste", @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.25.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2ddd3ada61a305e1d8bb6c005d1eaa7d14d903681edfc400406d523a9b491" +checksum = "54ac4f13dad353b209b34cbec082338202cbc01c8f00336b55c750c13ac91f8f" dependencies = [ "bindgen", "cc", @@ -427,7 +427,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.96", + "syn 2.0.98", "which", ] @@ -495,9 +495,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -511,9 +511,9 @@ checksum = 
"c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" @@ -535,9 +535,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "bytesize" @@ -568,9 +568,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.10" +version = "1.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" dependencies = [ "jobserver", "libc", @@ -639,9 +639,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.26" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" +checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" dependencies = [ "clap_builder", "clap_derive", @@ -649,9 +649,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.26" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstyle", "clap_lex", @@ -659,14 +659,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = 
"4.5.24" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -677,9 +677,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" dependencies = [ "cc", ] @@ -863,7 +863,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1030,9 +1030,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -1177,7 +1177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1204,7 +1204,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1273,7 +1273,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1325,7 +1325,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1526,7 +1526,7 @@ 
checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1578,10 +1578,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "gif" version = "0.13.1" @@ -1616,7 +1628,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.0", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -1708,9 +1720,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" +checksum = "2ad3d6d98c648ed628df039541a5577bee1a7c83e9e16fe3dbedeea4cdfeb971" dependencies = [ "async-trait", "cfg-if", @@ -1732,9 +1744,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" +checksum = "dcf287bde7b776e85d7188e6e5db7cf410a2f9531fe82817eb87feed034c8d14" dependencies = [ "cfg-if", "futures-util", @@ -1802,7 +1814,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1850,9 +1862,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" 
+checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -1868,9 +1880,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", @@ -2054,7 +2066,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2129,9 +2141,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2158,7 +2170,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2189,9 +2201,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "itertools" @@ -2300,7 +2312,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2528,7 +2540,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2574,7 +2586,7 @@ checksum = 
"2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -2675,7 +2687,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2739,15 +2751,15 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" @@ -2757,7 +2769,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.7.0", + "indexmap 2.7.1", "js-sys", "once_cell", "pin-project-lite", @@ -2913,7 +2925,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2928,7 +2940,7 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_shared 0.11.3", + "phf_shared", ] [[package]] @@ -2937,18 +2949,8 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" dependencies = [ - "phf_generator 0.11.3", - "phf_shared 0.11.3", -] - -[[package]] -name = "phf_generator" -version = "0.10.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" -dependencies = [ - "phf_shared 0.10.0", - "rand", + "phf_generator", + "phf_shared", ] [[package]] @@ -2957,46 +2959,37 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.3", + "phf_shared", "rand", ] -[[package]] -name = "phf_shared" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" -dependencies = [ - "siphasher 0.3.11", -] - [[package]] name = "phf_shared" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher 1.0.1", + "siphasher", ] [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3068,7 +3061,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3097,7 +3090,7 @@ checksum = 
"af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "version_check", "yansi", ] @@ -3118,7 +3111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3141,7 +3134,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3202,7 +3195,7 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "socket2", "thiserror 2.0.11", @@ -3217,10 +3210,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", - "getrandom", + "getrandom 0.2.15", "rand", "ring", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", @@ -3280,7 +3273,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -3479,7 +3472,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -3489,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "assign", "js_int", @@ -3511,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3523,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", "assign", @@ -3546,14 +3539,14 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", "http", - "indexmap 2.7.0", + "indexmap 2.7.1", "js_int", "konst", "percent-encoding", @@ -3577,10 +3570,10 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", - "indexmap 2.7.0", + "indexmap 2.7.1", "js_int", "js_option", "percent-encoding", @@ -3602,7 +3595,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "bytes", "http", @@ -3620,7 +3613,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3629,7 +3622,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3639,7 +3632,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3647,14 +3640,14 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.96", + "syn 2.0.98", "toml", ] [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3666,7 +3659,7 @@ dependencies = [ 
[[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "headers", "http", @@ -3679,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3695,7 +3688,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "futures-util", "js_int", @@ -3755,9 +3748,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -3770,9 +3763,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.43" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ 
"bitflags 2.8.0", "errno", @@ -3783,9 +3776,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.21" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" dependencies = [ "aws-lc-rs", "log", @@ -3820,9 +3813,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" dependencies = [ "web-time 1.1.0", ] @@ -3862,9 +3855,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "sanitize-filename" @@ -3892,9 +3885,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sd-notify" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561e6b346a5e59e0b8a07894004897d7160567e3352d2ebd6c3741d4e086b6f5" +checksum = "b943eadf71d8b69e661330cb0e2656e31040acf21ee7708e2c238a0ec6af2bf4" dependencies = [ "libc", ] @@ -3924,9 +3917,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" [[package]] name = "sentry" @@ -4080,7 +4073,7 @@ checksum = 
"5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4090,7 +4083,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -4098,9 +4091,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.135" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ "itoa", "memchr", @@ -4155,7 +4148,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -4253,12 +4246,6 @@ dependencies = [ "quote", ] -[[package]] -name = "siphasher" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" - [[package]] name = "siphasher" version = "1.0.1" @@ -4333,26 +4320,25 @@ checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006" [[package]] name = "string_cache" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +checksum = "938d512196766101d333398efde81bc1f37b00cb42c2f8350e5df639f040bbbe" dependencies = [ "new_debug_unreachable", - "once_cell", "parking_lot", - "phf_shared 0.10.0", + "phf_shared", "precomputed-hash", "serde", ] [[package]] name = "string_cache_codegen" -version = "0.5.2" +version = "0.5.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb30289b722be4ff74a408c3cc27edeaad656e06cb1fe8fa9231fa59c728988" +checksum = "244292f3441c89febe5b5bdfbb6863aeaf4f64da810ea3050fd927b27b8d92ce" dependencies = [ - "phf_generator 0.10.0", - "phf_shared 0.10.0", + "phf_generator", + "phf_shared", "proc-macro2", "quote", ] @@ -4385,9 +4371,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.96" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -4411,7 +4397,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4446,9 +4432,9 @@ dependencies = [ [[package]] name = "termimad" -version = "0.31.1" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a5d4cf55d9f1cb04fcda48f725772d0733ae34e030dfc4dd36e738a5965f4" +checksum = "a8e19c6dbf107bec01d0e216bb8219485795b7d75328e4fa5ef2756c1be4f8dc" dependencies = [ "coolor", "crokey", @@ -4496,7 +4482,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4507,7 +4493,7 @@ checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4663,7 +4649,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4726,9 +4712,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", @@ -4747,11 +4733,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", @@ -4876,7 +4862,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4997,9 +4983,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-segmentation" @@ -5084,11 +5070,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" +checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" dependencies = [ - "getrandom", + "getrandom 0.3.1", "serde", ] @@ -5142,6 +5128,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -5164,7 +5159,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-shared", ] @@ -5199,7 +5194,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5257,9 +5252,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.7" +version = "0.26.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" +checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" dependencies = [ "rustls-pki-types", ] @@ -5515,9 +5510,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.24" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" +checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" dependencies = [ "memchr", ] @@ -5532,6 +5527,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.8.0", +] + [[package]] name = "write16" version = "1.0.0" @@ -5581,7 +5585,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -5603,7 +5607,7 @@ checksum = 
"fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -5623,7 +5627,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -5652,7 +5656,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a17aa4d6..12556e00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +rev = "e7a793b720e58bbe6858fecb86db97191dbfe7aa" features = [ "compat", "rand", From 6113803038f15a9f0206b31fc0216ebc315d7761 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 10:09:57 +0000 Subject: [PATCH 0680/1248] better error logging on send_join response failure Signed-off-by: Jason Volk --- src/api/client/membership.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 6c970665..26736fb5 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,7 +9,7 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - at, debug, debug_info, debug_warn, err, info, + at, debug, debug_info, debug_warn, err, error, info, pdu::{gen_event_id_canonical_json, PduBuilder}, result::FlatOk, state_res, trace, @@ -1011,10 +1011,17 @@ async fn join_room_by_id_helper_remote( .await, }; - let send_join_response = services + let send_join_response = match services .sending .send_synapse_request(&remote_server, send_join_request) - .await?; + .await + { + | Ok(response) => response, + | Err(e) => { + 
error!("send_join failed: {e}"); + return Err(e); + }, + }; info!("send_join finished"); From f47677c995e2847b6ad39c877c3ab5b9bd5b1152 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 07:09:45 +0000 Subject: [PATCH 0681/1248] refactor spaces Signed-off-by: Jason Volk --- src/api/client/space.rs | 228 +++++---- src/api/server/hierarchy.rs | 90 ++-- src/service/rooms/spaces/mod.rs | 774 +++++++++++++++--------------- src/service/rooms/spaces/tests.rs | 27 +- 4 files changed, 568 insertions(+), 551 deletions(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 8f54de2a..7efd7817 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -1,18 +1,25 @@ -use std::{collections::VecDeque, str::FromStr}; +use std::{ + collections::{BTreeSet, VecDeque}, + str::FromStr, +}; use axum::extract::State; -use conduwuit::{checked, pdu::ShortRoomId, utils::stream::IterStream}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{ + utils::{future::TryExtExt, stream::IterStream}, + Err, Result, +}; +use futures::{future::OptionFuture, StreamExt, TryFutureExt}; use ruma::{ - api::client::{error::ErrorKind, space::get_hierarchy}, - OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, + api::client::space::get_hierarchy, OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, }; use service::{ - rooms::spaces::{get_parent_children_via, summary_to_chunk, SummaryAccessibility}, + rooms::spaces::{ + get_parent_children_via, summary_to_chunk, PaginationToken, SummaryAccessibility, + }, Services, }; -use crate::{service::rooms::spaces::PaginationToken, Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy` /// @@ -40,10 +47,9 @@ pub(crate) async fn get_hierarchy_route( // Should prevent unexpeded behaviour in (bad) clients if let Some(ref token) = key { if token.suggested_only != body.suggested_only || token.max_depth != max_depth { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - 
"suggested_only and max_depth cannot change on paginated requests", - )); + return Err!(Request(InvalidParam( + "suggested_only and max_depth cannot change on paginated requests" + ))); } } @@ -52,58 +58,70 @@ pub(crate) async fn get_hierarchy_route( body.sender_user(), &body.room_id, limit.try_into().unwrap_or(10), - key.map_or(vec![], |token| token.short_room_ids), - max_depth.into(), + max_depth.try_into().unwrap_or(usize::MAX), body.suggested_only, + key.as_ref() + .into_iter() + .flat_map(|t| t.short_room_ids.iter()), ) .await } -async fn get_client_hierarchy( +async fn get_client_hierarchy<'a, ShortRoomIds>( services: &Services, sender_user: &UserId, room_id: &RoomId, limit: usize, - short_room_ids: Vec, - max_depth: u64, + max_depth: usize, suggested_only: bool, -) -> Result { - let mut parents = VecDeque::new(); + short_room_ids: ShortRoomIds, +) -> Result +where + ShortRoomIds: Iterator + Clone + Send + Sync + 'a, +{ + type Via = Vec; + type Entry = (OwnedRoomId, Via); + type Rooms = VecDeque; - // Don't start populating the results if we have to start at a specific room. 
- let mut populate_results = short_room_ids.is_empty(); + let mut queue: Rooms = [( + room_id.to_owned(), + room_id + .server_name() + .map(ToOwned::to_owned) + .into_iter() + .collect(), + )] + .into(); - let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { - | Some(server_name) => vec![server_name.into()], - | None => vec![], - })]]; + let mut rooms = Vec::with_capacity(limit); + let mut parents = BTreeSet::new(); + while let Some((current_room, via)) = queue.pop_front() { + let summary = services + .rooms + .spaces + .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) + .await?; - let mut results = Vec::with_capacity(limit); - - while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { - if results.len() >= limit { - break; - } - - match ( - services - .rooms - .spaces - .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) - .await?, - current_room == room_id, - ) { + match (summary, current_room == room_id) { + | (None | Some(SummaryAccessibility::Inaccessible), false) => { + // Just ignore other unavailable rooms + }, + | (None, true) => { + return Err!(Request(Forbidden("The requested room was not found"))); + }, + | (Some(SummaryAccessibility::Inaccessible), true) => { + return Err!(Request(Forbidden("The requested room is inaccessible"))); + }, | (Some(SummaryAccessibility::Accessible(summary)), _) => { - let mut children: Vec<(OwnedRoomId, Vec)> = - get_parent_children_via(&summary, suggested_only) - .into_iter() - .filter(|(room, _)| parents.iter().all(|parent| parent != room)) - .rev() - .collect(); + let populate = parents.len() >= short_room_ids.clone().count(); - if populate_results { - results.push(summary_to_chunk(*summary.clone())); - } else { + let mut children: Vec = get_parent_children_via(&summary, suggested_only) + .filter(|(room, _)| !parents.contains(room)) + .rev() + .map(|(key, val)| (key, val.collect())) + .collect(); + + if 
!populate { children = children .iter() .rev() @@ -113,97 +131,69 @@ async fn get_client_hierarchy( .rooms .short .get_shortroomid(room) - .map_ok(|short| Some(&short) != short_room_ids.get(parents.len())) + .map_ok(|short| { + Some(&short) != short_room_ids.clone().nth(parents.len()) + }) .unwrap_or_else(|_| false) }) .map(Clone::clone) - .collect::)>>() + .collect::>() .await .into_iter() .rev() .collect(); - - if children.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room IDs in token were not found.", - )); - } - - // We have reached the room after where we last left off - let parents_len = parents.len(); - if checked!(parents_len + 1)? == short_room_ids.len() { - populate_results = true; - } } - let parents_len: u64 = parents.len().try_into()?; - if !children.is_empty() && parents_len < max_depth { - parents.push_back(current_room.clone()); - stack.push(children); + if populate { + rooms.push(summary_to_chunk(summary.clone())); + } else if queue.is_empty() && children.is_empty() { + return Err!(Request(InvalidParam("Room IDs in token were not found."))); } - // Root room in the space hierarchy, we return an error - // if this one fails. 
+ + parents.insert(current_room.clone()); + if rooms.len() >= limit { + break; + } + + if children.is_empty() { + break; + } + + if parents.len() >= max_depth { + continue; + } + + queue.extend(children); }, - | (Some(SummaryAccessibility::Inaccessible), true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room is inaccessible", - )); - }, - | (None, true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room was not found", - )); - }, - // Just ignore other unavailable rooms - | (None | Some(SummaryAccessibility::Inaccessible), false) => (), } } - Ok(get_hierarchy::v1::Response { - next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { - parents.pop_front(); - parents.push_back(room); + let next_batch: OptionFuture<_> = queue + .pop_front() + .map(|(room, _)| async move { + parents.insert(room); let next_short_room_ids: Vec<_> = parents .iter() .stream() - .filter_map(|room_id| async move { - services.rooms.short.get_shortroomid(room_id).await.ok() - }) + .filter_map(|room_id| services.rooms.short.get_shortroomid(room_id).ok()) .collect() .await; - (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( - || { - PaginationToken { - short_room_ids: next_short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string() - }, - ) - } else { - None - }, - rooms: results, + (next_short_room_ids.iter().ne(short_room_ids) && !next_short_room_ids.is_empty()) + .then_some(PaginationToken { + short_room_ids: next_short_room_ids, + limit: max_depth.try_into().ok()?, + max_depth: max_depth.try_into().ok()?, + suggested_only, + }) + .as_ref() + .map(PaginationToken::to_string) + }) + .into(); + + Ok(get_hierarchy::v1::Response { + next_batch: next_batch.await.flatten(), + rooms, }) } 
- -fn next_room_to_traverse( - stack: &mut Vec)>>, - parents: &mut VecDeque, -) -> Option<(OwnedRoomId, Vec)> { - while stack.last().is_some_and(Vec::is_empty) { - stack.pop(); - parents.pop_back(); - } - - stack.last_mut().and_then(Vec::pop) -} diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index bcf2f7bc..f7bc43ab 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,10 +1,11 @@ use axum::extract::State; -use conduwuit::{Err, Result}; -use ruma::{api::federation::space::get_hierarchy, RoomId, ServerName}; -use service::{ - rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}, - Services, +use conduwuit::{ + utils::stream::{BroadbandExt, IterStream}, + Err, Result, }; +use futures::{FutureExt, StreamExt}; +use ruma::api::federation::space::get_hierarchy; +use service::rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}; use crate::Ruma; @@ -20,54 +21,51 @@ pub(crate) async fn get_hierarchy_route( return Err!(Request(NotFound("Room does not exist."))); } - get_hierarchy(&services, &body.room_id, body.origin(), body.suggested_only).await -} - -/// Gets the response for the space hierarchy over federation request -/// -/// Errors if the room does not exist, so a check if the room exists should -/// be done -async fn get_hierarchy( - services: &Services, - room_id: &RoomId, - server_name: &ServerName, - suggested_only: bool, -) -> Result { + let room_id = &body.room_id; + let suggested_only = body.suggested_only; + let ref identifier = Identifier::ServerName(body.origin()); match services .rooms .spaces - .get_summary_and_children_local(&room_id.to_owned(), Identifier::ServerName(server_name)) + .get_summary_and_children_local(room_id, identifier) .await? 
{ - | Some(SummaryAccessibility::Accessible(room)) => { - let mut children = Vec::new(); - let mut inaccessible_children = Vec::new(); + | None => Err!(Request(NotFound("The requested room was not found"))), - for (child, _via) in get_parent_children_via(&room, suggested_only) { - match services - .rooms - .spaces - .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) - .await? - { - | Some(SummaryAccessibility::Accessible(summary)) => { - children.push((*summary).into()); - }, - | Some(SummaryAccessibility::Inaccessible) => { - inaccessible_children.push(child); - }, - | None => (), - } - } - - Ok(get_hierarchy::v1::Response { - room: *room, - children, - inaccessible_children, - }) - }, | Some(SummaryAccessibility::Inaccessible) => Err!(Request(NotFound("The requested room is inaccessible"))), - | None => Err!(Request(NotFound("The requested room was not found"))), + + | Some(SummaryAccessibility::Accessible(room)) => { + let (children, inaccessible_children) = + get_parent_children_via(&room, suggested_only) + .stream() + .broad_filter_map(|(child, _via)| async move { + match services + .rooms + .spaces + .get_summary_and_children_local(&child, identifier) + .await + .ok()? 
+ { + | None => None, + + | Some(SummaryAccessibility::Inaccessible) => + Some((None, Some(child))), + + | Some(SummaryAccessibility::Accessible(summary)) => + Some((Some(summary), None)), + } + }) + .unzip() + .map(|(children, inaccessible_children): (Vec<_>, Vec<_>)| { + ( + children.into_iter().flatten().map(Into::into).collect(), + inaccessible_children.into_iter().flatten().collect(), + ) + }) + .await; + + Ok(get_hierarchy::v1::Response { room, children, inaccessible_children }) + }, } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1e2b0a9f..268d6dfe 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,14 +1,24 @@ mod pagination_token; +#[cfg(test)] mod tests; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; -use conduwuit::{debug_info, err, utils::math::usize_from_f64, Error, Result}; -use futures::StreamExt; +use conduwuit::{ + implement, + utils::{ + future::BoolExt, + math::usize_from_f64, + stream::{BroadbandExt, ReadyExt}, + IterStream, + }, + Err, Error, Result, +}; +use futures::{pin_mut, stream::FuturesUnordered, FutureExt, Stream, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ api::{ - client::{error::ErrorKind, space::SpaceHierarchyRoomsChunk}, + client::space::SpaceHierarchyRoomsChunk, federation::{ self, space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary}, @@ -21,46 +31,46 @@ use ruma::{ }, serde::Raw, space::SpaceRoomJoinRule, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{rooms, sending, Dep}; - -pub struct CachedSpaceHierarchySummary { - summary: SpaceHierarchyParentSummary, -} - -pub enum SummaryAccessibility { - Accessible(Box), - Inaccessible, -} - -/// Identifier used to check if rooms are accessible -/// 
-/// None is used if you want to return the room, no matter if accessible or not -pub enum Identifier<'a> { - UserId(&'a UserId), - ServerName(&'a ServerName), -} +use crate::{conduwuit::utils::TryFutureExtExt, rooms, sending, Dep}; pub struct Service { services: Services, - pub roomid_spacehierarchy_cache: - Mutex>>, + pub roomid_spacehierarchy_cache: Mutex, } struct Services { state_accessor: Dep, state_cache: Dep, state: Dep, - short: Dep, event_handler: Dep, timeline: Dep, sending: Dep, } +pub struct CachedSpaceHierarchySummary { + summary: SpaceHierarchyParentSummary, +} + +#[allow(clippy::large_enum_variant)] +pub enum SummaryAccessibility { + Accessible(SpaceHierarchyParentSummary), + Inaccessible, +} + +/// Identifier used to check if rooms are accessible. None is used if you want +/// to return the room, no matter if accessible or not +pub enum Identifier<'a> { + UserId(&'a UserId), + ServerName(&'a ServerName), +} + +type Cache = LruCache>; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -72,7 +82,6 @@ impl crate::Service for Service { .depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), state: args.depend::("rooms::state"), - short: args.depend::("rooms::short"), event_handler: args .depend::("rooms::event_handler"), timeline: args.depend::("rooms::timeline"), @@ -85,370 +94,407 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Gets the summary of a space using solely local information - pub async fn get_summary_and_children_local( - &self, - current_room: &OwnedRoomId, - identifier: Identifier<'_>, - ) -> Result> { - if let Some(cached) = self - .roomid_spacehierarchy_cache - .lock() - .await - .get_mut(¤t_room.to_owned()) - .as_ref() - { - return Ok(if let Some(cached) = cached { +/// Gets the summary of a space using solely local information +#[implement(Service)] 
+pub async fn get_summary_and_children_local( + &self, + current_room: &RoomId, + identifier: &Identifier<'_>, +) -> Result> { + match self + .roomid_spacehierarchy_cache + .lock() + .await + .get_mut(current_room) + .as_ref() + { + | None => (), // cache miss + | Some(None) => return Ok(None), + | Some(Some(cached)) => + return Ok(Some( if self .is_accessible_child( current_room, &cached.summary.join_rule, - &identifier, + identifier, &cached.summary.allowed_room_ids, ) .await { - Some(SummaryAccessibility::Accessible(Box::new(cached.summary.clone()))) + SummaryAccessibility::Accessible(cached.summary.clone()) } else { - Some(SummaryAccessibility::Inaccessible) - } - } else { - None - }); - } + SummaryAccessibility::Inaccessible + }, + )), + }; - if let Some(children_pdus) = self.get_stripped_space_child_events(current_room).await? { - let summary = self - .get_room_summary(current_room, children_pdus, &identifier) - .await; - if let Ok(summary) = summary { - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); + let children_pdus: Vec<_> = self + .get_stripped_space_child_events(current_room) + .collect() + .await; - Ok(Some(SummaryAccessibility::Accessible(Box::new(summary)))) - } else { - Ok(None) - } - } else { - Ok(None) - } - } + let summary = self + .get_room_summary(current_room, children_pdus, identifier) + .boxed() + .await; - /// Gets the summary of a space using solely federation - #[tracing::instrument(level = "debug", skip(self))] - async fn get_summary_and_children_federation( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], - ) -> Result> { - for server in via { - debug_info!("Asking {server} for /hierarchy"); - let Ok(response) = self - .services + let Ok(summary) = summary else { + return Ok(None); + }; + + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.to_owned(), + 
Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + ); + + Ok(Some(SummaryAccessibility::Accessible(summary))) +} + +/// Gets the summary of a space using solely federation +#[implement(Service)] +#[tracing::instrument(level = "debug", skip(self))] +async fn get_summary_and_children_federation( + &self, + current_room: &RoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], +) -> Result> { + let request = federation::space::get_hierarchy::v1::Request { + room_id: current_room.to_owned(), + suggested_only, + }; + + let mut requests: FuturesUnordered<_> = via + .iter() + .map(|server| { + self.services .sending - .send_federation_request(server, federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), - suggested_only, - }) - .await - else { - continue; - }; - - debug_info!("Got response from {server} for /hierarchy\n{response:?}"); - let summary = response.room.clone(); - - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); - - for child in response.children { - let mut guard = self.roomid_spacehierarchy_cache.lock().await; - if !guard.contains_key(current_room) { - guard.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { - summary: { - let SpaceHierarchyChildSummary { - canonical_alias, - name, - num_joined_members, - room_id, - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - allowed_room_ids, - } = child; - - SpaceHierarchyParentSummary { - canonical_alias, - name, - num_joined_members, - room_id: room_id.clone(), - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - children_state: self - .get_stripped_space_child_events(&room_id) - .await? 
- .unwrap(), - allowed_room_ids, - } - }, - }), - ); - } - } - if self - .is_accessible_child( - current_room, - &response.room.join_rule, - &Identifier::UserId(user_id), - &response.room.allowed_room_ids, - ) - .await - { - return Ok(Some(SummaryAccessibility::Accessible(Box::new(summary.clone())))); - } - - return Ok(Some(SummaryAccessibility::Inaccessible)); - } + .send_federation_request(server, request.clone()) + }) + .collect(); + let Some(Ok(response)) = requests.next().await else { self.roomid_spacehierarchy_cache .lock() .await - .insert(current_room.clone(), None); + .insert(current_room.to_owned(), None); - Ok(None) - } + return Ok(None); + }; - /// Gets the summary of a space using either local or remote (federation) - /// sources - pub async fn get_summary_and_children_client( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], - ) -> Result> { - if let Ok(Some(response)) = self - .get_summary_and_children_local(current_room, Identifier::UserId(user_id)) - .await - { - Ok(Some(response)) - } else { - self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) - .await - } - } + let summary = response.room; + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.to_owned(), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + ); - async fn get_room_summary( - &self, - current_room: &OwnedRoomId, - children_state: Vec>, - identifier: &Identifier<'_>, - ) -> Result { - let room_id: &RoomId = current_room; - - let join_rule = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); - - let allowed_room_ids = self - .services - .state_accessor - .allowed_room_ids(join_rule.clone()); - - if !self - .is_accessible_child( - current_room, - &join_rule.clone().into(), - identifier, - &allowed_room_ids, - ) - .await - { - 
debug_info!("User is not allowed to see room {room_id}"); - // This error will be caught later - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to see the room", - )); - } - - Ok(SpaceHierarchyParentSummary { - canonical_alias: self - .services - .state_accessor - .get_canonical_alias(room_id) - .await - .ok(), - name: self.services.state_accessor.get_name(room_id).await.ok(), - num_joined_members: self - .services - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0) - .try_into() - .expect("user count should not be that big"), - room_id: room_id.to_owned(), - topic: self - .services - .state_accessor - .get_room_topic(room_id) - .await - .ok(), - world_readable: self - .services - .state_accessor - .is_world_readable(room_id) - .await, - guest_can_join: self.services.state_accessor.guest_can_join(room_id).await, - avatar_url: self - .services - .state_accessor - .get_avatar(room_id) - .await - .into_option() - .unwrap_or_default() - .url, - join_rule: join_rule.into(), - room_type: self - .services - .state_accessor - .get_room_type(room_id) - .await - .ok(), - children_state, - allowed_room_ids, + response + .children + .into_iter() + .stream() + .then(|child| { + self.roomid_spacehierarchy_cache + .lock() + .map(|lock| (child, lock)) }) + .ready_filter_map(|(child, mut cache)| { + (!cache.contains_key(current_room)).then_some((child, cache)) + }) + .for_each(|(child, cache)| self.cache_insert(cache, current_room, child)) + .await; + + let identifier = Identifier::UserId(user_id); + let is_accessible_child = self + .is_accessible_child( + current_room, + &summary.join_rule, + &identifier, + &summary.allowed_room_ids, + ) + .await; + + if is_accessible_child { + return Ok(Some(SummaryAccessibility::Accessible(summary))); } - /// Simply returns the stripped m.space.child events of a room - async fn get_stripped_space_child_events( - &self, - room_id: &RoomId, - ) -> Result>>, Error> { - let Ok(current_shortstatehash) 
= - self.services.state.get_room_shortstatehash(room_id).await - else { - return Ok(None); - }; - - let state: HashMap<_, Arc<_>> = self - .services - .state_accessor - .state_full_ids(current_shortstatehash) - .collect() - .await; - - let mut children_pdus = Vec::with_capacity(state.len()); - for (key, id) in state { - let (event_type, state_key) = - self.services.short.get_statekey_from_short(key).await?; - - if event_type != StateEventType::SpaceChild { - continue; - } - - let pdu = - self.services.timeline.get_pdu(&id).await.map_err(|e| { - err!(Database("Event {id:?} in space state not found: {e:?}")) - })?; + Ok(Some(SummaryAccessibility::Inaccessible)) +} +/// Simply returns the stripped m.space.child events of a room +#[implement(Service)] +fn get_stripped_space_child_events<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|current_shortstatehash| { + self.services + .state_accessor + .state_keys_with_ids(current_shortstatehash, &StateEventType::SpaceChild) + }) + .map(Result::into_iter) + .map(IterStream::stream) + .map(StreamExt::flatten) + .flatten_stream() + .broad_filter_map(move |(state_key, event_id): (_, OwnedEventId)| async move { + self.services + .timeline + .get_pdu(&event_id) + .map_ok(move |pdu| (state_key, pdu)) + .await + .ok() + }) + .ready_filter_map(move |(state_key, pdu)| { if let Ok(content) = pdu.get_content::() { if content.via.is_empty() { - continue; + return None; } } - if OwnedRoomId::try_from(state_key).is_ok() { - children_pdus.push(pdu.to_stripped_spacechild_state_event()); + if RoomId::parse(&state_key).is_ok() { + return Some(pdu.to_stripped_spacechild_state_event()); } - } - Ok(Some(children_pdus)) + None + }) +} + +/// Gets the summary of a space using either local or remote (federation) +/// sources +#[implement(Service)] +pub async fn get_summary_and_children_client( + &self, + current_room: &OwnedRoomId, + suggested_only: bool, + 
user_id: &UserId, + via: &[OwnedServerName], +) -> Result> { + let identifier = Identifier::UserId(user_id); + + if let Ok(Some(response)) = self + .get_summary_and_children_local(current_room, &identifier) + .await + { + return Ok(Some(response)); } - /// With the given identifier, checks if a room is accessable - async fn is_accessible_child( - &self, - current_room: &OwnedRoomId, - join_rule: &SpaceRoomJoinRule, - identifier: &Identifier<'_>, - allowed_room_ids: &Vec, - ) -> bool { - match identifier { - | Identifier::ServerName(server_name) => { - // Checks if ACLs allow for the server to participate - if self - .services - .event_handler - .acl_check(server_name, current_room) - .await - .is_err() - { - return false; - } - }, - | Identifier::UserId(user_id) => { - if self - .services - .state_cache - .is_joined(user_id, current_room) - .await || self - .services - .state_cache - .is_invited(user_id, current_room) - .await - { - return true; - } - }, + self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) + .await +} + +#[implement(Service)] +async fn get_room_summary( + &self, + room_id: &RoomId, + children_state: Vec>, + identifier: &Identifier<'_>, +) -> Result { + let join_rule = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") + .await + .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); + + let allowed_room_ids = self + .services + .state_accessor + .allowed_room_ids(join_rule.clone()); + + let join_rule = join_rule.clone().into(); + let is_accessible_child = self + .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) + .await; + + if !is_accessible_child { + return Err!(Request(Forbidden("User is not allowed to see the room",))); + } + + let name = self.services.state_accessor.get_name(room_id).ok(); + + let topic = self.services.state_accessor.get_room_topic(room_id).ok(); + + let room_type = 
self.services.state_accessor.get_room_type(room_id).ok(); + + let world_readable = self.services.state_accessor.is_world_readable(room_id); + + let guest_can_join = self.services.state_accessor.guest_can_join(room_id); + + let num_joined_members = self + .services + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let canonical_alias = self + .services + .state_accessor + .get_canonical_alias(room_id) + .ok(); + + let avatar_url = self + .services + .state_accessor + .get_avatar(room_id) + .map(|res| res.into_option().unwrap_or_default().url); + + let ( + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + ) = futures::join!( + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type + ); + + Ok(SpaceHierarchyParentSummary { + canonical_alias, + name, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + children_state, + allowed_room_ids, + join_rule, + room_id: room_id.to_owned(), + num_joined_members: num_joined_members + .try_into() + .expect("user count should not be that big"), + }) +} + +/// With the given identifier, checks if a room is accessable +#[implement(Service)] +async fn is_accessible_child( + &self, + current_room: &RoomId, + join_rule: &SpaceRoomJoinRule, + identifier: &Identifier<'_>, + allowed_room_ids: &[OwnedRoomId], +) -> bool { + if let Identifier::ServerName(server_name) = identifier { + // Checks if ACLs allow for the server to participate + if self + .services + .event_handler + .acl_check(server_name, current_room) + .await + .is_err() + { + return false; } - match &join_rule { - | SpaceRoomJoinRule::Public - | SpaceRoomJoinRule::Knock - | SpaceRoomJoinRule::KnockRestricted => true, - | SpaceRoomJoinRule::Restricted => { - for room in allowed_room_ids { + } + + if let Identifier::UserId(user_id) = identifier { + let is_joined = self.services.state_cache.is_joined(user_id, 
current_room); + + let is_invited = self.services.state_cache.is_invited(user_id, current_room); + + pin_mut!(is_joined, is_invited); + if is_joined.or(is_invited).await { + return true; + } + } + + match join_rule { + | SpaceRoomJoinRule::Public + | SpaceRoomJoinRule::Knock + | SpaceRoomJoinRule::KnockRestricted => true, + | SpaceRoomJoinRule::Restricted => + allowed_room_ids + .iter() + .stream() + .any(|room| async { match identifier { - | Identifier::UserId(user) => { - if self.services.state_cache.is_joined(user, room).await { - return true; - } - }, - | Identifier::ServerName(server) => { - if self.services.state_cache.server_in_room(server, room).await { - return true; - } - }, + | Identifier::UserId(user) => + self.services.state_cache.is_joined(user, room).await, + | Identifier::ServerName(server) => + self.services.state_cache.server_in_room(server, room).await, } - } - false - }, - // Invite only, Private, or Custom join rule - | _ => false, - } + }) + .await, + + // Invite only, Private, or Custom join rule + | _ => false, } } +/// Returns the children of a SpaceHierarchyParentSummary, making use of the +/// children_state field +pub fn get_parent_children_via( + parent: &SpaceHierarchyParentSummary, + suggested_only: bool, +) -> impl DoubleEndedIterator)> + Send + '_ +{ + parent + .children_state + .iter() + .map(Raw::deserialize) + .filter_map(Result::ok) + .filter_map(move |ce| { + (!suggested_only || ce.content.suggested) + .then_some((ce.state_key, ce.content.via.into_iter())) + }) +} + +#[implement(Service)] +async fn cache_insert( + &self, + mut cache: MutexGuard<'_, Cache>, + current_room: &RoomId, + child: SpaceHierarchyChildSummary, +) { + let SpaceHierarchyChildSummary { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + } = child; + + let summary = SpaceHierarchyParentSummary { + canonical_alias, + name, + num_joined_members, + 
topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + room_id: room_id.clone(), + children_state: self + .get_stripped_space_child_events(&room_id) + .collect() + .await, + }; + + cache.insert(current_room.to_owned(), Some(CachedSpaceHierarchySummary { summary })); +} + // Here because cannot implement `From` across ruma-federation-api and // ruma-client-api types impl From for SpaceHierarchyRoomsChunk { @@ -517,25 +563,3 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR children_state, } } - -/// Returns the children of a SpaceHierarchyParentSummary, making use of the -/// children_state field -#[must_use] -pub fn get_parent_children_via( - parent: &SpaceHierarchyParentSummary, - suggested_only: bool, -) -> Vec<(OwnedRoomId, Vec)> { - parent - .children_state - .iter() - .filter_map(|raw_ce| { - raw_ce.deserialize().map_or(None, |ce| { - if suggested_only && !ce.content.suggested { - None - } else { - Some((ce.state_key, ce.content.via)) - } - }) - }) - .collect() -} diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs index b4c387d7..dd6c2f35 100644 --- a/src/service/rooms/spaces/tests.rs +++ b/src/service/rooms/spaces/tests.rs @@ -1,5 +1,3 @@ -#![cfg(test)] - use std::str::FromStr; use ruma::{ @@ -69,15 +67,22 @@ fn get_summary_children() { } .into(); - assert_eq!(get_parent_children_via(&summary, false), vec![ - (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) - ]); - assert_eq!(get_parent_children_via(&summary, true), vec![( - owned_room_id!("!bar:example.org"), - vec![owned_server_name!("example.org")] - )]); + assert_eq!( + get_parent_children_via(&summary, false) + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + 
(owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) + ] + ); + assert_eq!( + get_parent_children_via(&summary, true) + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![(owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")])] + ); } #[test] From c614d5bf44b477a39a6b819ab4f31fa9f2c626f1 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 17 Feb 2025 17:35:03 -0500 Subject: [PATCH 0682/1248] bump ruwuma Signed-off-by: strawberry --- Cargo.lock | 27 ++++++++++++------------- Cargo.toml | 2 +- flake.lock | 6 +++--- src/api/client/unstable.rs | 40 +++++++++++++++++++++++--------------- 4 files changed, 42 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efba2e07..be2c6720 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3482,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "assign", "js_int", @@ -3504,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "assign", @@ -3539,12 +3539,13 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", + "getrandom 0.2.15", "http", "indexmap 2.7.1", "js_int", @@ -3570,7 +3571,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3595,7 +3596,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "bytes", "http", @@ -3613,7 +3614,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3622,7 +3623,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3632,7 +3633,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3647,7 +3648,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3659,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "headers", "http", @@ -3672,7 +3673,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3688,7 +3689,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 12556e00..bea306f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "e7a793b720e58bbe6858fecb86db97191dbfe7aa" +rev = "4b3a92568310bef42078783e0172b188c5a92b3d" features = [ "compat", "rand", diff --git a/flake.lock b/flake.lock index 5af6ec43..15040a42 100644 --- a/flake.lock +++ b/flake.lock @@ -567,11 +567,11 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1737828695, - "narHash": "sha256-8Ev6zzhNPU798JNvU27a7gj5X+6SDG3jBweUkQ59DbA=", + "lastModified": 1739735789, + "narHash": "sha256-BIzuZS0TV4gRnciP4ieW5J3Hql986iedM5dHQfK6z68=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "a4d9230dcc9d03be428b9a728133f8f646c0065c", + "rev": "34e401fd4392dd3268e042f1e40dffd064b9a7ff", "type": "github" }, "original": { diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 66cb31d5..67c7df75 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -272,7 +272,7 @@ pub(crate) async fn set_profile_key_route( ))); } - let Some(profile_key_value) = body.kv_pair.get(&body.key) else { + let Some(profile_key_value) = 
body.kv_pair.get(&body.key_name) else { return Err!(Request(BadJson( "The key does not match the URL field key, or JSON body is empty (use DELETE)" ))); @@ -290,7 +290,7 @@ pub(crate) async fn set_profile_key_route( return Err!(Request(BadJson("Key names cannot be longer than 128 bytes"))); } - if body.key == "displayname" { + if body.key_name == "displayname" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -306,7 +306,7 @@ pub(crate) async fn set_profile_key_route( &all_joined_rooms, ) .await; - } else if body.key == "avatar_url" { + } else if body.key_name == "avatar_url" { let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string()); let all_joined_rooms: Vec = services @@ -319,9 +319,11 @@ pub(crate) async fn set_profile_key_route( update_avatar_url(&services, &body.user_id, Some(mxc), None, &all_joined_rooms).await; } else { - services - .users - .set_profile_key(&body.user_id, &body.key, Some(profile_key_value.clone())); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(profile_key_value.clone()), + ); } if services.globals.allow_local_presence() { @@ -357,7 +359,7 @@ pub(crate) async fn delete_profile_key_route( ))); } - if body.key == "displayname" { + if body.key_name == "displayname" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -367,7 +369,7 @@ pub(crate) async fn delete_profile_key_route( .await; update_displayname(&services, &body.user_id, None, &all_joined_rooms).await; - } else if body.key == "avatar_url" { + } else if body.key_name == "avatar_url" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -380,7 +382,7 @@ pub(crate) async fn delete_profile_key_route( } else { services .users - .set_profile_key(&body.user_id, &body.key, None); + .set_profile_key(&body.user_id, &body.key_name, None); } if services.globals.allow_local_presence() { @@ -497,11 +499,13 @@ pub(crate) async fn get_profile_key_route( .users .set_timezone(&body.user_id, response.tz.clone()); - if let Some(value) = 
response.custom_profile_fields.get(&body.key) { - profile_key_value.insert(body.key.clone(), value.clone()); - services - .users - .set_profile_key(&body.user_id, &body.key, Some(value.clone())); + if let Some(value) = response.custom_profile_fields.get(&body.key_name) { + profile_key_value.insert(body.key_name.clone(), value.clone()); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(value.clone()), + ); } else { return Err!(Request(NotFound("The requested profile key does not exist."))); } @@ -520,8 +524,12 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("Profile was not found."))); } - if let Ok(value) = services.users.profile_key(&body.user_id, &body.key).await { - profile_key_value.insert(body.key.clone(), value); + if let Ok(value) = services + .users + .profile_key(&body.user_id, &body.key_name) + .await + { + profile_key_value.insert(body.key_name.clone(), value); } else { return Err!(Request(NotFound("The requested profile key does not exist."))); } From 01155fa649ef401d3ca9653439c0a7adf8a83f71 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 21 Feb 2025 17:47:44 +0000 Subject: [PATCH 0683/1248] fix unsafe precondition violation Signed-off-by: Jason Volk --- src/api/client/sync/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 1967f4a2..46540881 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -76,11 +76,13 @@ async fn share_encrypted_room( .state_cache .get_shared_rooms(sender_user, user_id) .ready_filter(|&room_id| Some(room_id) != ignore_room) - .broad_any(|other_room_id| { + .map(ToOwned::to_owned) + .broad_any(|other_room_id| async move { services .rooms .state_accessor - .is_encrypted_room(other_room_id) + .is_encrypted_room(&other_room_id) + .await }) .await } From 1061f68f0e14ee166a14d631540d322492988627 Mon Sep 17 00:00:00 2001 From: morguldir Date: Fri, 21 Feb 2025 21:13:06 
+0100 Subject: [PATCH 0684/1248] bump ruwuma --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index be2c6720..7e84437c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3482,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "assign", "js_int", @@ -3504,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "assign", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "base64 0.22.1", @@ -3571,7 +3571,7 @@ dependencies = [ [[package]] name = "ruma-events" 
version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3596,7 +3596,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "bytes", "http", @@ -3614,7 +3614,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3623,7 +3623,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3633,7 +3633,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" 
dependencies = [ "cfg-if", "proc-macro-crate", @@ -3648,7 +3648,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "headers", "http", @@ -3673,7 +3673,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3689,7 +3689,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index bea306f6..ed7e6ac3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = 
"4b3a92568310bef42078783e0172b188c5a92b3d" +rev = "5dc3e0f81d614ed9dc96b50f646b2e4385291c55" features = [ "compat", "rand", From 8085a1c064afeb61d8136963a671e6bbc15a8f98 Mon Sep 17 00:00:00 2001 From: morguldir Date: Sat, 22 Feb 2025 16:46:06 +0100 Subject: [PATCH 0685/1248] Implement MSC3967, also fixes error when uploading keys in element Co-authored-by: Aiden McClelland Signed-off-by: morguldir --- bin/complement | 2 +- src/api/client/keys.rs | 120 +++++++++++++++++++++++++++++++++------ src/api/server/send.rs | 14 ++--- src/service/users/mod.rs | 60 +++++++++++--------- 4 files changed, 143 insertions(+), 53 deletions(-) diff --git a/bin/complement b/bin/complement index a4c62856..ffd7a938 100755 --- a/bin/complement +++ b/bin/complement @@ -45,7 +45,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests ./tests/msc3967 | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 7bf0a5da..801ae32b 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{err, utils, Error, Result}; +use conduwuit::{debug, err, info, result::NotFound, utils, Err, Error, Result}; use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -15,6 +15,7 @@ use ruma::{ }, federation, }, + encryption::CrossSigningKey, serde::Raw, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; @@ -125,7 +126,24 @@ pub(crate) async fn upload_signing_keys_route( auth_error: None, }; - if let Some(auth) = &body.auth { + if let Ok(exists) = check_for_new_keys( + services, + sender_user, 
+ body.self_signing_key.as_ref(), + body.user_signing_key.as_ref(), + body.master_key.as_ref(), + ) + .await + .inspect_err(|e| info!(?e)) + { + if let Some(result) = exists { + // No-op, they tried to reupload the same set of keys + // (lost connection for example) + return Ok(result); + } + debug!("Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"); + // Some of the keys weren't found, so we let them upload + } else if let Some(auth) = &body.auth { let (worked, uiaainfo) = services .uiaa .try_auth(sender_user, sender_device, auth, &uiaainfo) @@ -134,7 +152,7 @@ pub(crate) async fn upload_signing_keys_route( if !worked { return Err(Error::Uiaa(uiaainfo)); } - // Success! + // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services @@ -146,22 +164,90 @@ pub(crate) async fn upload_signing_keys_route( return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - if let Some(master_key) = &body.master_key { - services - .users - .add_cross_signing_keys( - sender_user, - master_key, - &body.self_signing_key, - &body.user_signing_key, - true, // notify so that other users see the new keys - ) - .await?; - } + services + .users + .add_cross_signing_keys( + sender_user, + &body.master_key, + &body.self_signing_key, + &body.user_signing_key, + true, // notify so that other users see the new keys + ) + .await?; Ok(upload_signing_keys::v3::Response {}) } +async fn check_for_new_keys( + services: crate::State, + user_id: &UserId, + self_signing_key: Option<&Raw>, + user_signing_key: Option<&Raw>, + master_signing_key: Option<&Raw>, +) -> Result> { + debug!("checking for existing keys"); + let mut empty = false; + if let Some(master_signing_key) = master_signing_key { + let (key, value) = parse_master_key(user_id, master_signing_key)?; + let result = services + .users + .get_master_key(None, user_id, &|_| true) + .await; + if result.is_not_found() { + empty = true; + } 
else { + let existing_master_key = result?; + let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?; + if existing_key != key || existing_value != value { + return Err!(Request(Forbidden( + "Tried to change an existing master key, UIA required" + ))); + } + } + } + if let Some(user_signing_key) = user_signing_key { + let key = services.users.get_user_signing_key(user_id).await; + if key.is_not_found() && !empty { + return Err!(Request(Forbidden( + "Tried to update an existing user signing key, UIA required" + ))); + } + if !key.is_not_found() { + let existing_signing_key = key?.deserialize()?; + if existing_signing_key != user_signing_key.deserialize()? { + return Err!(Request(Forbidden( + "Tried to change an existing user signing key, UIA required" + ))); + } + } + } + if let Some(self_signing_key) = self_signing_key { + let key = services + .users + .get_self_signing_key(None, user_id, &|_| true) + .await; + if key.is_not_found() && !empty { + debug!(?key); + return Err!(Request(Forbidden( + "Tried to add a new signing key independently from the master key" + ))); + } + if !key.is_not_found() { + let existing_signing_key = key?.deserialize()?; + if existing_signing_key != self_signing_key.deserialize()? { + return Err!(Request(Forbidden( + "Tried to update an existing self signing key, UIA required" + ))); + } + } + } + if empty { + return Ok(None); + } + + Ok(Some(upload_signing_keys::v3::Response {})) +} + /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. 
@@ -407,7 +493,9 @@ where * resulting in an endless loop */ ) .await?; - master_keys.insert(user.clone(), raw); + if let Some(raw) = raw { + master_keys.insert(user.clone(), raw); + } } self_signing_keys.extend(response.self_signing_keys); diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 2e615a0c..bc18377e 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -585,12 +585,10 @@ async fn handle_edu_signing_key_update( return; } - if let Some(master_key) = master_key { - services - .users - .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) - .await - .log_err() - .ok(); - } + services + .users + .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) + .await + .log_err() + .ok(); } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 68b87541..f0389a4a 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -514,7 +514,7 @@ impl Service { pub async fn add_cross_signing_keys( &self, user_id: &UserId, - master_key: &Raw, + master_key: &Option>, self_signing_key: &Option>, user_signing_key: &Option>, notify: bool, @@ -523,15 +523,17 @@ impl Service { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xFF); - let (master_key_key, _) = parse_master_key(user_id, master_key)?; + if let Some(master_key) = master_key { + let (master_key_key, _) = parse_master_key(user_id, master_key)?; - self.db - .keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes()); + self.db + .keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes()); - self.db - .userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key); + self.db + .userid_masterkeyid + .insert(user_id.as_bytes(), &master_key_key); + } // Self-signing key if let Some(self_signing_key) = self_signing_key { @@ -567,32 +569,16 @@ impl Service { // User-signing key if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - 
.deserialize() - .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? - .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids - .next() - .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; - - if user_signing_key_ids.next().is_some() { - return Err!(Request(InvalidParam( - "User signing key contained more than one key." - ))); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); + let user_signing_key_id = parse_user_signing_key(user_signing_key)?; + let user_signing_key_key = (user_id, &user_signing_key_id); self.db .keyid_key - .insert(&user_signing_key_key, user_signing_key.json().get().as_bytes()); + .put_raw(user_signing_key_key, user_signing_key.json().get().as_bytes()); self.db .userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key); + .put(user_id, user_signing_key_key); } if notify { @@ -1079,6 +1065,24 @@ pub fn parse_master_key( Ok((master_key_key, master_key)) } +pub fn parse_user_signing_key(user_signing_key: &Raw) -> Result { + let mut user_signing_key_ids = user_signing_key + .deserialize() + .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? 
+ .keys + .into_values(); + + let user_signing_key_id = user_signing_key_ids + .next() + .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; + + if user_signing_key_ids.next().is_some() { + return Err!(Request(InvalidParam("User signing key contained more than one key."))); + } + + Ok(user_signing_key_id) +} + /// Ensure that a user only sees signatures from themselves and the target user fn clean_signatures( mut cross_signing_key: serde_json::Value, From bec19df275f100f15fa58dc8654a2ec41958eacc Mon Sep 17 00:00:00 2001 From: morguldir Date: Sat, 22 Feb 2025 17:12:31 +0100 Subject: [PATCH 0686/1248] increase compression levels for some column families again --- src/database/engine/descriptor.rs | 4 ++-- src/database/maps.rs | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 934ef831..816555d2 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -83,7 +83,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { write_size: 1024 * 1024 * 32, cache_shards: 128, compression_level: -3, - bottommost_level: Some(-1), + bottommost_level: Some(2), compressed_index: true, ..BASE }; @@ -95,7 +95,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { file_size: 1024 * 1024 * 2, cache_shards: 128, compression_level: -2, - bottommost_level: Some(-1), + bottommost_level: Some(2), compression_shape: [0, 0, 1, 1, 1, 1, 1], compressed_index: false, ..BASE diff --git a/src/database/maps.rs b/src/database/maps.rs index 9ae5ab44..fc216ee0 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -171,6 +171,8 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "roomsynctoken_shortstatehash", val_size_hint: Some(8), block_size: 512, + compression_level: 3, + bottommost_level: Some(6), ..descriptor::SEQUENTIAL }, Descriptor { From e97952b7f6d310d5954a0d9e6b8979d25b090387 Mon Sep 17 00:00:00 2001 From: June Clementine 
Strawberry Date: Sun, 23 Feb 2025 01:17:30 -0500 Subject: [PATCH 0687/1248] bump nix lock, update to rust 2024 and 1.85.0 toolchain Signed-off-by: June Clementine Strawberry --- Cargo.toml | 7 +++++-- flake.lock | 36 ++++++++++++++++++------------------ flake.nix | 2 +- rust-toolchain.toml | 2 +- rustfmt.toml | 2 +- 5 files changed, 26 insertions(+), 23 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ed7e6ac3..76de2212 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,14 +13,14 @@ authors = [ ] categories = ["network-programming"] description = "a very cool Matrix chat homeserver written in Rust" -edition = "2021" +edition = "2024" homepage = "https://conduwuit.puppyirl.gay/" keywords = ["chat", "matrix", "networking", "server", "uwu"] license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.84.0" +rust-version = "1.85.0" version = "0.5.0" [workspace.metadata.crane] @@ -975,3 +975,6 @@ suspicious = { level = "warn", priority = -1 } ## some sadness let_underscore_future = { level = "allow", priority = 1 } + +# rust doesnt understand conduwuit's custom log macros +literal_string_with_formatting_args = { level = "allow", priority = 1 } diff --git a/flake.lock b/flake.lock index 15040a42..9bf6ac55 100644 --- a/flake.lock +++ b/flake.lock @@ -10,11 +10,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1731270564, - "narHash": "sha256-6KMC/NH/VWP5Eb+hA56hz0urel3jP6Y6cF2PX6xaTkk=", + "lastModified": 1738524606, + "narHash": "sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=", "owner": "zhaofengli", "repo": "attic", - "rev": "47752427561f1c34debb16728a210d378f0ece36", + "rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e", "type": "github" }, "original": { @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1737689766, - "narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=", + "lastModified": 1739936662, + "narHash": 
"sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=", "owner": "ipetkov", "repo": "crane", - "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527", + "rev": "19de14aaeb869287647d9461cbd389187d8ecdb7", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1737786656, - "narHash": "sha256-ubCW9Jy7ZUOF354bWxTgLDpVnTvIpNr6qR4H/j7I0oo=", + "lastModified": 1740206139, + "narHash": "sha256-wWSv4KYhPKggKuJLzghfBs99pS3Kli9UBlyXVBzuIzc=", "owner": "nix-community", "repo": "fenix", - "rev": "2f721f527886f801403f389a9cabafda8f1e3b7f", + "rev": "133a9eb59fb4ddac443ebe5ab2449d3940396533", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1737600516, - "narHash": "sha256-EKyLQ3pbcjoU5jH5atge59F4fzuhTsb6yalUj6Ve2t8=", + "lastModified": 1740063075, + "narHash": "sha256-AfrCMPiXwgB0yxociq4no4NjCqGf/nRVhC3CLRoKqhA=", "owner": "axboe", "repo": "liburing", - "rev": "6c509e2b0c881a13b83b259a221bf15fc9b3f681", + "rev": "5c788d514b9ed6d1a3624150de8aa6db403c1c65", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1737717945, - "narHash": "sha256-ET91TMkab3PmOZnqiJQYOtSGvSTvGeHoegAv4zcTefM=", + "lastModified": 1740019556, + "narHash": "sha256-vn285HxnnlHLWnv59Og7muqECNMS33mWLM14soFIv2g=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "ecd26a469ac56357fd333946a99086e992452b6a", + "rev": "dad564433178067be1fbdfcce23b546254b6d641", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1737728869, - "narHash": "sha256-U4pl3Hi0lT6GP4ecN3q9wdD2sdaKMbmD/5NJ1NdJ9AM=", + "lastModified": 1740077634, + "narHash": "sha256-KlYdDhon/hy91NutuBeN8e3qTKf3FXgsudWsjnHud68=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "6e4c29f7ce18cea7d3d31237a4661ab932eab636", + "rev": "88fbdcd510e79ef3bcd81d6d9d4f07bdce84be8c", "type": "github" }, 
"original": { diff --git a/flake.nix b/flake.nix index 3cef1af5..04dee681 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-lMLAupxng4Fd9F1oDw8gx+qA0RuF7ou7xhNU8wgs0PU="; + sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 97e33c91..00fb6cee 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. [toolchain] -channel = "1.84.0" +channel = "1.85.0" profile = "minimal" components = [ # For rust-analyzer diff --git a/rustfmt.toml b/rustfmt.toml index 635ec8f8..89041b04 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -2,7 +2,7 @@ array_width = 80 chain_width = 60 comment_width = 80 condense_wildcard_suffixes = true -edition = "2024" +style_edition = "2024" fn_call_width = 80 fn_single_line = true format_code_in_doc_comments = true From a1e1f40deda8f974d61b0095fc41356cc3fda43f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 01:17:45 -0500 Subject: [PATCH 0688/1248] run cargo fix for rust 2024 changes and rustfmt Signed-off-by: June Clementine Strawberry --- src/admin/appservice/commands.rs | 2 +- src/admin/command.rs | 4 +- src/admin/debug/commands.rs | 37 +- src/admin/debug/tester.rs | 2 +- src/admin/federation/commands.rs | 2 +- src/admin/media/commands.rs | 157 +++---- src/admin/processor.rs | 14 +- src/admin/query/account_data.rs | 2 +- src/admin/query/raw.rs | 5 +- src/admin/query/resolver.rs | 4 +- src/admin/query/room_state_cache.rs | 2 +- src/admin/query/room_timeline.rs | 4 +- src/admin/query/sending.rs | 2 +- src/admin/query/short.rs | 2 +- src/admin/query/users.rs | 2 +- src/admin/room/alias.rs | 4 +- src/admin/room/commands.rs | 4 +- src/admin/room/directory.rs | 4 +- src/admin/room/info.rs | 4 +- 
src/admin/room/moderation.rs | 198 ++++----- src/admin/server/commands.rs | 2 +- src/admin/user/commands.rs | 8 +- src/admin/utils.rs | 2 +- src/api/client/account.rs | 201 +++++---- src/api/client/account_data.rs | 6 +- src/api/client/alias.rs | 30 +- src/api/client/appservice.rs | 2 +- src/api/client/backup.rs | 4 +- src/api/client/capabilities.rs | 2 +- src/api/client/context.rs | 11 +- src/api/client/device.rs | 86 ++-- src/api/client/directory.rs | 41 +- src/api/client/keys.rs | 130 +++--- src/api/client/media.rs | 7 +- src/api/client/media_legacy.rs | 252 ++++++------ src/api/client/membership.rs | 157 +++---- src/api/client/message.rs | 13 +- src/api/client/presence.rs | 57 +-- src/api/client/profile.rs | 9 +- src/api/client/push.rs | 6 +- src/api/client/read_marker.rs | 6 +- src/api/client/redact.rs | 2 +- src/api/client/relations.rs | 13 +- src/api/client/report.rs | 10 +- src/api/client/room/create.rs | 25 +- src/api/client/room/event.rs | 6 +- src/api/client/room/initial_sync.rs | 5 +- src/api/client/room/upgrade.rs | 7 +- src/api/client/search.rs | 11 +- src/api/client/send.rs | 4 +- src/api/client/session.rs | 68 ++-- src/api/client/space.rs | 12 +- src/api/client/state.rs | 6 +- src/api/client/sync/mod.rs | 14 +- src/api/client/sync/v3.rs | 65 +-- src/api/client/sync/v4.rs | 25 +- src/api/client/sync/v5.rs | 24 +- src/api/client/tag.rs | 2 +- src/api/client/threads.rs | 2 +- src/api/client/typing.rs | 65 +-- src/api/client/unstable.rs | 34 +- src/api/client/unversioned.rs | 2 +- src/api/client/user_directory.rs | 4 +- src/api/client/voip.rs | 6 +- src/api/client/well_known.rs | 2 +- src/api/mod.rs | 2 +- src/api/router.rs | 6 +- src/api/router/args.rs | 8 +- src/api/router/auth.rs | 24 +- src/api/router/handler.rs | 4 +- src/api/router/request.rs | 4 +- src/api/router/response.rs | 4 +- src/api/server/backfill.rs | 4 +- src/api/server/event.rs | 4 +- src/api/server/event_auth.rs | 4 +- src/api/server/get_missing_events.rs | 2 +- 
src/api/server/hierarchy.rs | 4 +- src/api/server/invite.rs | 6 +- src/api/server/key.rs | 8 +- src/api/server/make_join.rs | 8 +- src/api/server/make_knock.rs | 8 +- src/api/server/make_leave.rs | 2 +- src/api/server/media.rs | 8 +- src/api/server/query.rs | 4 +- src/api/server/send.rs | 12 +- src/api/server/send_join.rs | 16 +- src/api/server/send_knock.rs | 14 +- src/api/server/send_leave.rs | 8 +- src/api/server/state.rs | 4 +- src/api/server/state_ids.rs | 4 +- src/api/server/user.rs | 2 +- src/api/server/utils.rs | 4 +- src/core/alloc/je.rs | 25 +- src/core/config/check.rs | 2 +- src/core/config/manager.rs | 4 +- src/core/config/mod.rs | 10 +- src/core/debug.rs | 2 +- src/core/error/err.rs | 4 +- src/core/error/mod.rs | 4 +- src/core/error/panic.rs | 2 +- src/core/error/response.rs | 2 +- src/core/info/room_version.rs | 2 +- src/core/log/capture/data.rs | 4 +- src/core/log/capture/util.rs | 2 +- src/core/log/console.rs | 6 +- src/core/log/fmt.rs | 2 +- src/core/log/mod.rs | 4 +- src/core/log/reload.rs | 4 +- src/core/mods/module.rs | 4 +- src/core/mods/new.rs | 2 +- src/core/pdu/builder.rs | 4 +- src/core/pdu/content.rs | 2 +- src/core/pdu/count.rs | 2 +- src/core/pdu/event.rs | 2 +- src/core/pdu/event_id.rs | 2 +- src/core/pdu/mod.rs | 6 +- src/core/pdu/raw_id.rs | 4 +- src/core/pdu/redact.rs | 31 +- src/core/pdu/strip.rs | 2 +- src/core/pdu/unsigned.rs | 4 +- src/core/server.rs | 4 +- src/core/state_res/event_auth.rs | 383 +++++++++--------- src/core/state_res/mod.rs | 25 +- src/core/state_res/power_levels.rs | 8 +- src/core/state_res/state_event.rs | 2 +- src/core/state_res/test_utils.rs | 16 +- src/core/utils/bytes.rs | 2 +- src/core/utils/defer.rs | 4 +- src/core/utils/future/bool_ext.rs | 2 +- src/core/utils/future/ext_ext.rs | 2 +- src/core/utils/future/mod.rs | 2 +- src/core/utils/future/option_ext.rs | 2 +- src/core/utils/future/try_ext_ext.rs | 3 +- src/core/utils/hash/argon.rs | 6 +- src/core/utils/json.rs | 2 +- src/core/utils/math.rs | 2 +- 
src/core/utils/math/tried.rs | 2 +- src/core/utils/mod.rs | 8 +- src/core/utils/mutex_map.rs | 2 +- src/core/utils/rand.rs | 2 +- src/core/utils/stream/broadband.rs | 4 +- src/core/utils/stream/cloned.rs | 2 +- src/core/utils/stream/ignore.rs | 2 +- src/core/utils/stream/iter_stream.rs | 3 +- src/core/utils/stream/mod.rs | 4 +- src/core/utils/stream/ready.rs | 2 +- src/core/utils/stream/try_parallel.rs | 4 +- src/core/utils/stream/try_ready.rs | 2 +- src/core/utils/stream/try_tools.rs | 2 +- src/core/utils/stream/wideband.rs | 4 +- src/core/utils/string.rs | 2 +- src/core/utils/string/unquoted.rs | 4 +- src/core/utils/sys.rs | 4 +- src/core/utils/sys/compute.rs | 6 +- src/core/utils/sys/storage.rs | 4 +- src/core/utils/tests.rs | 2 +- src/core/utils/time.rs | 2 +- src/database/de.rs | 5 +- src/database/engine.rs | 6 +- src/database/engine/backup.rs | 2 +- src/database/engine/cf_opts.rs | 4 +- src/database/engine/context.rs | 2 +- src/database/engine/db_opts.rs | 4 +- src/database/engine/files.rs | 4 +- src/database/engine/memory_usage.rs | 2 +- src/database/engine/open.rs | 8 +- src/database/engine/repair.rs | 2 +- src/database/handle.rs | 2 +- src/database/keyval.rs | 2 +- src/database/map.rs | 2 +- src/database/map/compact.rs | 2 +- src/database/map/contains.rs | 16 +- src/database/map/count.rs | 7 +- src/database/map/get.rs | 11 +- src/database/map/get_batch.rs | 11 +- src/database/map/keys.rs | 2 +- src/database/map/keys_from.rs | 18 +- src/database/map/keys_prefix.rs | 10 +- src/database/map/qry.rs | 13 +- src/database/map/qry_batch.rs | 7 +- src/database/map/rev_keys.rs | 2 +- src/database/map/rev_keys_from.rs | 12 +- src/database/map/rev_keys_prefix.rs | 10 +- src/database/map/rev_stream.rs | 2 +- src/database/map/rev_stream_from.rs | 12 +- src/database/map/rev_stream_prefix.rs | 10 +- src/database/map/stream.rs | 2 +- src/database/map/stream_from.rs | 12 +- src/database/map/stream_prefix.rs | 10 +- src/database/maps.rs | 2 +- src/database/mod.rs | 10 +- 
src/database/pool.rs | 9 +- src/database/pool/configure.rs | 3 +- src/database/ser.rs | 4 +- src/database/stream.rs | 4 +- src/database/stream/items.rs | 4 +- src/database/stream/items_rev.rs | 4 +- src/database/stream/keys.rs | 4 +- src/database/stream/keys_rev.rs | 4 +- src/database/tests.rs | 7 +- src/database/watchers.rs | 2 +- src/macros/admin.rs | 6 +- src/macros/cargo.rs | 2 +- src/macros/config.rs | 8 +- src/macros/implement.rs | 2 +- src/macros/mod.rs | 3 +- src/macros/refutable.rs | 4 +- src/macros/utils.rs | 2 +- src/main/clap.rs | 2 +- src/main/logging.rs | 6 +- src/main/main.rs | 4 +- src/main/mods.rs | 4 +- src/main/runtime.rs | 5 +- src/main/sentry.rs | 8 +- src/main/server.rs | 2 +- src/router/layers.rs | 18 +- src/router/request.rs | 4 +- src/router/router.rs | 2 +- src/router/run.rs | 4 +- src/router/serve/mod.rs | 2 +- src/router/serve/plain.rs | 6 +- src/router/serve/tls.rs | 4 +- src/router/serve/unix.rs | 10 +- src/service/account_data/mod.rs | 9 +- src/service/admin/console.rs | 8 +- src/service/admin/create.rs | 4 +- src/service/admin/execute.rs | 4 +- src/service/admin/grant.rs | 6 +- src/service/admin/mod.rs | 6 +- src/service/appservice/mod.rs | 6 +- src/service/appservice/registration_info.rs | 2 +- src/service/client/mod.rs | 9 +- src/service/config/mod.rs | 5 +- src/service/emergency/mod.rs | 6 +- src/service/federation/execute.rs | 12 +- src/service/federation/mod.rs | 2 +- src/service/globals/data.rs | 2 +- src/service/globals/mod.rs | 2 +- src/service/key_backups/mod.rs | 7 +- src/service/manager.rs | 4 +- src/service/media/blurhash.rs | 2 +- src/service/media/data.rs | 7 +- src/service/media/migrations.rs | 6 +- src/service/media/mod.rs | 70 ++-- src/service/media/preview.rs | 2 +- src/service/media/remote.rs | 10 +- src/service/media/tests.rs | 2 +- src/service/media/thumbnail.rs | 18 +- src/service/migrations.rs | 12 +- src/service/mod.rs | 2 +- src/service/presence/data.rs | 9 +- src/service/presence/mod.rs | 8 +- 
src/service/presence/presence.rs | 4 +- src/service/pusher/mod.rs | 17 +- src/service/resolver/actual.rs | 26 +- src/service/resolver/cache.rs | 6 +- src/service/resolver/dns.rs | 4 +- src/service/resolver/mod.rs | 4 +- src/service/resolver/tests.rs | 2 +- src/service/rooms/alias/mod.rs | 15 +- src/service/rooms/alias/remote.rs | 4 +- src/service/rooms/auth_chain/data.rs | 2 +- src/service/rooms/auth_chain/mod.rs | 8 +- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/event_handler/acl_check.rs | 4 +- .../fetch_and_handle_outliers.rs | 23 +- src/service/rooms/event_handler/fetch_prev.rs | 90 ++-- .../rooms/event_handler/fetch_state.rs | 8 +- .../event_handler/handle_incoming_pdu.rs | 8 +- .../rooms/event_handler/handle_outlier_pdu.rs | 10 +- .../rooms/event_handler/handle_prev_pdu.rs | 4 +- src/service/rooms/event_handler/mod.rs | 8 +- .../rooms/event_handler/parse_incoming_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 13 +- .../rooms/event_handler/state_at_incoming.rs | 5 +- .../event_handler/upgrade_outlier_pdu.rs | 9 +- src/service/rooms/lazy_loading/mod.rs | 9 +- src/service/rooms/metadata/mod.rs | 4 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/pdu_metadata/data.rs | 10 +- src/service/rooms/pdu_metadata/mod.rs | 8 +- src/service/rooms/read_receipt/data.rs | 8 +- src/service/rooms/read_receipt/mod.rs | 24 +- src/service/rooms/search/mod.rs | 16 +- src/service/rooms/short/mod.rs | 6 +- src/service/rooms/spaces/mod.rs | 18 +- src/service/rooms/spaces/pagination_token.rs | 2 +- src/service/rooms/spaces/tests.rs | 4 +- src/service/rooms/state/mod.rs | 132 +++--- src/service/rooms/state_accessor/mod.rs | 13 +- .../rooms/state_accessor/room_state.rs | 4 +- .../rooms/state_accessor/server_can.rs | 8 +- src/service/rooms/state_accessor/state.rs | 15 +- src/service/rooms/state_accessor/user_can.rs | 64 +-- src/service/rooms/state_cache/mod.rs | 18 +- src/service/rooms/state_compressor/mod.rs | 5 +- src/service/rooms/threads/mod.rs | 
20 +- src/service/rooms/timeline/data.rs | 9 +- src/service/rooms/timeline/mod.rs | 23 +- src/service/rooms/typing/mod.rs | 9 +- src/service/rooms/user/mod.rs | 4 +- src/service/sending/appservice.rs | 4 +- src/service/sending/data.rs | 11 +- src/service/sending/mod.rs | 12 +- src/service/sending/sender.rs | 30 +- src/service/server_keys/acquire.rs | 8 +- src/service/server_keys/get.rs | 8 +- src/service/server_keys/keypair.rs | 2 +- src/service/server_keys/mod.rs | 11 +- src/service/server_keys/request.rs | 14 +- src/service/server_keys/sign.rs | 2 +- src/service/server_keys/verify.rs | 4 +- src/service/service.rs | 2 +- src/service/services.rs | 2 +- src/service/sync/mod.rs | 4 +- src/service/sync/watch.rs | 4 +- src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/mod.rs | 10 +- src/service/updates/mod.rs | 6 +- src/service/users/mod.rs | 24 +- 320 files changed, 2212 insertions(+), 2039 deletions(-) diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 4f02531a..88f28431 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -1,6 +1,6 @@ use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; -use crate::{admin_command, Result}; +use crate::{Result, admin_command}; #[admin_command] pub(super) async fn register(&self) -> Result { diff --git a/src/admin/command.rs b/src/admin/command.rs index 5ad9e581..5df980d6 100644 --- a/src/admin/command.rs +++ b/src/admin/command.rs @@ -3,9 +3,9 @@ use std::{fmt, time::SystemTime}; use conduwuit::Result; use conduwuit_service::Services; use futures::{ + Future, FutureExt, io::{AsyncWriteExt, BufWriter}, lock::Mutex, - Future, FutureExt, }; use ruma::EventId; @@ -21,7 +21,7 @@ impl Command<'_> { pub(crate) fn write_fmt( &self, arguments: fmt::Arguments<'_>, - ) -> impl Future + Send + '_ { + ) -> impl Future + Send + '_ + use<'_> { let buf = format!("{arguments}"); self.output.lock().then(|mut output| async move { 
output.write_all(buf.as_bytes()).await.map_err(Into::into) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index dcf9879c..c6f6a170 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,19 +6,19 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, + Error, PduEvent, PduId, RawPduId, Result, debug_error, err, info, trace, utils, utils::{ stream::{IterStream, ReadyExt}, string::EMPTY, }, - warn, Error, PduEvent, PduId, RawPduId, Result, + warn, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::{client::error::ErrorKind, federation::event::get_room_state}, - events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName, + api::{client::error::ErrorKind, federation::event::get_room_state}, + events::room::message::RoomMessageEventContent, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -209,18 +209,21 @@ pub(super) async fn get_remote_pdu_list( for pdu in list { if force { - if let Err(e) = self.get_remote_pdu(Box::from(pdu), server.clone()).await { - failed_count = failed_count.saturating_add(1); - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to get remote PDU, ignoring error: {e}" - ))) - .await - .ok(); - warn!("Failed to get remote PDU, ignoring error: {e}"); - } else { - success_count = success_count.saturating_add(1); + match self.get_remote_pdu(Box::from(pdu), server.clone()).await { + | Err(e) => { + failed_count = failed_count.saturating_add(1); + self.services + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "Failed to get remote PDU, ignoring error: {e}" + ))) + .await + .ok(); + warn!("Failed to get remote PDU, ignoring error: {e}"); + }, + | _ => { + success_count = success_count.saturating_add(1); + }, } } else { self.get_remote_pdu(Box::from(pdu), server.clone()).await?; @@ -957,7 +960,7 @@ 
pub(super) async fn database_stats( self.services .db .iter() - .filter(|(&name, _)| map_name.is_empty() || map_name == name) + .filter(|&(&name, _)| map_name.is_empty() || map_name == name) .try_stream() .try_for_each(|(&name, map)| { let res = map.property(&property).expect("invalid property"); diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 5200fa0d..005ee775 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,7 +1,7 @@ use conduwuit::Err; use ruma::events::room::message::RoomMessageEventContent; -use crate::{admin_command, admin_command_dispatch, Result}; +use crate::{Result, admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 13bc8da4..240ffa6a 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -3,7 +3,7 @@ use std::fmt::Write; use conduwuit::Result; use futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId, + OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent, }; use crate::{admin_command, get_room_info}; diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index 3d0a9473..aeefa9f2 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,12 +1,12 @@ use std::time::Duration; use conduwuit::{ - debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result, + Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, }; use conduwuit_service::media::Dim; use ruma::{ - events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, - OwnedServerName, ServerName, + EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, + events::room::message::RoomMessageEventContent, }; use crate::{admin_command, 
utils::parse_local_user_id}; @@ -41,103 +41,106 @@ pub(super) async fn delete( let mut mxc_urls = Vec::with_capacity(4); // parsing the PDU for any MXC URLs begins here - if let Ok(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id).await { - if let Some(content_key) = event_json.get("content") { - debug!("Event ID has \"content\"."); - let content_obj = content_key.as_object(); + match self.services.rooms.timeline.get_pdu_json(&event_id).await { + | Ok(event_json) => { + if let Some(content_key) = event_json.get("content") { + debug!("Event ID has \"content\"."); + let content_obj = content_key.as_object(); - if let Some(content) = content_obj { - // 1. attempts to parse the "url" key - debug!("Attempting to go into \"url\" key for main media file"); - if let Some(url) = content.get("url") { - debug!("Got a URL in the event ID {event_id}: {url}"); + if let Some(content) = content_obj { + // 1. attempts to parse the "url" key + debug!("Attempting to go into \"url\" key for main media file"); + if let Some(url) = content.get("url") { + debug!("Got a URL in the event ID {event_id}: {url}"); - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); - } else { - info!( - "Found a URL in the event ID {event_id} but did not start with \ - mxc://, ignoring" - ); - } - } - - // 2. 
attempts to parse the "info" key - debug!("Attempting to go into \"info\" key for thumbnails"); - if let Some(info_key) = content.get("info") { - debug!("Event ID has \"info\"."); - let info_obj = info_key.as_object(); - - if let Some(info) = info_obj { - if let Some(thumbnail_url) = info.get("thumbnail_url") { - debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - - if thumbnail_url.to_string().starts_with("\"mxc://") { - debug!( - "Pushing thumbnail URL {thumbnail_url} to list of MXCs \ - to delete" - ); - let final_thumbnail_url = - thumbnail_url.to_string().replace('"', ""); - mxc_urls.push(final_thumbnail_url); - } else { - info!( - "Found a thumbnail URL in the event ID {event_id} but \ - did not start with mxc://, ignoring" - ); - } + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); } else { info!( - "No \"thumbnail_url\" key in \"info\" key, assuming no \ - thumbnails." + "Found a URL in the event ID {event_id} but did not start \ + with mxc://, ignoring" ); } } - } - // 3. attempts to parse the "file" key - debug!("Attempting to go into \"file\" key"); - if let Some(file_key) = content.get("file") { - debug!("Event ID has \"file\"."); - let file_obj = file_key.as_object(); + // 2. 
attempts to parse the "info" key + debug!("Attempting to go into \"info\" key for thumbnails"); + if let Some(info_key) = content.get("info") { + debug!("Event ID has \"info\"."); + let info_obj = info_key.as_object(); - if let Some(file) = file_obj { - if let Some(url) = file.get("url") { - debug!("Found url in file key: {url}"); + if let Some(info) = info_obj { + if let Some(thumbnail_url) = info.get("thumbnail_url") { + debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); + if thumbnail_url.to_string().starts_with("\"mxc://") { + debug!( + "Pushing thumbnail URL {thumbnail_url} to list of \ + MXCs to delete" + ); + let final_thumbnail_url = + thumbnail_url.to_string().replace('"', ""); + mxc_urls.push(final_thumbnail_url); + } else { + info!( + "Found a thumbnail URL in the event ID {event_id} \ + but did not start with mxc://, ignoring" + ); + } } else { info!( - "Found a URL in the event ID {event_id} but did not \ - start with mxc://, ignoring" + "No \"thumbnail_url\" key in \"info\" key, assuming no \ + thumbnails." ); } - } else { - info!("No \"url\" key in \"file\" key."); } } + + // 3. 
attempts to parse the "file" key + debug!("Attempting to go into \"file\" key"); + if let Some(file_key) = content.get("file") { + debug!("Event ID has \"file\"."); + let file_obj = file_key.as_object(); + + if let Some(file) = file_obj { + if let Some(url) = file.get("url") { + debug!("Found url in file key: {url}"); + + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); + } else { + info!( + "Found a URL in the event ID {event_id} but did not \ + start with mxc://, ignoring" + ); + } + } else { + info!("No \"url\" key in \"file\" key."); + } + } + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not have a \"content\" key or failed parsing the \ + event ID JSON.", + )); } } else { return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key or failed parsing the event \ - ID JSON.", + "Event ID does not have a \"content\" key, this is not a message or an \ + event type that contains media.", )); } - } else { + }, + | _ => { return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key, this is not a message or an \ - event type that contains media.", + "Event ID does not exist or is not known to us.", )); - } - } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not exist or is not known to us.", - )); + }, } if mxc_urls.is_empty() { diff --git a/src/admin/processor.rs b/src/admin/processor.rs index eefcdcd6..77a60959 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -8,7 +8,7 @@ use std::{ use clap::{CommandFactory, Parser}; use conduwuit::{ - debug, error, + Error, Result, debug, error, log::{ capture, capture::Capture, @@ -16,24 +16,24 @@ use conduwuit::{ }, trace, utils::string::{collect_stream, common_prefix}, - warn, Error, Result, + warn, }; -use futures::{future::FutureExt, io::BufWriter, 
AsyncWriteExt}; +use futures::{AsyncWriteExt, future::FutureExt, io::BufWriter}; use ruma::{ + EventId, events::{ relation::InReplyTo, room::message::{Relation::Reply, RoomMessageEventContent}, }, - EventId, }; use service::{ - admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult}, Services, + admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult}, }; use tracing::Level; -use tracing_subscriber::{filter::LevelFilter, EnvFilter}; +use tracing_subscriber::{EnvFilter, filter::LevelFilter}; -use crate::{admin, admin::AdminCommand, Command}; +use crate::{Command, admin, admin::AdminCommand}; #[must_use] pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index bb8ddeff..b2bf5e6d 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; +use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 5a6006ec..23f11cc8 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -2,13 +2,12 @@ use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; use clap::Subcommand; use conduwuit::{ - apply, at, is_zero, + Err, Result, apply, at, is_zero, utils::{ + IterStream, stream::{ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, - IterStream, }, - Err, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::events::room::message::RoomMessageEventContent; diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 08b5d171..10748d88 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use 
conduwuit::{utils::time, Result}; +use conduwuit::{Result, utils::time}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName}; +use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 71dadc99..1de5c02d 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{Error, Result}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId}; +use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent}; use crate::Command; diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 3fe653e3..6f08aee9 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::stream::TryTools, PduCount, Result}; +use conduwuit::{PduCount, Result, utils::stream::TryTools}; use futures::TryStreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomOrAliasId}; +use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 8c6fb25f..a148f718 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, ServerName, UserId}; +use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent}; use service::sending::Destination; use crate::Command; diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs index 7f0f3449..0957c15e 100644 --- a/src/admin/query/short.rs +++ 
b/src/admin/query/short.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{events::room::message::RoomMessageEventContent, OwnedEventId, OwnedRoomOrAliasId}; +use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index c517d9dd..5995bc62 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::stream::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId, + OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent, }; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index d3b956e1..6262f33e 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -4,10 +4,10 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomId, + OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent, }; -use crate::{escape_html, Command}; +use crate::{Command, escape_html}; #[derive(Debug, Subcommand)] pub(crate) enum RoomAliasCommand { diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index b58d04c5..b5c303c8 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,8 +1,8 @@ use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; +use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; -use crate::{admin_command, get_room_info, PAGE_SIZE}; +use crate::{PAGE_SIZE, admin_command, get_room_info}; #[admin_command] pub(super) async fn list_rooms( diff --git a/src/admin/room/directory.rs 
b/src/admin/room/directory.rs index 791b9204..e9c23a1d 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,9 +1,9 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId}; +use ruma::{RoomId, events::room::message::RoomMessageEventContent}; -use crate::{get_room_info, Command, PAGE_SIZE}; +use crate::{Command, PAGE_SIZE, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomDirectoryCommand { diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 34abf8a9..a39728fe 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::ReadyExt, Result}; +use conduwuit::{Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId}; +use ruma::{RoomId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index bf54505e..ee132590 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,14 +1,14 @@ use api::client::leave_room; use clap::Subcommand; use conduwuit::{ - debug, error, info, + Result, debug, error, info, utils::{IterStream, ReadyExt}, - warn, Result, + warn, }; use futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId, - RoomOrAliasId, + OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId, + events::room::message::RoomMessageEventContent, }; use crate::{admin_command, admin_command_dispatch, get_room_info}; @@ -124,41 +124,42 @@ async fn ban_room( locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to 
a room ID locally, attempting to fetch room \ - ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch \ + room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room_id}" - ); - room_id - }, - | Err(e) => { - return Ok(RoomMessageEventContent::notice_plain(format!( - "Failed to resolve room alias {room_alias} to a room ID: {e}" - ))); - }, - } + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for {room_id}" + ); + room_id + }, + | Err(e) => { + return Ok(RoomMessageEventContent::notice_plain(format!( + "Failed to resolve room alias {room_alias} to a room ID: {e}" + ))); + }, + } + }, }; self.services.rooms.metadata.ban_room(&room_id, true); @@ -321,51 +322,55 @@ async fn ban_list_of_rooms( if room_alias_or_id.is_room_alias_id() { match RoomAliasId::parse(room_alias_or_id) { | Ok(room_alias) => { - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, \ - attempting to fetch room ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, \ + attempting to fetch room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room}", - ); - room_id - }, - | Err(e) => { - // don't fail if force blocking - if force { - warn!( - "Failed to resolve room 
alias {room} to a room \ - ID: {e}" + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for \ + {room}", ); - continue; - } + room_id + }, + | Err(e) => { + // don't fail if force blocking + if force { + warn!( + "Failed to resolve room alias {room} to a \ + room ID: {e}" + ); + continue; + } - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: \ - {e}" - ))); - }, - } + return Ok(RoomMessageEventContent::text_plain( + format!( + "Failed to resolve room alias {room} to a \ + room ID: {e}" + ), + )); + }, + } + }, }; room_ids.push(room_id); @@ -537,41 +542,42 @@ async fn unban_room( locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room \ - ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch \ + room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for room {room}" - ); - room_id - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" - ))); - }, - } + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for room {room}" + ); + room_id + }, + | Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + 
"Failed to resolve room alias {room} to a room ID: {e}" + ))); + }, + } + }, }; self.services.rooms.metadata.ban_room(&room_id, false); diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index d4cfa7d5..17bf9ec0 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,6 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{info, utils::time, warn, Err, Result}; +use conduwuit::{Err, Result, info, utils::time, warn}; use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 64767a36..8cb8edc3 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,23 +2,23 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - debug_warn, error, info, is_equal_to, + PduBuilder, Result, debug_warn, error, info, is_equal_to, utils::{self, ReadyExt}, - warn, PduBuilder, Result, + warn, }; use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ + EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, events::{ + RoomAccountDataEventType, StateEventType, room::{ message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, redaction::RoomRedactionEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, StateEventType, }, - EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, }; use crate::{ diff --git a/src/admin/utils.rs b/src/admin/utils.rs index eba33fba..a2696c50 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -1,4 +1,4 @@ -use conduwuit_core::{err, Err, Result}; +use conduwuit_core::{Err, Result, err}; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use service::Services; diff --git a/src/api/client/account.rs 
b/src/api/client/account.rs index cb25b276..cb49a6db 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,34 +3,35 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result, + Error, PduBuilder, Result, debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, }; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ + OwnedRoomId, UserId, api::client::{ account::{ - change_password, check_registration_token_validity, deactivate, get_3pids, - get_username_availability, + ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity, + deactivate, get_3pids, get_username_availability, register::{self, LoginType}, request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, - whoami, ThirdPartyIdRemovalStatus, + whoami, }, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ + GlobalAccountDataEventType, StateEventType, room::{ message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - GlobalAccountDataEventType, StateEventType, }, - push, OwnedRoomId, UserId, + push, }; use service::Services; -use super::{join_room_by_id_helper, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; use crate::Ruma; const RANDOM_USER_ID_LENGTH: usize = 10; @@ -218,12 +219,20 @@ pub(crate) async fn register_route( }; if body.body.login_type == Some(LoginType::ApplicationService) { - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); - } - } else { - return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing appservice token.")); + match body.appservice_info { + | Some(ref info) => + if 
!info.is_user_match(&user_id) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + }, + | _ => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing appservice token.", + )); + }, } } else if services.appservice.is_exclusive_user_id(&user_id).await { return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); @@ -256,33 +265,39 @@ pub(crate) async fn register_route( }; if !skip_auth { - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth( - &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), - "".into(), - auth, - &uiaainfo, - ) - .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services.uiaa.create( - &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), - "".into(), - &uiaainfo, - &json, - ); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth( + &UserId::parse_with_server_name("", services.globals.server_name()) + .expect("we know this is valid"), + "".into(), + auth, + &uiaainfo, + ) + .await?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services.uiaa.create( + &UserId::parse_with_server_name("", services.globals.server_name()) + .expect("we know this is valid"), + "".into(), + &uiaainfo, + &json, + ); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } } @@ -463,7 +478,7 @@ pub(crate) async fn register_route( } if let Some(room_server_name) = room.server_name() { - if let Err(e) = join_room_by_id_helper( + match join_room_by_id_helper( &services, &user_id, &room_id, @@ -475,10 +490,15 @@ pub(crate) async fn register_route( .boxed() .await { - // don't return this error so we don't fail registrations - error!("Failed to automatically join room {room} for user {user_id}: {e}"); - } else { - info!("Automatically joined room {room} for user {user_id}"); + | Err(e) => { + // don't return this error so we don't fail registrations + error!( + "Failed to automatically join room {room} for user {user_id}: {e}" + ); + }, + | _ => { + info!("Automatically joined room {room} for user {user_id}"); + }, }; } } @@ -532,26 +552,32 @@ pub(crate) async fn change_password_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } services @@ -636,25 +662,31 @@ pub(crate) async fn deactivate_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } // Remove profile pictures and display name @@ -809,7 +841,7 @@ pub async fn full_user_deactivate( power_levels_content.users.remove(user_id); // ignore errors so deactivation doesn't fail - if let Err(e) = services + match services .rooms .timeline .build_and_append_pdu( @@ -820,9 +852,12 @@ pub async fn full_user_deactivate( ) .await { - warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); - } else { - info!("Demoted {user_id} in {room_id} as part of account deactivation"); + | Err(e) => { + warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); + }, + | _ => { + info!("Demoted {user_id} in {room_id} as part of account deactivation"); + }, } } } diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 9f84f227..60c18b37 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + RoomId, UserId, api::client::config::{ get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, @@ -10,12 +11,11 @@ use ruma::{ GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, - RoomId, UserId, }; use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -use crate::{service::Services, Result, Ruma}; +use crate::{Result, Ruma, service::Services}; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// diff --git a/src/api/client/alias.rs 
b/src/api/client/alias.rs index e1af416e..319e5141 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,10 +1,10 @@ use axum::extract::State; -use conduwuit::{debug, Err, Result}; +use conduwuit::{Err, Result, debug}; use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ - api::client::alias::{create_alias, delete_alias, get_alias}, OwnedServerName, RoomAliasId, RoomId, + api::client::alias::{create_alias, delete_alias, get_alias}, }; use service::Services; @@ -128,18 +128,26 @@ async fn room_available_servers( // insert our server as the very first choice if in list, else check if we can // prefer the room alias server first - if let Some(server_index) = servers + match servers .iter() .position(|server_name| services.globals.server_is_ours(server_name)) { - servers.swap_remove(server_index); - servers.insert(0, services.globals.server_name().to_owned()); - } else if let Some(alias_server_index) = servers - .iter() - .position(|server| server == room_alias.server_name()) - { - servers.swap_remove(alias_server_index); - servers.insert(0, room_alias.server_name().into()); + | Some(server_index) => { + servers.swap_remove(server_index); + servers.insert(0, services.globals.server_name().to_owned()); + }, + | _ => { + match servers + .iter() + .position(|server| server == room_alias.server_name()) + { + | Some(alias_server_index) => { + servers.swap_remove(alias_server_index); + servers.insert(0, room_alias.server_name().into()); + }, + | _ => {}, + } + }, } servers diff --git a/src/api/client/appservice.rs b/src/api/client/appservice.rs index e4071ab0..84955309 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{err, Err, Result}; +use conduwuit::{Err, Result, err}; use ruma::api::{appservice::ping, client::appservice::request_ping}; use crate::Ruma; diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index d330952d..714e3f86 100644 --- 
a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + UInt, api::client::backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, delete_backup_keys, delete_backup_keys_for_room, @@ -8,7 +9,6 @@ use ruma::{ get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, update_backup_version, }, - UInt, }; use crate::{Result, Ruma}; diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 7188aa23..e20af21b 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -3,11 +3,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{Result, Server}; use ruma::{ + RoomVersionId, api::client::discovery::get_capabilities::{ self, Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability, ThirdPartyIdChangesCapability, }, - RoomVersionId, }; use serde_json::json; diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 7256683f..3f16c850 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,23 +1,22 @@ use axum::extract::State; use conduwuit::{ - at, err, ref_at, + Err, PduEvent, Result, at, err, ref_at, utils::{ + IterStream, future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, - IterStream, }, - Err, PduEvent, Result, }; use futures::{ - future::{join, join3, try_join3, OptionFuture}, FutureExt, StreamExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, join, join3, try_join3}, }; -use ruma::{api::client::context::get_context, events::StateEventType, OwnedEventId, UserId}; +use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType}; use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use crate::{ - client::message::{event_filter, ignored_filter, lazy_loading_witness, 
visibility_filter}, Ruma, + client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, }; const LIMIT_MAX: usize = 100; diff --git a/src/api/client/device.rs b/src/api/client/device.rs index bb0773dd..6a845aed 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,18 +1,18 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use futures::StreamExt; use ruma::{ + MilliSecondsSinceUnixEpoch, api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, - MilliSecondsSinceUnixEpoch, }; use super::SESSION_ID_LENGTH; -use crate::{utils, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, utils}; /// # `GET /_matrix/client/r0/devices` /// @@ -107,25 +107,31 @@ pub(crate) async fn delete_device_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err!(Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err!(Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err!(Uiaa(uiaainfo)); - } else { - return Err!(Request(NotJson("Not json."))); + return Err!(Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("Not json."))); + }, + }, } services @@ -164,25 +170,31 @@ pub(crate) async fn delete_devices_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } for device_id in &body.devices { diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 9166eed9..136c5961 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,8 +1,9 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{info, warn, Err, Error, Result}; +use conduwuit::{Err, Error, Result, info, warn}; use futures::{StreamExt, TryFutureExt}; use ruma::{ + OwnedRoomId, RoomId, ServerName, UInt, UserId, api::{ client::{ directory::{ @@ -16,13 +17,13 @@ use ruma::{ }, directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, events::{ + StateEventType, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - StateEventType, }, - uint, OwnedRoomId, RoomId, ServerName, UInt, UserId, + uint, }; use service::Services; @@ -365,30 +366,34 @@ async fn user_can_publish_room( user_id: &UserId, room_id: &RoomId, ) -> Result { - if let Ok(event) = services + match services .rooms .state_accessor .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") .await { - serde_json::from_str(event.content.get()) + | Ok(event) => serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content) .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) - }) - } else if let Ok(event) = services - .rooms - .state_accessor - 
.room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(event.sender == user_id) - } else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); + }), + | _ => { + match services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + | Ok(event) => Ok(event.sender == user_id), + | _ => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "User is not allowed to publish this room", + )); + }, + } + }, } } diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 801ae32b..6f20153b 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,9 +1,10 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{debug, err, info, result::NotFound, utils, Err, Error, Result}; -use futures::{stream::FuturesUnordered, StreamExt}; +use conduwuit::{Err, Error, Result, debug, err, info, result::NotFound, utils}; +use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ + OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, api::{ client::{ error::ErrorKind, @@ -17,14 +18,13 @@ use ruma::{ }, encryption::CrossSigningKey, serde::Raw, - OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; use super::SESSION_ID_LENGTH; use crate::{ - service::{users::parse_master_key, Services}, Ruma, + service::{Services, users::parse_master_key}, }; /// # `POST /_matrix/client/r0/keys/upload` @@ -126,7 +126,7 @@ pub(crate) async fn upload_signing_keys_route( auth_error: None, }; - if let Ok(exists) = check_for_new_keys( + match check_for_new_keys( services, sender_user, body.self_signing_key.as_ref(), @@ -136,32 +136,45 @@ pub(crate) async fn upload_signing_keys_route( .await .inspect_err(|e| info!(?e)) { - if let Some(result) = exists { - // No-op, they tried to reupload the same set of keys - // (lost connection for example) - return Ok(result); - } - debug!("Skipping 
UIA in accordance with MSC3967, the user didn't have any existing keys"); - // Some of the keys weren't found, so we let them upload - } else if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + | Ok(exists) => { + if let Some(result) = exists { + // No-op, they tried to reupload the same set of keys + // (lost connection for example) + return Ok(result); + } + debug!( + "Skipping UIA in accordance with MSC3967, the user didn't have any existing keys" + ); + // Some of the keys weren't found, so we let them upload + }, + | _ => { + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, + } + }, } services @@ -471,37 +484,40 @@ where .collect(); while let Some((server, response)) = futures.next().await { - if let Ok(response) = response { - for (user, master_key) in response.master_keys { - let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; + match response { + | Ok(response) => { + for (user, master_key) in response.master_keys { + let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; - if let Ok(our_master_key) = services - .users - .get_key(&master_key_id, sender_user, &user, &allowed_signatures) - .await - { - let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; - master_key.signatures.append(&mut our_master_key.signatures); + if let Ok(our_master_key) = services + .users + .get_key(&master_key_id, sender_user, &user, &allowed_signatures) + .await + { + let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; + master_key.signatures.append(&mut our_master_key.signatures); + } + let json = serde_json::to_value(master_key).expect("to_value always works"); + let raw = serde_json::from_value(json).expect("Raw::from_value always works"); + services + .users + .add_cross_signing_keys( + &user, &raw, &None, &None, + false, /* Dont notify. 
A notification would trigger another key + * request resulting in an endless loop */ + ) + .await?; + if let Some(raw) = raw { + master_keys.insert(user.clone(), raw); + } } - let json = serde_json::to_value(master_key).expect("to_value always works"); - let raw = serde_json::from_value(json).expect("Raw::from_value always works"); - services - .users - .add_cross_signing_keys( - &user, &raw, &None, &None, - false, /* Dont notify. A notification would trigger another key request - * resulting in an endless loop */ - ) - .await?; - if let Some(raw) = raw { - master_keys.insert(user.clone(), raw); - } - } - self_signing_keys.extend(response.self_signing_keys); - device_keys.extend(response.device_keys); - } else { - failures.insert(server.to_string(), json!({})); + self_signing_keys.extend(response.self_signing_keys); + device_keys.extend(response.device_keys); + }, + | _ => { + failures.insert(server.to_string(), json!({})); + }, } } diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 0cff8185..94572413 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -3,16 +3,16 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - err, + Err, Result, err, utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize}, - Err, Result, }; use conduwuit_service::{ - media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, MXC_LENGTH}, Services, + media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH}, }; use reqwest::Url; use ruma::{ + Mxc, UserId, api::client::{ authenticated_media::{ get_content, get_content_as_filename, get_content_thumbnail, get_media_config, @@ -20,7 +20,6 @@ use ruma::{ }, media::create_content, }, - Mxc, UserId, }; use crate::Ruma; diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index 4fa0b52e..d9f24f77 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ 
-3,21 +3,20 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - err, + Err, Result, err, utils::{content_disposition::make_content_disposition, math::ruma_from_usize}, - Err, Result, }; -use conduwuit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN}; +use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta}; use reqwest::Url; use ruma::{ + Mxc, api::client::media::{ create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, get_media_preview, }, - Mxc, }; -use crate::{client::create_content_route, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse, client::create_content_route}; /// # `GET /_matrix/media/v3/config` /// @@ -142,46 +141,52 @@ pub(crate) async fn get_content_legacy_route( media_id: &body.media_id, }; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + match services.media.get(&mxc).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + ); - Ok(get_content::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content::v3::Response { - file: response.file, - content_type: response.content_type, - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { 
- Err!(Request(NotFound("Media not found."))) + Ok(get_content::v3::Response { + file: response.file, + content_type: response.content_type, + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } @@ -227,49 +232,52 @@ pub(crate) async fn get_content_as_filename_legacy_route( media_id: &body.media_id, }; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - Some(&body.filename), - ); + match services.media.get(&mxc).await? { + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + Some(&body.filename), + ); - Ok(get_content_as_filename::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content_as_filename::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + }, + | _ => + if 
!services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content_as_filename::v3::Response { - content_disposition: Some(content_disposition), - content_type: response.content_type, - file: response.file, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content_as_filename::v3::Response { + content_disposition: Some(content_disposition), + content_type: response.content_type, + file: response.file, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } @@ -315,46 +323,52 @@ pub(crate) async fn get_content_thumbnail_legacy_route( }; let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get_thumbnail(&mxc, &dim).await? - { - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + match services.media.get_thumbnail(&mxc, &dim).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + ); - Ok(get_content_thumbnail::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_thumbnail_legacy(&body) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content_thumbnail::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_thumbnail_legacy(&body) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content_thumbnail::v3::Response { - file: response.file, - content_type: response.content_type, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else { - Err!(Request(NotFound("Media not 
found."))) + Ok(get_content_thumbnail::v3::Response { + file: response.file, + content_type: response.content_type, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 26736fb5..9c2693dc 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,51 +9,51 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - at, debug, debug_info, debug_warn, err, error, info, - pdu::{gen_event_id_canonical_json, PduBuilder}, + Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, + pdu::{PduBuilder, gen_event_id_canonical_json}, result::FlatOk, state_res, trace, - utils::{self, shuffle, IterStream, ReadyExt}, - warn, Err, PduEvent, Result, StateKey, + utils::{self, IterStream, ReadyExt, shuffle}, + warn, }; -use futures::{join, FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, join}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, api::{ client::{ error::ErrorKind, knock::knock_room, membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, + ThirdPartySigned, ban_user, forget_room, get_member_events, invite_user, + join_room_by_id, join_room_by_id_or_alias, joined_members::{self, v3::RoomMember}, - joined_rooms, kick_user, leave_room, unban_user, ThirdPartySigned, + joined_rooms, kick_user, leave_room, unban_user, }, }, federation::{self, membership::create_invite}, }, canonical_json::to_canonical_value, events::{ + StateEventType, room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, 
member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, }, - StateEventType, }, - CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ + Services, appservice::RegistrationInfo, pdu::gen_event_id, rooms::{ state::RoomMutexGuard, state_compressor::{CompressedState, HashSetCompressStateEvent}, }, - Services, }; -use crate::{client::full_user_deactivate, Ruma}; +use crate::{Ruma, client::full_user_deactivate}; /// Checks if the room is banned in any way possible and the sender user is not /// an admin. @@ -507,43 +507,54 @@ pub(crate) async fn invite_user_route( ) .await?; - if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { - let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); - let recipient_ignored_by_sender = services.users.user_is_ignored(user_id, sender_user); + match &body.recipient { + | invite_user::v3::InvitationRecipient::UserId { user_id } => { + let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); + let recipient_ignored_by_sender = + services.users.user_is_ignored(user_id, sender_user); - let (sender_ignored_recipient, recipient_ignored_by_sender) = - join!(sender_ignored_recipient, recipient_ignored_by_sender); + let (sender_ignored_recipient, recipient_ignored_by_sender) = + join!(sender_ignored_recipient, recipient_ignored_by_sender); - if sender_ignored_recipient { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." 
- ))); - } - - if let Ok(target_user_membership) = services - .rooms - .state_accessor - .get_member(&body.room_id, user_id) - .await - { - if target_user_membership.membership == MembershipState::Ban { - return Err!(Request(Forbidden("User is banned from this room."))); + if sender_ignored_recipient { + return Err!(Request(Forbidden( + "You cannot invite users you have ignored to rooms." + ))); } - } - if recipient_ignored_by_sender { - // silently drop the invite to the recipient if they've been ignored by the - // sender, pretend it worked - return Ok(invite_user::v3::Response {}); - } + if let Ok(target_user_membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, user_id) + .await + { + if target_user_membership.membership == MembershipState::Ban { + return Err!(Request(Forbidden("User is banned from this room."))); + } + } - invite_helper(&services, sender_user, user_id, &body.room_id, body.reason.clone(), false) + if recipient_ignored_by_sender { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + return Ok(invite_user::v3::Response {}); + } + + invite_helper( + &services, + sender_user, + user_id, + &body.room_id, + body.reason.clone(), + false, + ) .boxed() .await?; - Ok(invite_user::v3::Response {}) - } else { - Err!(Request(NotFound("User not found."))) + Ok(invite_user::v3::Response {}) + }, + | _ => { + Err!(Request(NotFound("User not found."))) + }, } } @@ -1830,38 +1841,46 @@ async fn remote_leave_room( .collect() .await; - if let Ok(invite_state) = services + match services .rooms .state_cache .invite_state(user_id, room_id) .await { - servers.extend( - invite_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - } else if let Ok(knock_state) = services - .rooms - .state_cache - .knock_state(user_id, room_id) - .await - { - 
servers.extend( - knock_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .filter_map(|sender| { - if !services.globals.user_is_local(sender) { - Some(sender.server_name().to_owned()) - } else { - None - } - }), - ); + | Ok(invite_state) => { + servers.extend( + invite_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + }, + | _ => { + match services + .rooms + .state_cache + .knock_state(user_id, room_id) + .await + { + | Ok(knock_state) => { + servers.extend( + knock_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .filter_map(|sender| { + if !services.globals.user_is_local(sender) { + Some(sender.server_name().to_owned()) + } else { + None + } + }), + ); + }, + | _ => {}, + } + }, } if let Some(room_id_server_name) = room_id.server_name() { diff --git a/src/api/client/message.rs b/src/api/client/message.rs index bb4e72dd..571a238a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,30 +1,29 @@ use axum::extract::State; use conduwuit::{ - at, + Event, PduCount, PduEvent, Result, at, utils::{ + IterStream, ReadyExt, result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, - IterStream, ReadyExt, }, - Event, PduCount, PduEvent, Result, }; -use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; use ruma::{ + RoomId, UserId, api::{ - client::{filter::RoomEventFilter, message::get_message_events}, Direction, + client::{filter::RoomEventFilter, message::get_message_events}, }, events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, serde::Raw, - RoomId, UserId, }; use service::{ + 
Services, rooms::{ lazy_loading, lazy_loading::{Options, Witness}, timeline::PdusIterItem, }, - Services, }; use crate::Ruma; diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index d19e6ae1..9b41a721 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -70,37 +70,38 @@ pub(crate) async fn get_presence_route( } } - if let Some(presence) = presence_event { - let status_msg = if presence - .content - .status_msg - .as_ref() - .is_some_and(String::is_empty) - { - None - } else { - presence.content.status_msg - }; - - let last_active_ago = match presence.content.currently_active { - | Some(true) => None, - | _ => presence + match presence_event { + | Some(presence) => { + let status_msg = if presence .content - .last_active_ago - .map(|millis| Duration::from_millis(millis.into())), - }; + .status_msg + .as_ref() + .is_some_and(String::is_empty) + { + None + } else { + presence.content.status_msg + }; - Ok(get_presence::v3::Response { - // TODO: Should ruma just use the presenceeventcontent type here? - status_msg, - currently_active: presence.content.currently_active, - last_active_ago, - presence: presence.content.presence, - }) - } else { - Err(Error::BadRequest( + let last_active_ago = match presence.content.currently_active { + | Some(true) => None, + | _ => presence + .content + .last_active_ago + .map(|millis| Duration::from_millis(millis.into())), + }; + + Ok(get_presence::v3::Response { + // TODO: Should ruma just use the presenceeventcontent type here? 
+ status_msg, + currently_active: presence.content.currently_active, + last_active_ago, + presence: presence.content.presence, + }) + }, + | _ => Err(Error::BadRequest( ErrorKind::NotFound, "Presence state for this user was not found", - )) + )), } } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 584adfc1..12e5ebcc 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -2,12 +2,14 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ + Err, Error, Result, pdu::PduBuilder, - utils::{stream::TryIgnore, IterStream}, - warn, Err, Error, Result, + utils::{IterStream, stream::TryIgnore}, + warn, }; -use futures::{future::join3, StreamExt, TryStreamExt}; +use futures::{StreamExt, TryStreamExt, future::join3}; use ruma::{ + OwnedMxcUri, OwnedRoomId, UserId, api::{ client::{ error::ErrorKind, @@ -19,7 +21,6 @@ use ruma::{ }, events::room::member::{MembershipState, RoomMemberEventContent}, presence::PresenceState, - OwnedMxcUri, OwnedRoomId, UserId, }; use service::Services; diff --git a/src/api/client/push.rs b/src/api/client/push.rs index ed7371e4..384b9dbc 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, api::client::{ error::ErrorKind, push::{ @@ -10,14 +11,13 @@ use ruma::{ }, }, events::{ - push_rules::{PushRulesEvent, PushRulesEventContent}, GlobalAccountDataEventType, + push_rules::{PushRulesEvent, PushRulesEventContent}, }, push::{ InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId, RemovePushRuleError, Ruleset, }, - CanonicalJsonObject, CanonicalJsonValue, }; use service::Services; diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 89fe003a..d01327f6 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,14 +1,14 @@ use std::collections::BTreeMap; use 
axum::extract::State; -use conduwuit::{err, Err, PduCount}; +use conduwuit::{Err, PduCount, err}; use ruma::{ + MilliSecondsSinceUnixEpoch, api::client::{read_marker::set_read_marker, receipt::create_receipt}, events::{ - receipt::{ReceiptThread, ReceiptType}, RoomAccountDataEventType, + receipt::{ReceiptThread, ReceiptType}, }, - MilliSecondsSinceUnixEpoch, }; use crate::{Result, Ruma}; diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index ba59a010..7b512d06 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, }; -use crate::{service::pdu::PduBuilder, Result, Ruma}; +use crate::{Result, Ruma, service::pdu::PduBuilder}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 87fb1eac..7ed40f14 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,22 +1,21 @@ use axum::extract::State; use conduwuit::{ - at, - utils::{result::FlatOk, stream::WidebandExt, IterStream, ReadyExt}, - PduCount, Result, + PduCount, Result, at, + utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; use futures::StreamExt; use ruma::{ + EventId, RoomId, UInt, UserId, api::{ + Direction, client::relations::{ get_relating_events, get_relating_events_with_rel_type, get_relating_events_with_rel_type_and_event_type, }, - Direction, }, - events::{relation::RelationType, TimelineEventType}, - EventId, RoomId, UInt, UserId, + events::{TimelineEventType, relation::RelationType}, }; -use service::{rooms::timeline::PdusIterItem, Services}; +use service::{Services, rooms::timeline::PdusIterItem}; use crate::Ruma; diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 57de3f12..db085721 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,22 +2,22 @@ use std::time::Duration; use 
axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{info, utils::ReadyExt, Err}; +use conduwuit::{Err, info, utils::ReadyExt}; use rand::Rng; use ruma::{ + EventId, RoomId, UserId, api::client::{ error::ErrorKind, room::{report_content, report_room}, }, events::room::message, - int, EventId, RoomId, UserId, + int, }; use tokio::time::sleep; use crate::{ - debug_info, - service::{pdu::PduEvent, Services}, - Error, Result, Ruma, + Error, Result, Ruma, debug_info, + service::{Services, pdu::PduEvent}, }; /// # `POST /_matrix/client/v3/rooms/{roomId}/report` diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index e362b3b3..1b8294a5 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,15 +2,17 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, StateKey, + Err, Error, Result, StateKey, debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, }; use futures::FutureExt; use ruma::{ + CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, api::client::{ error::ErrorKind, room::{self, create_room}, }, events::{ + TimelineEventType, room::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, @@ -22,16 +24,14 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, topic::RoomTopicEventContent, }, - TimelineEventType, }, int, serde::{JsonObject, Raw}, - CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use service::{appservice::RegistrationInfo, Services}; +use service::{Services, appservice::RegistrationInfo}; -use crate::{client::invite_helper, Ruma}; +use crate::{Ruma, client::invite_helper}; /// # `POST /_matrix/client/v3/createRoom` /// @@ -68,10 +68,9 @@ pub(crate) async fn create_room_route( )); } - let room_id: 
OwnedRoomId = if let Some(custom_room_id) = &body.room_id { - custom_room_id_check(&services, custom_room_id)? - } else { - RoomId::new(&services.server.name) + let room_id: OwnedRoomId = match &body.room_id { + | Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?, + | _ => RoomId::new(&services.server.name), }; // check if room ID doesn't already exist instead of erroring on auth check @@ -114,10 +113,10 @@ pub(crate) async fn create_room_route( .await; let state_lock = services.rooms.state.mutex.lock(&room_id).await; - let alias: Option = if let Some(alias) = body.room_alias_name.as_ref() { - Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?) - } else { - None + let alias: Option = match body.room_alias_name.as_ref() { + | Some(alias) => + Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?), + | _ => None, }; let room_version = match body.room_version.clone() { diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index f0ae64dd..84b591cd 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,9 +1,9 @@ use axum::extract::State; -use conduwuit::{err, Err, Event, Result}; -use futures::{future::try_join, FutureExt, TryFutureExt}; +use conduwuit::{Err, Event, Result, err}; +use futures::{FutureExt, TryFutureExt, future::try_join}; use ruma::api::client::room::get_room_event; -use crate::{client::is_ignored_pdu, Ruma}; +use crate::{Ruma, client::is_ignored_pdu}; /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` /// diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 233d180f..e4c76ae0 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,8 +1,7 @@ use axum::extract::State; use conduwuit::{ - at, - utils::{stream::TryTools, BoolExt}, - Err, PduEvent, Result, + Err, PduEvent, Result, at, + utils::{BoolExt, stream::TryTools}, }; use 
futures::TryStreamExt; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index a624f95f..4ac341a9 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,19 +1,20 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{err, info, pdu::PduBuilder, Error, Result, StateKey}; +use conduwuit::{Error, Result, StateKey, err, info, pdu::PduBuilder}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, RoomId, RoomVersionId, api::client::{error::ErrorKind, room::upgrade_room}, events::{ + StateEventType, TimelineEventType, room::{ member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, tombstone::RoomTombstoneEventContent, }, - StateEventType, TimelineEventType, }, - int, CanonicalJsonObject, RoomId, RoomVersionId, + int, }; use serde_json::{json, value::to_raw_value}; diff --git a/src/api/client/search.rs b/src/api/client/search.rs index 898dfc7f..f3366843 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -2,23 +2,22 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - at, is_true, + Err, PduEvent, Result, at, is_true, result::FlatOk, - utils::{stream::ReadyExt, IterStream}, - Err, PduEvent, Result, + utils::{IterStream, stream::ReadyExt}, }; -use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture}; use ruma::{ + OwnedRoomId, RoomId, UInt, UserId, api::client::search::search_events::{ self, v3::{Criteria, EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, }, events::AnyStateEvent, serde::Raw, - OwnedRoomId, RoomId, UInt, UserId, }; use search_events::v3::{Request, Response}; -use service::{rooms::search::RoomQuery, Services}; +use service::{Services, rooms::search::RoomQuery}; use crate::Ruma; 
diff --git a/src/api/client/send.rs b/src/api/client/send.rs index 39340070..b01d1ed6 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; use serde_json::from_str; -use crate::{service::pdu::PduBuilder, utils, Result, Ruma}; +use crate::{Result, Ruma, service::pdu::PduBuilder, utils}; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` /// diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 7155351c..5c0ab47d 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,9 +2,10 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{debug, err, info, utils::ReadyExt, warn, Err}; +use conduwuit::{Err, debug, err, info, utils::ReadyExt, warn}; use futures::StreamExt; use ruma::{ + OwnedUserId, UserId, api::client::{ error::ErrorKind, session::{ @@ -21,12 +22,11 @@ use ruma::{ }, uiaa, }, - OwnedUserId, UserId, }; use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{utils, utils::hash, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, utils, utils::hash}; /// # `GET /_matrix/client/v3/login` /// @@ -139,18 +139,20 @@ pub(crate) async fn login_route( Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { + match body.appservice_info { + | Some(ref info) => + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + }, + | _ => { return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", + ErrorKind::MissingToken, + "Missing appservice token.", )); - } - } else { - return 
Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + }, } user_id @@ -259,26 +261,32 @@ pub(crate) async fn login_token_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } - // Success! - } else if let Some(json) = body.json_body.as_ref() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); + // Success! + }, + | _ => match body.json_body.as_ref() { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err!(Request(NotJson("No JSON body was sent when required."))); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("No JSON body was sent when required."))); + }, + }, } let login_token = utils::random_string(TOKEN_LENGTH); diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 7efd7817..a667f852 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -5,18 +5,18 @@ use std::{ use axum::extract::State; use conduwuit::{ - utils::{future::TryExtExt, stream::IterStream}, Err, Result, + utils::{future::TryExtExt, stream::IterStream}, }; -use futures::{future::OptionFuture, StreamExt, TryFutureExt}; +use futures::{StreamExt, TryFutureExt, future::OptionFuture}; use ruma::{ - api::client::space::get_hierarchy, OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, 
api::client::space::get_hierarchy, }; use service::{ - rooms::spaces::{ - get_parent_children_via, summary_to_chunk, PaginationToken, SummaryAccessibility, - }, Services, + rooms::spaces::{ + PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk, + }, }; use crate::Ruma; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index f73ffa46..6353fe1c 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,19 +1,19 @@ use axum::extract::State; -use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, PduEvent, Result}; +use conduwuit::{Err, PduEvent, Result, err, pdu::PduBuilder, utils::BoolExt}; use futures::TryStreamExt; use ruma::{ + OwnedEventId, RoomId, UserId, api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, events::{ + AnyStateEventContent, StateEventType, room::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - AnyStateEventContent, StateEventType, }, serde::Raw, - OwnedEventId, RoomId, UserId, }; use service::Services; diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 46540881..3eab76cc 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -3,25 +3,25 @@ mod v4; mod v5; use conduwuit::{ - utils::{ - stream::{BroadbandExt, ReadyExt, TryIgnore}, - IterStream, - }, PduCount, + utils::{ + IterStream, + stream::{BroadbandExt, ReadyExt, TryIgnore}, + }, }; -use futures::{pin_mut, StreamExt}; +use futures::{StreamExt, pin_mut}; use ruma::{ + RoomId, UserId, directory::RoomTypeFilter, events::TimelineEventType::{ self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, }, - RoomId, UserId, }; pub(crate) use self::{ v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, }; -use crate::{service::Services, Error, 
PduEvent, Result}; +use crate::{Error, PduEvent, Result, service::Services}; pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index f9dcd5ec..fb59837b 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,57 +6,55 @@ use std::{ use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, pair_of, + PduCount, PduEvent, Result, at, err, error, extract_variant, is_equal_to, pair_of, pdu::{Event, EventHash}, ref_at, result::FlatOk, utils::{ - self, + self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, - BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, - PduCount, PduEvent, Result, }; use conduwuit_service::{ + Services, rooms::{ lazy_loading, lazy_loading::{Options, Witness}, short::ShortStateHash, }, - Services, }; use futures::{ - future::{join, join3, join4, join5, try_join, try_join4, OptionFuture}, FutureExt, StreamExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, join, join3, join4, join5, try_join, try_join4}, }; use ruma::{ + DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, api::client::{ filter::FilterDefinition, sync::sync_events::{ - self, + self, DeviceLists, UnreadNotificationsCount, v3::{ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, KnockState, KnockedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice, }, - DeviceLists, UnreadNotificationsCount, }, uiaa::UiaaResponse, }, events::{ - presence::{PresenceEvent, PresenceEventContent}, - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, + presence::{PresenceEvent, PresenceEventContent}, + room::member::{MembershipState, 
RoomMemberEventContent}, }, serde::Raw, - uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, + uint, }; use service::rooms::short::{ShortEventId, ShortStateKey}; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse, client::ignored_filter}; #[derive(Default)] struct StateChanges { @@ -168,8 +166,8 @@ pub(crate) async fn build_sync_events( let full_state = body.body.full_state; let filter = match body.body.filter.as_ref() { | None => FilterDefinition::default(), - | Some(Filter::FilterDefinition(ref filter)) => filter.clone(), - | Some(Filter::FilterId(ref filter_id)) => services + | Some(Filter::FilterDefinition(filter)) => filter.clone(), + | Some(Filter::FilterId(filter_id)) => services .users .get_filter(sender_user, filter_id) .await @@ -1016,34 +1014,37 @@ async fn calculate_state_incremental<'a>( let lazy_state_ids: OptionFuture<_> = witness .filter(|_| !full_state && !encrypted_room) .map(|witness| { - witness - .iter() - .stream() - .broad_filter_map(|user_id| state_get_shorteventid(user_id)) - .into_future() + StreamExt::into_future( + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_shorteventid(user_id)), + ) }) .into(); let state_diff: OptionFuture<_> = (!full_state && state_changed) .then(|| { - services - .rooms - .state_accessor - .state_added((since_shortstatehash, current_shortstatehash)) - .boxed() - .into_future() + StreamExt::into_future( + services + .rooms + .state_accessor + .state_added((since_shortstatehash, current_shortstatehash)) + .boxed(), + ) }) .into(); let current_state_ids: OptionFuture<_> = full_state .then(|| { - services - .rooms - .state_accessor - .state_full_shortids(current_shortstatehash) - .expect_ok() - .boxed() - .into_future() + StreamExt::into_future( + services + .rooms + .state_accessor + .state_full_shortids(current_shortstatehash) + .expect_ok() + .boxed(), + ) }) .into(); diff 
--git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 13f832b2..5fdcbab8 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,37 +6,37 @@ use std::{ use axum::extract::State; use conduwuit::{ - debug, error, extract_variant, + Error, PduCount, Result, debug, error, extract_variant, utils::{ - math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, }, - warn, Error, PduCount, Result, + warn, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, api::client::{ error::ErrorKind, sync::sync_events::{ - self, + self, DeviceLists, UnreadNotificationsCount, v4::{SlidingOp, SlidingSyncRoomHero}, - DeviceLists, UnreadNotificationsCount, }, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, + uint, }; use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; use crate::{ - client::{filter_rooms, ignored_filter, sync::v5::TodoRooms, DEFAULT_BUMP_TYPES}, Ruma, + client::{DEFAULT_BUMP_TYPES, filter_rooms, ignored_filter, sync::v5::TodoRooms}, }; pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; @@ -700,14 +700,13 @@ pub(crate) async fn sync_events_v4_route( .await .ok() .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { + avatar: match heroes_avatar { + | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), + | _ => match 
services.rooms.state_accessor.get_avatar(room_id).await { | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), | ruma::JsOption::Null => ruma::JsOption::Null, | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } + }, }, initial: Some(roomsince == &0), is_dm: None, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index cda6c041..b4c1b815 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,32 +6,33 @@ use std::{ use axum::extract::State; use conduwuit::{ - debug, error, extract_variant, trace, + Error, Result, TypeStateKey, debug, error, extract_variant, trace, utils::{ - math::{ruma_from_usize, usize_from_ruma}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + math::{ruma_from_usize, usize_from_ruma}, }, - warn, Error, Result, TypeStateKey, + warn, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ + DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, api::client::{ error::ErrorKind, sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, + uint, }; -use service::{rooms::read_receipt::pack_receipts, PduCount}; +use service::{PduCount, rooms::read_receipt::pack_receipts}; use super::{filter_rooms, share_encrypted_room}; use crate::{ - client::{ignored_filter, sync::load_timeline, DEFAULT_BUMP_TYPES}, Ruma, + client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline}, }; type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); @@ -572,14 +573,13 @@ async fn process_rooms( .await .ok() .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match 
services.rooms.state_accessor.get_avatar(room_id).await { + avatar: match heroes_avatar { + | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), + | _ => match services.rooms.state_accessor.get_avatar(room_id).await { | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), | ruma::JsOption::Null => ruma::JsOption::Null, | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } + }, }, initial: Some(roomsince == &0), is_dm: None, diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index 820ee4a1..3b3b40d4 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -4,8 +4,8 @@ use axum::extract::State; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ - tag::{TagEvent, TagEventContent}, RoomAccountDataEventType, + tag::{TagEvent, TagEventContent}, }, }; diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index f0cbf467..d25e52c0 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{at, PduCount, PduEvent}; +use conduwuit::{PduCount, PduEvent, at}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index b311295b..ccfa7340 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::{utils::math::Tried, Err}; +use conduwuit::{Err, utils::math::Tried}; use ruma::api::client::typing::create_typing_event; -use crate::{utils, Result, Ruma}; +use crate::{Result, Ruma, utils}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// @@ -27,37 +27,40 @@ pub(crate) async fn create_typing_event_route( return Err!(Request(Forbidden("You are not in this room."))); } - if let Typing::Yes(duration) = body.state { - let duration = utils::clamp( - duration.as_millis().try_into().unwrap_or(u64::MAX), + match body.state { + | Typing::Yes(duration) => { + let 
duration = utils::clamp( + duration.as_millis().try_into().unwrap_or(u64::MAX), + services + .server + .config + .typing_client_timeout_min_s + .try_mul(1000)?, + services + .server + .config + .typing_client_timeout_max_s + .try_mul(1000)?, + ); services - .server - .config - .typing_client_timeout_min_s - .try_mul(1000)?, + .rooms + .typing + .typing_add( + sender_user, + &body.room_id, + utils::millis_since_unix_epoch() + .checked_add(duration) + .expect("user typing timeout should not get this high"), + ) + .await?; + }, + | _ => { services - .server - .config - .typing_client_timeout_max_s - .try_mul(1000)?, - ); - services - .rooms - .typing - .typing_add( - sender_user, - &body.room_id, - utils::millis_since_unix_epoch() - .checked_add(duration) - .expect("user typing timeout should not get this high"), - ) - .await?; - } else { - services - .rooms - .typing - .typing_remove(sender_user, &body.room_id) - .await?; + .rooms + .typing + .typing_remove(sender_user, &body.room_id) + .await?; + }, } // ping presence diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 67c7df75..08da5a37 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -5,6 +5,7 @@ use axum_client_ip::InsecureClientIp; use conduwuit::Err; use futures::StreamExt; use ruma::{ + OwnedRoomId, api::{ client::{ error::ErrorKind, @@ -19,7 +20,6 @@ use ruma::{ }, events::room::member::MembershipState, presence::PresenceState, - OwnedRoomId, }; use super::{update_avatar_url, update_displayname}; @@ -499,15 +499,18 @@ pub(crate) async fn get_profile_key_route( .users .set_timezone(&body.user_id, response.tz.clone()); - if let Some(value) = response.custom_profile_fields.get(&body.key_name) { - profile_key_value.insert(body.key_name.clone(), value.clone()); - services.users.set_profile_key( - &body.user_id, - &body.key_name, - Some(value.clone()), - ); - } else { - return Err!(Request(NotFound("The requested profile key does not exist."))); + match 
response.custom_profile_fields.get(&body.key_name) { + | Some(value) => { + profile_key_value.insert(body.key_name.clone(), value.clone()); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(value.clone()), + ); + }, + | _ => { + return Err!(Request(NotFound("The requested profile key does not exist."))); + }, } if profile_key_value.is_empty() { @@ -524,14 +527,17 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("Profile was not found."))); } - if let Ok(value) = services + match services .users .profile_key(&body.user_id, &body.key_name) .await { - profile_key_value.insert(body.key_name.clone(), value); - } else { - return Err!(Request(NotFound("The requested profile key does not exist."))); + | Ok(value) => { + profile_key_value.insert(body.key_name.clone(), value); + }, + | _ => { + return Err!(Request(NotFound("The requested profile key does not exist."))); + }, } if profile_key_value.is_empty() { diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 904f1d2f..4e2b7d9d 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use axum::{extract::State, response::IntoResponse, Json}; +use axum::{Json, extract::State, response::IntoResponse}; use futures::StreamExt; use ruma::api::client::discovery::get_supported_versions; diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 182e30db..c5d79a56 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,11 +1,11 @@ use axum::extract::State; use conduwuit::utils::TryFutureExtExt; -use futures::{pin_mut, StreamExt}; +use futures::{StreamExt, pin_mut}; use ruma::{ api::client::user_directory::search_users, events::{ - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, }, }; diff --git a/src/api/client/voip.rs 
b/src/api/client/voip.rs index 70ad4913..37e67984 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -1,10 +1,10 @@ use std::time::{Duration, SystemTime}; use axum::extract::State; -use base64::{engine::general_purpose, Engine as _}; -use conduwuit::{utils, Err}; +use base64::{Engine as _, engine::general_purpose}; +use conduwuit::{Err, utils}; use hmac::{Hmac, Mac}; -use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch, UserId}; +use ruma::{SecondsSinceUnixEpoch, UserId, api::client::voip::get_turn_server_info}; use sha1::Sha1; use crate::{Result, Ruma}; diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index 5c53d013..abda61b0 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -1,4 +1,4 @@ -use axum::{extract::State, response::IntoResponse, Json}; +use axum::{Json, extract::State, response::IntoResponse}; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, diff --git a/src/api/mod.rs b/src/api/mod.rs index 80e34f10..8df17a59 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -7,7 +7,7 @@ pub mod server; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::{debug_info, pdu::PduEvent, utils, Error, Result}; +pub(crate) use conduwuit::{Error, Result, debug_info, pdu::PduEvent, utils}; pub(crate) use self::router::{Ruma, RumaResponse, State}; diff --git a/src/api/router.rs b/src/api/router.rs index 7855ddfa..3fbef275 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -8,12 +8,12 @@ pub mod state; use std::str::FromStr; use axum::{ + Router, response::{IntoResponse, Redirect}, routing::{any, get, post}, - Router, }; -use conduwuit::{err, Server}; -use http::{uri, Uri}; +use conduwuit::{Server, err}; +use http::{Uri, uri}; use self::handler::RouterExt; pub(super) use self::{args::Args as Ruma, response::RumaResponse, state::State}; diff --git 
a/src/api/router/args.rs b/src/api/router/args.rs index 582f0c56..65a68fa4 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -2,15 +2,15 @@ use std::{mem, ops::Deref}; use axum::{async_trait, body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; -use conduwuit::{debug, debug_warn, err, trace, utils::string::EMPTY, Error, Result}; +use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY}; use ruma::{ - api::IncomingRequest, CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, - OwnedServerName, OwnedUserId, ServerName, UserId, + CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, + OwnedUserId, ServerName, UserId, api::IncomingRequest, }; use service::Services; use super::{auth, auth::Auth, request, request::Request}; -use crate::{service::appservice::RegistrationInfo, State}; +use crate::{State, service::appservice::RegistrationInfo}; /// Extractor for Ruma request structs pub(crate) struct Args { diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index ecea305b..56256683 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -1,12 +1,14 @@ use axum::RequestPartsExt; use axum_extra::{ - headers::{authorization::Bearer, Authorization}, - typed_header::TypedHeaderRejectionReason, TypedHeader, + headers::{Authorization, authorization::Bearer}, + typed_header::TypedHeaderRejectionReason, }; -use conduwuit::{debug_error, err, warn, Err, Error, Result}; +use conduwuit::{Err, Error, Result, debug_error, err, warn}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, api::{ + AuthScheme, IncomingRequest, Metadata, client::{ directory::get_public_rooms, error::ErrorKind, @@ -16,14 +18,12 @@ use ruma::{ voip::get_turn_server_info, }, federation::openid::get_openid_userinfo, - AuthScheme, IncomingRequest, Metadata, }, server_util::authorization::XMatrix, - CanonicalJsonObject, 
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, }; use service::{ - server_keys::{PubKeyMap, PubKeys}, Services, + server_keys::{PubKeyMap, PubKeys}, }; use super::request::Request; @@ -56,12 +56,12 @@ pub(super) async fn auth( }; let token = if let Some(token) = token { - if let Some(reg_info) = services.appservice.find_from_token(token).await { - Token::Appservice(Box::new(reg_info)) - } else if let Ok((user_id, device_id)) = services.users.find_from_token(token).await { - Token::User((user_id, device_id)) - } else { - Token::Invalid + match services.appservice.find_from_token(token).await { + | Some(reg_info) => Token::Appservice(Box::new(reg_info)), + | _ => match services.users.find_from_token(token).await { + | Ok((user_id, device_id)) => Token::User((user_id, device_id)), + | _ => Token::Invalid, + }, } } else { Token::None diff --git a/src/api/router/handler.rs b/src/api/router/handler.rs index cfb8fb6e..ab013945 100644 --- a/src/api/router/handler.rs +++ b/src/api/router/handler.rs @@ -1,8 +1,8 @@ use axum::{ + Router, extract::FromRequestParts, response::IntoResponse, - routing::{on, MethodFilter}, - Router, + routing::{MethodFilter, on}, }; use conduwuit::Result; use futures::{Future, TryFutureExt}; diff --git a/src/api/router/request.rs b/src/api/router/request.rs index 615a8bff..3cdc452b 100644 --- a/src/api/router/request.rs +++ b/src/api/router/request.rs @@ -1,8 +1,8 @@ use std::str; -use axum::{extract::Path, RequestExt, RequestPartsExt}; +use axum::{RequestExt, RequestPartsExt, extract::Path}; use bytes::Bytes; -use conduwuit::{err, Result}; +use conduwuit::{Result, err}; use http::request::Parts; use serde::Deserialize; use service::Services; diff --git a/src/api/router/response.rs b/src/api/router/response.rs index a10560f1..03c9060e 100644 --- a/src/api/router/response.rs +++ b/src/api/router/response.rs @@ -1,9 +1,9 @@ use axum::response::{IntoResponse, Response}; use bytes::BytesMut; -use conduwuit::{error, Error}; 
+use conduwuit::{Error, error}; use http::StatusCode; use http_body_util::Full; -use ruma::api::{client::uiaa::UiaaResponse, OutgoingResponse}; +use ruma::api::{OutgoingResponse, client::uiaa::UiaaResponse}; pub(crate) struct RumaResponse(pub(crate) T) where diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index b44db67c..5c875807 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -2,11 +2,11 @@ use std::cmp; use axum::extract::State; use conduwuit::{ - utils::{stream::TryTools, IterStream, ReadyExt}, PduCount, Result, + utils::{IterStream, ReadyExt, stream::TryTools}, }; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{api::federation::backfill::get_backfill, uint, MilliSecondsSinceUnixEpoch}; +use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill, uint}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/event.rs b/src/api/server/event.rs index 629dd6a2..5846c6d7 100644 --- a/src/api/server/event.rs +++ b/src/api/server/event.rs @@ -1,6 +1,6 @@ use axum::extract::State; -use conduwuit::{err, Result}; -use ruma::{api::federation::event::get_event, MilliSecondsSinceUnixEpoch, RoomId}; +use conduwuit::{Result, err}; +use ruma::{MilliSecondsSinceUnixEpoch, RoomId, api::federation::event::get_event}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 49dcd718..c9e210f5 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -1,11 +1,11 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{utils::stream::ReadyExt, Error, Result}; +use conduwuit::{Error, Result, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ - api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, RoomId, + api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, }; use super::AccessCheck; diff --git 
a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index ea06015a..3d0bbb07 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,8 +1,8 @@ use axum::extract::State; use conduwuit::{Error, Result}; use ruma::{ - api::{client::error::ErrorKind, federation::event::get_missing_events}, CanonicalJsonValue, EventId, RoomId, + api::{client::error::ErrorKind, federation::event::get_missing_events}, }; use super::AccessCheck; diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index f7bc43ab..41eaedd0 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,11 +1,11 @@ use axum::extract::State; use conduwuit::{ - utils::stream::{BroadbandExt, IterStream}, Err, Result, + utils::stream::{BroadbandExt, IterStream}, }; use futures::{FutureExt, StreamExt}; use ruma::api::federation::space::get_hierarchy; -use service::rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}; +use service::rooms::spaces::{Identifier, SummaryAccessibility, get_parent_children_via}; use crate::Ruma; diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 27a4485c..463cb9ab 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,12 +1,12 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use base64::{engine::general_purpose, Engine as _}; -use conduwuit::{err, utils, utils::hash::sha256, warn, Err, Error, PduEvent, Result}; +use base64::{Engine as _, engine::general_purpose}; +use conduwuit::{Err, Error, PduEvent, Result, err, utils, utils::hash::sha256, warn}; use ruma::{ + CanonicalJsonValue, OwnedUserId, UserId, api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, - CanonicalJsonValue, OwnedUserId, UserId, }; use service::pdu::gen_event_id; diff --git a/src/api/server/key.rs b/src/api/server/key.rs index 
75801a7a..f9bd0926 100644 --- a/src/api/server/key.rs +++ b/src/api/server/key.rs @@ -3,15 +3,15 @@ use std::{ time::{Duration, SystemTime}, }; -use axum::{extract::State, response::IntoResponse, Json}; -use conduwuit::{utils::timepoint_from_now, Result}; +use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::{Result, utils::timepoint_from_now}; use ruma::{ + MilliSecondsSinceUnixEpoch, Signatures, api::{ - federation::discovery::{get_server_keys, OldVerifyKey, ServerSigningKeys}, OutgoingResponse, + federation::discovery::{OldVerifyKey, ServerSigningKeys, get_server_keys}, }, serde::Raw, - MilliSecondsSinceUnixEpoch, Signatures, }; /// # `GET /_matrix/key/v2/server` diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index b753346c..f18d1304 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,22 +1,22 @@ use axum::extract::State; -use conduwuit::{debug_info, utils::IterStream, warn, Err}; +use conduwuit::{Err, debug_info, utils::IterStream, warn}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, api::{client::error::ErrorKind, federation::membership::prepare_join_event}, events::{ + StateEventType, room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, }, - CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; use crate::{ - service::{pdu::PduBuilder, Services}, Error, Result, Ruma, + service::{Services, pdu::PduBuilder}, }; /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 423e202d..71536439 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,15 +1,15 @@ +use RoomVersionId::*; use axum::extract::State; -use conduwuit::{debug_warn, Err}; +use conduwuit::{Err, debug_warn}; use ruma::{ + 
RoomVersionId, api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, - RoomVersionId, }; use serde_json::value::to_raw_value; use tracing::warn; -use RoomVersionId::*; -use crate::{service::pdu::PduBuilder, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, service::pdu::PduBuilder}; /// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` /// diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 936e0fbb..1ed02785 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -7,7 +7,7 @@ use ruma::{ use serde_json::value::to_raw_value; use super::make_join::maybe_strip_event_id; -use crate::{service::pdu::PduBuilder, Ruma}; +use crate::{Ruma, service::pdu::PduBuilder}; /// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/media.rs b/src/api/server/media.rs index e56f5b9d..cbe8595b 100644 --- a/src/api/server/media.rs +++ b/src/api/server/media.rs @@ -1,12 +1,12 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{utils::content_disposition::make_content_disposition, Err, Result}; +use conduwuit::{Err, Result, utils::content_disposition::make_content_disposition}; use conduwuit_service::media::{Dim, FileMeta}; use ruma::{ - api::federation::authenticated_media::{ - get_content, get_content_thumbnail, Content, ContentMetadata, FileOrLocation, - }, Mxc, + api::federation::authenticated_media::{ + Content, ContentMetadata, FileOrLocation, get_content, get_content_thumbnail, + }, }; use crate::Ruma; diff --git a/src/api/server/query.rs b/src/api/server/query.rs index 69f62e94..9d4fcf73 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -1,16 +1,16 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Error, Result}; +use conduwuit::{Error, Result, err}; use futures::StreamExt; use 
get_profile_information::v1::ProfileField; use rand::seq::SliceRandom; use ruma::{ + OwnedServerName, api::{ client::error::ErrorKind, federation::query::{get_profile_information, get_room_information}, }, - OwnedServerName, }; use crate::Ruma; diff --git a/src/api/server/send.rs b/src/api/server/send.rs index bc18377e..1f467dac 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -3,20 +3,21 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, + Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_warn, err, error, result::LogErr, trace, utils::{ - stream::{automatic_width, BroadbandExt, TryBroadbandExt}, IterStream, ReadyExt, + stream::{BroadbandExt, TryBroadbandExt, automatic_width}, }, - warn, Err, Error, Result, + warn, }; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; use ruma::{ + CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, api::{ client::error::ErrorKind, federation::transactions::{ @@ -31,17 +32,16 @@ use ruma::{ events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, serde::Raw, to_device::DeviceIdOrAllDevices, - CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, }; use service::{ - sending::{EDU_LIMIT, PDU_LIMIT}, Services, + sending::{EDU_LIMIT, PDU_LIMIT}, }; use utils::millis_since_unix_epoch; use crate::{ - utils::{self}, Ruma, + utils::{self}, }; type ResolvedMap = BTreeMap; diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index e81d7672..08fa3835 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -4,22 +4,22 @@ use std::borrow::Borrow; use axum::extract::State; use conduwuit::{ - at, err, + Err, Result, at, err, pdu::gen_event_id_canonical_json, utils::stream::{IterStream, TryBroadbandExt}, - warn, Err, Result, + warn, }; use 
futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::federation::membership::create_join_event, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, + api::federation::membership::create_join_event, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, }; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use service::Services; use crate::Ruma; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index b07620af..1d4c2a6c 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,15 +1,15 @@ use axum::extract::State; -use conduwuit::{err, pdu::gen_event_id_canonical_json, warn, Err, PduEvent, Result}; +use conduwuit::{Err, PduEvent, Result, err, pdu::gen_event_id_canonical_json, warn}; use futures::FutureExt; use ruma::{ - api::federation::knock::send_knock, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, - serde::JsonObject, OwnedServerName, OwnedUserId, RoomVersionId::*, + api::federation::knock::send_knock, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, + serde::JsonObject, }; use crate::Ruma; diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index e955a267..71516553 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,21 +1,21 @@ #![allow(deprecated)] use axum::extract::State; -use conduwuit::{err, Err, Result}; +use conduwuit::{Err, Result, err}; use futures::FutureExt; use ruma::{ + OwnedRoomId, OwnedUserId, RoomId, ServerName, api::federation::membership::create_leave_event, events::{ - room::member::{MembershipState, RoomMemberEventContent}, StateEventType, + room::member::{MembershipState, 
RoomMemberEventContent}, }, - OwnedRoomId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; use crate::{ - service::{pdu::gen_event_id_canonical_json, Services}, Ruma, + service::{Services, pdu::gen_event_id_canonical_json}, }; /// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` diff --git a/src/api/server/state.rs b/src/api/server/state.rs index b16e61a0..8c786815 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,9 +1,9 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{at, err, utils::IterStream, Result}; +use conduwuit::{Result, at, err, utils::IterStream}; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{api::federation::event::get_room_state, OwnedEventId}; +use ruma::{OwnedEventId, api::federation::event::get_room_state}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 7d0440bf..648d4575 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,9 +1,9 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{at, err, Result}; +use conduwuit::{Result, at, err}; use futures::{StreamExt, TryStreamExt}; -use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; +use ruma::{OwnedEventId, api::federation::event::get_room_state_ids}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/user.rs b/src/api/server/user.rs index 321d0b66..80c353ab 100644 --- a/src/api/server/user.rs +++ b/src/api/server/user.rs @@ -10,8 +10,8 @@ use ruma::api::{ }; use crate::{ - client::{claim_keys_helper, get_keys_helper}, Ruma, + client::{claim_keys_helper, get_keys_helper}, }; /// # `GET /_matrix/federation/v1/user/devices/{userId}` diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs index 4f3fa245..5696e44b 100644 --- a/src/api/server/utils.rs +++ b/src/api/server/utils.rs @@ -1,6 +1,6 @@ -use 
conduwuit::{implement, is_false, Err, Result}; +use conduwuit::{Err, Result, implement, is_false}; use conduwuit_service::Services; -use futures::{future::OptionFuture, join, FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, future::OptionFuture, join}; use ruma::{EventId, RoomId, ServerName}; pub(super) struct AccessCheck<'a> { diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 57143e85..6870c1c0 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -2,7 +2,7 @@ use std::{ cell::OnceCell, - ffi::{c_char, c_void, CStr}, + ffi::{CStr, c_char, c_void}, fmt::Debug, sync::RwLock, }; @@ -14,9 +14,8 @@ use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; use crate::{ - err, is_equal_to, is_nonzero, + Result, err, is_equal_to, is_nonzero, utils::{math, math::Tried}, - Result, }; #[cfg(feature = "jemalloc_conf")] @@ -128,7 +127,7 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { } macro_rules! mallctl { - ($name:expr) => {{ + ($name:expr_2021) => {{ thread_local! { static KEY: OnceCell = OnceCell::default(); }; @@ -141,7 +140,7 @@ macro_rules! mallctl { } pub mod this_thread { - use super::{is_nonzero, key, math, Debug, Key, OnceCell, Result}; + use super::{Debug, Key, OnceCell, Result, is_nonzero, key, math}; thread_local! 
{ static ALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; @@ -261,18 +260,18 @@ pub fn decay>>(arena: I) -> Result { } pub fn set_muzzy_decay>>(arena: I, decay_ms: isize) -> Result { - if let Some(arena) = arena.into() { - set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms) - } else { - set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms) + match arena.into() { + | Some(arena) => + set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms), + | _ => set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms), } } pub fn set_dirty_decay>>(arena: I, decay_ms: isize) -> Result { - if let Some(arena) = arena.into() { - set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms) - } else { - set(&mallctl!("arenas.dirty_decay_ms"), decay_ms) + match arena.into() { + | Some(arena) => + set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms), + | _ => set(&mallctl!("arenas.dirty_decay_ms"), decay_ms), } } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 5532c5a2..488f7f94 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -4,7 +4,7 @@ use either::Either; use figment::Figment; use super::DEPRECATED_KEYS; -use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result, Server}; +use crate::{Config, Err, Result, Server, debug, debug_info, debug_warn, error, warn}; /// Performs check() with additional checks specific to reloading old config /// with new config. 
diff --git a/src/core/config/manager.rs b/src/core/config/manager.rs index 0c95ca15..e55916ba 100644 --- a/src/core/config/manager.rs +++ b/src/core/config/manager.rs @@ -4,13 +4,13 @@ use std::{ ptr, ptr::null_mut, sync::{ - atomic::{AtomicPtr, Ordering}, Arc, + atomic::{AtomicPtr, Ordering}, }, }; use super::Config; -use crate::{implement, Result}; +use crate::{Result, implement}; /// The configuration manager is an indirection to reload the configuration for /// the server while it is running. In order to not burden or clutter the many diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e66532ee..67c3b95c 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -14,18 +14,18 @@ use either::{ Either::{Left, Right}, }; use figment::providers::{Env, Format, Toml}; -pub use figment::{value::Value as FigmentValue, Figment}; +pub use figment::{Figment, value::Value as FigmentValue}; use regex::RegexSet; use ruma::{ - api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, - OwnedUserId, RoomVersionId, + OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, RoomVersionId, + api::client::discovery::discover_support::ContactRole, }; -use serde::{de::IgnoredAny, Deserialize}; +use serde::{Deserialize, de::IgnoredAny}; use url::Url; use self::proxy::ProxyConfig; pub use self::{check::check, manager::Manager}; -use crate::{err, error::Error, utils::sys, Result}; +use crate::{Result, err, error::Error, utils::sys}; /// All the config options for conduwuit. #[allow(clippy::struct_excessive_bools)] diff --git a/src/core/debug.rs b/src/core/debug.rs index 8a5eccfd..b9a53038 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -13,7 +13,7 @@ pub use crate::{result::DebugInspect, utils::debug::*}; /// In release-mode it becomes DEBUG level, and possibly subject to elision. #[macro_export] macro_rules! 
debug_event { - ( $level:expr, $($x:tt)+ ) => { + ( $level:expr_2021, $($x:tt)+ ) => { if $crate::debug::logging() { ::tracing::event!( $level, _debug = true, $($x)+ ) } else { diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 60fa5bff..0962c4ee 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -165,10 +165,10 @@ macro_rules! err_lev { use std::{fmt, fmt::Write}; use tracing::{ - level_enabled, Callsite, Event, __macro_support, __tracing_log, + __macro_support, __tracing_log, Callsite, Event, Level, callsite::DefaultCallsite, field::{Field, ValueSet, Visit}, - Level, + level_enabled, }; struct Visitor<'a>(&'a mut String); diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 16613b7e..02ab6fa3 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -152,8 +152,8 @@ impl Error { /// Generate the error message string. pub fn message(&self) -> String { match self { - | Self::Federation(ref origin, ref error) => format!("Answer from {origin}: {error}"), - | Self::Ruma(ref error) => response::ruma_error_message(error), + | Self::Federation(origin, error) => format!("Answer from {origin}: {error}"), + | Self::Ruma(error) => response::ruma_error_message(error), | _ => format!("{self}"), } } diff --git a/src/core/error/panic.rs b/src/core/error/panic.rs index c6a83ae0..2e63105b 100644 --- a/src/core/error/panic.rs +++ b/src/core/error/panic.rs @@ -1,6 +1,6 @@ use std::{ any::Any, - panic::{panic_any, RefUnwindSafe, UnwindSafe}, + panic::{RefUnwindSafe, UnwindSafe, panic_any}, }; use super::Error; diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 75e4050d..00ade5ae 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -2,11 +2,11 @@ use bytes::BytesMut; use http::StatusCode; use http_body_util::Full; use ruma::api::{ + OutgoingResponse, client::{ error::{ErrorBody, ErrorKind}, uiaa::UiaaResponse, }, - OutgoingResponse, }; use super::Error; diff --git 
a/src/core/info/room_version.rs b/src/core/info/room_version.rs index b33a8562..51d5d3c6 100644 --- a/src/core/info/room_version.rs +++ b/src/core/info/room_version.rs @@ -2,7 +2,7 @@ use std::iter::once; -use ruma::{api::client::discovery::get_capabilities::RoomVersionStability, RoomVersionId}; +use ruma::{RoomVersionId, api::client::discovery::get_capabilities::RoomVersionStability}; use crate::{at, is_equal_to}; diff --git a/src/core/log/capture/data.rs b/src/core/log/capture/data.rs index 0ad7a6c2..a4a1225b 100644 --- a/src/core/log/capture/data.rs +++ b/src/core/log/capture/data.rs @@ -1,7 +1,7 @@ use tracing::Level; -use tracing_core::{span::Current, Event}; +use tracing_core::{Event, span::Current}; -use super::{layer::Value, Layer}; +use super::{Layer, layer::Value}; use crate::{info, utils::string::EMPTY}; pub struct Data<'a> { diff --git a/src/core/log/capture/util.rs b/src/core/log/capture/util.rs index 8bad4ba0..65524be5 100644 --- a/src/core/log/capture/util.rs +++ b/src/core/log/capture/util.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, Mutex}; use super::{ - super::{fmt, Level}, + super::{Level, fmt}, Closure, Data, }; use crate::Result; diff --git a/src/core/log/console.rs b/src/core/log/console.rs index 1f04ba26..d91239ac 100644 --- a/src/core/log/console.rs +++ b/src/core/log/console.rs @@ -1,20 +1,20 @@ use std::{env, io, sync::LazyLock}; use tracing::{ - field::{Field, Visit}, Event, Level, Subscriber, + field::{Field, Visit}, }; use tracing_subscriber::{ field::RecordFields, fmt, fmt::{ - format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, FmtContext, FormatEvent, FormatFields, MakeWriter, + format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, }, registry::LookupSpan, }; -use crate::{apply, Config, Result}; +use crate::{Config, Result, apply}; static SYSTEMD_MODE: LazyLock = LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok()); diff --git a/src/core/log/fmt.rs b/src/core/log/fmt.rs 
index 353d4442..b73d0c9b 100644 --- a/src/core/log/fmt.rs +++ b/src/core/log/fmt.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use super::{color, Level}; +use super::{Level, color}; use crate::Result; pub fn html(out: &mut S, level: &Level, span: &str, msg: &str) -> Result<()> diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 0c1840d0..5ac374e8 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -9,7 +9,7 @@ mod reload; mod suppress; pub use capture::Capture; -pub use console::{is_systemd_mode, ConsoleFormat, ConsoleWriter}; +pub use console::{ConsoleFormat, ConsoleWriter, is_systemd_mode}; pub use reload::{LogLevelReloadHandles, ReloadHandle}; pub use suppress::Suppress; pub use tracing::Level; @@ -34,7 +34,7 @@ pub struct Log { #[macro_export] macro_rules! event { - ( $level:expr, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } + ( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } } #[macro_export] diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs index 12d14f48..e6a16c9f 100644 --- a/src/core/log/reload.rs +++ b/src/core/log/reload.rs @@ -3,9 +3,9 @@ use std::{ sync::{Arc, Mutex}, }; -use tracing_subscriber::{reload, EnvFilter}; +use tracing_subscriber::{EnvFilter, reload}; -use crate::{error, Result}; +use crate::{Result, error}; /// We need to store a reload::Handle value, but can't name it's type explicitly /// because the S type parameter depends on the subscriber's previous layers. 
In diff --git a/src/core/mods/module.rs b/src/core/mods/module.rs index ff181e4f..b65bbca2 100644 --- a/src/core/mods/module.rs +++ b/src/core/mods/module.rs @@ -3,8 +3,8 @@ use std::{ time::SystemTime, }; -use super::{canary, new, path, Library, Symbol}; -use crate::{error, Result}; +use super::{Library, Symbol, canary, new, path}; +use crate::{Result, error}; pub struct Module { handle: Option, diff --git a/src/core/mods/new.rs b/src/core/mods/new.rs index 77d89af4..258fdedc 100644 --- a/src/core/mods/new.rs +++ b/src/core/mods/new.rs @@ -1,6 +1,6 @@ use std::ffi::OsStr; -use super::{path, Library}; +use super::{Library, path}; use crate::{Err, Result}; const OPEN_FLAGS: i32 = libloading::os::unix::RTLD_LAZY | libloading::os::unix::RTLD_GLOBAL; diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index 0efee128..5aa0c9ca 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use ruma::{ - events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, MilliSecondsSinceUnixEpoch, OwnedEventId, + events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use super::StateKey; diff --git a/src/core/pdu/content.rs b/src/core/pdu/content.rs index fa724cb2..4e60ce6e 100644 --- a/src/core/pdu/content.rs +++ b/src/core/pdu/content.rs @@ -1,7 +1,7 @@ use serde::Deserialize; use serde_json::value::Value as JsonValue; -use crate::{err, implement, Result}; +use crate::{Result, err, implement}; #[must_use] #[implement(super::Pdu)] diff --git a/src/core/pdu/count.rs b/src/core/pdu/count.rs index 0135cf28..b880278f 100644 --- a/src/core/pdu/count.rs +++ b/src/core/pdu/count.rs @@ -4,7 +4,7 @@ use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr}; use ruma::api::Direction; -use crate::{err, Error, Result}; +use 
crate::{Error, Result, err}; #[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] pub enum Count { diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index d5c0561e..09ad1666 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -1,4 +1,4 @@ -use ruma::{events::TimelineEventType, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; +use ruma::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, events::TimelineEventType}; use serde_json::value::RawValue as RawJsonValue; use super::Pdu; diff --git a/src/core/pdu/event_id.rs b/src/core/pdu/event_id.rs index 09b33edc..e9d868b1 100644 --- a/src/core/pdu/event_id.rs +++ b/src/core/pdu/event_id.rs @@ -1,7 +1,7 @@ use ruma::{CanonicalJsonObject, OwnedEventId, RoomVersionId}; use serde_json::value::RawValue as RawJsonValue; -use crate::{err, Result}; +use crate::{Result, err}; /// Generates a correct eventId for the incoming pdu. /// diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 9cb42239..9fb2a3da 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -17,13 +17,14 @@ mod unsigned; use std::cmp::Ordering; use ruma::{ - events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, - OwnedRoomId, OwnedServerName, OwnedUserId, UInt, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, UInt, events::TimelineEventType, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; pub use self::{ + Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, builder::{Builder, Builder as PduBuilder}, count::Count, event::Event, @@ -31,7 +32,6 @@ pub use self::{ id::*, raw_id::*, state_key::{ShortStateKey, StateKey}, - Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, }; use crate::Result; diff --git a/src/core/pdu/raw_id.rs b/src/core/pdu/raw_id.rs index e1fd2381..318a0cd7 100644 --- a/src/core/pdu/raw_id.rs +++ b/src/core/pdu/raw_id.rs @@ 
-55,8 +55,8 @@ impl RawId { #[must_use] pub fn as_bytes(&self) -> &[u8] { match self { - | Self::Normal(ref raw) => raw, - | Self::Backfilled(ref raw) => raw, + | Self::Normal(raw) => raw, + | Self::Backfilled(raw) => raw, } } } diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs index 7c332719..409debfe 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -1,15 +1,15 @@ use ruma::{ - canonical_json::redact_content_in_place, - events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, OwnedEventId, RoomVersionId, + canonical_json::redact_content_in_place, + events::{TimelineEventType, room::redaction::RoomRedactionEventContent}, }; use serde::Deserialize; use serde_json::{ json, - value::{to_raw_value, RawValue as RawJsonValue}, + value::{RawValue as RawJsonValue, to_raw_value}, }; -use crate::{implement, Error, Result}; +use crate::{Error, Result, implement}; #[derive(Deserialize)] struct ExtractRedactedBecause { @@ -76,14 +76,21 @@ pub fn copy_redacts(&self) -> (Option, Box) { if let Ok(mut content) = serde_json::from_str::(self.content.get()) { - if let Some(redacts) = content.redacts { - return (Some(redacts), self.content.clone()); - } else if let Some(redacts) = self.redacts.clone() { - content.redacts = Some(redacts); - return ( - self.redacts.clone(), - to_raw_value(&content).expect("Must be valid, we only added redacts field"), - ); + match content.redacts { + | Some(redacts) => { + return (Some(redacts), self.content.clone()); + }, + | _ => match self.redacts.clone() { + | Some(redacts) => { + content.redacts = Some(redacts); + return ( + self.redacts.clone(), + to_raw_value(&content) + .expect("Must be valid, we only added redacts field"), + ); + }, + | _ => {}, + }, } } } diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 7d2fb1d6..4e7c5b83 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -1,8 +1,8 @@ use ruma::{ events::{ - room::member::RoomMemberEventContent, 
space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, + room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, }, serde::Raw, }; diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index 8482a48a..23897519 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -2,10 +2,10 @@ use std::collections::BTreeMap; use ruma::MilliSecondsSinceUnixEpoch; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}; +use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue, to_raw_value}; use super::Pdu; -use crate::{err, implement, is_true, Result}; +use crate::{Result, err, implement, is_true}; #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { diff --git a/src/core/server.rs b/src/core/server.rs index 80493c94..b67759d6 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -1,7 +1,7 @@ use std::{ sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }, time::SystemTime, }; @@ -9,7 +9,7 @@ use std::{ use ruma::OwnedServerName; use tokio::{runtime, sync::broadcast}; -use crate::{config, config::Config, log::Log, metrics::Metrics, Err, Result}; +use crate::{Err, Result, config, config::Config, log::Log, metrics::Metrics}; /// Server runtime state; public portion pub struct Server { diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index df2f8b36..4b8e55f3 100644 --- a/src/core/state_res/event_auth.rs +++ b/src/core/state_res/event_auth.rs @@ -1,10 +1,11 @@ use std::{borrow::Borrow, collections::BTreeSet}; use futures::{ - future::{join3, OptionFuture}, Future, + future::{OptionFuture, join3}, }; use ruma::{ + Int, OwnedUserId, RoomVersionId, UserId, events::room::{ create::RoomCreateEventContent, join_rules::{JoinRule, RoomJoinRulesEventContent}, 
@@ -14,21 +15,20 @@ use ruma::{ }, int, serde::{Base64, Raw}, - Int, OwnedUserId, RoomVersionId, UserId, }; use serde::{ - de::{Error as _, IgnoredAny}, Deserialize, + de::{Error as _, IgnoredAny}, }; use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; use super::{ + Error, Event, Result, StateEventType, StateKey, TimelineEventType, power_levels::{ deserialize_power_levels, deserialize_power_levels_content_fields, deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, }, room_version::RoomVersion, - Error, Event, Result, StateEventType, StateKey, TimelineEventType, }; use crate::{debug, error, trace, warn}; @@ -394,28 +394,27 @@ where } // If type is m.room.third_party_invite - let sender_power_level = if let Some(pl) = &power_levels_event { - let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; - if let Some(level) = content.get_user_power(sender) { - *level - } else { - content.users_default - } - } else { - // If no power level event found the creator gets 100 everyone else gets 0 - let is_creator = if room_version.use_room_create_sender { - room_create_event.sender() == sender - } else { - #[allow(deprecated)] - from_json_str::(room_create_event.content().get()) - .is_ok_and(|create| create.creator.unwrap() == *sender) - }; + let sender_power_level = match &power_levels_event { + | Some(pl) => { + let content = + deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + match content.get_user_power(sender) { + | Some(level) => *level, + | _ => content.users_default, + } + }, + | _ => { + // If no power level event found the creator gets 100 everyone else gets 0 + let is_creator = if room_version.use_room_create_sender { + room_create_event.sender() == sender + } else { + #[allow(deprecated)] + from_json_str::(room_create_event.content().get()) + .is_ok_and(|create| create.creator.unwrap() == *sender) + }; - if is_creator { - int!(100) - } else { - 
int!(0) - } + if is_creator { int!(100) } else { int!(0) } + }, }; // Allow if and only if sender's current power level is greater than @@ -452,19 +451,21 @@ where if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels { debug!("starting m.room.power_levels check"); - if let Some(required_pwr_lvl) = check_power_levels( + match check_power_levels( room_version, incoming_event, power_levels_event.as_ref(), sender_power_level, ) { - if !required_pwr_lvl { + | Some(required_pwr_lvl) => + if !required_pwr_lvl { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + }, + | _ => { warn!("m.room.power_levels was not allowed"); return Ok(false); - } - } else { - warn!("m.room.power_levels was not allowed"); - return Ok(false); + }, } debug!("m.room.power_levels event allowed"); } @@ -576,10 +577,9 @@ fn valid_membership_change( let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; - let user_pl = if let Some(level) = content.get_user_power(user_for_join_auth) { - *level - } else { - content.users_default + let user_pl = match content.get_user_power(user_for_join_auth) { + | Some(level) => *level, + | _ => content.users_default, }; (user_pl, invite) @@ -665,45 +665,48 @@ fn valid_membership_change( }, | MembershipState::Invite => { // If content has third_party_invite key - if let Some(tp_id) = third_party_invite.and_then(|i| i.deserialize().ok()) { - if target_user_current_membership == MembershipState::Ban { - warn!(?target_user_membership_event_id, "Can't invite banned user"); - false - } else { - let allow = verify_third_party_invite( - Some(target_user), - sender, - &tp_id, - current_third_party_invite, - ); - if !allow { - warn!("Third party invite invalid"); - } - allow - } - } else if !sender_is_joined - || target_user_current_membership == MembershipState::Join - || target_user_current_membership == MembershipState::Ban - { - warn!( - ?target_user_membership_event_id, - ?sender_membership_event_id, - 
"Can't invite user if sender not joined or the user is currently joined or \ - banned", - ); - false - } else { - let allow = sender_power - .filter(|&p| p >= &power_levels.invite) - .is_some(); - if !allow { - warn!( - ?target_user_membership_event_id, - ?power_levels_event_id, - "User does not have enough power to invite", - ); - } - allow + match third_party_invite.and_then(|i| i.deserialize().ok()) { + | Some(tp_id) => + if target_user_current_membership == MembershipState::Ban { + warn!(?target_user_membership_event_id, "Can't invite banned user"); + false + } else { + let allow = verify_third_party_invite( + Some(target_user), + sender, + &tp_id, + current_third_party_invite, + ); + if !allow { + warn!("Third party invite invalid"); + } + allow + }, + | _ => + if !sender_is_joined + || target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Ban + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't invite user if sender not joined or the user is currently \ + joined or banned", + ); + false + } else { + let allow = sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some(); + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to invite", + ); + } + allow + }, } }, | MembershipState::Leave => @@ -1111,23 +1114,23 @@ mod tests { use std::sync::Arc; use ruma::events::{ + StateEventType, TimelineEventType, room::{ join_rules::{ AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, }, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, TimelineEventType, }; use serde_json::value::to_raw_value as to_raw_json_value; use crate::state_res::{ + Event, EventTypeExt, RoomVersion, StateMap, event_auth::valid_membership_change, test_utils::{ - alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, - to_pdu_event, PduEvent, INITIAL_EVENTS, 
INITIAL_EVENTS_CREATE_ROOM, + INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, PduEvent, alice, charlie, ella, event_id, + member_content_ban, member_content_join, room_id, to_pdu_event, }, - Event, EventTypeExt, RoomVersion, StateMap, }; #[test] @@ -1156,21 +1159,23 @@ mod tests { let target_user = charlie(); let sender = alice(); - assert!(valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1199,21 +1204,23 @@ mod tests { let target_user = charlie(); let sender = charlie(); - assert!(!valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - 
.unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1242,21 +1249,23 @@ mod tests { let target_user = alice(); let sender = alice(); - assert!(valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1285,21 +1294,23 @@ mod tests { let target_user = alice(); let sender = charlie(); - assert!(!valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, 
sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1345,37 +1356,41 @@ mod tests { let target_user = ella(); let sender = ella(); - assert!(valid_membership_change( - &RoomVersion::V9, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - Some(alice()), - &MembershipState::Join, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + Some(alice()), + &MembershipState::Join, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + 
.unwrap() + ); - assert!(!valid_membership_change( - &RoomVersion::V9, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - Some(ella()), - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + Some(ella()), + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1413,20 +1428,22 @@ mod tests { let target_user = ella(); let sender = ella(); - assert!(valid_membership_change( - &RoomVersion::V7, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V7, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + 
fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } } diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 19ea3cc0..6bff0cf8 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -17,13 +17,14 @@ use std::{ hash::{BuildHasher, Hash}, }; -use futures::{future, stream, Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt, future, stream}; use ruma::{ + EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, events::{ - room::member::{MembershipState, RoomMemberEventContent}, StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, - int, EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, + int, }; use serde_json::from_str as from_json_str; @@ -263,7 +264,7 @@ where #[allow(clippy::arithmetic_side_effects)] fn get_auth_chain_diff( auth_chain_sets: &[HashSet], -) -> impl Iterator + Send +) -> impl Iterator + Send + use where Id: Clone + Eq + Hash + Send, Hasher: BuildHasher + Send + Sync, @@ -864,23 +865,23 @@ mod tests { use maplit::{hashmap, hashset}; use rand::seq::SliceRandom; use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, events::{ - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, TimelineEventType, + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, }, - int, uint, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, + int, uint, }; use serde_json::{json, value::to_raw_value as to_raw_json_value}; use super::{ - is_power_event, + Event, EventTypeExt, StateMap, is_power_event, room_version::RoomVersion, test_utils::{ - alice, bob, charlie, do_check, ella, event_id, member_content_ban, - member_content_join, room_id, to_init_pdu_event, 
to_pdu_event, zara, PduEvent, - TestStore, INITIAL_EVENTS, + INITIAL_EVENTS, PduEvent, TestStore, alice, bob, charlie, do_check, ella, event_id, + member_content_ban, member_content_join, room_id, to_init_pdu_event, to_pdu_event, + zara, }, - Event, EventTypeExt, StateMap, }; use crate::debug; @@ -1557,7 +1558,7 @@ mod tests { } macro_rules! state_set { - ($($kind:expr => $key:expr => $id:expr),* $(,)?) => {{ + ($($kind:expr_2021 => $key:expr_2021 => $id:expr_2021),* $(,)?) => {{ #[allow(unused_mut)] let mut x = StateMap::new(); $( diff --git a/src/core/state_res/power_levels.rs b/src/core/state_res/power_levels.rs index e1768574..045b1666 100644 --- a/src/core/state_res/power_levels.rs +++ b/src/core/state_res/power_levels.rs @@ -1,16 +1,16 @@ use std::collections::BTreeMap; use ruma::{ - events::{room::power_levels::RoomPowerLevelsEventContent, TimelineEventType}, - power_levels::{default_power_level, NotificationPowerLevels}, + Int, OwnedUserId, UserId, + events::{TimelineEventType, room::power_levels::RoomPowerLevelsEventContent}, + power_levels::{NotificationPowerLevels, default_power_level}, serde::{ deserialize_v1_powerlevel, vec_deserialize_int_powerlevel_values, vec_deserialize_v1_powerlevel_values, }, - Int, OwnedUserId, UserId, }; use serde::Deserialize; -use serde_json::{from_str as from_json_str, Error}; +use serde_json::{Error, from_str as from_json_str}; use tracing::error; use super::{Result, RoomVersion}; diff --git a/src/core/state_res/state_event.rs b/src/core/state_res/state_event.rs index 2c038cfe..ac9e29d6 100644 --- a/src/core/state_res/state_event.rs +++ b/src/core/state_res/state_event.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; use serde_json::value::RawValue as RawJsonValue; /// Abstraction of a PDU so users can have their own PDU types. 
diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs index 9c2b151f..d96ee927 100644 --- a/src/core/state_res/test_utils.rs +++ b/src/core/state_res/test_utils.rs @@ -2,33 +2,33 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, sync::{ - atomic::{AtomicU64, Ordering::SeqCst}, Arc, + atomic::{AtomicU64, Ordering::SeqCst}, }, }; use futures::future::ready; use ruma::{ - event_id, + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, ServerSignatures, + UserId, event_id, events::{ + TimelineEventType, pdu::{EventHash, Pdu, RoomV3Pdu}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - TimelineEventType, }, - int, room_id, uint, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, - RoomVersionId, ServerSignatures, UserId, + int, room_id, uint, user_id, }; use serde_json::{ json, - value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, + value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, }; pub(crate) use self::event::PduEvent; use super::auth_types_for_event; -use crate::{info, Event, EventTypeExt, Result, StateMap}; +use crate::{Event, EventTypeExt, Result, StateMap, info}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); @@ -584,8 +584,8 @@ pub(crate) fn INITIAL_EDGES() -> Vec { pub(crate) mod event { use ruma::{ - events::{pdu::Pdu, TimelineEventType}, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + events::{TimelineEventType, pdu::Pdu}, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index 40316440..04101be4 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -1,6 +1,6 @@ use bytesize::ByteSize; -use crate::{err, Result}; +use crate::{Result, err}; /// Parse a human-writable size string w/ si-unit suffix into integer #[inline] diff --git 
a/src/core/utils/defer.rs b/src/core/utils/defer.rs index 60243e97..4887d164 100644 --- a/src/core/utils/defer.rs +++ b/src/core/utils/defer.rs @@ -12,14 +12,14 @@ macro_rules! defer { let _defer_ = _Defer_ { closure: || $body }; }; - ($body:expr) => { + ($body:expr_2021) => { $crate::defer! {{ $body }} }; } #[macro_export] macro_rules! scope_restore { - ($val:ident, $ours:expr) => { + ($val:ident, $ours:expr_2021) => { let theirs = $crate::utils::exchange($val, $ours); $crate::defer! {{ *$val = theirs; }}; }; diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs index 6cb2f1fe..c93c7dbc 100644 --- a/src/core/utils/future/bool_ext.rs +++ b/src/core/utils/future/bool_ext.rs @@ -3,8 +3,8 @@ use std::marker::Unpin; use futures::{ - future::{select_ok, try_join, try_join_all, try_select}, Future, FutureExt, + future::{select_ok, try_join, try_join_all, try_select}, }; pub trait BoolExt diff --git a/src/core/utils/future/ext_ext.rs b/src/core/utils/future/ext_ext.rs index 38decaae..219bb664 100644 --- a/src/core/utils/future/ext_ext.rs +++ b/src/core/utils/future/ext_ext.rs @@ -2,7 +2,7 @@ use std::marker::Unpin; -use futures::{future, future::Select, Future}; +use futures::{Future, future, future::Select}; /// This interface is not necessarily complete; feel free to add as-needed. 
pub trait ExtExt diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 2198a84f..e1d96941 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -3,7 +3,7 @@ mod ext_ext; mod option_ext; mod try_ext_ext; -pub use bool_ext::{and, or, BoolExt}; +pub use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs index ed61de56..d553e5dc 100644 --- a/src/core/utils/future/option_ext.rs +++ b/src/core/utils/future/option_ext.rs @@ -1,6 +1,6 @@ #![allow(clippy::wrong_self_convention)] -use futures::{future::OptionFuture, Future, FutureExt}; +use futures::{Future, FutureExt, future::OptionFuture}; pub trait OptionExt { fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send; diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index aa3d72e4..b2114e56 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -7,9 +7,8 @@ use std::marker::Unpin; use futures::{ - future, + TryFuture, TryFutureExt, future, future::{MapOkOrElse, TrySelect, UnwrapOrElse}, - TryFuture, TryFutureExt, }; /// This interface is not necessarily complete; feel free to add as-needed. 
diff --git a/src/core/utils/hash/argon.rs b/src/core/utils/hash/argon.rs index 18146b47..66dfab75 100644 --- a/src/core/utils/hash/argon.rs +++ b/src/core/utils/hash/argon.rs @@ -1,11 +1,11 @@ use std::sync::OnceLock; use argon2::{ - password_hash, password_hash::SaltString, Algorithm, Argon2, Params, PasswordHash, - PasswordHasher, PasswordVerifier, Version, + Algorithm, Argon2, Params, PasswordHash, PasswordHasher, PasswordVerifier, Version, + password_hash, password_hash::SaltString, }; -use crate::{err, Error, Result}; +use crate::{Error, Result, err}; const M_COST: u32 = Params::DEFAULT_M_COST; // memory size in 1 KiB blocks const T_COST: u32 = Params::DEFAULT_T_COST; // nr of iterations diff --git a/src/core/utils/json.rs b/src/core/utils/json.rs index 4a3fec8f..3f2f225e 100644 --- a/src/core/utils/json.rs +++ b/src/core/utils/json.rs @@ -1,6 +1,6 @@ use std::{fmt, str::FromStr}; -use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; +use ruma::{CanonicalJsonError, CanonicalJsonObject, canonical_json::try_from_json_map}; use crate::Result; diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index ed157daf..488f2a13 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -6,7 +6,7 @@ use std::{cmp, convert::TryFrom}; pub use checked_ops::checked_ops; pub use self::{expected::Expected, tried::Tried}; -use crate::{debug::type_name, err, Err, Error, Result}; +use crate::{Err, Error, Result, debug::type_name, err}; /// Checked arithmetic expression. 
Returns a Result #[macro_export] diff --git a/src/core/utils/math/tried.rs b/src/core/utils/math/tried.rs index 2006d2d5..09de731f 100644 --- a/src/core/utils/math/tried.rs +++ b/src/core/utils/math/tried.rs @@ -1,6 +1,6 @@ use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; -use crate::{checked, Result}; +use crate::{Result, checked}; pub trait Tried { #[inline] diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index c2d8ed45..53460c59 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -49,7 +49,7 @@ pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, sou #[macro_export] macro_rules! extract_variant { - ($e:expr, $variant:path) => { + ($e:expr_2021, $variant:path) => { match $e { | $variant(value) => Some(value), | _ => None, @@ -90,7 +90,7 @@ macro_rules! pair_of { ($decl, $decl) }; - ($init:expr) => { + ($init:expr_2021) => { ($init, $init) }; } @@ -134,7 +134,7 @@ macro_rules! is_equal_to { |x| x == $val }; - ($val:expr) => { + ($val:expr_2021) => { |x| x == $val }; } @@ -146,7 +146,7 @@ macro_rules! 
is_less_than { |x| x < $val }; - ($val:expr) => { + ($val:expr_2021) => { |x| x < $val }; } diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index 03a4adf1..01504ce6 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -6,7 +6,7 @@ use std::{ use tokio::sync::OwnedMutexGuard as Omg; -use crate::{err, Result}; +use crate::{Result, err}; /// Map of Mutexes pub struct MutexMap { diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs index 1d289c6e..72487633 100644 --- a/src/core/utils/rand.rs +++ b/src/core/utils/rand.rs @@ -4,7 +4,7 @@ use std::{ }; use arrayvec::ArrayString; -use rand::{seq::SliceRandom, thread_rng, Rng}; +use rand::{Rng, seq::SliceRandom, thread_rng}; pub fn shuffle(vec: &mut [T]) { let mut rng = thread_rng(); diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs index 282008e7..832f2638 100644 --- a/src/core/utils/stream/broadband.rs +++ b/src/core/utils/stream/broadband.rs @@ -3,11 +3,11 @@ use std::convert::identity; use futures::{ - stream::{Stream, StreamExt}, Future, + stream::{Stream, StreamExt}, }; -use super::{automatic_width, ReadyExt}; +use super::{ReadyExt, automatic_width}; /// Concurrency extensions to augment futures::StreamExt. 
broad_ combinators /// produce out-of-order diff --git a/src/core/utils/stream/cloned.rs b/src/core/utils/stream/cloned.rs index d6a0e647..b89e4695 100644 --- a/src/core/utils/stream/cloned.rs +++ b/src/core/utils/stream/cloned.rs @@ -1,6 +1,6 @@ use std::clone::Clone; -use futures::{stream::Map, Stream, StreamExt}; +use futures::{Stream, StreamExt, stream::Map}; pub trait Cloned<'a, T, S> where diff --git a/src/core/utils/stream/ignore.rs b/src/core/utils/stream/ignore.rs index 9baa00f3..37c89d9a 100644 --- a/src/core/utils/stream/ignore.rs +++ b/src/core/utils/stream/ignore.rs @@ -1,4 +1,4 @@ -use futures::{future::ready, Stream, StreamExt, TryStream}; +use futures::{Stream, StreamExt, TryStream, future::ready}; use crate::{Error, Result}; diff --git a/src/core/utils/stream/iter_stream.rs b/src/core/utils/stream/iter_stream.rs index 9077deac..e9a91b1c 100644 --- a/src/core/utils/stream/iter_stream.rs +++ b/src/core/utils/stream/iter_stream.rs @@ -1,7 +1,6 @@ use futures::{ - stream, + StreamExt, stream, stream::{Stream, TryStream}, - StreamExt, }; use crate::{Error, Result}; diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 23455322..a356f05f 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -14,8 +14,8 @@ mod try_wideband; mod wideband; pub use band::{ - automatic_amplification, automatic_width, set_amplification, set_width, AMPLIFICATION_LIMIT, - WIDTH_LIMIT, + AMPLIFICATION_LIMIT, WIDTH_LIMIT, automatic_amplification, automatic_width, + set_amplification, set_width, }; pub use broadband::BroadbandExt; pub use cloned::Cloned; diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index d93187e9..dce7d378 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{ready, Ready}, + future::{Ready, ready}, stream::{ All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, 
StreamExt, TakeWhile, }, diff --git a/src/core/utils/stream/try_parallel.rs b/src/core/utils/stream/try_parallel.rs index 7f8a63b1..60fef0ae 100644 --- a/src/core/utils/stream/try_parallel.rs +++ b/src/core/utils/stream/try_parallel.rs @@ -1,10 +1,10 @@ //! Parallelism stream combinator extensions to futures::Stream -use futures::{stream::TryStream, TryFutureExt}; +use futures::{TryFutureExt, stream::TryStream}; use tokio::{runtime, task::JoinError}; use super::TryBroadbandExt; -use crate::{utils::sys::available_parallelism, Error, Result}; +use crate::{Error, Result, utils::sys::available_parallelism}; /// Parallelism extensions to augment futures::StreamExt. These combinators are /// for computation-oriented workloads, unlike -band combinators for I/O diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 3261acb6..611c177f 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{ready, Ready}, + future::{Ready, ready}, stream::{AndThen, TryFilterMap, TryFold, TryForEach, TryStream, TryStreamExt, TryTakeWhile}, }; diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs index 3ddce6ad..ea3b50fc 100644 --- a/src/core/utils/stream/try_tools.rs +++ b/src/core/utils/stream/try_tools.rs @@ -1,7 +1,7 @@ //! 
TryStreamTools for futures::TryStream #![allow(clippy::type_complexity)] -use futures::{future, future::Ready, stream::TryTakeWhile, TryStream, TryStreamExt}; +use futures::{TryStream, TryStreamExt, future, future::Ready, stream::TryTakeWhile}; use crate::Result; diff --git a/src/core/utils/stream/wideband.rs b/src/core/utils/stream/wideband.rs index a8560bb4..cbebf610 100644 --- a/src/core/utils/stream/wideband.rs +++ b/src/core/utils/stream/wideband.rs @@ -3,11 +3,11 @@ use std::convert::identity; use futures::{ - stream::{Stream, StreamExt}, Future, + stream::{Stream, StreamExt}, }; -use super::{automatic_width, ReadyExt}; +use super::{ReadyExt, automatic_width}; /// Concurrency extensions to augment futures::StreamExt. wideband_ combinators /// produce in-order. diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index cc692c14..9340d009 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -5,7 +5,7 @@ mod unquote; mod unquoted; pub use self::{between::Between, split::SplitInfallible, unquote::Unquote, unquoted::Unquoted}; -use crate::{utils::exchange, Result}; +use crate::{Result, utils::exchange}; pub const EMPTY: &str = ""; diff --git a/src/core/utils/string/unquoted.rs b/src/core/utils/string/unquoted.rs index 5b002d99..88fa011f 100644 --- a/src/core/utils/string/unquoted.rs +++ b/src/core/utils/string/unquoted.rs @@ -1,9 +1,9 @@ use std::ops::Deref; -use serde::{de, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, de}; use super::Unquote; -use crate::{err, Result}; +use crate::{Result, err}; /// Unquoted string which deserialized from a quoted string. Construction from a /// &str is infallible such that the input can already be unquoted. 
Construction diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index a0d5be52..f795ccb8 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; pub use compute::available_parallelism; -use crate::{debug, Result}; +use crate::{Result, debug}; /// This is needed for opening lots of file descriptors, which tends to /// happen more often when using RocksDB and making lots of federation @@ -16,7 +16,7 @@ use crate::{debug, Result}; /// * #[cfg(unix)] pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { - use nix::sys::resource::{getrlimit, setrlimit, Resource::RLIMIT_NOFILE as NOFILE}; + use nix::sys::resource::{Resource::RLIMIT_NOFILE as NOFILE, getrlimit, setrlimit}; let (soft_limit, hard_limit) = getrlimit(NOFILE)?; if soft_limit < hard_limit { diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs index ce2aa504..5274cd66 100644 --- a/src/core/utils/sys/compute.rs +++ b/src/core/utils/sys/compute.rs @@ -2,7 +2,7 @@ use std::{cell::Cell, fmt::Debug, path::PathBuf, sync::LazyLock}; -use crate::{is_equal_to, Result}; +use crate::{Result, is_equal_to}; type Id = usize; @@ -45,7 +45,7 @@ pub fn set_affinity(mut ids: I) where I: Iterator + Clone + Debug, { - use core_affinity::{set_each_for_current, set_for_current, CoreId}; + use core_affinity::{CoreId, set_each_for_current, set_for_current}; let n = ids.clone().count(); let mask: Mask = ids.clone().fold(0, |mask, id| { @@ -118,7 +118,7 @@ pub fn cores_available() -> impl Iterator { from_mask(*CORES_AVAILABL #[cfg(target_os = "linux")] #[inline] pub fn getcpu() -> Result { - use crate::{utils::math, Error}; + use crate::{Error, utils::math}; // SAFETY: This is part of an interface with many low-level calls taking many // raw params, but it's unclear why this specific call is unsafe. 
Nevertheless diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index 25b17904..b11df7bb 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -3,7 +3,7 @@ use std::{ ffi::OsStr, fs, - fs::{read_to_string, FileType}, + fs::{FileType, read_to_string}, iter::IntoIterator, path::{Path, PathBuf}, }; @@ -11,9 +11,9 @@ use std::{ use libc::dev_t; use crate::{ + Result, result::FlatOk, utils::{result::LogDebugErr, string::SplitInfallible}, - Result, }; /// Device characteristics useful for random access throughput diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index 1bcb92b8..05a0655b 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -241,7 +241,7 @@ fn set_intersection_sorted_all() { #[tokio::test] async fn set_intersection_sorted_stream2() { use futures::StreamExt; - use utils::{set::intersection_sorted_stream2, IterStream}; + use utils::{IterStream, set::intersection_sorted_stream2}; let a = ["bar"]; let b = ["bar", "foo"]; diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index 81fdda2a..73f73971 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -2,7 +2,7 @@ pub mod exponential_backoff; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use crate::{err, Result}; +use crate::{Result, err}; #[inline] #[must_use] diff --git a/src/database/de.rs b/src/database/de.rs index 441bb4ec..9c0997ff 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,10 +1,9 @@ use conduwuit::{ - arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, Error, Result, + Error, Result, arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, }; use serde::{ - de, + Deserialize, de, de::{DeserializeSeed, Visitor}, - Deserialize, }; use crate::util::unhandled; diff --git a/src/database/engine.rs b/src/database/engine.rs index 22e2b9c8..38dd7512 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -12,21 +12,21 @@ mod repair; use 
std::{ ffi::CStr, sync::{ - atomic::{AtomicU32, Ordering}, Arc, + atomic::{AtomicU32, Ordering}, }, }; -use conduwuit::{debug, info, warn, Err, Result}; +use conduwuit::{Err, Result, debug, info, warn}; use rocksdb::{ AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded, WaitForCompactOptions, }; use crate::{ + Context, pool::Pool, util::{map_err, result}, - Context, }; pub struct Engine { diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs index db718c2c..bb110630 100644 --- a/src/database/engine/backup.rs +++ b/src/database/engine/backup.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use conduwuit::{error, implement, info, utils::time::rfc2822_from_seconds, warn, Result}; +use conduwuit::{Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; use super::Engine; diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 83bce08c..5ddb9473 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -1,4 +1,4 @@ -use conduwuit::{err, utils::math::Expected, Config, Result}; +use conduwuit::{Config, Result, err, utils::math::Expected}; use rocksdb::{ BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, @@ -6,7 +6,7 @@ use rocksdb::{ }; use super::descriptor::{CacheDisp, Descriptor}; -use crate::{util::map_err, Context}; +use crate::{Context, util::map_err}; pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs index 04e08854..380e37af 100644 --- a/src/database/engine/context.rs +++ b/src/database/engine/context.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{debug, utils::math::usize_from_f64, Result, Server}; +use conduwuit::{Result, Server, debug, utils::math::usize_from_f64}; use rocksdb::{Cache, 
Env, LruCacheOptions}; use crate::{or_else, pool::Pool}; diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 6abeb4b0..18cec742 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -1,7 +1,7 @@ use std::{cmp, convert::TryFrom}; -use conduwuit::{utils, Config, Result}; -use rocksdb::{statistics::StatsLevel, Cache, DBRecoveryMode, Env, LogLevel, Options}; +use conduwuit::{Config, Result, utils}; +use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel}; use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs index 33d6fdc4..1f38a63c 100644 --- a/src/database/engine/files.rs +++ b/src/database/engine/files.rs @@ -1,11 +1,11 @@ -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use rocksdb::LiveFile as SstFile; use super::Engine; use crate::util::map_err; #[implement(Engine)] -pub fn file_list(&self) -> impl Iterator> + Send { +pub fn file_list(&self) -> impl Iterator> + Send + use<> { self.db .live_files() .map_err(map_err) diff --git a/src/database/engine/memory_usage.rs b/src/database/engine/memory_usage.rs index 01859815..9bb5c535 100644 --- a/src/database/engine/memory_usage.rs +++ b/src/database/engine/memory_usage.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use rocksdb::perf::get_memory_usage_stats; use super::Engine; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 59dabce1..24010c3a 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -1,20 +1,20 @@ use std::{ collections::BTreeSet, path::Path, - sync::{atomic::AtomicU32, Arc}, + sync::{Arc, atomic::AtomicU32}, }; -use conduwuit::{debug, implement, info, warn, Result}; +use conduwuit::{Result, debug, implement, info, warn}; use rocksdb::{ColumnFamilyDescriptor, Options}; use super::{ + Db, 
Engine, cf_opts::cf_options, db_opts::db_options, descriptor::{self, Descriptor}, repair::repair, - Db, Engine, }; -use crate::{or_else, Context}; +use crate::{Context, or_else}; #[implement(Engine)] #[tracing::instrument(skip_all)] diff --git a/src/database/engine/repair.rs b/src/database/engine/repair.rs index 61283904..aeec0caf 100644 --- a/src/database/engine/repair.rs +++ b/src/database/engine/repair.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use conduwuit::{info, warn, Err, Result}; +use conduwuit::{Err, Result, info, warn}; use rocksdb::Options; use super::Db; diff --git a/src/database/handle.rs b/src/database/handle.rs index 43b57839..484e5618 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -4,7 +4,7 @@ use conduwuit::Result; use rocksdb::DBPinnableSlice; use serde::{Deserialize, Serialize, Serializer}; -use crate::{keyval::deserialize_val, Deserialized, Slice}; +use crate::{Deserialized, Slice, keyval::deserialize_val}; pub struct Handle<'a> { val: DBPinnableSlice<'a>, diff --git a/src/database/keyval.rs b/src/database/keyval.rs index f572d15f..6059cd53 100644 --- a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,4 +1,4 @@ -use conduwuit::{smallvec::SmallVec, Result}; +use conduwuit::{Result, smallvec::SmallVec}; use serde::{Deserialize, Serialize}; use crate::{de, ser}; diff --git a/src/database/map.rs b/src/database/map.rs index 37425ecf..c5a908ba 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -40,7 +40,7 @@ pub(crate) use self::options::{ read_options_default, write_options_default, }; pub use self::{get_batch::Get, qry_batch::Qry}; -use crate::{watchers::Watchers, Engine}; +use crate::{Engine, watchers::Watchers}; pub struct Map { name: &'static str, diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs index c0381eb4..84476de6 100644 --- a/src/database/map/compact.rs +++ b/src/database/map/compact.rs @@ -1,4 +1,4 @@ -use conduwuit::{implement, Err, Result}; +use conduwuit::{Err, Result, 
implement}; use rocksdb::{BottommostLevelCompaction, CompactOptions}; use crate::keyval::KeyBuf; diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index 7a09b358..474818e8 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; use conduwuit::{ + Result, arrayvec::ArrayVec, err, implement, utils::{future::TryExtExt, result::FlatOk}, - Result, }; use futures::FutureExt; use serde::Serialize; @@ -16,7 +16,10 @@ use crate::{keyval::KeyBuf, ser}; /// - harder errors may not be reported #[inline] #[implement(super::Map)] -pub fn contains(self: &Arc, key: &K) -> impl Future + Send + '_ +pub fn contains( + self: &Arc, + key: &K, +) -> impl Future + Send + '_ + use<'_, K> where K: Serialize + ?Sized + Debug, { @@ -32,7 +35,7 @@ where pub fn acontains( self: &Arc, key: &K, -) -> impl Future + Send + '_ +) -> impl Future + Send + '_ + use<'_, MAX, K> where K: Serialize + ?Sized + Debug, { @@ -49,7 +52,7 @@ pub fn bcontains( self: &Arc, key: &K, buf: &mut B, -) -> impl Future + Send + '_ +) -> impl Future + Send + '_ + use<'_, K, B> where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, @@ -62,7 +65,10 @@ where /// - key is raw #[inline] #[implement(super::Map)] -pub fn exists<'a, K>(self: &'a Arc, key: &K) -> impl Future + Send + 'a +pub fn exists<'a, K>( + self: &'a Arc, + key: &K, +) -> impl Future + Send + 'a + use<'a, K> where K: AsRef<[u8]> + ?Sized + Debug + 'a, { diff --git a/src/database/map/count.rs b/src/database/map/count.rs index 22b298b9..78f9e2e3 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -16,7 +16,10 @@ pub fn count(self: &Arc) -> impl Future + Send + '_ { /// - From is a structured key #[implement(super::Map)] #[inline] -pub fn count_from<'a, P>(self: &'a Arc, from: &P) -> impl Future + Send + 'a +pub fn count_from<'a, P>( + self: &'a Arc, + from: &P, +) -> impl Future + Send + 'a + 
use<'a, P> where P: Serialize + ?Sized + Debug + 'a, { @@ -46,7 +49,7 @@ where pub fn count_prefix<'a, P>( self: &'a Arc, prefix: &P, -) -> impl Future + Send + 'a +) -> impl Future + Send + 'a + use<'a, P> where P: Serialize + ?Sized + Debug + 'a, { diff --git a/src/database/map/get.rs b/src/database/map/get.rs index d6c65be2..0971fb17 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,20 +1,23 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; -use futures::{future::ready, Future, FutureExt, TryFutureExt}; +use conduwuit::{Err, Result, err, implement, utils::result::MapExpect}; +use futures::{Future, FutureExt, TryFutureExt, future::ready}; use rocksdb::{DBPinnableSlice, ReadOptions}; use tokio::task; use crate::{ - util::{is_incomplete, map_err, or_else}, Handle, + util::{is_incomplete, map_err, or_else}, }; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. 
#[implement(super::Map)] #[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn get(self: &Arc, key: &K) -> impl Future>> + Send +pub fn get( + self: &Arc, + key: &K, +) -> impl Future>> + Send + use<'_, K> where K: AsRef<[u8]> + Debug + ?Sized, { diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index ab9c1dc8..e23a8848 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,12 +1,11 @@ use std::{convert::AsRef, sync::Arc}; use conduwuit::{ - implement, + Result, implement, utils::{ - stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, + stream::{WidebandExt, automatic_amplification, automatic_width}, }, - Result, }; use futures::{Stream, StreamExt, TryStreamExt}; use rocksdb::{DBPinnableSlice, ReadOptions}; @@ -64,7 +63,7 @@ where pub(crate) fn get_batch_cached<'a, I, K>( &self, keys: I, -) -> impl Iterator>>> + Send +) -> impl Iterator>>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -78,7 +77,7 @@ where pub(crate) fn get_batch_blocking<'a, I, K>( &self, keys: I, -) -> impl Iterator>> + Send +) -> impl Iterator>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -92,7 +91,7 @@ fn get_batch_blocking_opts<'a, I, K>( &self, keys: I, read_options: &ReadOptions, -) -> impl Iterator>, rocksdb::Error>> + Send +) -> impl Iterator>, rocksdb::Error>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs index 2fe70f15..7ca932a5 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use 
serde::Deserialize; diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 76c76325..c9b1717a 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use super::stream_from::is_cached; use crate::{ - keyval::{result_deserialize_key, serialize_key, Key}, + keyval::{Key, result_deserialize_key, serialize_key}, stream, }; @@ -15,7 +15,7 @@ use crate::{ pub fn keys_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -25,7 +25,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn keys_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn keys_from_raw

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -37,7 +40,7 @@ where pub fn keys_raw_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -47,7 +50,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_keys_from

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn raw_keys_from

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 28bc7ccd..09dd79ac 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -1,16 +1,16 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize_key, serialize_key, Key}; +use crate::keyval::{Key, result_deserialize_key, serialize_key}; #[implement(super::Map)] pub fn keys_prefix<'a, K, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -24,7 +24,7 @@ where pub fn keys_prefix_raw

    ( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs index 178f4a61..c6f13c0b 100644 --- a/src/database/map/qry.rs +++ b/src/database/map/qry.rs @@ -1,17 +1,20 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; -use conduwuit::{arrayvec::ArrayVec, implement, Result}; +use conduwuit::{Result, arrayvec::ArrayVec, implement}; use futures::Future; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::{Handle, keyval::KeyBuf, ser}; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is serialized into an allocated buffer to perform /// the query. #[implement(super::Map)] #[inline] -pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send +pub fn qry( + self: &Arc, + key: &K, +) -> impl Future>> + Send + use<'_, K> where K: Serialize + ?Sized + Debug, { @@ -27,7 +30,7 @@ where pub fn aqry( self: &Arc, key: &K, -) -> impl Future>> + Send +) -> impl Future>> + Send + use<'_, MAX, K> where K: Serialize + ?Sized + Debug, { @@ -43,7 +46,7 @@ pub fn bqry( self: &Arc, key: &K, buf: &mut B, -) -> impl Future>> + Send +) -> impl Future>> + Send + use<'_, K, B> where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs index 31817c48..f44d1c86 100644 --- a/src/database/map/qry_batch.rs +++ b/src/database/map/qry_batch.rs @@ -1,17 +1,16 @@ use std::{fmt::Debug, sync::Arc}; use conduwuit::{ - implement, + Result, implement, utils::{ - stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, + stream::{WidebandExt, automatic_amplification, automatic_width}, }, - Result, }; use futures::{Stream, StreamExt, TryStreamExt}; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::{Handle, keyval::KeyBuf, ser}; pub trait Qry<'a, K, S> where 
diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 21558a17..c00f3e55 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index 65072337..04e457dc 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use super::rev_stream_from::is_cached; use crate::{ - keyval::{result_deserialize_key, serialize_key, Key}, + keyval::{Key, result_deserialize_key, serialize_key}, stream, }; @@ -15,7 +15,7 @@ use crate::{ pub fn rev_keys_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -29,7 +29,7 @@ where pub fn rev_keys_from_raw

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -41,7 +41,7 @@ where pub fn rev_keys_raw_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -55,7 +55,7 @@ where pub fn rev_raw_keys_from

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index fb29acaf..fbe9f9ca 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -1,16 +1,16 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize_key, serialize_key, Key}; +use crate::keyval::{Key, result_deserialize_key, serialize_key}; #[implement(super::Map)] pub fn rev_keys_prefix<'a, K, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -24,7 +24,7 @@ where pub fn rev_keys_prefix_raw

    ( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index f55053be..fc2d1116 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index ddc98607..d67986e7 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use tokio::task; use crate::{ - keyval::{result_deserialize, serialize_key, KeyVal}, + keyval::{KeyVal, result_deserialize, serialize_key}, stream, util::is_incomplete, }; @@ -20,7 +20,7 @@ use crate::{ pub fn rev_stream_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -39,7 +39,7 @@ where pub fn rev_stream_from_raw

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -55,7 +55,7 @@ where pub fn rev_stream_raw_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -74,7 +74,7 @@ where pub fn rev_raw_stream_from

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 22a2ce53..46dc9247 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize, serialize_key, KeyVal}; +use crate::keyval::{KeyVal, result_deserialize, serialize_key}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -14,7 +14,7 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; pub fn rev_stream_prefix<'a, K, V, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +33,7 @@ where pub fn rev_stream_prefix_raw

    ( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index bfc8ba04..f1450b6f 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 74140a65..00c3a051 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use tokio::task; use crate::{ - keyval::{result_deserialize, serialize_key, KeyVal}, + keyval::{KeyVal, result_deserialize, serialize_key}, stream, }; @@ -19,7 +19,7 @@ use crate::{ pub fn stream_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -37,7 +37,7 @@ where pub fn stream_from_raw

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -53,7 +53,7 @@ where pub fn stream_raw_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -71,7 +71,7 @@ where pub fn raw_stream_from

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index adacfc81..a26478aa 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize, serialize_key, KeyVal}; +use crate::keyval::{KeyVal, result_deserialize, serialize_key}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -14,7 +14,7 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; pub fn stream_prefix<'a, K, V, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +33,7 @@ where pub fn stream_prefix_raw

    ( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/maps.rs b/src/database/maps.rs index fc216ee0..b060ab8d 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -3,8 +3,8 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::Result; use crate::{ - engine::descriptor::{self, CacheDisp, Descriptor}, Engine, Map, + engine::descriptor::{self, CacheDisp, Descriptor}, }; pub(super) type Maps = BTreeMap; diff --git a/src/database/mod.rs b/src/database/mod.rs index 4f8e2ad9..0481d1bd 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -23,18 +23,18 @@ mod watchers; use std::{ops::Index, sync::Arc}; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; pub use self::{ de::{Ignore, IgnoreAll}, deserialized::Deserialized, handle::Handle, - keyval::{serialize_key, serialize_val, KeyVal, Slice}, - map::{compact, Get, Map, Qry}, - ser::{serialize, serialize_to, serialize_to_vec, Cbor, Interfix, Json, Separator, SEP}, + keyval::{KeyVal, Slice, serialize_key, serialize_val}, + map::{Get, Map, Qry, compact}, + ser::{Cbor, Interfix, Json, SEP, Separator, serialize, serialize_to, serialize_to_vec}, }; pub(crate) use self::{ - engine::{context::Context, Engine}, + engine::{Engine, context::Context}, util::or_else, }; use crate::maps::{Maps, MapsKey, MapsVal}; diff --git a/src/database/pool.rs b/src/database/pool.rs index 7636ff5e..e6ed59ac 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -3,8 +3,8 @@ mod configure; use std::{ mem::take, sync::{ - atomic::{AtomicUsize, Ordering}, Arc, Mutex, + atomic::{AtomicUsize, Ordering}, }, thread, thread::JoinHandle, @@ -12,19 +12,18 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - debug, debug_warn, err, error, implement, + Error, Result, Server, debug, debug_warn, err, error, implement, result::DebugInspect, 
smallvec::SmallVec, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, - Error, Result, Server, }; -use futures::{channel::oneshot, TryFutureExt}; +use futures::{TryFutureExt, channel::oneshot}; use oneshot::Sender as ResultSender; use rocksdb::Direction; use self::configure::configure; -use crate::{keyval::KeyBuf, stream, Handle, Map}; +use crate::{Handle, Map, keyval::KeyBuf, stream}; /// Frontend thread-pool. Operating system threads are used to make database /// requests which are not cached. These thread-blocking requests are offloaded diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index ff42ef51..92dda56e 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -1,7 +1,7 @@ use std::{path::PathBuf, sync::Arc}; use conduwuit::{ - debug, debug_info, expected, is_equal_to, + Server, debug, debug_info, expected, is_equal_to, utils::{ math::usize_from_f64, result::LogDebugErr, @@ -9,7 +9,6 @@ use conduwuit::{ stream::{AMPLIFICATION_LIMIT, WIDTH_LIMIT}, sys::{compute::is_core_available, storage}, }, - Server, }; use super::{QUEUE_LIMIT, WORKER_LIMIT}; diff --git a/src/database/ser.rs b/src/database/ser.rs index 372b7522..6dd2043d 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,7 +1,7 @@ use std::io::Write; -use conduwuit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; -use serde::{ser, Deserialize, Serialize}; +use conduwuit::{Error, Result, debug::type_name, err, result::DebugInspect, utils::exchange}; +use serde::{Deserialize, Serialize, ser}; use crate::util::unhandled; diff --git a/src/database/stream.rs b/src/database/stream.rs index f3063bb3..eb856b3f 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -5,15 +5,15 @@ mod keys_rev; use std::sync::Arc; -use conduwuit::{utils::exchange, Result}; +use conduwuit::{Result, utils::exchange}; use rocksdb::{DBRawIteratorWithThreadMode, ReadOptions}; pub(crate) use 
self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; use crate::{ + Map, Slice, engine::Db, keyval::{Key, KeyVal, Val}, util::{is_incomplete, map_err}, - Map, Slice, }; pub(crate) struct State<'a> { diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index 8814419e..ede2b822 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{keyval_longevity, Cursor, State}; +use super::{Cursor, State, keyval_longevity}; use crate::keyval::KeyVal; pub(crate) struct Items<'a> { diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index f6fcb0e5..dba8d16c 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{keyval_longevity, Cursor, State}; +use super::{Cursor, State, keyval_longevity}; use crate::keyval::KeyVal; pub(crate) struct ItemsRev<'a> { diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index b953f51c..7c89869b 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{slice_longevity, Cursor, State}; +use super::{Cursor, State, slice_longevity}; use crate::keyval::Key; pub(crate) struct Keys<'a> { diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index acf78d88..51561e5c 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{slice_longevity, Cursor, 
State}; +use super::{Cursor, State, slice_longevity}; use crate::keyval::Key; pub(crate) struct KeysRev<'a> { diff --git a/src/database/tests.rs b/src/database/tests.rs index 594170e8..140bc56d 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -4,14 +4,13 @@ use std::fmt::Debug; use conduwuit::{ arrayvec::ArrayVec, - ruma::{serde::Raw, EventId, RoomId, UserId}, + ruma::{EventId, RoomId, UserId, serde::Raw}, }; use serde::Serialize; use crate::{ - de, ser, - ser::{serialize_to_vec, Json}, - Ignore, Interfix, + Ignore, Interfix, de, ser, + ser::{Json, serialize_to_vec}, }; #[test] diff --git a/src/database/watchers.rs b/src/database/watchers.rs index 9ce6f74c..be814f8c 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, HashMap}, + collections::{HashMap, hash_map}, future::Future, pin::Pin, sync::RwLock, diff --git a/src/macros/admin.rs b/src/macros/admin.rs index e35bd586..bf1586a0 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -1,10 +1,10 @@ use itertools::Itertools; use proc_macro::{Span, TokenStream}; use proc_macro2::TokenStream as TokenStream2; -use quote::{quote, ToTokens}; -use syn::{parse_quote, Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant}; +use quote::{ToTokens, quote}; +use syn::{Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant, parse_quote}; -use crate::{utils::camel_to_snake_string, Result}; +use crate::{Result, utils::camel_to_snake_string}; pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { let attr: Attribute = parse_quote! 
{ diff --git a/src/macros/cargo.rs b/src/macros/cargo.rs index cd36658e..a452c672 100644 --- a/src/macros/cargo.rs +++ b/src/macros/cargo.rs @@ -4,7 +4,7 @@ use proc_macro::{Span, TokenStream}; use quote::quote; use syn::{Error, ItemConst, Meta}; -use crate::{utils, Result}; +use crate::{Result, utils}; pub(super) fn manifest(item: ItemConst, args: &[Meta]) -> Result { let member = utils::get_named_string(args, "crate"); diff --git a/src/macros/config.rs b/src/macros/config.rs index 50feefa8..07ac1c0a 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -2,15 +2,15 @@ use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _ use proc_macro::TokenStream; use proc_macro2::{Span, TokenStream as TokenStream2}; -use quote::{quote, ToTokens}; +use quote::{ToTokens, quote}; use syn::{ - parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, - FieldsNamed, ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, + Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaList, + MetaNameValue, Type, TypePath, parse::Parser, punctuated::Punctuated, spanned::Spanned, }; use crate::{ - utils::{get_simple_settings, is_cargo_build, is_cargo_test}, Result, + utils::{get_simple_settings, is_cargo_build, is_cargo_test}, }; const UNDOCUMENTED: &str = "# This item is undocumented. 
Please contribute documentation for it."; diff --git a/src/macros/implement.rs b/src/macros/implement.rs index 8d18f243..7acc12d2 100644 --- a/src/macros/implement.rs +++ b/src/macros/implement.rs @@ -3,7 +3,7 @@ use quote::quote; use syn::{Error, ItemFn, Meta, Path}; use utils::get_named_generics; -use crate::{utils, Result}; +use crate::{Result, utils}; pub(super) fn implement(item: ItemFn, args: &[Meta]) -> Result { let generics = get_named_generics(args, "generics")?; diff --git a/src/macros/mod.rs b/src/macros/mod.rs index 1aa1e24f..31a797fe 100644 --- a/src/macros/mod.rs +++ b/src/macros/mod.rs @@ -9,8 +9,9 @@ mod utils; use proc_macro::TokenStream; use syn::{ + Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, parse::{Parse, Parser}, - parse_macro_input, Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, + parse_macro_input, }; pub(crate) type Result = std::result::Result; diff --git a/src/macros/refutable.rs b/src/macros/refutable.rs index 66e0ebc3..acfc4cd5 100644 --- a/src/macros/refutable.rs +++ b/src/macros/refutable.rs @@ -1,5 +1,5 @@ use proc_macro::{Span, TokenStream}; -use quote::{quote, ToTokens}; +use quote::{ToTokens, quote}; use syn::{FnArg::Typed, Ident, ItemFn, Meta, Pat, PatIdent, PatType, Stmt}; use crate::Result; @@ -20,7 +20,7 @@ pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result let variant = &pat.path; let fields = &pat.fields; - let Some(Typed(PatType { ref mut pat, .. })) = sig.inputs.get_mut(i) else { + let Some(Typed(PatType { pat, .. 
})) = sig.inputs.get_mut(i) else { continue; }; diff --git a/src/macros/utils.rs b/src/macros/utils.rs index af2519a7..a45e5ecc 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use syn::{parse_str, Expr, ExprLit, Generics, Lit, Meta, MetaNameValue}; +use syn::{Expr, ExprLit, Generics, Lit, Meta, MetaNameValue, parse_str}; use crate::Result; diff --git a/src/main/clap.rs b/src/main/clap.rs index 2bb6f3f2..c7f33bfe 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -4,10 +4,10 @@ use std::path::PathBuf; use clap::{ArgAction, Parser}; use conduwuit::{ + Err, Result, config::{Figment, FigmentValue}, err, toml, utils::available_parallelism, - Err, Result, }; /// Commandline arguments diff --git a/src/main/logging.rs b/src/main/logging.rs index 35e482de..7ce86d56 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -1,13 +1,13 @@ use std::sync::Arc; use conduwuit::{ + Result, config::Config, debug_warn, err, - log::{capture, fmt_span, ConsoleFormat, ConsoleWriter, LogLevelReloadHandles}, + log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span}, result::UnwrapOrErr, - Result, }; -use tracing_subscriber::{fmt, layer::SubscriberExt, reload, EnvFilter, Layer, Registry}; +use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload}; #[cfg(feature = "perf_measurements")] pub(crate) type TracingFlameGuard = diff --git a/src/main/main.rs b/src/main/main.rs index dacc2a2e..2bfc3c06 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -9,9 +9,9 @@ mod signal; extern crate conduwuit_core as conduwuit; -use std::sync::{atomic::Ordering, Arc}; +use std::sync::{Arc, atomic::Ordering}; -use conduwuit::{debug_info, error, rustc_flags_capture, Error, Result}; +use conduwuit::{Error, Result, debug_info, error, rustc_flags_capture}; use server::Server; rustc_flags_capture! 
{} diff --git a/src/main/mods.rs b/src/main/mods.rs index 9ab36e6c..6dc79b2f 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -6,10 +6,10 @@ extern crate conduwuit_service; use std::{ future::Future, pin::Pin, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; -use conduwuit::{debug, error, mods, Error, Result}; +use conduwuit::{Error, Result, debug, error, mods}; use conduwuit_service::Services; use crate::Server; diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 474b373b..b3174e9c 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,8 +1,8 @@ use std::{ iter::once, sync::{ - atomic::{AtomicUsize, Ordering}, OnceLock, + atomic::{AtomicUsize, Ordering}, }, thread, time::Duration, @@ -11,9 +11,8 @@ use std::{ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use conduwuit::result::LogDebugErr; use conduwuit::{ - is_true, + Result, is_true, utils::sys::compute::{nth_core_available, set_affinity}, - Result, }; use tokio::runtime::Builder; diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 02835ec8..1ea1f3ae 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -7,11 +7,11 @@ use std::{ use conduwuit::{config::Config, debug, trace}; use sentry::{ - types::{ - protocol::v7::{Context, Event}, - Dsn, - }, Breadcrumb, ClientOptions, Level, + types::{ + Dsn, + protocol::v7::{Context, Event}, + }, }; static SEND_PANIC: OnceLock = OnceLock::new(); diff --git a/src/main/server.rs b/src/main/server.rs index 7376b2fc..44ca69b0 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,11 +1,11 @@ use std::{path::PathBuf, sync::Arc}; use conduwuit::{ + Error, Result, config::Config, info, log::Log, utils::{stream, sys}, - Error, Result, }; use tokio::{runtime, sync::Mutex}; diff --git a/src/router/layers.rs b/src/router/layers.rs index 7ebec16e..88e6a8d5 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -1,16 +1,16 @@ use std::{any::Any, sync::Arc, time::Duration}; use axum::{ - 
extract::{DefaultBodyLimit, MatchedPath}, Router, + extract::{DefaultBodyLimit, MatchedPath}, }; use axum_client_ip::SecureClientIpSource; -use conduwuit::{debug, error, Result, Server}; +use conduwuit::{Result, Server, debug, error}; use conduwuit_api::router::state::Guard; use conduwuit_service::Services; use http::{ - header::{self, HeaderName}, HeaderValue, Method, StatusCode, + header::{self, HeaderName}, }; use tower::ServiceBuilder; use tower_http::{ @@ -176,12 +176,12 @@ fn catch_panic( .requests_panic .fetch_add(1, std::sync::atomic::Ordering::Release); - let details = if let Some(s) = err.downcast_ref::() { - s.clone() - } else if let Some(s) = err.downcast_ref::<&str>() { - (*s).to_owned() - } else { - "Unknown internal server error occurred.".to_owned() + let details = match err.downcast_ref::() { + | Some(s) => s.clone(), + | _ => match err.downcast_ref::<&str>() { + | Some(s) => (*s).to_owned(), + | _ => "Unknown internal server error occurred.".to_owned(), + }, }; error!("{details:#}"); diff --git a/src/router/request.rs b/src/router/request.rs index b6c22d45..00769b3f 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,6 +1,6 @@ use std::{ fmt::Debug, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, time::Duration, }; @@ -8,7 +8,7 @@ use axum::{ extract::State, response::{IntoResponse, Response}, }; -use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; +use conduwuit::{Result, debug, debug_error, debug_warn, err, error, trace}; use conduwuit_service::Services; use futures::FutureExt; use http::{Method, StatusCode, Uri}; diff --git a/src/router/router.rs b/src/router/router.rs index b3531418..0f95b924 100644 --- a/src/router/router.rs +++ b/src/router/router.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use axum::{response::IntoResponse, routing::get, Router}; +use axum::{Router, response::IntoResponse, routing::get}; use conduwuit::Error; use conduwuit_api::router::{state, state::Guard}; use 
conduwuit_service::Services; diff --git a/src/router/run.rs b/src/router/run.rs index 024cb813..31789626 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -3,12 +3,12 @@ extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; use std::{ - sync::{atomic::Ordering, Arc, Weak}, + sync::{Arc, Weak, atomic::Ordering}, time::Duration, }; use axum_server::Handle as ServerHandle; -use conduwuit::{debug, debug_error, debug_info, error, info, Error, Result, Server}; +use conduwuit::{Error, Result, Server, debug, debug_error, debug_info, error, info}; use futures::FutureExt; use service::Services; use tokio::{ diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs index 5c822f2b..2399edf0 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -6,7 +6,7 @@ mod unix; use std::sync::Arc; use axum_server::Handle as ServerHandle; -use conduwuit::{err, Result}; +use conduwuit::{Result, err}; use conduwuit_service::Services; use tokio::sync::broadcast; diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 535282b9..6db7e138 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -1,11 +1,11 @@ use std::{ net::SocketAddr, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; use axum::Router; -use axum_server::{bind, Handle as ServerHandle}; -use conduwuit::{debug_info, info, Result, Server}; +use axum_server::{Handle as ServerHandle, bind}; +use conduwuit::{Result, Server, debug_info, info}; use tokio::task::JoinSet; pub(super) async fn serve( diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index ab1a9371..dd46ab53 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -3,10 +3,10 @@ use std::{net::SocketAddr, sync::Arc}; use axum::Router; use axum_server::Handle as ServerHandle; use axum_server_dual_protocol::{ - axum_server::{bind_rustls, tls_rustls::RustlsConfig}, ServerExt, + axum_server::{bind_rustls, tls_rustls::RustlsConfig}, }; -use 
conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; use tokio::task::JoinSet; use tracing::{debug, info, warn}; diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index 6a030c30..2af17274 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -4,15 +4,15 @@ use std::{ net::{self, IpAddr, Ipv4Addr}, os::fd::AsRawFd, path::Path, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; use axum::{ - extract::{connect_info::IntoMakeServiceWithConnectInfo, Request}, Router, + extract::{Request, connect_info::IntoMakeServiceWithConnectInfo}, }; use conduwuit::{ - debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server, + Err, Result, Server, debug, debug_error, info, result::UnwrapInfallible, trace, warn, }; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ @@ -21,10 +21,10 @@ use hyper_util::{ }; use tokio::{ fs, - net::{unix::SocketAddr, UnixListener, UnixStream}, + net::{UnixListener, UnixStream, unix::SocketAddr}, sync::broadcast::{self}, task::JoinSet, - time::{sleep, Duration}, + time::{Duration, sleep}, }; use tower::{Service, ServiceExt}; diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 5a943f88..453051be 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,23 +1,22 @@ use std::sync::Arc; use conduwuit::{ - err, implement, - utils::{result::LogErr, stream::TryIgnore, ReadyExt}, - Err, Result, + Err, Result, err, implement, + utils::{ReadyExt, result::LogErr, stream::TryIgnore}, }; use database::{Deserialized, Handle, Ignore, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ + RoomId, UserId, events::{ AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, - RoomId, UserId, }; use serde::Deserialize; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service 
{ services: Services, diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index 59b9a31b..02f41303 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -5,14 +5,14 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{debug, defer, error, log, log::is_systemd_mode, Server}; +use conduwuit::{Server, debug, defer, error, log, log::is_systemd_mode}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; use termimad::MadSkin; use tokio::task::JoinHandle; -use crate::{admin, Dep}; +use crate::{Dep, admin}; pub struct Console { server: Arc, @@ -221,7 +221,7 @@ pub fn print(markdown: &str) { } fn configure_output_err(mut output: MadSkin) -> MadSkin { - use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(196), Color::AnsiValue(234)); output.inline_code = code_style.clone(); @@ -236,7 +236,7 @@ fn configure_output_err(mut output: MadSkin) -> MadSkin { } fn configure_output(mut output: MadSkin) -> MadSkin { - use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(40), Color::AnsiValue(234)); output.inline_code = code_style.clone(); diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 7b691fb1..7f71665a 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,7 +1,8 @@ use std::collections::BTreeMap; -use conduwuit::{pdu::PduBuilder, Result}; +use conduwuit::{Result, pdu::PduBuilder}; use ruma::{ + RoomId, RoomVersionId, events::room::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, @@ -14,7 +15,6 @@ use ruma::{ 
preview_url::RoomPreviewUrlsEventContent, topic::RoomTopicEventContent, }, - RoomId, RoomVersionId, }; use crate::Services; diff --git a/src/service/admin/execute.rs b/src/service/admin/execute.rs index 462681da..174b28ed 100644 --- a/src/service/admin/execute.rs +++ b/src/service/admin/execute.rs @@ -1,6 +1,6 @@ -use conduwuit::{debug, debug_info, error, implement, info, Err, Result}; +use conduwuit::{Err, Result, debug, debug_info, error, implement, info}; use ruma::events::room::message::RoomMessageEventContent; -use tokio::time::{sleep, Duration}; +use tokio::time::{Duration, sleep}; pub(super) const SIGNAL: &str = "SIGUSR2"; diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 3ad9283f..358ea267 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,17 +1,17 @@ use std::collections::BTreeMap; -use conduwuit::{error, implement, Result}; +use conduwuit::{Result, error, implement}; use ruma::{ + RoomId, UserId, events::{ + RoomAccountDataEventType, room::{ member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, power_levels::RoomPowerLevelsEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, }, - RoomId, UserId, }; use crate::pdu::PduBuilder; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 31b046b7..4622f10e 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -11,18 +11,18 @@ use std::{ use async_trait::async_trait; use conduwuit::{ - debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server, + Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, }; pub use create::create_admin_room; use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ - events::room::message::{Relation, RoomMessageEventContent}, OwnedEventId, OwnedRoomId, RoomId, UserId, + events::room::message::{Relation, RoomMessageEventContent}, }; use 
tokio::sync::RwLock; -use crate::{account_data, globals, rooms, rooms::state::RoomMutexGuard, Dep}; +use crate::{Dep, account_data, globals, rooms, rooms::state::RoomMutexGuard}; pub struct Service { services: Services, diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 2a54ee09..5aba0018 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -4,14 +4,14 @@ mod registration_info; use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; -use conduwuit::{err, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, err, utils::stream::TryIgnore}; use database::Map; use futures::{Future, StreamExt, TryStreamExt}; -use ruma::{api::appservice::Registration, RoomAliasId, RoomId, UserId}; +use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration}; use tokio::sync::RwLock; pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; -use crate::{sending, Dep}; +use crate::{Dep, sending}; pub struct Service { registration_info: RwLock>, diff --git a/src/service/appservice/registration_info.rs b/src/service/appservice/registration_info.rs index 9758e186..a511f58d 100644 --- a/src/service/appservice/registration_info.rs +++ b/src/service/appservice/registration_info.rs @@ -1,5 +1,5 @@ use conduwuit::Result; -use ruma::{api::appservice::Registration, UserId}; +use ruma::{UserId, api::appservice::Registration}; use super::NamespaceRegex; diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index f63d78b8..d5008491 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, time::Duration}; -use conduwuit::{err, implement, trace, Config, Result}; +use conduwuit::{Config, Result, err, implement, trace}; use either::Either; use ipaddress::IPAddress; use reqwest::redirect; @@ -172,10 +172,9 @@ fn base(config: &Config) -> Result { builder = builder.no_zstd(); }; - if let Some(proxy) = 
config.proxy.to_proxy()? { - Ok(builder.proxy(proxy)) - } else { - Ok(builder) + match config.proxy.to_proxy()? { + | Some(proxy) => Ok(builder.proxy(proxy)), + | _ => Ok(builder), } } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index c9ac37a3..fd0d8764 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -2,8 +2,9 @@ use std::{iter, ops::Deref, path::Path, sync::Arc}; use async_trait::async_trait; use conduwuit::{ - config::{check, Config}, - error, implement, Result, Server, + Result, Server, + config::{Config, check}, + error, implement, }; pub struct Service { diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 9b2e4025..47a309a5 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -1,15 +1,15 @@ use std::sync::Arc; use async_trait::async_trait; -use conduwuit::{error, warn, Result}; +use conduwuit::{Result, error, warn}; use ruma::{ events::{ - push_rules::PushRulesEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, + GlobalAccountDataEvent, GlobalAccountDataEventType, push_rules::PushRulesEventContent, }, push::Ruleset, }; -use crate::{account_data, globals, users, Dep}; +use crate::{Dep, account_data, globals, users}; pub struct Service { services: Services, diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 3146bb8a..d254486f 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -2,20 +2,20 @@ use std::{fmt::Debug, mem}; use bytes::Bytes; use conduwuit::{ - debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, error::inspect_debug_log, - implement, trace, utils::string::EMPTY, Err, Error, Result, + Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, + error::inspect_debug_log, implement, trace, utils::string::EMPTY, }; -use http::{header::AUTHORIZATION, HeaderValue}; +use http::{HeaderValue, header::AUTHORIZATION}; use 
ipaddress::IPAddress; use reqwest::{Client, Method, Request, Response, Url}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, api::{ - client::error::Error as RumaError, EndpointError, IncomingResponse, MatrixVersion, - OutgoingRequest, SendAccessToken, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + client::error::Error as RumaError, }, serde::Base64, server_util::authorization::XMatrix, - CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, }; use crate::resolver::actual::ActualDest; diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs index dacdb20e..ce7765ee 100644 --- a/src/service/federation/mod.rs +++ b/src/service/federation/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use conduwuit::{Result, Server}; -use crate::{client, resolver, server_keys, Dep}; +use crate::{Dep, client, resolver, server_keys}; pub struct Service { services: Services, diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 26a18607..b43b7c5f 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,6 +1,6 @@ use std::sync::{Arc, RwLock}; -use conduwuit::{utils, Result}; +use conduwuit::{Result, utils}; use database::{Database, Deserialized, Map}; pub struct Data { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 485d5020..16b3ef3c 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,7 +7,7 @@ use std::{ time::Instant, }; -use conduwuit::{error, utils::bytes::pretty, Result, Server}; +use conduwuit::{Result, Server, error, utils::bytes::pretty}; use data::Data; use regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 1165c3ed..1bf048ef 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,19 +1,18 @@ use 
std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - err, implement, + Err, Result, err, implement, utils::stream::{ReadyExt, TryIgnore}, - Err, Result, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::StreamExt; use ruma::{ + OwnedRoomId, RoomId, UserId, api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, - OwnedRoomId, RoomId, UserId, }; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { db: Data, diff --git a/src/service/manager.rs b/src/service/manager.rs index e0d885c2..3cdf5945 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,6 +1,6 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; -use conduwuit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; +use conduwuit::{Err, Error, Result, Server, debug, debug_warn, error, trace, utils::time, warn}; use futures::{FutureExt, TryFutureExt}; use tokio::{ sync::{Mutex, MutexGuard}, @@ -8,7 +8,7 @@ use tokio::{ time::sleep, }; -use crate::{service, service::Service, Services}; +use crate::{Services, service, service::Service}; pub(crate) struct Manager { manager: Mutex>>>, diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index 60ade723..9d73f5dc 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,6 +1,6 @@ #[cfg(feature = "blurhashing")] use conduwuit::config::BlurhashConfig as CoreBlurhashConfig; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use super::Service; diff --git a/src/service/media/data.rs b/src/service/media/data.rs index f48482ea..0ccd844f 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,13 +1,12 @@ use std::{sync::Arc, time::Duration}; use conduwuit::{ - debug, debug_info, err, - utils::{str_from_bytes, stream::TryIgnore, string_from_bytes, ReadyExt}, - Err, Result, + Err, Result, debug, debug_info, err, + utils::{ReadyExt, 
str_from_bytes, stream::TryIgnore, string_from_bytes}, }; use database::{Database, Interfix, Map}; use futures::StreamExt; -use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; use super::{preview::UrlPreviewData, thumbnail::Dim}; diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 8526ffcd..5fd628cd 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -8,9 +8,9 @@ use std::{ }; use conduwuit::{ - debug, debug_info, debug_warn, error, info, - utils::{stream::TryIgnore, ReadyExt}, - warn, Config, Result, + Config, Result, debug, debug_info, debug_warn, error, info, + utils::{ReadyExt, stream::TryIgnore}, + warn, }; use crate::Services; diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index f5913f43..5c26efe8 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -8,13 +8,13 @@ mod thumbnail; use std::{path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; -use base64::{engine::general_purpose, Engine as _}; +use base64::{Engine as _, engine::general_purpose}; use conduwuit::{ - debug, debug_error, debug_info, debug_warn, err, error, trace, + Err, Result, Server, debug, debug_error, debug_info, debug_warn, err, error, trace, utils::{self, MutexMap}, - warn, Err, Result, Server, + warn, }; -use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt, BufReader}, @@ -22,7 +22,7 @@ use tokio::{ use self::data::{Data, Metadata}; pub use self::thumbnail::Dim; -use crate::{client, globals, sending, Dep}; +use crate::{Dep, client, globals, sending}; #[derive(Debug)] pub struct FileMeta { @@ -105,22 +105,27 @@ impl Service { /// Deletes a file in the database and from the media directory via an MXC pub async fn delete(&self, mxc: 
&Mxc<'_>) -> Result<()> { - if let Ok(keys) = self.db.search_mxc_metadata_prefix(mxc).await { - for key in keys { - trace!(?mxc, "MXC Key: {key:?}"); - debug_info!(?mxc, "Deleting from filesystem"); + match self.db.search_mxc_metadata_prefix(mxc).await { + | Ok(keys) => { + for key in keys { + trace!(?mxc, "MXC Key: {key:?}"); + debug_info!(?mxc, "Deleting from filesystem"); - if let Err(e) = self.remove_media_file(&key).await { - debug_error!(?mxc, "Failed to remove media file: {e}"); + if let Err(e) = self.remove_media_file(&key).await { + debug_error!(?mxc, "Failed to remove media file: {e}"); + } + + debug_info!(?mxc, "Deleting from database"); + self.db.delete_file_mxc(mxc).await; } - debug_info!(?mxc, "Deleting from database"); - self.db.delete_file_mxc(mxc).await; - } - - Ok(()) - } else { - Err!(Database(error!("Failed to find any media keys for MXC {mxc} in our database."))) + Ok(()) + }, + | _ => { + Err!(Database(error!( + "Failed to find any media keys for MXC {mxc} in our database." + ))) + }, } } @@ -154,22 +159,21 @@ impl Service { /// Downloads a file. pub async fn get(&self, mxc: &Mxc<'_>) -> Result> { - if let Ok(Metadata { content_disposition, content_type, key }) = - self.db.search_file_metadata(mxc, &Dim::default()).await - { - let mut content = Vec::with_capacity(8192); - let path = self.get_media_file(&key); - BufReader::new(fs::File::open(path).await?) - .read_to_end(&mut content) - .await?; + match self.db.search_file_metadata(mxc, &Dim::default()).await { + | Ok(Metadata { content_disposition, content_type, key }) => { + let mut content = Vec::with_capacity(8192); + let path = self.get_media_file(&key); + BufReader::new(fs::File::open(path).await?) 
+ .read_to_end(&mut content) + .await?; - Ok(Some(FileMeta { - content: Some(content), - content_type, - content_disposition, - })) - } else { - Ok(None) + Ok(Some(FileMeta { + content: Some(content), + content_type, + content_disposition, + })) + }, + | _ => Ok(None), } } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index e7f76bab..17216869 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -7,7 +7,7 @@ use std::time::SystemTime; -use conduwuit::{debug, Err, Result}; +use conduwuit::{Err, Result, debug}; use conduwuit_core::implement; use ipaddress::IPAddress; use serde::Serialize; diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 72f1184e..61635011 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -1,21 +1,21 @@ use std::{fmt::Debug, time::Duration}; use conduwuit::{ - debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, - Result, + Err, Error, Result, debug_warn, err, implement, + utils::content_disposition::make_content_disposition, }; -use http::header::{HeaderValue, CONTENT_DISPOSITION, CONTENT_TYPE}; +use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE, HeaderValue}; use ruma::{ + Mxc, ServerName, UserId, api::{ + OutgoingRequest, client::{ error::ErrorKind::{NotFound, Unrecognized}, media, }, federation, federation::authenticated_media::{Content, FileOrLocation}, - OutgoingRequest, }, - Mxc, ServerName, UserId, }; use super::{Dim, FileMeta}; diff --git a/src/service/media/tests.rs b/src/service/media/tests.rs index 1d6dce30..651e0ade 100644 --- a/src/service/media/tests.rs +++ b/src/service/media/tests.rs @@ -5,7 +5,7 @@ async fn long_file_names_works() { use std::path::PathBuf; - use base64::{engine::general_purpose, Engine as _}; + use base64::{Engine as _, engine::general_purpose}; use super::*; diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 7350b3a1..e5a98774 100644 
--- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -7,14 +7,14 @@ use std::{cmp, num::Saturating as Sat}; -use conduwuit::{checked, err, implement, Result}; -use ruma::{http_headers::ContentDisposition, media::Method, Mxc, UInt, UserId}; +use conduwuit::{Result, checked, err, implement}; +use ruma::{Mxc, UInt, UserId, http_headers::ContentDisposition, media::Method}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt}, }; -use super::{data::Metadata, FileMeta}; +use super::{FileMeta, data::Metadata}; /// Dimension specification for a thumbnail. #[derive(Debug)] @@ -65,12 +65,12 @@ impl super::Service { // 0, 0 because that's the original file let dim = dim.normalized(); - if let Ok(metadata) = self.db.search_file_metadata(mxc, &dim).await { - self.get_thumbnail_saved(metadata).await - } else if let Ok(metadata) = self.db.search_file_metadata(mxc, &Dim::default()).await { - self.get_thumbnail_generate(mxc, &dim, metadata).await - } else { - Ok(None) + match self.db.search_file_metadata(mxc, &dim).await { + | Ok(metadata) => self.get_thumbnail_saved(metadata).await, + | _ => match self.db.search_file_metadata(mxc, &Dim::default()).await { + | Ok(metadata) => self.get_thumbnail_generate(mxc, &dim, metadata).await, + | _ => Ok(None), + }, } } } diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 69b1be4e..512a7867 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -1,25 +1,25 @@ use std::cmp; use conduwuit::{ - debug, debug_info, debug_warn, error, info, + Err, Result, debug, debug_info, debug_warn, error, info, result::NotFound, utils::{ - stream::{TryExpect, TryIgnore}, IterStream, ReadyExt, + stream::{TryExpect, TryIgnore}, }, - warn, Err, Result, + warn, }; use futures::{FutureExt, StreamExt}; use itertools::Itertools; use ruma::{ + OwnedUserId, RoomId, UserId, events::{ - push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType, + GlobalAccountDataEventType, 
push_rules::PushRulesEvent, room::member::MembershipState, }, push::Ruleset, - OwnedUserId, RoomId, UserId, }; -use crate::{media, Services}; +use crate::{Services, media}; /// The current schema version. /// - If database is opened at greater version we reject with error. The diff --git a/src/service/mod.rs b/src/service/mod.rs index 71bd0eb4..0bde0255 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -30,7 +30,7 @@ pub mod users; extern crate conduwuit_core as conduwuit; extern crate conduwuit_database as database; -pub use conduwuit::{pdu, PduBuilder, PduCount, PduEvent}; +pub use conduwuit::{PduBuilder, PduCount, PduEvent, pdu}; pub(crate) use service::{Args, Dep, Service}; pub use crate::services::Services; diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 4ec0a7ee..d7ef5175 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -1,16 +1,15 @@ use std::sync::Arc; use conduwuit::{ - debug_warn, utils, - utils::{stream::TryIgnore, ReadyExt}, - Result, + Result, debug_warn, utils, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Json, Map}; use futures::Stream; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, UInt, UserId}; +use ruma::{UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; use super::Presence; -use crate::{globals, users, Dep}; +use crate::{Dep, globals, users}; pub(crate) struct Data { presenceid_presence: Arc, diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index eb4105e5..8f646be6 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -5,16 +5,16 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; use conduwuit::{ - checked, debug, debug_warn, error, result::LogErr, trace, Error, Result, Server, + Error, Result, Server, checked, debug, debug_warn, error, result::LogErr, trace, }; use database::Database; -use futures::{stream::FuturesUnordered, Stream, 
StreamExt, TryFutureExt}; +use futures::{Stream, StreamExt, TryFutureExt, stream::FuturesUnordered}; use loole::{Receiver, Sender}; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; +use ruma::{OwnedUserId, UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; use tokio::time::sleep; use self::{data::Data, presence::Presence}; -use crate::{globals, users, Dep}; +use crate::{Dep, globals, users}; pub struct Service { timer_channel: (Sender, Receiver), diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index b322dfb4..3357bd61 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -1,8 +1,8 @@ -use conduwuit::{utils, Error, Result}; +use conduwuit::{Error, Result, utils}; use ruma::{ + UInt, UserId, events::presence::{PresenceEvent, PresenceEventContent}, presence::PresenceState, - UInt, UserId, }; use serde::{Deserialize, Serialize}; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 43d60c08..2b269b3d 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -2,34 +2,35 @@ use std::{fmt::Debug, mem, sync::Arc}; use bytes::BytesMut; use conduwuit::{ - debug_warn, err, trace, + Err, PduEvent, Result, debug_warn, err, trace, utils::{stream::TryIgnore, string_from_bytes}, - warn, Err, PduEvent, Result, + warn, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ + RoomId, UInt, UserId, api::{ - client::push::{set_pusher, Pusher, PusherKind}, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + client::push::{Pusher, PusherKind, set_pusher}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ - room::power_levels::RoomPowerLevelsEventContent, 
AnySyncTimelineEvent, StateEventType, - TimelineEventType, + AnySyncTimelineEvent, StateEventType, TimelineEventType, + room::power_levels::RoomPowerLevelsEventContent, }, push::{ Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak, }, serde::Raw, - uint, RoomId, UInt, UserId, + uint, }; -use crate::{client, globals, rooms, sending, users, Dep}; +use crate::{Dep, client, globals, rooms, sending, users}; pub struct Service { db: Data, diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 66854764..8860d0a0 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -3,7 +3,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use conduwuit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; +use conduwuit::{Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace}; use futures::{FutureExt, TryFutureExt}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; @@ -11,7 +11,7 @@ use ruma::ServerName; use super::{ cache::{CachedDest, CachedOverride, MAX_IPS}, - fed::{add_port_to_hostname, get_ip_with_port, FedDest, PortString}, + fed::{FedDest, PortString, add_port_to_hostname, get_ip_with_port}, }; #[derive(Clone, Debug)] @@ -71,12 +71,16 @@ impl super::Service { | None => if let Some(pos) = dest.as_str().find(':') { self.actual_dest_2(dest, cache, pos).await? - } else if let Some(delegated) = self.request_well_known(dest.as_str()).await? { - self.actual_dest_3(&mut host, cache, delegated).await? - } else if let Some(overrider) = self.query_srv_record(dest.as_str()).await? { - self.actual_dest_4(&host, cache, overrider).await? } else { - self.actual_dest_5(dest, cache).await? + match self.request_well_known(dest.as_str()).await? { + | Some(delegated) => + self.actual_dest_3(&mut host, cache, delegated).await?, + | _ => match self.query_srv_record(dest.as_str()).await? 
{ + | Some(overrider) => + self.actual_dest_4(&host, cache, overrider).await?, + | _ => self.actual_dest_5(dest, cache).await?, + }, + } }, }; @@ -136,10 +140,10 @@ impl super::Service { self.actual_dest_3_2(cache, delegated, pos).await } else { trace!("Delegated hostname has no port in this branch"); - if let Some(overrider) = self.query_srv_record(&delegated).await? { - self.actual_dest_3_3(cache, delegated, overrider).await - } else { - self.actual_dest_3_4(cache, delegated).await + match self.query_srv_record(&delegated).await? { + | Some(overrider) => + self.actual_dest_3_3(cache, delegated, overrider).await, + | _ => self.actual_dest_3_4(cache, delegated).await, } }, } diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 7b4f104d..6b05c00c 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,10 +1,10 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; use conduwuit::{ + Result, arrayvec::ArrayVec, at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, - Result, }; use database::{Cbor, Deserialized, Map}; use futures::{Stream, StreamExt}; @@ -96,7 +96,7 @@ pub fn destinations(&self) -> impl Stream + Se self.destinations .stream() .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) } #[implement(Cache)] @@ -104,7 +104,7 @@ pub fn overrides(&self) -> impl Stream + S self.overrides .stream() .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) } impl CachedDest { diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index ca6106e2..98ad7e60 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -1,8 +1,8 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; use futures::FutureExt; -use 
hickory_resolver::{lookup_ip::LookupIp, TokioAsyncResolver}; +use hickory_resolver::{TokioAsyncResolver, lookup_ip::LookupIp}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use super::cache::{Cache, CachedOverride}; diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 6be9d42d..2ec9c0ef 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,10 +6,10 @@ mod tests; use std::sync::Arc; -use conduwuit::{arrayvec::ArrayString, utils::MutexMap, Result, Server}; +use conduwuit::{Result, Server, arrayvec::ArrayString, utils::MutexMap}; use self::{cache::Cache, dns::Resolver}; -use crate::{client, Dep}; +use crate::{Dep, client}; pub struct Service { pub cache: Arc, diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs index 870f5eab..6e9d0e71 100644 --- a/src/service/resolver/tests.rs +++ b/src/service/resolver/tests.rs @@ -1,6 +1,6 @@ #![cfg(test)] -use super::fed::{add_port_to_hostname, get_ip_with_port, FedDest}; +use super::fed::{FedDest, add_port_to_hostname, get_ip_with_port}; #[test] fn ips_get_default_ports() { diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 17ed5e13..866e45a9 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -3,21 +3,20 @@ mod remote; use std::sync::Arc; use conduwuit::{ - err, - utils::{stream::TryIgnore, ReadyExt}, - Err, Result, Server, + Err, Result, Server, err, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ - events::{ - room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - StateEventType, - }, OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, UserId, + events::{ + StateEventType, + room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, }; -use crate::{admin, appservice, appservice::RegistrationInfo, globals, rooms, sending, 
Dep}; +use crate::{Dep, admin, appservice, appservice::RegistrationInfo, globals, rooms, sending}; pub struct Service { db: Data, diff --git a/src/service/rooms/alias/remote.rs b/src/service/rooms/alias/remote.rs index 7744bee2..60aed76d 100644 --- a/src/service/rooms/alias/remote.rs +++ b/src/service/rooms/alias/remote.rs @@ -1,8 +1,8 @@ use std::iter::once; -use conduwuit::{debug, debug_error, err, implement, Result}; +use conduwuit::{Result, debug, debug_error, err, implement}; use federation::query::get_room_information::v1::Response; -use ruma::{api::federation, OwnedRoomId, OwnedServerName, RoomAliasId, ServerName}; +use ruma::{OwnedRoomId, OwnedServerName, RoomAliasId, ServerName, api::federation}; #[implement(super::Service)] pub(super) async fn remote_resolve( diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index af8ae364..8c3588cc 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{err, utils, utils::math::usize_from_f64, Err, Result}; +use conduwuit::{Err, Result, err, utils, utils::math::usize_from_f64}; use database::Map; use lru_cache::LruCache; diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 0ff96846..0903ea75 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -8,18 +8,18 @@ use std::{ }; use conduwuit::{ - at, debug, debug_error, implement, trace, + Err, Result, at, debug, debug_error, implement, trace, utils::{ - stream::{ReadyExt, TryBroadbandExt}, IterStream, + stream::{ReadyExt, TryBroadbandExt}, }, - validated, warn, Err, Result, + validated, warn, }; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; -use crate::{rooms, rooms::short::ShortEventId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortEventId}; pub struct Service 
{ services: Services, diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 039efca7..4ea10641 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,9 +1,9 @@ use std::sync::Arc; -use conduwuit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, implement, utils::stream::TryIgnore}; use database::Map; use futures::Stream; -use ruma::{api::client::room::Visibility, RoomId}; +use ruma::{RoomId, api::client::room::Visibility}; pub struct Service { db: Data, diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index 714b6fc1..6b432a4b 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ b/src/service/rooms/event_handler/acl_check.rs @@ -1,7 +1,7 @@ -use conduwuit::{debug, implement, trace, warn, Err, Result}; +use conduwuit::{Err, Result, debug, implement, trace, warn}; use ruma::{ - events::{room::server_acl::RoomServerAclEventContent, StateEventType}, RoomId, ServerName, + events::{StateEventType, room::server_acl::RoomServerAclEventContent}, }; /// Returns Ok if the acl allows the server diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 540ebb64..80e91eff 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -1,16 +1,16 @@ use std::{ - collections::{hash_map, BTreeMap, HashSet, VecDeque}, + collections::{BTreeMap, HashSet, VecDeque, hash_map}, sync::Arc, time::Instant, }; use conduwuit::{ - debug, debug_error, debug_warn, implement, pdu, trace, - utils::continue_exponential_backoff_secs, warn, PduEvent, + PduEvent, debug, debug_error, debug_warn, implement, pdu, trace, + utils::continue_exponential_backoff_secs, warn, }; use futures::TryFutureExt; use ruma::{ - api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, 
RoomId, ServerName, + CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, }; use super::get_room_version_id; @@ -138,12 +138,15 @@ pub(super) async fn fetch_and_handle_outliers<'a>( .and_then(CanonicalJsonValue::as_array) { for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value::(auth_event.clone().into()) - { - todo_auth_events.push_back(auth_event); - } else { - warn!("Auth event id is not valid"); + match serde_json::from_value::( + auth_event.clone().into(), + ) { + | Ok(auth_event) => { + todo_auth_events.push_back(auth_event); + }, + | _ => { + warn!("Auth event id is not valid"); + }, } } } else { diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 5a38f7fe..e817430b 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -4,14 +4,13 @@ use std::{ }; use conduwuit::{ - debug_warn, err, implement, + PduEvent, Result, debug_warn, err, implement, state_res::{self}, - PduEvent, Result, }; -use futures::{future, FutureExt}; +use futures::{FutureExt, future}; use ruma::{ - int, uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, - UInt, + CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, int, + uint, }; use super::check_room_id; @@ -43,54 +42,59 @@ pub(super) async fn fetch_prev( while let Some(prev_event_id) = todo_outlier_stack.pop_front() { self.services.server.check_running()?; - if let Some((pdu, mut json_opt)) = self + match self .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id) .boxed() .await .pop() { - check_room_id(room_id, &pdu)?; + | Some((pdu, mut json_opt)) => { + check_room_id(room_id, &pdu)?; - let limit = self.services.server.config.max_fetch_prev_events; - if amount > limit { - debug_warn!("Max prev event limit reached! 
Limit: {limit}"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if json_opt.is_none() { - json_opt = self - .services - .outlier - .get_outlier_pdu_json(&prev_event_id) - .await - .ok(); - } - - if let Some(json) = json_opt { - if pdu.origin_server_ts > first_ts_in_room { - amount = amount.saturating_add(1); - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push_back(prev_prev.clone()); - } - } - - graph - .insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); - } else { - // Time based check failed + let limit = self.services.server.config.max_fetch_prev_events; + if amount > limit { + debug_warn!("Max prev event limit reached! Limit: {limit}"); graph.insert(prev_event_id.clone(), HashSet::new()); + continue; } - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed, so this was not fetched over federation + if json_opt.is_none() { + json_opt = self + .services + .outlier + .get_outlier_pdu_json(&prev_event_id) + .await + .ok(); + } + + if let Some(json) = json_opt { + if pdu.origin_server_ts > first_ts_in_room { + amount = amount.saturating_add(1); + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push_back(prev_prev.clone()); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); + } + }, + | _ => { + // Fetch and handle failed graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); + }, } } diff --git a/src/service/rooms/event_handler/fetch_state.rs 
b/src/service/rooms/event_handler/fetch_state.rs index 4f2580db..b1a4a38b 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -1,10 +1,10 @@ -use std::collections::{hash_map, HashMap}; +use std::collections::{HashMap, hash_map}; -use conduwuit::{debug, debug_warn, implement, Err, Error, PduEvent, Result}; +use conduwuit::{Err, Error, PduEvent, Result, debug, debug_warn, implement}; use futures::FutureExt; use ruma::{ - api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId, - RoomId, ServerName, + EventId, OwnedEventId, RoomId, ServerName, api::federation::event::get_room_state_ids, + events::StateEventType, }; use crate::rooms::short::ShortStateKey; diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 31c7762d..b6d3e21e 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -1,14 +1,14 @@ use std::{ - collections::{hash_map, BTreeMap}, + collections::{BTreeMap, hash_map}, time::Instant, }; -use conduwuit::{debug, debug::INFO_SPAN_LEVEL, err, implement, warn, Err, Result}; +use conduwuit::{Err, Result, debug, debug::INFO_SPAN_LEVEL, err, implement, warn}; use futures::{ - future::{try_join5, OptionFuture}, FutureExt, + future::{OptionFuture, try_join5}, }; -use ruma::{events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId}; +use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UserId, events::StateEventType}; use crate::rooms::timeline::RawPduId; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index e628c77a..974eb300 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -1,15 +1,15 @@ use std::{ - collections::{hash_map, BTreeMap, 
HashMap}, + collections::{BTreeMap, HashMap, hash_map}, sync::Arc, }; use conduwuit::{ - debug, debug_info, err, implement, state_res, trace, warn, Err, Error, PduEvent, Result, + Err, Error, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, }; -use futures::{future::ready, TryFutureExt}; +use futures::{TryFutureExt, future::ready}; use ruma::{ - api::client::error::ErrorKind, events::StateEventType, CanonicalJsonObject, - CanonicalJsonValue, EventId, RoomId, ServerName, + CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, + api::client::error::ErrorKind, events::StateEventType, }; use super::{check_room_id, get_room_version_id, to_room_version}; diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index f911f1fd..cf69a515 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -5,8 +5,8 @@ use std::{ }; use conduwuit::{ - debug, debug::INFO_SPAN_LEVEL, implement, utils::continue_exponential_backoff_secs, Err, - PduEvent, Result, + Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, implement, + utils::continue_exponential_backoff_secs, }; use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 5960c734..e9e79ce4 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -18,16 +18,16 @@ use std::{ }; use conduwuit::{ - utils::{MutexMap, TryFutureExtExt}, Err, PduEvent, Result, RoomVersion, Server, + utils::{MutexMap, TryFutureExtExt}, }; use futures::TryFutureExt; use ruma::{ - events::room::create::RoomCreateEventContent, OwnedEventId, OwnedRoomId, RoomId, - RoomVersionId, + OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, + events::room::create::RoomCreateEventContent, }; -use crate::{globals, rooms, sending, server_keys, 
Dep}; +use crate::{Dep, globals, rooms, sending, server_keys}; pub struct Service { pub mutex_federation: RoomMutexMap, diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 9b130763..a49fc541 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,4 +1,4 @@ -use conduwuit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; +use conduwuit::{Result, err, implement, pdu::gen_event_id_canonical_json, result::FlatOk}; use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 37d47d47..9033c3a8 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,13 +5,12 @@ use std::{ }; use conduwuit::{ - err, implement, + Error, Result, err, implement, state_res::{self, StateMap}, trace, - utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, - Error, Result, + utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width}, }; -use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::state_compressor::CompressedState; @@ -93,11 +92,7 @@ pub async fn resolve_state( let new_room_state: CompressedState = self .services .state_compressor - .compress_state_events( - state_events - .iter() - .map(|(ref ssk, eid)| (ssk, (*eid).borrow())), - ) + .compress_state_events(state_events.iter().map(|(ssk, eid)| (ssk, (*eid).borrow()))) .collect() .await; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs 
b/src/service/rooms/event_handler/state_at_incoming.rs index 2eb6013a..8326f9da 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -6,11 +6,10 @@ use std::{ }; use conduwuit::{ - debug, err, implement, trace, + PduEvent, Result, StateMap, debug, err, implement, trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, - PduEvent, Result, StateMap, }; -use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::short::ShortStateHash; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 385d2142..c1a1c3eb 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,12 +1,13 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - debug, debug_info, err, implement, state_res, trace, + Err, EventTypeExt, PduEvent, Result, StateKey, debug, debug_info, err, implement, state_res, + trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, EventTypeExt, PduEvent, Result, StateKey, + warn, }; -use futures::{future::ready, FutureExt, StreamExt}; -use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; +use futures::{FutureExt, StreamExt, future::ready}; +use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType}; use super::{get_room_version_id, to_room_version}; use crate::rooms::{ diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a6e00271..346314d1 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -3,13 +3,12 @@ use std::{collections::HashSet, sync::Arc}; use 
conduwuit::{ - implement, - utils::{stream::TryIgnore, IterStream, ReadyExt}, - Result, + Result, implement, + utils::{IterStream, ReadyExt, stream::TryIgnore}, }; use database::{Database, Deserialized, Handle, Interfix, Map, Qry}; -use futures::{pin_mut, Stream, StreamExt}; -use ruma::{api::client::filter::LazyLoadOptions, DeviceId, OwnedUserId, RoomId, UserId}; +use futures::{Stream, StreamExt, pin_mut}; +use ruma::{DeviceId, OwnedUserId, RoomId, UserId, api::client::filter::LazyLoadOptions}; pub struct Service { db: Data, diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 6d5a85a0..54eef47d 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,11 +1,11 @@ use std::sync::Arc; -use conduwuit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, implement, utils::stream::TryIgnore}; use database::Map; use futures::{Stream, StreamExt}; use ruma::RoomId; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { db: Data, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 9cd3d805..a1b0263a 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 26e11ded..f0beab5a 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,25 +1,25 @@ use std::{mem::size_of, sync::Arc}; use conduwuit::{ + PduCount, PduEvent, arrayvec::ArrayVec, result::LogErr, utils::{ + ReadyExt, stream::{TryIgnore, WidebandExt}, - u64_from_u8, ReadyExt, + u64_from_u8, }, - PduCount, PduEvent, }; use database::Map; use futures::{Stream, StreamExt}; -use ruma::{api::Direction, EventId, RoomId, 
UserId}; +use ruma::{EventId, RoomId, UserId, api::Direction}; use crate::{ - rooms, + Dep, rooms, rooms::{ short::{ShortEventId, ShortRoomId}, timeline::{PduId, RawPduId}, }, - Dep, }; pub(super) struct Data { diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index ba289f9b..18221c2d 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,11 +2,11 @@ mod data; use std::sync::Arc; use conduwuit::{PduCount, Result}; -use futures::{future::try_join, StreamExt}; -use ruma::{api::Direction, EventId, RoomId, UserId}; +use futures::{StreamExt, future::try_join}; +use ruma::{EventId, RoomId, UserId, api::Direction}; use self::data::{Data, PdusIterItem}; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { services: Services, @@ -81,7 +81,7 @@ impl Service { .collect(); 'limit: while let Some(stack_pdu) = stack.pop() { - let target = match stack_pdu.0 .0 { + let target = match stack_pdu.0.0 { | PduCount::Normal(c) => c, // TODO: Support backfilled relations | PduCount::Backfilled(_) => 0, // This will result in an empty iterator diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index c21ad36c..62f87948 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -1,18 +1,18 @@ use std::sync::Arc; use conduwuit::{ - utils::{stream::TryIgnore, ReadyExt}, Result, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Json, Map}; use futures::{Stream, StreamExt}; use ruma::{ - events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent}, - serde::Raw, CanonicalJsonObject, RoomId, UserId, + events::{AnySyncEphemeralRoomEvent, receipt::ReceiptEvent}, + serde::Raw, }; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub(super) struct Data { roomuserid_privateread: Arc, diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 
2bc21355..d6239aee 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,19 +2,19 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; -use futures::{try_join, Stream, TryFutureExt}; +use conduwuit::{PduCount, PduId, RawPduId, Result, debug, err, warn}; +use futures::{Stream, TryFutureExt, try_join}; use ruma::{ + OwnedEventId, OwnedUserId, RoomId, UserId, events::{ - receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, + receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, }, serde::Raw, - OwnedEventId, OwnedUserId, RoomId, UserId, }; use self::data::{Data, ReceiptItem}; -use crate::{rooms, sending, Dep}; +use crate::{Dep, rooms, sending}; pub struct Service { services: Services, @@ -145,12 +145,14 @@ where let receipt = serde_json::from_str::>( value.json().get(), ); - if let Ok(value) = receipt { - for (event, receipt) in value.content { - json.insert(event, receipt); - } - } else { - debug!("failed to parse receipt: {:?}", receipt); + match receipt { + | Ok(value) => + for (event, receipt) in value.content { + json.insert(event, receipt); + }, + | _ => { + debug!("failed to parse receipt: {:?}", receipt); + }, } } let content = ReceiptEventContent::from_iter(json); diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index cc015237..4100dd75 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,26 +1,24 @@ use std::sync::Arc; use conduwuit::{ + PduCount, PduEvent, Result, arrayvec::ArrayVec, implement, utils::{ - set, + ArrayVecExt, IterStream, ReadyExt, set, stream::{TryIgnore, WidebandExt}, - ArrayVecExt, IterStream, ReadyExt, }, - PduCount, PduEvent, Result, }; -use database::{keyval::Val, Map}; +use database::{Map, keyval::Val}; use futures::{Stream, StreamExt}; -use 
ruma::{api::client::search::search_events::v3::Criteria, RoomId, UserId}; +use ruma::{RoomId, UserId, api::client::search::search_events::v3::Criteria}; use crate::{ - rooms, + Dep, rooms, rooms::{ short::ShortRoomId, timeline::{PduId, RawPduId}, }, - Dep, }; pub struct Service { @@ -140,7 +138,7 @@ pub async fn search_pdus<'a>( pub async fn search_pdu_ids( &self, query: &RoomQuery<'_>, -) -> Result + Send + '_> { +) -> Result + Send + '_ + use<'_>> { let shortroomid = self.services.short.get_shortroomid(query.room_id).await?; let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await; @@ -187,7 +185,7 @@ fn search_pdu_ids_query_word( &self, shortroomid: ShortRoomId, word: &str, -) -> impl Stream> + Send + '_ { +) -> impl Stream> + Send + '_ + use<'_> { // rustc says const'ing this not yet stable let end_id: RawPduId = PduId { shortroomid, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 8728325a..3980617e 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,13 +1,13 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; -use conduwuit::{err, implement, utils, utils::IterStream, Result, StateKey}; +use conduwuit::{Result, StateKey, err, implement, utils, utils::IterStream}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; -use ruma::{events::StateEventType, EventId, RoomId}; +use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { db: Data, diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 268d6dfe..52e7d2be 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -5,18 +5,18 @@ mod tests; use std::sync::Arc; use conduwuit::{ - implement, + Err, Error, Result, implement, utils::{ + IterStream, 
future::BoolExt, math::usize_from_f64, stream::{BroadbandExt, ReadyExt}, - IterStream, }, - Err, Error, Result, }; -use futures::{pin_mut, stream::FuturesUnordered, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, pin_mut, stream::FuturesUnordered}; use lru_cache::LruCache; use ruma::{ + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, api::{ client::space::SpaceHierarchyRoomsChunk, federation::{ @@ -25,18 +25,17 @@ use ruma::{ }, }, events::{ + StateEventType, room::join_rules::{JoinRule, RoomJoinRulesEventContent}, space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, - StateEventType, }, serde::Raw, space::SpaceRoomJoinRule, - OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{conduwuit::utils::TryFutureExtExt, rooms, sending, Dep}; +use crate::{Dep, conduwuit::utils::TryFutureExtExt, rooms, sending}; pub struct Service { services: Services, @@ -440,8 +439,9 @@ async fn is_accessible_child( pub fn get_parent_children_via( parent: &SpaceHierarchyParentSummary, suggested_only: bool, -) -> impl DoubleEndedIterator)> + Send + '_ -{ +) -> impl DoubleEndedIterator + use<>)> ++ Send ++ '_ { parent .children_state .iter() diff --git a/src/service/rooms/spaces/pagination_token.rs b/src/service/rooms/spaces/pagination_token.rs index 8f019e8d..d97b7a2f 100644 --- a/src/service/rooms/spaces/pagination_token.rs +++ b/src/service/rooms/spaces/pagination_token.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{Error, Result}; -use ruma::{api::client::error::ErrorKind, UInt}; +use ruma::{UInt, api::client::error::ErrorKind}; use crate::rooms::short::ShortRoomId; diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs index dd6c2f35..d0395fdd 100644 --- a/src/service/rooms/spaces/tests.rs +++ b/src/service/rooms/spaces/tests.rs @@ -1,13 +1,13 
@@ use std::str::FromStr; use ruma::{ + UInt, api::federation::space::{SpaceHierarchyParentSummary, SpaceHierarchyParentSummaryInit}, owned_room_id, owned_server_name, space::SpaceRoomJoinRule, - UInt, }; -use crate::rooms::spaces::{get_parent_children_via, PaginationToken}; +use crate::rooms::spaces::{PaginationToken, get_parent_children_via}; #[test] fn get_summary_children() { diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index d538de3c..8683a3be 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,36 +1,34 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ - err, + PduEvent, Result, err, result::FlatOk, state_res::{self, StateMap}, utils::{ - calculate_hash, + IterStream, MutexMap, MutexMapGuard, ReadyExt, calculate_hash, stream::{BroadbandExt, TryIgnore}, - IterStream, MutexMap, MutexMapGuard, ReadyExt, }, - warn, PduEvent, Result, + warn, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{ - future::join_all, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, + FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future::join_all, pin_mut, }; use ruma::{ + EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, events::{ - room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, AnyStrippedStateEvent, StateEventType, TimelineEventType, + room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, }, serde::Raw, - EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; use crate::{ - globals, rooms, + Dep, globals, rooms, rooms::{ short::{ShortEventId, ShortStateHash}, - state_compressor::{parse_compressed_state_event, CompressedState}, + state_compressor::{CompressedState, parse_compressed_state_event}, }, - Dep, }; pub struct Service { @@ -192,13 +190,13 @@ impl Service { .await; if !already_existed { - let states_parents = if let Ok(p) = previous_shortstatehash { - 
self.services - .state_compressor - .load_shortstatehash_info(p) - .await? - } else { - Vec::new() + let states_parents = match previous_shortstatehash { + | Ok(p) => + self.services + .state_compressor + .load_shortstatehash_info(p) + .await?, + | _ => Vec::new(), }; let (statediffnew, statediffremoved) = @@ -256,63 +254,65 @@ impl Service { .aput::(shorteventid, p); } - if let Some(state_key) = &new_pdu.state_key { - let states_parents = if let Ok(p) = previous_shortstatehash { - self.services + match &new_pdu.state_key { + | Some(state_key) => { + let states_parents = match previous_shortstatehash { + | Ok(p) => + self.services + .state_compressor + .load_shortstatehash_info(p) + .await?, + | _ => Vec::new(), + }; + + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) + .await; + + let new = self + .services .state_compressor - .load_shortstatehash_info(p) - .await? - } else { - Vec::new() - }; + .compress_state_event(shortstatekey, &new_pdu.event_id) + .await; - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) - .await; + let replaces = states_parents + .last() + .map(|info| { + info.full_state + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); - let new = self - .services - .state_compressor - .compress_state_event(shortstatekey, &new_pdu.event_id) - .await; + if Some(&new) == replaces { + return Ok(previous_shortstatehash.expect("must exist")); + } - let replaces = states_parents - .last() - .map(|info| { - info.full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); + // TODO: statehash with deterministic inputs + let shortstatehash = self.services.globals.next_count()?; - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } + let mut statediffnew = 
CompressedState::new(); + statediffnew.insert(new); - // TODO: statehash with deterministic inputs - let shortstatehash = self.services.globals.next_count()?; + let mut statediffremoved = CompressedState::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(*replaces); + } - let mut statediffnew = CompressedState::new(); - statediffnew.insert(new); + self.services.state_compressor.save_state_from_diff( + shortstatehash, + Arc::new(statediffnew), + Arc::new(statediffremoved), + 2, + states_parents, + )?; - let mut statediffremoved = CompressedState::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.services.state_compressor.save_state_from_diff( - shortstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) + Ok(shortstatehash) + }, + | _ => + Ok(previous_shortstatehash.expect("first event in room must be a state event")), } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index b7952ce6..7004e35a 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -9,14 +9,16 @@ use std::{ }; use conduwuit::{ - err, utils, - utils::math::{usize_from_f64, Expected}, - Result, + Result, err, utils, + utils::math::{Expected, usize_from_f64}, }; use database::Map; use lru_cache::LruCache; use ruma::{ + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, UserId, events::{ + StateEventType, room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, @@ -29,15 +31,12 @@ use ruma::{ name::RoomNameEventContent, topic::RoomTopicEventContent, }, - StateEventType, }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, - 
OwnedUserId, RoomId, UserId, }; -use crate::{rooms, rooms::short::ShortStateHash, Dep}; +use crate::{Dep, rooms, rooms::short::ShortStateHash}; pub struct Service { pub server_visibility_cache: Mutex>, diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index e3ec55fe..ff26b33a 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,8 +1,8 @@ use std::borrow::Borrow; -use conduwuit::{err, implement, PduEvent, Result, StateKey}; +use conduwuit::{PduEvent, Result, StateKey, err, implement}; use futures::{Stream, StreamExt, TryFutureExt}; -use ruma::{events::StateEventType, EventId, RoomId}; +use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 4d834227..2e8f3325 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,11 +1,11 @@ use conduwuit::{error, implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ - events::{ - room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - StateEventType, - }, EventId, RoomId, ServerName, + events::{ + StateEventType, + room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + }, }; /// Whether a server is allowed to see an event through federation, based on diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index da1500cb..625defe6 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -1,27 +1,26 @@ use std::{borrow::Borrow, ops::Deref, sync::Arc}; use conduwuit::{ - at, err, implement, pair_of, + PduEvent, Result, StateKey, at, err, implement, pair_of, utils::{ 
result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, - PduEvent, Result, StateKey, }; use database::Deserialized; -use futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; use ruma::{ - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, EventId, OwnedEventId, UserId, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, }; use serde::Deserialize; use crate::rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, - state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, + state_compressor::{CompressedState, compress_state_event, parse_compressed_state_event}, }; /// The user was a joined member at this state (potentially in the past) diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 0332c227..c30e1da8 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,14 +1,14 @@ -use conduwuit::{error, implement, pdu::PduBuilder, Err, Error, Result}; +use conduwuit::{Err, Error, Result, error, implement, pdu::PduBuilder}; use ruma::{ + EventId, RoomId, UserId, events::{ + StateEventType, TimelineEventType, room::{ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, member::{MembershipState, RoomMemberEventContent}, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - StateEventType, TimelineEventType, }, - EventId, RoomId, UserId, }; use crate::rooms::state::RoomMutexGuard; @@ -44,7 +44,7 @@ pub async fn user_can_redact( ))); } - if let Ok(pl_event_content) = self + match self .room_state_get_content::( room_id, &StateEventType::RoomPowerLevels, @@ -52,33 +52,35 @@ pub async fn user_can_redact( ) .await { - let pl_event: RoomPowerLevels = pl_event_content.into(); - 
Ok(pl_event.user_can_redact_event_of_other(sender) - || pl_event.user_can_redact_own_event(sender) - && if let Ok(redacting_event) = redacting_event { - if federation { - redacting_event.sender.server_name() == sender.server_name() - } else { - redacting_event.sender == sender - } - } else { - false - }) - } else { - // Falling back on m.room.create to judge power level - if let Ok(room_create) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(room_create.sender == sender - || redacting_event - .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)) - } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } + | Ok(pl_event_content) => { + let pl_event: RoomPowerLevels = pl_event_content.into(); + Ok(pl_event.user_can_redact_event_of_other(sender) + || pl_event.user_can_redact_own_event(sender) + && match redacting_event { + | Ok(redacting_event) => + if federation { + redacting_event.sender.server_name() == sender.server_name() + } else { + redacting_event.sender == sender + }, + | _ => false, + }) + }, + | _ => { + // Falling back on m.room.create to judge power level + match self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + | Ok(room_create) => Ok(room_create.sender == sender + || redacting_event + .as_ref() + .is_ok_and(|redacting_event| redacting_event.sender == sender)), + | _ => Err(Error::bad_database( + "No m.room.power_levels or m.room.create events in database for room", + )), + } + }, } } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 0d25142d..4403468b 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -4,31 +4,31 @@ use std::{ }; use conduwuit::{ - is_not_empty, + Result, is_not_empty, result::LogErr, - utils::{stream::TryIgnore, ReadyExt, StreamTools}, - warn, Result, + utils::{ReadyExt, StreamTools, 
stream::TryIgnore}, + warn, }; -use database::{serialize_key, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{future::join5, pin_mut, stream::iter, Stream, StreamExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map, serialize_key}; +use futures::{Stream, StreamExt, future::join5, pin_mut, stream::iter}; use itertools::Itertools; use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, events::{ + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, direct::DirectEvent, room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, }, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, }, int, serde::Raw, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; -use crate::{account_data, appservice::RegistrationInfo, globals, rooms, users, Dep}; +use crate::{Dep, account_data, appservice::RegistrationInfo, globals, rooms, users}; pub struct Service { appservice_in_room_cache: AppServiceInRoomCache, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 18731809..c566eb1c 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -6,10 +6,10 @@ use std::{ }; use conduwuit::{ + Result, arrayvec::ArrayVec, at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, - Result, }; use database::Map; use futures::{Stream, StreamExt}; @@ -17,9 +17,8 @@ use lru_cache::LruCache; use ruma::{EventId, RoomId}; use crate::{ - rooms, + Dep, rooms, rooms::short::{ShortEventId, ShortId, ShortStateHash, ShortStateKey}, - Dep, }; pub struct Service { diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index bc995e27..7f9a7515 100644 --- a/src/service/rooms/threads/mod.rs +++ 
b/src/service/rooms/threads/mod.rs @@ -1,22 +1,21 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - err, + PduCount, PduEvent, PduId, RawPduId, Result, err, utils::{ - stream::{TryIgnore, WidebandExt}, ReadyExt, + stream::{TryIgnore, WidebandExt}, }, - PduCount, PduEvent, PduId, RawPduId, Result, }; use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{ - api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, + api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, }; use serde_json::json; -use crate::{rooms, rooms::short::ShortRoomId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortRoomId}; pub struct Service { db: Data, @@ -121,10 +120,13 @@ impl Service { } let mut users = Vec::new(); - if let Ok(userids) = self.get_participants(&root_id).await { - users.extend_from_slice(&userids); - } else { - users.push(root_pdu.sender); + match self.get_participants(&root_id).await { + | Ok(userids) => { + users.extend_from_slice(&userids); + }, + | _ => { + users.push(root_pdu.sender); + }, } users.push(pdu.sender.clone()); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 457c1e8d..94c78bb0 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,18 +1,17 @@ use std::{borrow::Borrow, sync::Arc}; use conduwuit::{ - at, err, + Err, PduCount, PduEvent, Result, at, err, result::{LogErr, NotFound}, utils, utils::stream::TryReadyExt, - Err, PduCount, PduEvent, Result, }; use database::{Database, Deserialized, Json, KeyVal, Map}; -use futures::{future::select_ok, pin_mut, FutureExt, Stream, TryFutureExt, TryStreamExt}; -use ruma::{api::Direction, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; +use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut}; +use 
ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, api::Direction}; use super::{PduId, RawPduId}; -use crate::{rooms, rooms::short::ShortRoomId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortRoomId}; pub(super) struct Data { eventid_outlierpdu: Arc, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 9d6ee982..4be97fb2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -10,22 +10,25 @@ use std::{ }; use conduwuit::{ - at, debug, debug_warn, err, error, implement, info, - pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, + Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, + pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, state_res::{self, Event, RoomVersion}, utils::{ - self, future::TryExtExt, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt, + self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, }, - validated, warn, Err, Error, Result, Server, + validated, warn, }; pub use conduwuit::{PduId, RawPduId}; use futures::{ - future, future::ready, pin_mut, Future, FutureExt, Stream, StreamExt, TryStreamExt, + Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, }; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + RoomId, RoomVersionId, ServerName, UserId, api::federation, canonical_json::to_canonical_value, events::{ + GlobalAccountDataEventType, StateEventType, TimelineEventType, push_rules::PushRulesEvent, room::{ create::RoomCreateEventContent, @@ -34,23 +37,21 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent, }, - GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomId, 
RoomVersionId, ServerName, UserId, + uint, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use self::data::Data; pub use self::data::PdusIterItem; use crate::{ - account_data, admin, appservice, + Dep, account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, rooms::{short::ShortRoomId, state_compressor::CompressedState}, - sending, server_keys, users, Dep, + sending, server_keys, users, }; // Update Relationships diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index c710b33a..a81ee95c 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -1,19 +1,18 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - debug_info, trace, + Result, Server, debug_info, trace, utils::{self, IterStream}, - Result, Server, }; use futures::StreamExt; use ruma::{ + OwnedRoomId, OwnedUserId, RoomId, UserId, api::federation::transactions::edu::{Edu, TypingContent}, events::SyncEphemeralRoomEvent, - OwnedRoomId, OwnedUserId, RoomId, UserId, }; -use tokio::sync::{broadcast, RwLock}; +use tokio::sync::{RwLock, broadcast}; -use crate::{globals, sending, sending::EduBuf, users, Dep}; +use crate::{Dep, globals, sending, sending::EduBuf, users}; pub struct Service { server: Arc, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 6a0c6aa1..bd76f1f4 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,10 +1,10 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Database, Deserialized, Map}; use ruma::{RoomId, UserId}; -use crate::{globals, rooms, rooms::short::ShortStateHash, Dep}; +use crate::{Dep, globals, rooms, rooms::short::ShortStateHash}; pub struct Service { db: Data, diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 
6b58d964..7fa0be9a 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -1,10 +1,10 @@ use std::{fmt::Debug, mem}; use bytes::BytesMut; -use conduwuit::{debug_error, err, trace, utils, warn, Err, Result}; +use conduwuit::{Err, Result, debug_error, err, trace, utils, warn}; use reqwest::Client; use ruma::api::{ - appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, appservice::Registration, }; /// Sends a request to an appservice diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 4dd2d5aa..a6bcc2b2 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,16 +1,15 @@ use std::{fmt::Debug, sync::Arc}; use conduwuit::{ - at, utils, - utils::{stream::TryIgnore, ReadyExt}, - Error, Result, + Error, Result, at, utils, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Database, Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{OwnedServerName, ServerName, UserId}; use super::{Destination, SendingEvent}; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub(super) type OutgoingItem = (Key, SendingEvent, Destination); pub(super) type SendingItem = (Key, SendingEvent); @@ -102,7 +101,7 @@ impl Data { pub fn active_requests_for( &self, destination: &Destination, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ + use<'_> { let prefix = destination.get_prefix(); self.servercurrentevent_data .raw_stream_from(&prefix) @@ -156,7 +155,7 @@ impl Data { pub fn queued_requests( &self, destination: &Destination, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ + use<'_> { let prefix = destination.get_prefix(); self.servernameevent_data .raw_stream_from(&prefix) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b46ce7a8..379829b4 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -12,15 
+12,15 @@ use std::{ use async_trait::async_trait; use conduwuit::{ - debug, debug_warn, err, error, + Result, Server, debug, debug_warn, err, error, smallvec::SmallVec, - utils::{available_parallelism, math::usize_from_u64_truncated, ReadyExt, TryReadyExt}, - warn, Result, Server, + utils::{ReadyExt, TryReadyExt, available_parallelism, math::usize_from_u64_truncated}, + warn, }; use futures::{FutureExt, Stream, StreamExt}; use ruma::{ - api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, + api::{OutgoingRequest, appservice::Registration}, }; use tokio::{task, task::JoinSet}; @@ -30,8 +30,8 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, client, federation, globals, presence, pusher, rooms, - rooms::timeline::RawPduId, users, Dep, + Dep, account_data, client, federation, globals, presence, pusher, rooms, + rooms::timeline::RawPduId, users, }; pub struct Service { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 3e86de2d..c4f34177 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -2,32 +2,33 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, + atomic::{AtomicU64, AtomicUsize, Ordering}, }, time::{Duration, Instant}, }; -use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; +use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD}; use conduwuit::{ - debug, err, error, + Error, Result, debug, err, error, result::LogErr, trace, utils::{ - calculate_hash, continue_exponential_backoff_secs, + ReadyExt, calculate_hash, continue_exponential_backoff_secs, future::TryExtExt, stream::{BroadbandExt, IterStream, WidebandExt}, - ReadyExt, }, - warn, Error, Result, + warn, }; use futures::{ + FutureExt, StreamExt, future::{BoxFuture, OptionFuture}, join, pin_mut, stream::FuturesUnordered, - FutureExt, StreamExt, }; use ruma::{ + CanonicalJsonObject, 
MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, + RoomId, RoomVersionId, ServerName, UInt, api::{ appservice::event::push_events::v1::EphemeralData, federation::transactions::{ @@ -40,18 +41,17 @@ use ruma::{ }, device_id, events::{ - push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, - GlobalAccountDataEventType, + AnySyncEphemeralRoomEvent, GlobalAccountDataEventType, push_rules::PushRulesEvent, + receipt::ReceiptType, }, push, serde::Raw, - uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UInt, + uint, }; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use super::{ - appservice, data::QueueItem, Destination, EduBuf, EduVec, Msg, SendingEvent, Service, + Destination, EduBuf, EduVec, Msg, SendingEvent, Service, appservice, data::QueueItem, }; #[derive(Debug)] @@ -146,7 +146,7 @@ impl Service { statuses.entry(dest).and_modify(|e| { *e = match e { | TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - | TransactionStatus::Retrying(ref n) => + | &mut TransactionStatus::Retrying(ref n) => TransactionStatus::Failed(n.saturating_add(1), Instant::now()), | TransactionStatus::Failed(..) 
=> { panic!("Request that was not even running failed?!") @@ -211,7 +211,7 @@ impl Service { async fn finish_responses<'a>(&'a self, futures: &mut SendingFutures<'a>) { use tokio::{ select, - time::{sleep_until, Instant}, + time::{Instant, sleep_until}, }; let timeout = self.server.config.sender_shutdown_timeout; diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 305cbfef..64b936b6 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -7,13 +7,13 @@ use std::{ use conduwuit::{ debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn, }; -use futures::{stream::FuturesUnordered, StreamExt}; +use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ - api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, - OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + CanonicalJsonObject, OwnedServerName, OwnedServerSigningKeyId, ServerName, + ServerSigningKeyId, api::federation::discovery::ServerSigningKeys, serde::Raw, }; use serde_json::value::RawValue as RawJsonValue; -use tokio::time::{timeout_at, Instant}; +use tokio::time::{Instant, timeout_at}; use super::key_exists; diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 5a027d64..00aeae1e 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -1,12 +1,12 @@ use std::borrow::Borrow; -use conduwuit::{implement, Err, Result}; +use conduwuit::{Err, Result, implement}; use ruma::{ - api::federation::discovery::VerifyKey, CanonicalJsonObject, RoomVersionId, ServerName, - ServerSigningKeyId, + CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId, + api::federation::discovery::VerifyKey, }; -use super::{extract_key, PubKeyMap, PubKeys}; +use super::{PubKeyMap, PubKeys, extract_key}; #[implement(super::Service)] pub async fn get_event_keys( diff --git a/src/service/server_keys/keypair.rs 
b/src/service/server_keys/keypair.rs index 6f983c26..259c37fb 100644 --- a/src/service/server_keys/keypair.rs +++ b/src/service/server_keys/keypair.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{debug, debug_info, err, error, utils, utils::string_from_bytes, Result}; +use conduwuit::{Result, debug, debug_info, err, error, utils, utils::string_from_bytes}; use database::Database; use ruma::{api::federation::discovery::VerifyKey, serde::Base64, signatures::Ed25519KeyPair}; diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index 3f6a3039..bf6799ba 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -8,22 +8,21 @@ mod verify; use std::{collections::BTreeMap, sync::Arc, time::Duration}; use conduwuit::{ - implement, - utils::{timepoint_from_now, IterStream}, - Result, Server, + Result, Server, implement, + utils::{IterStream, timepoint_from_now}, }; use database::{Deserialized, Json, Map}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, + ServerName, ServerSigningKeyId, api::federation::discovery::{ServerSigningKeys, VerifyKey}, serde::Raw, signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet}, - CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, - ServerName, ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; -use crate::{globals, sending, Dep}; +use crate::{Dep, globals, sending}; pub struct Service { keypair: Box, diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index afe8958b..171b755b 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -1,13 +1,13 @@ use std::{collections::BTreeMap, fmt::Debug}; -use conduwuit::{debug, implement, Err, Result}; +use conduwuit::{Err, Result, debug, implement}; use ruma::{ - api::federation::discovery::{ - get_remote_server_keys, - 
get_remote_server_keys_batch::{self, v2::QueryCriteria}, - get_server_keys, ServerSigningKeys, - }, OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + api::federation::discovery::{ + ServerSigningKeys, get_remote_server_keys, + get_remote_server_keys_batch::{self, v2::QueryCriteria}, + get_server_keys, + }, }; #[implement(super::Service)] @@ -79,7 +79,7 @@ pub async fn notary_request( &self, notary: &ServerName, target: &ServerName, -) -> Result + Clone + Debug + Send> { +) -> Result + Clone + Debug + Send + use<>> { use get_remote_server_keys::v2::Request; let request = Request { diff --git a/src/service/server_keys/sign.rs b/src/service/server_keys/sign.rs index 8d6f108c..e8cc485d 100644 --- a/src/service/server_keys/sign.rs +++ b/src/service/server_keys/sign.rs @@ -1,4 +1,4 @@ -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use ruma::{CanonicalJsonObject, RoomVersionId}; #[implement(super::Service)] diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs index 0f03e59e..84433628 100644 --- a/src/service/server_keys/verify.rs +++ b/src/service/server_keys/verify.rs @@ -1,6 +1,6 @@ -use conduwuit::{implement, pdu::gen_event_id_canonical_json, Err, Result}; +use conduwuit::{Err, Result, implement, pdu::gen_event_id_canonical_json}; use ruma::{ - signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified, }; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/service.rs b/src/service/service.rs index cad01437..2907a562 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -7,7 +7,7 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{err, error::inspect_log, utils::string::SplitInfallible, Err, Result, Server}; +use conduwuit::{Err, Result, Server, err, error::inspect_log, utils::string::SplitInfallible}; use 
database::Database; /// Abstract interface for a Service diff --git a/src/service/services.rs b/src/service/services.rs index fb334b96..269a1f87 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, RwLock}, }; -use conduwuit::{debug, debug_info, info, trace, Result, Server}; +use conduwuit::{Result, Server, debug, debug_info, info, trace}; use database::Database; use tokio::sync::Mutex; diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index 0b86377a..bf2bc142 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -8,15 +8,15 @@ use std::{ use conduwuit::{Result, Server}; use database::Map; use ruma::{ + DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, api::client::sync::sync_events::{ self, v4::{ExtensionsConfig, SyncRequestList}, v5, }, - DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, }; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { db: Data, diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs index 0a9c5d15..96981472 100644 --- a/src/service/sync/watch.rs +++ b/src/service/sync/watch.rs @@ -1,5 +1,5 @@ -use conduwuit::{implement, trace, Result}; -use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; +use conduwuit::{Result, implement, trace}; +use futures::{FutureExt, StreamExt, pin_mut, stream::FuturesUnordered}; use ruma::{DeviceId, UserId}; #[implement(super::Service)] diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 912c0b49..9c284b70 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Handle, Map}; use ruma::{DeviceId, TransactionId, UserId}; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 7084f32a..51f5fb11 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ 
-4,20 +4,19 @@ use std::{ }; use conduwuit::{ - err, error, implement, utils, + Error, Result, err, error, implement, utils, utils::{hash, string::EMPTY}, - Error, Result, }; use database::{Deserialized, Json, Map}; use ruma::{ + CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, api::client::{ error::ErrorKind, uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, }, - CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; -use crate::{config, globals, users, Dep}; +use crate::{Dep, config, globals, users}; pub struct Service { userdevicesessionid_uiaarequest: RwLock, @@ -144,8 +143,7 @@ pub async fn try_auth( }; #[cfg(not(feature = "element_hacks"))] - let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier - else { + let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier else { return Err(Error::BadRequest( ErrorKind::Unrecognized, "Identifier type not recognized.", diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index 7fd93b6c..28bee65a 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -1,16 +1,16 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduwuit::{debug, info, warn, Result, Server}; +use conduwuit::{Result, Server, debug, info, warn}; use database::{Deserialized, Map}; use ruma::events::room::message::RoomMessageEventContent; use serde::Deserialize; use tokio::{ sync::Notify, - time::{interval, MissedTickBehavior}, + time::{MissedTickBehavior, interval}, }; -use crate::{admin, client, globals, Dep}; +use crate::{Dep, admin, client, globals}; pub struct Service { interval: Duration, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index f0389a4a..b3f5db88 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,25 +1,24 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - at, debug_warn, err, trace, - utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, - 
Err, Error, Result, Server, + Err, Error, Result, Server, at, debug_warn, err, trace, + utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ + DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, + OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{ - ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType, + AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, }, serde::Raw, - DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, - OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, }; use serde_json::json; -use crate::{account_data, admin, globals, rooms, Dep}; +use crate::{Dep, account_data, admin, globals, rooms}; pub struct Service { services: Services, @@ -246,10 +245,13 @@ impl Service { /// Sets a new avatar_url or removes it if avatar_url is None. 
pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) { - if let Some(avatar_url) = avatar_url { - self.db.userid_avatarurl.insert(user_id, &avatar_url); - } else { - self.db.userid_avatarurl.remove(user_id); + match avatar_url { + | Some(avatar_url) => { + self.db.userid_avatarurl.insert(user_id, &avatar_url); + }, + | _ => { + self.db.userid_avatarurl.remove(user_id); + }, } } From 045e8a293740ba1ee94d93d09d27d07a6c0d67d0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 02:51:55 -0500 Subject: [PATCH 0689/1248] stop building mac binaries for now because of linker issues Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 126 --------------------------------------- 1 file changed, 126 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 24f2db45..8e1cf6c6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -623,132 +623,6 @@ jobs: scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz fi - build_mac_binaries: - name: Build MacOS Binaries - strategy: - matrix: - os: [macos-latest, macos-13] - runs-on: ${{ matrix.os }} - steps: - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - name: Tag comparison check - if: 
${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - # Nix can't do portable macOS builds yet - - name: Build macOS x86_64 binary - if: ${{ matrix.os == 'macos-13' }} - run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls - cp -v -f target/release/conduwuit conduwuit-macos-x86_64 - otool -L conduwuit-macos-x86_64 - - # quick smoke test of the x86_64 macOS binary - - name: Run x86_64 macOS release binary - if: ${{ matrix.os == 'macos-13' }} - run: | - ./conduwuit-macos-x86_64 --help - ./conduwuit-macos-x86_64 --version - - - name: Build macOS arm64 binary - if: ${{ matrix.os == 'macos-latest' }} - run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls - cp -v -f target/release/conduwuit conduwuit-macos-arm64 - otool -L conduwuit-macos-arm64 - - # quick smoke test of the arm64 macOS binary - - name: Run arm64 macOS release 
binary - if: ${{ matrix.os == 'macos-latest' }} - run: | - ./conduwuit-macos-arm64 --help - ./conduwuit-macos-arm64 --version - - - name: Upload macOS x86_64 binary to webserver - if: ${{ matrix.os == 'macos-13' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x conduwuit-macos-x86_64 - scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-x86_64 - fi - - - name: Upload macOS arm64 binary to webserver - if: ${{ matrix.os == 'macos-latest' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x conduwuit-macos-arm64 - scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-arm64 - fi - - - name: Upload macOS x86_64 binary - if: ${{ matrix.os == 'macos-13' }} - uses: actions/upload-artifact@v4 - with: - name: conduwuit-macos-x86_64 - path: conduwuit-macos-x86_64 - if-no-files-found: error - - - name: Upload macOS arm64 binary - if: ${{ matrix.os == 'macos-latest' }} - uses: actions/upload-artifact@v4 - with: - name: conduwuit-macos-arm64 - path: conduwuit-macos-arm64 - if-no-files-found: error variables: outputs: github_repository: ${{ steps.var.outputs.github_repository }} From 4bdd0d77db9b4eaa7864431da6c5b19218e18c79 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 02:52:19 -0500 Subject: [PATCH 0690/1248] bump complement, actually run all tests Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- flake.lock | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/complement b/bin/complement index ffd7a938..4356f2e7 100755 --- a/bin/complement +++ b/bin/complement @@ -45,7 +45,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests ./tests/msc3967 | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v 
-timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/flake.lock b/flake.lock index 9bf6ac55..a7d80508 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1734303596, - "narHash": "sha256-HjDRyLR4MBqQ3IjfMM6eE+8ayztXlbz3gXdyDmFla68=", + "lastModified": 1740291865, + "narHash": "sha256-wl1+yCTEtvIH8vgXygnxPkaSgg4MYNKs+c9tzVytr20=", "owner": "girlbossceo", "repo": "complement", - "rev": "14cc5be797b774f1a2b9f826f38181066d4952b8", + "rev": "35ad9d9051498fbac8ea4abff8ab7d8b1844f87b", "type": "github" }, "original": { From cbf207bd1f1587418be0de2a1a5cbd745baec9e2 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 03:11:34 -0500 Subject: [PATCH 0691/1248] try adding back some skipped complement tests Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/complement b/bin/complement index 4356f2e7..9960299c 100755 --- a/bin/complement +++ b/bin/complement @@ -18,7 +18,7 @@ RESULTS_FILE="$3" OCI_IMAGE="complement-conduwuit:main" # Complement tests that are skipped due to flakiness/reliability issues -SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*|TestUnbanViaInvite.*' +SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then From a67ab754179d0bbaa09aa19d974035c521643fe9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 25 Feb 2025 18:38:12 +0000 Subject: [PATCH 0692/1248] fix edition 2024 lints Signed-off-by: Jason Volk --- Cargo.toml | 2 ++ src/admin/admin.rs | 2 +- src/admin/room/commands.rs | 2 +- src/admin/room/directory.rs | 2 +- src/admin/user/commands.rs | 6 +++--- 
src/api/client/account.rs | 2 +- src/api/client/directory.rs | 14 +++----------- src/api/client/membership.rs | 6 +++--- src/api/client/report.rs | 6 +++--- src/api/router/auth.rs | 2 +- src/api/server/send_join.rs | 2 +- src/api/server/send_knock.rs | 2 +- src/database/de.rs | 2 +- src/database/engine/logger.rs | 2 +- src/database/map/compact.rs | 2 +- src/database/map/qry_batch.rs | 1 - src/database/map/rev_stream.rs | 2 +- src/database/map/rev_stream_from.rs | 2 +- src/database/map/stream.rs | 2 +- src/database/map/stream_from.rs | 2 +- src/database/pool.rs | 14 ++++++-------- src/database/ser.rs | 2 +- src/database/stream.rs | 6 +++--- src/database/watchers.rs | 2 +- src/macros/config.rs | 2 +- src/service/media/blurhash.rs | 2 +- src/service/media/remote.rs | 2 +- .../rooms/event_handler/handle_incoming_pdu.rs | 2 +- src/service/rooms/spaces/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 4 ++-- src/service/rooms/state_compressor/mod.rs | 4 ++-- src/service/rooms/timeline/mod.rs | 14 +++++++------- src/service/sending/mod.rs | 2 +- src/service/sending/sender.rs | 7 ++----- src/service/server_keys/request.rs | 2 +- src/service/uiaa/mod.rs | 2 +- 36 files changed, 60 insertions(+), 72 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 76de2212..52695d89 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -905,6 +905,7 @@ missing_docs_in_private_items = { level = "allow", priority = 1 } missing_errors_doc = { level = "allow", priority = 1 } missing_panics_doc = { level = "allow", priority = 1 } module_name_repetitions = { level = "allow", priority = 1 } +needless_continue = { level = "allow", priority = 1 } no_effect_underscore_binding = { level = "allow", priority = 1 } similar_names = { level = "allow", priority = 1 } single_match_else = { level = "allow", priority = 1 } @@ -969,6 +970,7 @@ style = { level = "warn", priority = -1 } # trivial assertions are quite alright assertions_on_constants = { level = "allow", priority = 1 } module_inception = { level = 
"allow", priority = 1 } +obfuscated_if_else = { level = "allow", priority = 1 } ################### suspicious = { level = "warn", priority = -1 } diff --git a/src/admin/admin.rs b/src/admin/admin.rs index b6de1ec6..9e010a59 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -62,7 +62,7 @@ pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Res | Debug(command) => debug::process(command, context).await?, | Query(command) => query::process(command, context).await?, | Check(command) => check::process(command, context).await?, - }; + } Ok(()) } diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index b5c303c8..6dd31b48 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -42,7 +42,7 @@ pub(super) async fn list_rooms( if rooms.is_empty() { return Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; + } let output_plain = format!( "Rooms ({}):\n```\n{}\n```", diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index e9c23a1d..ca036825 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -67,7 +67,7 @@ pub(super) async fn reprocess( if rooms.is_empty() { return Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; + } let output = format!( "Rooms (page {page}):\n```\n{}\n```", diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 8cb8edc3..8565f04a 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -166,7 +166,7 @@ pub(super) async fn create_user( "Failed to automatically join room {room} for user {user_id}: {e}" ); }, - }; + } } } } @@ -550,7 +550,7 @@ pub(super) async fn force_join_list_of_local_users( debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, - }; + } } Ok(RoomMessageEventContent::notice_markdown(format!( @@ -646,7 +646,7 @@ pub(super) async fn force_join_all_local_users( debug_warn!("Failed force 
joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, - }; + } } Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/api/client/account.rs b/src/api/client/account.rs index cb49a6db..b42f51f7 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -499,7 +499,7 @@ pub(crate) async fn register_route( | _ => { info!("Automatically joined room {room} for user {user_id}"); }, - }; + } } } } diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 136c5961..6af9b533 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -131,7 +131,7 @@ pub(crate) async fn set_room_visibility_route( if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist - return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + return Err!(Request(NotFound("Room not found"))); } if services @@ -145,10 +145,7 @@ pub(crate) async fn set_room_visibility_route( } if !user_can_publish_room(&services, sender_user, &body.room_id).await? 
{ - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); + return Err!(Request(Forbidden("User is not allowed to publish this room"))); } match &body.visibility { @@ -386,12 +383,7 @@ async fn user_can_publish_room( .await { | Ok(event) => Ok(event.sender == user_id), - | _ => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); - }, + | _ => Err!(Request(Forbidden("User is not allowed to publish this room"))), } }, } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 9c2693dc..0b9c0c69 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -993,7 +993,7 @@ async fn join_room_by_id_helper_remote( | _ => { join_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -1420,7 +1420,7 @@ async fn join_room_by_id_helper_local( | _ => { join_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -1947,7 +1947,7 @@ async fn remote_leave_room( | _ => { leave_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present diff --git a/src/api/client/report.rs b/src/api/client/report.rs index db085721..7922caca 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -43,7 +43,7 @@ pub(crate) async fn report_room_route( ErrorKind::InvalidParam, "Reason too long, should be 750 characters or fewer", )); - }; + } delay_response().await; @@ -164,14 +164,14 @@ async fn is_event_report_valid( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); - }; + } if reason.as_ref().is_some_and(|s| s.len() > 750) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Reason too long, should be 750 characters or fewer", )); - }; + } if !services 
.rooms diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 56256683..92b75cfa 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -110,7 +110,7 @@ pub(super) async fn auth( } }, | _ => {}, - }; + } } match (metadata.authentication, token) { diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 08fa3835..c1749835 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -135,7 +135,7 @@ async fn create_join_event( if state_key != sender { return Err!(Request(BadJson("State key does not match sender user."))); - }; + } if let Some(authorising_user) = content.join_authorized_via_users_server { use ruma::RoomVersionId::*; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index 1d4c2a6c..f7bb0735 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -137,7 +137,7 @@ pub(crate) async fn create_knock_event_v1_route( if state_key != sender { return Err!(Request(InvalidParam("state_key does not match sender user of event."))); - }; + } let origin: OwnedServerName = serde_json::from_value( value diff --git a/src/database/de.rs b/src/database/de.rs index 9c0997ff..849b3b2e 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -241,7 +241,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { | "Ignore" => self.record_ignore(), | "IgnoreAll" => self.record_ignore_all(), | _ => unhandled!("Unrecognized deserialization Directive {name:?}"), - }; + } visitor.visit_unit() } diff --git a/src/database/engine/logger.rs b/src/database/engine/logger.rs index a1898e30..23e23fc7 100644 --- a/src/database/engine/logger.rs +++ b/src/database/engine/logger.rs @@ -18,5 +18,5 @@ pub(crate) fn handle(level: LogLevel, msg: &str) { | LogLevel::Error | LogLevel::Fatal => error!("{msg}"), | LogLevel::Info => debug!("{msg}"), | LogLevel::Warn => warn!("{msg}"), - }; + } } diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs index 
84476de6..b49bf30b 100644 --- a/src/database/map/compact.rs +++ b/src/database/map/compact.rs @@ -52,7 +52,7 @@ pub fn compact_blocking(&self, opts: Options) -> Result { co.set_target_level(level.try_into()?); }, | (Some(_), Some(_)) => return Err!("compacting between specific levels not supported"), - }; + } self.db .db diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs index f44d1c86..e42d3e63 100644 --- a/src/database/map/qry_batch.rs +++ b/src/database/map/qry_batch.rs @@ -50,7 +50,6 @@ where .iter() .map(ser::serialize_to::) .map(|result| result.expect("failed to serialize query key")) - .map(Into::into) .collect(); self.db diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index fc2d1116..789a52e8 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -40,7 +40,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index d67986e7..a612d2a2 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -89,7 +89,7 @@ where .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index f1450b6f..f7371b6c 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -39,7 +39,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 00c3a051..ccf48db6 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -86,7 +86,7 @@ where .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/pool.rs b/src/database/pool.rs index e6ed59ac..47e61c30 
100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -146,11 +146,9 @@ pub(crate) fn close(&self) { .map(JoinHandle::join) .map(|result| result.map_err(Error::from_panic)) .enumerate() - .for_each(|(id, result)| { - match result { - | Ok(()) => trace!(?id, "worker joined"), - | Err(error) => error!(?id, "worker joined with error: {error}"), - }; + .for_each(|(id, result)| match result { + | Ok(()) => trace!(?id, "worker joined"), + | Err(error) => error!(?id, "worker joined with error: {error}"), }); } @@ -345,7 +343,7 @@ fn worker_handle(self: &Arc, cmd: Cmd) { | Cmd::Get(cmd) if cmd.key.len() == 1 => self.handle_get(cmd), | Cmd::Get(cmd) => self.handle_batch(cmd), | Cmd::Iter(cmd) => self.handle_iter(cmd), - }; + } } #[implement(Pool)] @@ -362,7 +360,7 @@ fn handle_iter(&self, mut cmd: Seek) { return; } - let from = cmd.key.as_deref().map(Into::into); + let from = cmd.key.as_deref(); let result = match cmd.dir { | Direction::Forward => cmd.state.init_fwd(from), @@ -394,7 +392,7 @@ fn handle_batch(self: &Arc, mut cmd: Get) { return; } - let keys = cmd.key.iter().map(Into::into); + let keys = cmd.key.iter(); let result: SmallVec<_> = cmd.map.get_batch_blocking(keys).collect(); diff --git a/src/database/ser.rs b/src/database/ser.rs index 6dd2043d..2e1a2cb0 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -224,7 +224,7 @@ impl ser::Serializer for &mut Serializer<'_, W> { self.separator()?; }, | _ => unhandled!("Unrecognized serialization directive: {name:?}"), - }; + } Ok(()) } diff --git a/src/database/stream.rs b/src/database/stream.rs index eb856b3f..eb264ccd 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -113,13 +113,13 @@ impl<'a> State<'a> { } #[inline] - fn fetch_key(&self) -> Option> { self.inner.key().map(Key::from) } + fn fetch_key(&self) -> Option> { self.inner.key() } #[inline] - fn _fetch_val(&self) -> Option> { self.inner.value().map(Val::from) } + fn _fetch_val(&self) -> Option> { self.inner.value() } 
#[inline] - fn fetch(&self) -> Option> { self.inner.item().map(KeyVal::from) } + fn fetch(&self) -> Option> { self.inner.item() } #[inline] pub(super) fn status(&self) -> Option { self.inner.status().err() } diff --git a/src/database/watchers.rs b/src/database/watchers.rs index be814f8c..b3907833 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -53,6 +53,6 @@ impl Watchers { tx.0.send(()).expect("channel should still be open"); } } - }; + } } } diff --git a/src/macros/config.rs b/src/macros/config.rs index 07ac1c0a..7b424325 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -205,7 +205,7 @@ fn get_default(field: &Field) -> Option { }, | Meta::Path { .. } => return Some("false".to_owned()), | _ => return None, - }; + } } None diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index 9d73f5dc..91e00228 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -172,7 +172,7 @@ impl std::fmt::Display for BlurhashingError { #[cfg(feature = "blurhashing")] | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, - }; + } Ok(()) } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 61635011..b6c853d2 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -283,7 +283,7 @@ async fn location_request(&self, location: &str) -> Result { .map_err(Into::into) .map(|content| FileMeta { content: Some(content), - content_type: content_type.clone().map(Into::into), + content_type: content_type.clone(), content_disposition: Some(make_content_disposition( content_disposition.as_ref(), content_type.as_deref(), diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index b6d3e21e..b437bf2e 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -170,7 +170,7 @@ pub async fn 
handle_incoming_pdu<'a>( | Entry::Occupied(mut e) => { *e.get_mut() = (now, e.get().1.saturating_add(1)); }, - }; + } } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 52e7d2be..910da914 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -125,7 +125,7 @@ pub async fn get_summary_and_children_local( SummaryAccessibility::Inaccessible }, )), - }; + } let children_pdus: Vec<_> = self .get_stripped_space_child_events(current_room) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 4403468b..02ffa0d1 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -218,7 +218,7 @@ impl Service { ) .await .ok(); - }; + } // Copy direct chat flag if let Ok(mut direct_event) = self @@ -250,7 +250,7 @@ impl Service { ) .await?; } - }; + } } } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index c566eb1c..305d3187 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -303,7 +303,7 @@ impl Service { }); return Ok(()); - }; + } // Else we have two options. // 1. We add the current diff on top of the parent layer. 
@@ -419,7 +419,7 @@ impl Service { 2, // every state change is 2 event changes on average states_parents, )?; - }; + } Ok(HashSetCompressStateEvent { shortstatehash: new_shortstatehash, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 4be97fb2..35c972fa 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -422,7 +422,7 @@ impl Service { highlight = true; }, | _ => {}, - }; + } // Break early if both conditions are true if notify && highlight { @@ -484,7 +484,7 @@ impl Service { } } }, - }; + } }, | TimelineEventType::SpaceChild => if let Some(_state_key) = &pdu.state_key { @@ -776,7 +776,7 @@ impl Service { | _ => { pdu_json.remove("event_id"); }, - }; + } // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( @@ -847,7 +847,7 @@ impl Service { { return Err!(Request(Forbidden("User cannot redact this event."))); } - }; + } }, | _ => { let content: RoomRedactionEventContent = pdu.get_content()?; @@ -863,7 +863,7 @@ impl Service { } }, } - }; + } if pdu.kind == TimelineEventType::RoomMember { let content: RoomMemberEventContent = pdu.get_content()?; @@ -1293,10 +1293,10 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res } }, | _ => {}, - }; + } }, | _ => {}, - }; + } Ok(()) } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 379829b4..08ca7010 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -131,7 +131,7 @@ impl crate::Service for Service { | Err(error) => { error!(id = ?error.id(), ?error, "sender worker finished"); }, - }; + } } Ok(()) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index c4f34177..616f0846 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -138,7 +138,7 @@ impl Service { match response { | Ok(dest) => self.handle_response_ok(&dest, futures, statuses).await, | Err((dest, e)) => 
Self::handle_response_err(dest, statuses, &e), - }; + } } fn handle_response_err(dest: Destination, statuses: &mut CurTransactionStatus, e: &Error) { @@ -319,10 +319,7 @@ impl Service { if let Destination::Federation(server_name) = dest { if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); - let select_edus = select_edus - .into_iter() - .map(Into::into) - .map(SendingEvent::Edu); + let select_edus = select_edus.into_iter().map(SendingEvent::Edu); events.extend(select_edus); self.db.set_latest_educount(server_name, last_count); diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index 171b755b..d9907616 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -43,7 +43,7 @@ where .keys() .rev() .take(self.services.server.config.trusted_server_batch_size) - .last() + .next_back() .cloned() { let request = Request { diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 51f5fb11..39dd2b41 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -69,7 +69,7 @@ pub async fn read_tokens(&self) -> Result> { }, | Err(e) => error!("Failed to read the registration token file: {e}"), } - }; + } if let Some(token) = &self.services.config.registration_token { tokens.insert(token.to_owned()); } From dca7bf9635ecd1fef3cd4bca56a25054d346692d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 27 Feb 2025 10:39:06 -0500 Subject: [PATCH 0693/1248] try bumping cache-nix-action to v6 Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 8 ++------ .github/workflows/documentation.yml | 4 +--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8e1cf6c6..82ffc6b6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -129,7 +129,7 @@ jobs: # we want a fresh-state when we do 
releases/tags to avoid potential cache poisoning attacks impacting # releases and tags #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -146,8 +146,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - save-always: true - name: Enable Cachix binary cache run: | @@ -324,7 +322,7 @@ jobs: # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -341,8 +339,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - save-always: true - name: Enable Cachix binary cache run: | diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 0eefe0a4..fadc7b3f 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -76,7 +76,7 @@ jobs: # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -93,8 +93,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - 
save-always: true - name: Enable Cachix binary cache run: | From 17e0384eeb91bfbb77576359252db25e3248cc40 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:11:43 -0500 Subject: [PATCH 0694/1248] ignore errors instead of expecting for state gathering Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_accessor/state.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 625defe6..02a6194e 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -4,7 +4,7 @@ use conduwuit::{ PduEvent, Result, StateKey, at, err, implement, pair_of, utils::{ result::FlatOk, - stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, + stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, }, }; use database::Deserialized; @@ -232,7 +232,7 @@ pub fn state_keys_with_shortids<'a>( ) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .unzip() .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) .shared(); @@ -269,7 +269,7 @@ pub fn state_keys<'a>( ) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .map(at!(0)); self.services @@ -305,7 +305,7 @@ pub fn state_added( .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) .map_ok(IterStream::try_stream) .try_flatten_stream() - .expect_ok() + .ignore_err() .map(parse_compressed_state_event) } @@ -327,7 +327,7 @@ pub fn state_full_pdus( ) -> impl Stream + Send + '_ { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .map(at!(1)); self.services @@ -352,7 +352,7 @@ where { let shortids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .unzip() .shared(); From de53ad83b2ec49170075cc5176e0ec7a604aad94 Mon Sep 17 00:00:00 2001 From: June 
Clementine Strawberry Date: Sun, 2 Mar 2025 23:15:05 -0500 Subject: [PATCH 0695/1248] run nightly cargo fmt again Signed-off-by: June Clementine Strawberry --- src/admin/room/alias.rs | 5 +++-- src/admin/room/moderation.rs | 20 +++++++++++-------- src/api/client/directory.rs | 5 +++-- src/api/client/read_marker.rs | 5 +++-- src/api/server/hierarchy.rs | 5 +++-- src/core/state_res/event_auth.rs | 5 +++-- src/router/layers.rs | 8 ++++++-- .../rooms/event_handler/fetch_state.rs | 5 +++-- .../rooms/event_handler/handle_outlier_pdu.rs | 5 +++-- src/service/rooms/spaces/mod.rs | 5 +++-- src/service/server_keys/get.rs | 5 +++-- 11 files changed, 45 insertions(+), 28 deletions(-) diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 6262f33e..ab21170c 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -66,10 +66,11 @@ pub(super) async fn reprocess( format!("#{}:{}", room_alias_localpart, services.globals.server_name()); let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { | Ok(alias) => alias, - | Err(err) => + | Err(err) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse alias: {err}" - ))), + ))); + }, }; match command { | RoomAliasCommand::Set { force, room_id, .. } => { diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index ee132590..444dfa2f 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -96,12 +96,13 @@ async fn ban_room( let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!("Room specified is a room ID, banning room ID"); @@ -111,12 +112,13 @@ async fn ban_room( } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!( @@ -514,12 +516,13 @@ async fn unban_room( let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!("Room specified is a room ID, unbanning room ID"); @@ -529,12 +532,13 @@ async fn unban_room( } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!( diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 6af9b533..88f0e668 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -267,8 +267,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( let backwards = match characters.next() { | Some('n') => false, | Some('p') => true, - | _ => - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")), + | _ => { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")); + }, }; num_since = characters diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index d01327f6..187616b4 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -197,11 +197,12 @@ pub(crate) async fn create_receipt_route( .read_receipt .private_read_set(&body.room_id, sender_user, count); }, - | _ => + | _ => { return Err!(Request(InvalidParam(warn!( "Received unknown read receipt type: {}", &body.receipt_type - )))), + )))); + }, } Ok(create_receipt::v3::Response {}) diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index 41eaedd0..c759c8ea 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -32,8 +32,9 @@ pub(crate) async fn get_hierarchy_route( { | None => Err!(Request(NotFound("The requested room was not found"))), - | Some(SummaryAccessibility::Inaccessible) => - Err!(Request(NotFound("The requested room is inaccessible"))), + | Some(SummaryAccessibility::Inaccessible) => { + Err!(Request(NotFound("The requested room is inaccessible"))) + }, | Some(SummaryAccessibility::Accessible(room)) => { let (children, inaccessible_children) = diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index 4b8e55f3..65bec802 100644 --- a/src/core/state_res/event_auth.rs +++ 
b/src/core/state_res/event_auth.rs @@ -682,7 +682,7 @@ fn valid_membership_change( } allow }, - | _ => + | _ => { if !sender_is_joined || target_user_current_membership == MembershipState::Join || target_user_current_membership == MembershipState::Ban @@ -706,7 +706,8 @@ fn valid_membership_change( ); } allow - }, + } + }, } }, | MembershipState::Leave => diff --git a/src/router/layers.rs b/src/router/layers.rs index 88e6a8d5..6920555d 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -61,8 +61,12 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ) .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::handle)) .layer(SecureClientIpSource::ConnectInfo.into_extension()) - .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs(server.config.client_response_timeout))) - .layer(RequestBodyTimeoutLayer::new(Duration::from_secs(server.config.client_receive_timeout))) + .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs( + server.config.client_response_timeout, + ))) + .layer(RequestBodyTimeoutLayer::new(Duration::from_secs( + server.config.client_receive_timeout, + ))) .layer(TimeoutLayer::new(Duration::from_secs(server.config.client_request_timeout))) .layer(SetResponseHeaderLayer::if_not_present( HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index b1a4a38b..0f9e093b 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -58,10 +58,11 @@ pub(super) async fn fetch_state( | hash_map::Entry::Vacant(v) => { v.insert(pdu.event_id.clone()); }, - | hash_map::Entry::Occupied(_) => + | hash_map::Entry::Occupied(_) => { return Err!(Database( "State event's type and state_key combination exists multiple times.", - )), + )); + }, } } diff --git 
a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 974eb300..99e90a50 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -56,10 +56,11 @@ pub(super) async fn handle_outlier_pdu<'a>( obj }, - | Err(e) => + | Err(e) => { return Err!(Request(InvalidParam(debug_error!( "Signature verification failed for {event_id}: {e}" - )))), + )))); + }, }; // Now that we have checked the signature and hashes we can add the eventID and diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 910da914..1da38234 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -109,7 +109,7 @@ pub async fn get_summary_and_children_local( { | None => (), // cache miss | Some(None) => return Ok(None), - | Some(Some(cached)) => + | Some(Some(cached)) => { return Ok(Some( if self .is_accessible_child( @@ -124,7 +124,8 @@ pub async fn get_summary_and_children_local( } else { SummaryAccessibility::Inaccessible }, - )), + )); + }, } let children_pdus: Vec<_> = self diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 00aeae1e..f9c5bdaf 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -18,8 +18,9 @@ pub async fn get_event_keys( let required = match required_keys(object, version) { | Ok(required) => required, - | Err(e) => - return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")), + | Err(e) => { + return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")); + }, }; let batch = required From 00cc23b6496533b9cfb77145966e2e7355f1f886 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:15:30 -0500 Subject: [PATCH 0696/1248] bump nix lockfile, bump cargo.lock, bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 263 
+++++++++++++++++++++++++++-------------------------- Cargo.toml | 4 +- flake.lock | 24 ++--- 3 files changed, 146 insertions(+), 145 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e84437c..e632b504 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,9 +55,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4" [[package]] name = "arbitrary" @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.18" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" +checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" dependencies = [ "brotli", "flate2", @@ -212,18 +212,18 @@ dependencies = [ [[package]] name = "avif-serialize" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e335041290c43101ca215eed6f43ec437eb5a42125573f600fc3fa42b9bddd62" +checksum = "98922d6a4cfbcb08820c69d8eeccc05bb1f29bfa06b4f5b1dbfe9a868bd7608e" dependencies = [ "arrayvec", ] [[package]] name = "aws-lc-rs" -version = "1.12.2" +version = "1.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2b7ddaa2c56a367ad27a094ad8ef4faacf8a617c2575acb2ba88949df999ca" +checksum = "5e4e8200b9a4a5801a769d50eeabc05670fec7e959a8cb7a63a93e4e519942ae" dependencies = [ "aws-lc-sys", "paste", @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ac4f13dad353b209b34cbec082338202cbc01c8f00336b55c750c13ac91f8f" 
+checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" dependencies = [ "bindgen", "cc", @@ -414,7 +414,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -445,9 +445,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" [[package]] name = "bitstream-io" @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "built" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" +checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" [[package]] name = "bumpalo" @@ -541,18 +541,17 @@ checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "bytesize" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" +checksum = "2d2c12f985c78475a6b8d629afd0c360260ef34cfef52efccdcfd31972f81c2e" [[package]] name = "bzip2-sys" -version = "0.1.11+1.0.8" +version = "0.1.13+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" dependencies = [ "cc", - "libc", "pkg-config", ] @@ -568,9 +567,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.12" 
+version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" +checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "jobserver", "libc", @@ -619,9 +618,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "num-traits", ] @@ -639,9 +638,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.28" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" +checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" dependencies = [ "clap_builder", "clap_derive", @@ -649,9 +648,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.27" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" +checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" dependencies = [ "anstyle", "clap_lex", @@ -677,9 +676,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ "cc", ] @@ -1134,7 +1133,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies 
= [ - "bitflags 2.8.0", + "bitflags 2.9.0", "crossterm_winapi", "futures-core", "mio", @@ -1215,9 +1214,9 @@ checksum = "817fa642fb0ee7fe42e95783e00e0969927b96091bdd4b9b1af082acd943913b" [[package]] name = "data-encoding" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" +checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" [[package]] name = "date_header" @@ -1309,9 +1308,9 @@ dependencies = [ [[package]] name = "either" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" dependencies = [ "serde", ] @@ -1330,9 +1329,9 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" @@ -1422,9 +1421,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.35" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" dependencies = [ "crc32fast", "miniz_oxide", @@ -1618,9 +1617,9 @@ checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "h2" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" 
dependencies = [ "atomic-waker", "bytes", @@ -1720,9 +1719,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.3" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad3d6d98c648ed628df039541a5577bee1a7c83e9e16fe3dbedeea4cdfeb971" +checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248" dependencies = [ "async-trait", "cfg-if", @@ -1744,9 +1743,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.3" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf287bde7b776e85d7188e6e5db7cf410a2f9531fe82817eb87feed034c8d14" +checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" dependencies = [ "cfg-if", "futures-util", @@ -2223,6 +2222,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.14" @@ -2335,9 +2343,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libfuzzer-sys" @@ -2384,9 +2392,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +checksum = 
"23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "lock_api" @@ -2400,9 +2408,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" [[package]] name = "loole" @@ -2570,9 +2578,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", "simd-adler32", @@ -2602,7 +2610,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if", "cfg_aliases", "libc", @@ -2844,9 +2852,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.9.2" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" +checksum = "2a604e53c24761286860eba4e2c8b23a0161526476b1de520139d69cdb85a6b5" dependencies = [ "log", "serde", @@ -3116,9 +3124,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", "prost-derive", @@ -3126,12 +3134,12 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.4" +version 
= "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.98", @@ -3139,20 +3147,20 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ "prost", ] [[package]] name = "pulldown-cmark" -version = "0.12.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "memchr", "pulldown-cmark-escape", "unicase", @@ -3225,9 +3233,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" +checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" dependencies = [ "cfg_aliases", "libc", @@ -3348,11 +3356,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -3466,15 +3474,14 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" 
[[package]] name = "ring" -version = "0.17.8" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" dependencies = [ "cc", "cfg-if", "getrandom 0.2.15", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] @@ -3482,7 +3489,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "assign", "js_int", @@ -3504,7 +3511,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3523,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "assign", @@ -3539,7 +3546,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "base64 0.22.1", @@ -3571,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3596,7 +3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "bytes", "http", @@ -3614,7 +3621,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3623,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3633,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3648,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "headers", "http", @@ -3673,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3689,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "futures-util", "js_int", @@ 
-3768,7 +3775,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys", @@ -3777,9 +3784,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.22" +version = "0.23.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" +checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ "aws-lc-rs", "log", @@ -3899,7 +3906,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation", "core-foundation-sys", "libc", @@ -4059,18 +4066,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", @@ -4092,9 +4099,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.138" +version = "1.0.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6" dependencies = [ 
"itoa", "memchr", @@ -4274,9 +4281,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" dependencies = [ "serde", ] @@ -4291,12 +4298,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "spki" version = "0.7.3" @@ -4334,9 +4335,9 @@ dependencies = [ [[package]] name = "string_cache_codegen" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244292f3441c89febe5b5bdfbb6863aeaf4f64da810ea3050fd927b27b8d92ce" +checksum = "c711928715f1fe0fe509c53b43e993a9a557babc2d0a3567d0a3006f1ac931a0" dependencies = [ "phf_generator", "phf_shared", @@ -4667,9 +4668,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", @@ -4734,9 +4735,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.23" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ "indexmap 2.7.1", "serde", @@ -4817,7 +4818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies 
= [ "async-compression", - "bitflags 2.8.0", + "bitflags 2.9.0", "bytes", "futures-core", "futures-util", @@ -4939,9 +4940,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "typewit" @@ -4984,9 +4985,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" +checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" [[package]] name = "unicode-segmentation" @@ -5071,9 +5072,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.13.1" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" +checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" dependencies = [ "getrandom 0.3.1", "serde", @@ -5511,9 +5512,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" +checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" dependencies = [ "memchr", ] @@ -5534,7 +5535,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -5613,18 +5614,18 @@ dependencies = [ [[package]] name = "zerofrom" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", @@ -5662,27 +5663,27 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.2.1" +version = "7.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" +version = "2.0.14+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 52695d89..e2fe7021 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 
+346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +rev = "b40e76528660f6a389eacd19a83ef9060644ee8f" features = [ "compat", "rand", @@ -509,7 +509,7 @@ version = "1.0.37" version = "1.0.89" [workspace.dependencies.bytesize] -version = "1.3.0" +version = "1.3.2" [workspace.dependencies.core_affinity] version = "0.8.1" diff --git a/flake.lock b/flake.lock index a7d80508..59fcbd8d 100644 --- a/flake.lock +++ b/flake.lock @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1740206139, - "narHash": "sha256-wWSv4KYhPKggKuJLzghfBs99pS3Kli9UBlyXVBzuIzc=", + "lastModified": 1740724364, + "narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=", "owner": "nix-community", "repo": "fenix", - "rev": "133a9eb59fb4ddac443ebe5ab2449d3940396533", + "rev": "edf7d9e431cda8782e729253835f178a356d3aab", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1740063075, - "narHash": "sha256-AfrCMPiXwgB0yxociq4no4NjCqGf/nRVhC3CLRoKqhA=", + "lastModified": 1740613216, + "narHash": "sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=", "owner": "axboe", "repo": "liburing", - "rev": "5c788d514b9ed6d1a3624150de8aa6db403c1c65", + "rev": "e1003e496e66f9b0ae06674869795edf772d5500", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1740019556, - "narHash": "sha256-vn285HxnnlHLWnv59Og7muqECNMS33mWLM14soFIv2g=", + "lastModified": 1740547748, + "narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "dad564433178067be1fbdfcce23b546254b6d641", + "rev": "3a05eebede89661660945da1f151959900903b6a", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1740077634, - "narHash": 
"sha256-KlYdDhon/hy91NutuBeN8e3qTKf3FXgsudWsjnHud68=", + "lastModified": 1740691488, + "narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "88fbdcd510e79ef3bcd81d6d9d4f07bdce84be8c", + "rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5", "type": "github" }, "original": { From af714d5778bf8b5ba4356821941e48bff55aefea Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:16:30 -0500 Subject: [PATCH 0697/1248] refactor+fix various issues with regs/logins and admin user commands Signed-off-by: June Clementine Strawberry --- src/admin/user/commands.rs | 35 ++++-- src/api/client/account.rs | 224 ++++++++++++++++++--------------- src/api/client/session.rs | 88 ++++++------- src/core/config/check.rs | 8 ++ src/service/admin/create.rs | 20 +-- src/service/admin/grant.rs | 129 ++++++++++++------- src/service/admin/mod.rs | 7 +- src/service/appservice/mod.rs | 2 +- src/service/emergency/mod.rs | 9 +- src/service/globals/mod.rs | 2 - src/service/resolver/actual.rs | 5 +- 11 files changed, 309 insertions(+), 220 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 8565f04a..35067304 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - PduBuilder, Result, debug_warn, error, info, is_equal_to, + PduBuilder, Result, debug, debug_warn, error, info, is_equal_to, utils::{self, ReadyExt}, warn, }; @@ -57,16 +57,16 @@ pub(super) async fn create_user( // Validate user id let user_id = parse_local_user_id(self.services, &username)?; - if self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain(format!( - "Userid {user_id} already exists" - ))); + if let Err(e) = user_id.validate_strict() { + if self.services.config.emergency_password.is_none() 
{ + return Ok(RoomMessageEventContent::text_plain(format!( + "Username {user_id} contains disallowed characters or spaces: {e}" + ))); + } } - if user_id.is_historical() { - return Ok(RoomMessageEventContent::text_plain(format!( - "User ID {user_id} does not conform to new Matrix identifier spec" - ))); + if self.services.users.exists(&user_id).await { + return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists"))); } let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -185,12 +185,12 @@ pub(super) async fn create_user( .is_ok_and(is_equal_to!(1)) { self.services.admin.make_user_admin(&user_id).await?; - warn!("Granting {user_id} admin privileges as the first user"); } + } else { + debug!("create_user admin command called without an admin room being available"); } - // Inhibit login does not work for guests Ok(RoomMessageEventContent::text_plain(format!( "Created user with user_id: {user_id} and password: `{password}`" ))) @@ -694,6 +694,19 @@ pub(super) async fn force_leave_room( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); + + if !self + .services + .rooms + .state_cache + .is_joined(&user_id, &room_id) + .await + { + return Ok(RoomMessageEventContent::notice_markdown(format!( + "{user_id} is not joined in the room" + ))); + } + leave_room(self.services, &user_id, &room_id, None).await?; Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/api/client/account.rs b/src/api/client/account.rs index b42f51f7..2b8209d4 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,7 +3,8 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Error, PduBuilder, Result, debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, + Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, + utils::ReadyExt, warn, }; use futures::{FutureExt, 
StreamExt}; use register::RegistrationKind; @@ -17,7 +18,6 @@ use ruma::{ request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, }, - error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ @@ -60,6 +60,14 @@ pub(crate) async fn get_register_available_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + if services + .globals + .forbidden_usernames() + .is_match(&body.username) + { + return Err!(Request(Forbidden("Username is forbidden"))); + } + // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { body.username.clone() @@ -68,30 +76,45 @@ pub(crate) async fn get_register_available_route( }; // Validate user id - let user_id = UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + match UserId::parse_with_server_name(&body_username, services.globals.server_name()) { + | Ok(user_id) => { + if let Err(e) = user_id.validate_strict() { + // unless the username is from the broken matrix appservice IRC bridge, we + // should follow synapse's behaviour on not allowing things like spaces + // and UTF-8 characters in usernames + if !is_matrix_appservice_irc { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} contains disallowed characters or spaces: \ + {e}" + )))); + } + } + + user_id + }, + | Err(e) => { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} is not valid: {e}" + )))); + }, + }; // Check if username is creative enough if services.users.exists(&user_id).await { - return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); + return Err!(Request(UserInUse("User ID is not 
available."))); } - if services - .globals - .forbidden_usernames() - .is_match(user_id.localpart()) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + if let Some(ref info) = body.appservice_info { + if !info.is_user_match(&user_id) { + return Err!(Request(Exclusive("Username is not in an appservice namespace."))); + } + }; + + if services.appservice.is_exclusive_user_id(&user_id).await { + return Err!(Request(Exclusive("Username is reserved by an appservice."))); } - // TODO add check for appservice namespaces - - // If no if check is true we have an username that's available to be used. Ok(get_username_availability::v3::Response { available: true }) } @@ -119,16 +142,27 @@ pub(crate) async fn register_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - if !services.globals.allow_registration() && body.appservice_info.is_none() { - info!( - "Registration disabled and request not from known appservice, rejecting \ - registration attempt for username \"{}\"", - body.username.as_deref().unwrap_or("") - ); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration has been disabled.")); - } - let is_guest = body.kind == RegistrationKind::Guest; + let emergency_mode_enabled = services.config.emergency_password.is_some(); + + if !services.globals.allow_registration() && body.appservice_info.is_none() { + match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { + | (Some(username), Some(device_display_name)) => { + info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + }, + | (Some(username), _) => { + info!(%is_guest, user = %username, "Rejecting registration attempt as registration is disabled"); + }, + | (_, Some(device_display_name)) => { + info!(%is_guest, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + }, + | (None, _) => { + info!(%is_guest, 
"Rejecting registration attempt as registration is disabled"); + }, + }; + + return Err!(Request(Forbidden("Registration has been disabled."))); + } if is_guest && (!services.globals.allow_guest_registration() @@ -140,10 +174,7 @@ pub(crate) async fn register_route( rejecting guest registration attempt, initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest( - ErrorKind::GuestAccessForbidden, - "Guest registration is disabled.", - )); + return Err!(Request(GuestAccessForbidden("Guest registration is disabled."))); } // forbid guests from registering if there is not a real admin user yet. give @@ -154,13 +185,10 @@ pub(crate) async fn register_route( rejecting registration. Guest's initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Registration temporarily disabled.", - )); + return Err!(Request(Forbidden("Registration is temporarily disabled."))); } - let user_id = match (&body.username, is_guest) { + let user_id = match (body.username.as_ref(), is_guest) { | (Some(username), false) => { // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue let is_matrix_appservice_irc = @@ -170,6 +198,12 @@ pub(crate) async fn register_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + if services.globals.forbidden_usernames().is_match(username) + && !emergency_mode_enabled + { + return Err!(Request(Forbidden("Username is forbidden"))); + } + // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { username.clone() @@ -177,31 +211,34 @@ pub(crate) async fn register_route( username.to_lowercase() }; - let proposed_user_id = - UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || 
is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; + let proposed_user_id = match UserId::parse_with_server_name( + &body_username, + services.globals.server_name(), + ) { + | Ok(user_id) => { + if let Err(e) = user_id.validate_strict() { + // unless the username is from the broken matrix appservice IRC bridge, or + // we are in emergency mode, we should follow synapse's behaviour on + // not allowing things like spaces and UTF-8 characters in usernames + if !is_matrix_appservice_irc && !emergency_mode_enabled { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} contains disallowed characters or \ + spaces: {e}" + )))); + } + } + + user_id + }, + | Err(e) => { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} is not valid: {e}" + )))); + }, + }; if services.users.exists(&proposed_user_id).await { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } - - if services - .globals - .forbidden_usernames() - .is_match(proposed_user_id.localpart()) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + return Err!(Request(UserInUse("User ID is not available."))); } proposed_user_id @@ -221,21 +258,18 @@ pub(crate) async fn register_route( if body.body.login_type == Some(LoginType::ApplicationService) { match body.appservice_info { | Some(ref info) => - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", - )); + if !info.is_user_match(&user_id) && !emergency_mode_enabled { + return Err!(Request(Exclusive( + "Username is not in an appservice namespace." 
+ ))); }, | _ => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + return Err!(Request(MissingToken("Missing appservice token."))); }, } - } else if services.appservice.is_exclusive_user_id(&user_id).await { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); + } else if services.appservice.is_exclusive_user_id(&user_id).await && !emergency_mode_enabled + { + return Err!(Request(Exclusive("Username is reserved by an appservice."))); } // UIAA @@ -271,7 +305,7 @@ pub(crate) async fn register_route( .uiaa .try_auth( &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), + .unwrap(), "".into(), auth, &uiaainfo, @@ -287,7 +321,7 @@ pub(crate) async fn register_route( uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services.uiaa.create( &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), + .unwrap(), "".into(), &uiaainfo, &json, @@ -295,7 +329,7 @@ pub(crate) async fn register_route( return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -407,7 +441,7 @@ pub(crate) async fn register_route( // log in conduit admin channel if a guest registered if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() { - info!("New guest user \"{user_id}\" registered on this server."); + debug_info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { if services.server.config.admin_room_notices { @@ -436,7 +470,8 @@ pub(crate) async fn register_route( } // If this is the first real user, grant them admin privileges except for guest - // users Note: the server user, @conduit:servername, is generated first + // users + // Note: the server user is generated first if !is_guest { if let 
Ok(admin_room) = services.admin.get_admin_room().await { if services @@ -541,8 +576,8 @@ pub(crate) async fn change_password_route( let sender_user = body .sender_user .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; + let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -566,16 +601,16 @@ pub(crate) async fn change_password_route( // Success! }, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -589,7 +624,7 @@ pub(crate) async fn change_password_route( services .users .all_device_ids(sender_user) - .ready_filter(|id| id != sender_device) + .ready_filter(|id| *id != sender_device) .for_each(|id| services.users.remove_device(sender_user, id)) .await; } @@ -651,8 +686,8 @@ pub(crate) async fn deactivate_route( let sender_user = body .sender_user .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; + let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -675,16 +710,16 @@ pub(crate) async fn deactivate_route( // Success! 
}, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -743,10 +778,7 @@ pub(crate) async fn third_party_route( pub(crate) async fn request_3pid_management_token_via_email_route( _body: Ruma, ) -> Result { - Err(Error::BadRequest( - ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", - )) + Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) } /// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken` @@ -759,10 +791,7 @@ pub(crate) async fn request_3pid_management_token_via_email_route( pub(crate) async fn request_3pid_management_token_via_msisdn_route( _body: Ruma, ) -> Result { - Err(Error::BadRequest( - ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", - )) + Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) } /// # `GET /_matrix/client/v1/register/m.login.registration_token/validity` @@ -776,10 +805,7 @@ pub(crate) async fn check_registration_token_validity( body: Ruma, ) -> Result { let Some(reg_token) = services.globals.registration_token.clone() else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server does not allow token registration.", - )); + return Err!(Request(Forbidden("Server does not allow token registration"))); }; Ok(check_registration_token_validity::v1::Response { valid: reg_token == body.token }) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 5c0ab47d..6db761af 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,12 +2,11 @@ use std::time::Duration; use axum::extract::State; use 
axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err, info, utils::ReadyExt, warn}; +use conduwuit::{Err, debug, err, info, utils::ReadyExt}; use futures::StreamExt; use ruma::{ - OwnedUserId, UserId, + UserId, api::client::{ - error::ErrorKind, session::{ get_login_token, get_login_types::{ @@ -67,6 +66,8 @@ pub(crate) async fn login_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { + let emergency_mode_enabled = services.config.emergency_password.is_some(); + // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -78,20 +79,22 @@ pub(crate) async fn login_route( .. }) => { debug!("Got password login type"); - let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = - identifier - { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) - } else if let Some(user) = user { - OwnedUserId::parse(user) - } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err!(Request(Forbidden("Bad login type."))); - } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name(user_id, &services.config.server_name) + } else if let Some(user) = user { + UserId::parse_with_server_name(user, &services.config.server_name) + } else { + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); + } + .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + + assert!( + services.globals.user_is_local(&user_id), + "User ID does not belong to this homeserver" + ); let hash = services .users @@ -124,46 +127,40 @@ pub(crate) async fn login_route( debug!("Got appservice login type"); let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name( - 
user_id.to_lowercase(), - services.globals.server_name(), - ) + UserId::parse_with_server_name(user_id, &services.config.server_name) } else if let Some(user) = user { - OwnedUserId::parse(user) + UserId::parse_with_server_name(user, &services.config.server_name) } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); } - .map_err(|e| { - warn!("Failed to parse username from appservice logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + + assert!( + services.globals.user_is_local(&user_id), + "User ID does not belong to this homeserver" + ); match body.appservice_info { | Some(ref info) => - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", - )); + if !info.is_user_match(&user_id) && !emergency_mode_enabled { + return Err!(Request(Exclusive( + "Username is not in an appservice namespace." + ))); }, | _ => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + return Err!(Request(MissingToken("Missing appservice token."))); }, } user_id }, | _ => { - warn!("Unsupported or unknown login type: {:?}", &body.login_info); - debug!("JSON body: {:?}", &body.json_body); - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Unsupported or unknown login type.", - )); + debug!("/login json_body: {:?}", &body.json_body); + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); }, }; @@ -216,9 +213,6 @@ pub(crate) async fn login_route( info!("{user_id} logged in"); - // home_server is deprecated but apparently must still be sent despite it being - // deprecated over 6 years ago. 
initially i thought this macro was unnecessary, - // but ruma uses this same macro for the same reason so... #[allow(deprecated)] Ok(login::v3::Response { user_id, @@ -226,7 +220,7 @@ pub(crate) async fn login_route( device_id, well_known: client_discovery_info, expires_in: None, - home_server: Some(services.globals.server_name().to_owned()), + home_server: Some(services.config.server_name.clone()), refresh_token: None, }) } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 488f7f94..98223be4 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -126,6 +126,14 @@ pub fn check(config: &Config) -> Result { )); } + if config.emergency_password == Some(String::new()) { + return Err!(Config( + "emergency_password", + "Emergency password was set to an empty string, this is not valid. Unset \ + emergency_password to disable it or set it to a real password." + )); + } + // check if the user specified a registration token as `""` if config.registration_token == Some(String::new()) { return Err!(Config( diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 7f71665a..4de37092 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -21,11 +21,11 @@ use crate::Services; /// Create the admin room. /// -/// Users in this room are considered admins by conduit, and the room can be +/// Users in this room are considered admins by conduwuit, and the room can be /// used to issue admin commands by talking to the server user inside it. 
-pub async fn create_admin_room(services: &Services) -> Result<()> { +pub async fn create_admin_room(services: &Services) -> Result { let room_id = RoomId::new(services.globals.server_name()); - let room_version = &services.server.config.default_room_version; + let room_version = &services.config.default_room_version; let _short_id = services .rooms @@ -36,14 +36,14 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { let state_lock = services.rooms.state.mutex.lock(&room_id).await; // Create a user for the server - let server_user = &services.globals.server_user; + let server_user = services.globals.server_user.as_ref(); services.users.create(server_user, None)?; let create_content = { use RoomVersionId::*; match room_version { | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => - RoomCreateEventContent::new_v1(server_user.clone()), + RoomCreateEventContent::new_v1(server_user.into()), | _ => RoomCreateEventContent::new_v11(), } }; @@ -71,7 +71,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .timeline .build_and_append_pdu( PduBuilder::state( - server_user.to_string(), + String::from(server_user), &RoomMemberEventContent::new(MembershipState::Join), ), server_user, @@ -81,7 +81,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .await?; // 3. Power levels - let users = BTreeMap::from_iter([(server_user.clone(), 100.into())]); + let users = BTreeMap::from_iter([(server_user.into(), 69420.into())]); services .rooms @@ -140,7 +140,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .await?; // 5. 
Events implied by name and topic - let room_name = format!("{} Admin Room", services.globals.server_name()); + let room_name = format!("{} Admin Room", services.config.server_name); services .rooms .timeline @@ -157,7 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .timeline .build_and_append_pdu( PduBuilder::state(String::new(), &RoomTopicEventContent { - topic: format!("Manage {}", services.globals.server_name()), + topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name), }), server_user, &room_id, @@ -187,7 +187,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .alias .set_alias(alias, &room_id, server_user)?; - // 7. (ad-hoc) Disable room previews for everyone by default + // 7. (ad-hoc) Disable room URL previews for everyone by default services .rooms .timeline diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 358ea267..5173987a 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,10 +1,10 @@ use std::collections::BTreeMap; -use conduwuit::{Result, error, implement}; +use conduwuit::{Err, Result, debug_info, debug_warn, error, implement}; use ruma::{ RoomId, UserId, events::{ - RoomAccountDataEventType, + RoomAccountDataEventType, StateEventType, room::{ member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, @@ -20,55 +20,98 @@ use crate::pdu::PduBuilder; /// /// This is equivalent to granting server admin privileges. 
#[implement(super::Service)] -pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { +pub async fn make_user_admin(&self, user_id: &UserId) -> Result { let Ok(room_id) = self.get_admin_room().await else { + debug_warn!( + "make_user_admin was called without an admin room being available or created" + ); return Ok(()); }; let state_lock = self.services.state.mutex.lock(&room_id).await; + if self.services.state_cache.is_joined(user_id, &room_id).await { + return Err!(debug_warn!("User is already joined in the admin room")); + } + if self + .services + .state_cache + .is_invited(user_id, &room_id) + .await + { + return Err!(debug_warn!("User is already pending an invitation to the admin room")); + } + // Use the server user to grant the new admin's power level - let server_user = &self.services.globals.server_user; + let server_user = self.services.globals.server_user.as_ref(); - // Invite and join the real user - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - server_user, + // if this is our local user, just forcefully join them in the room. otherwise, + // invite the remote user. 
+ if self.services.globals.user_is_local(user_id) { + debug_info!("Inviting local user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::from(user_id), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + &room_id, + &state_lock, + ) + .await?; + + debug_info!("Force joining local user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::from(user_id), + &RoomMemberEventContent::new(MembershipState::Join), + ), + user_id, + &room_id, + &state_lock, + ) + .await?; + } else { + debug_info!("Inviting remote user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + &room_id, + &state_lock, + ) + .await?; + } + + // Set power levels + let mut room_power_levels = self + .services + .state_accessor + .room_state_get_content::( &room_id, - &state_lock, + &StateEventType::RoomPowerLevels, + "", ) - .await?; - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Join), - ), - user_id, - &room_id, - &state_lock, - ) - .await?; + .await + .unwrap_or_default(); - // Set power level - let users = BTreeMap::from_iter([ - (server_user.clone(), 100.into()), - (user_id.to_owned(), 100.into()), - ]); + room_power_levels + .users + .insert(server_user.into(), 69420.into()); + room_power_levels.users.insert(user_id.into(), 100.into()); self.services .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { - users, - ..Default::default() - }), + PduBuilder::state(String::new(), &room_power_levels), server_user, &room_id, &state_lock, @@ -76,15 +119,17 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { .await?; // Set 
room tag - let room_tag = &self.services.server.config.admin_room_tag; + let room_tag = self.services.server.config.admin_room_tag.as_str(); if !room_tag.is_empty() { if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag).await { - error!(?room_id, ?user_id, ?room_tag, ?e, "Failed to set tag for admin grant"); + error!(?room_id, ?user_id, ?room_tag, "Failed to set tag for admin grant: {e}"); } } if self.services.server.config.admin_room_notices { - let welcome_message = String::from("## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`"); + let welcome_message = String::from( + "## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. 
conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`", + ); // Send welcome message self.services @@ -102,7 +147,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { } #[implement(super::Service)] -async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result<()> { +async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result { let mut event = self .services .account_data @@ -125,7 +170,5 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R RoomAccountDataEventType::Tag, &serde_json::to_value(event)?, ) - .await?; - - Ok(()) + .await } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 4622f10e..b3466711 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -40,6 +40,7 @@ struct Services { timeline: Dep, state: Dep, state_cache: Dep, + state_accessor: Dep, account_data: Dep, services: StdRwLock>>, } @@ -85,6 +86,8 @@ impl crate::Service for Service { timeline: args.depend::("rooms::timeline"), state: args.depend::("rooms::state"), state_cache: args.depend::("rooms::state_cache"), + state_accessor: args + .depend::("rooms::state_accessor"), account_data: args.depend::("account_data"), 
services: None.into(), }, @@ -357,8 +360,8 @@ impl Service { } // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let emergency_password_set = self.services.globals.emergency_password().is_some(); + // the administrator can execute commands as the server user + let emergency_password_set = self.services.server.config.emergency_password.is_some(); let from_server = pdu.sender == *server_user && !emergency_password_set; if from_server && self.is_admin_room(&pdu.room_id).await { return false; diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 5aba0018..50a60033 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -90,7 +90,7 @@ impl Service { .write() .await .remove(appservice_id) - .ok_or(err!("Appservice not found"))?; + .ok_or_else(|| err!("Appservice not found"))?; // remove the appservice from the database self.db.id_appserviceregistrations.del(appservice_id); diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 47a309a5..3a61f710 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -9,7 +9,7 @@ use ruma::{ push::Ruleset, }; -use crate::{Dep, account_data, globals, users}; +use crate::{Dep, account_data, config, globals, users}; pub struct Service { services: Services, @@ -17,6 +17,7 @@ pub struct Service { struct Services { account_data: Dep, + config: Dep, globals: Dep, users: Dep, } @@ -27,6 +28,8 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { account_data: args.depend::("account_data"), + config: args.depend::("config"), + globals: args.depend::("globals"), users: args.depend::("users"), }, @@ -54,9 +57,9 @@ impl Service { self.services .users - .set_password(server_user, self.services.globals.emergency_password().as_deref())?; + .set_password(server_user, self.services.config.emergency_password.as_deref())?; - let (ruleset, pwd_set) = 
match self.services.globals.emergency_password() { + let (ruleset, pwd_set) = match self.services.config.emergency_password { | Some(_) => (Ruleset::server_default(server_user), true), | None => (Ruleset::new(), false), }; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 16b3ef3c..74f83228 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -153,8 +153,6 @@ impl Service { pub fn notification_push_path(&self) -> &String { &self.server.config.notification_push_path } - pub fn emergency_password(&self) -> &Option { &self.server.config.emergency_password } - pub fn url_preview_domain_contains_allowlist(&self) -> &Vec { &self.server.config.url_preview_domain_contains_allowlist } diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 8860d0a0..b037cf77 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -363,7 +363,7 @@ impl super::Service { let hostname = hostname.trim_end_matches('.'); match self.resolver.resolver.srv_lookup(hostname).await { | Err(e) => Self::handle_resolve_error(&e, hostname)?, - | Ok(result) => + | Ok(result) => { return Ok(result.iter().next().map(|result| { FedDest::Named( result.target().to_string().trim_end_matches('.').to_owned(), @@ -372,7 +372,8 @@ impl super::Service { .try_into() .unwrap_or_else(|_| FedDest::default_port()), ) - })), + })); + }, } } From 0d741bbd46cd1c2a86321a4a68da3167c46d53e3 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 3 Mar 2025 00:15:12 -0500 Subject: [PATCH 0698/1248] remove nix run ci test as its covered by complement Signed-off-by: June Clementine Strawberry --- engage.toml | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/engage.toml b/engage.toml index c1a2be1f..71366532 100644 --- a/engage.toml +++ b/engage.toml @@ -18,12 +18,12 @@ script = "direnv --version" [[task]] name = "rustc" group = "versions" -script = "rustc --version" +script 
= "rustc --version -v" [[task]] name = "cargo" group = "versions" -script = "cargo --version" +script = "cargo --version -v" [[task]] name = "cargo-fmt" @@ -60,11 +60,6 @@ name = "markdownlint" group = "versions" script = "markdownlint --version" -[[task]] -name = "dpkg" -group = "versions" -script = "dpkg --version" - [[task]] name = "cargo-audit" group = "security" @@ -228,22 +223,3 @@ depends = ["cargo/default"] script = """ git diff --exit-code conduwuit-example.toml """ - -# Ensure that the flake's default output can build and run without crashing -# -# This is a dynamically-linked jemalloc build, which is a case not covered by -# our other tests. We've had linking problems in the past with dynamic -# jemalloc builds that usually show up as an immediate segfault or "invalid free" -[[task]] -name = "nix-default" -group = "tests" -script = """ -env DIRENV_DEVSHELL=dynamic \ - CARGO_PROFILE="test" \ - direnv exec . \ - bin/nix-build-and-cache just .#default-test -env DIRENV_DEVSHELL=dynamic \ - CARGO_PROFILE="test" \ - direnv exec . 
\ - nix run -L .#default-test -- --help && nix run -L .#default-test -- --version -""" From df72384c16aa77ccedf532888b0799a3edc2d8b0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 3 Mar 2025 01:05:43 -0500 Subject: [PATCH 0699/1248] delete snappy, bump rust-rocksdb, bump rocksdb to v9.10.0 again Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- deps/rust-rocksdb/Cargo.toml | 3 +-- flake.lock | 8 ++++---- flake.nix | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e632b504..ec531994 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3728,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index c6af428d..f9069fc1 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -14,7 +14,6 @@ default = ["lz4", "zstd", "zlib", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] valgrind = ["rust-rocksdb/valgrind"] -snappy = ["rust-rocksdb/snappy"] lz4 = ["rust-rocksdb/lz4"] zstd = ["rust-rocksdb/zstd"] zlib = ["rust-rocksdb/zlib"] @@ -27,7 
+26,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +rev = "2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index 59fcbd8d..ba7fdcff 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1739735789, - "narHash": "sha256-BIzuZS0TV4gRnciP4ieW5J3Hql986iedM5dHQfK6z68=", + "lastModified": 1739735940, + "narHash": "sha256-9AqKOWsYXy0sU2C+kB+3NLCDMZ2VsjfbHqvSiydUlcs=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "34e401fd4392dd3268e042f1e40dffd064b9a7ff", + "rev": "f8ad8cd72fd7e527171d35fa8dbca9a073b5b26c", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.10.0", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 04dee681..6702111f 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 1ecd02738992f6fd75ea627e60a2ebf1133f4561 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 3 Mar 2025 01:06:04 -0500 Subject: [PATCH 0700/1248] always run checks when building in nix (doCheck true) Signed-off-by: June Clementine Strawberry --- nix/pkgs/main/default.nix | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 4150b389..5dfb32ec 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ 
-162,18 +162,12 @@ commonAttrs = { ]; }; - # This is redundant with CI - doCheck = false; + doCheck = true; - cargoTestCommand = "cargo test --locked "; cargoExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - cargoTestExtraArgs = "--no-default-features --locked " - + lib.optionalString - (features'' != []) - "--features " + (builtins.concatStringsSep "," features''); dontStrip = profile == "dev" || profile == "test"; dontPatchELF = profile == "dev" || profile == "test"; @@ -209,18 +203,12 @@ craneLib.buildPackage ( commonAttrs // { env = buildDepsOnlyEnv; }); - # This is redundant with CI - doCheck = false; + doCheck = true; - cargoTestCommand = "cargo test --locked "; cargoExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - cargoTestExtraArgs = "--no-default-features --locked " - + lib.optionalString - (features'' != []) - "--features " + (builtins.concatStringsSep "," features''); env = buildPackageEnv; From 7c17163730fcd0f43132cce82cc28b6793ae662a Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 4 Mar 2025 23:35:21 -0500 Subject: [PATCH 0701/1248] switch to self-hosted ci runner, remove sudo usages Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 132 ++-------------------------- .github/workflows/documentation.yml | 64 +------------- 2 files changed, 9 insertions(+), 187 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 82ffc6b6..c0425873 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,8 +45,8 @@ env: # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps NIX_CONFIG: | show-trace = true - extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org 
https://aseipp-nix-cache.freetls.fastly.net - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= + extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org + extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= experimental-features = nix-command flakes extra-experimental-features = nix-command flakes accept-flake-config = true @@ -59,7 +59,7 @@ permissions: {} jobs: tests: name: Test - runs-on: ubuntu-24.04 + runs-on: self-hosted steps: - name: Setup SSH web publish env: @@ -93,19 +93,6 @@ jobs: echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - - name: Install liburing - run: | - sudo apt install liburing-dev -y - - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell 
/opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -123,58 +110,9 @@ jobs: exit 1 fi - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --inputs-from . 
nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop .#all-features --command true @@ -267,22 +205,13 @@ jobs: build: name: Build - runs-on: ubuntu-24.04 + runs-on: self-hosted strategy: matrix: include: - target: aarch64-linux-musl - target: x86_64-linux-musl steps: - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -316,58 +245,9 @@ jobs: echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 
- # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop .#all-features --command true --impure @@ -622,7 +502,7 @@ jobs: variables: outputs: github_repository: ${{ steps.var.outputs.github_repository }} - runs-on: "ubuntu-latest" + runs-on: self-hosted steps: - name: Setting global variables uses: actions/github-script@v7 @@ -632,7 +512,7 @@ jobs: core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase()) docker: name: Docker publish - runs-on: ubuntu-24.04 + runs-on: self-hosted needs: [build, variables, tests] permissions: packages: write diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index fadc7b3f..88e7bbe1 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -24,8 +24,8 @@ env: # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps NIX_CONFIG: | show-trace = true - extra-substituters = extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= 
conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= + extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org + extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= experimental-features = nix-command flakes extra-experimental-features = nix-command flakes accept-flake-config = true @@ -41,7 +41,7 @@ permissions: {} jobs: docs: name: Documentation and GitHub Pages - runs-on: ubuntu-24.04 + runs-on: self-hosted permissions: pages: write @@ -52,15 +52,6 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} steps: - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -70,58 +61,9 @@ jobs: if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') uses: actions/configure-pages@v5 - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we 
do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --inputs-from . 
nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop --command true From 35981d5aef8785c132d2e2a166cfcde1cd24169e Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:05:42 -0500 Subject: [PATCH 0702/1248] automatically forget rooms on leaving Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_cache/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 02ffa0d1..f406eb69 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -267,6 +267,10 @@ impl Service { }, | MembershipState::Leave | MembershipState::Ban => { self.mark_as_left(user_id, room_id); + + if self.services.globals.user_is_local(user_id) { + self.forget(room_id, user_id); + } }, | _ => {}, } From 97208d6081da92f8b5c732aa6b3bf06997ad4a16 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:06:10 -0500 Subject: [PATCH 0703/1248] add more safety checks before allowing a room marked as forgotten Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 40 +++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 0b9c0c69..940c8639 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -11,12 +11,12 @@ use axum_client_ip::InsecureClientIp; use conduwuit::{ Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, pdu::{PduBuilder, gen_event_id_canonical_json}, - result::FlatOk, + result::{FlatOk, NotFound}, state_res, trace, utils::{self, IterStream, ReadyExt, shuffle}, warn, }; -use futures::{FutureExt, StreamExt, TryFutureExt, join}; +use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, 
RoomId, RoomVersionId, ServerName, UserId, @@ -717,21 +717,37 @@ pub(crate) async fn forget_room_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user(); + let user_id = body.sender_user(); + let room_id = &body.room_id; - if services - .rooms - .state_cache - .is_joined(sender_user, &body.room_id) - .await - { + let joined = services.rooms.state_cache.is_joined(user_id, room_id); + let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); + let left = services.rooms.state_cache.is_left(user_id, room_id); + let invited = services.rooms.state_cache.is_invited(user_id, room_id); + + let (joined, knocked, left, invited) = join4(joined, knocked, left, invited).await; + + if joined || knocked || invited { return Err!(Request(Unknown("You must leave the room before forgetting it"))); } - services + let membership = services .rooms - .state_cache - .forget(&body.room_id, sender_user); + .state_accessor + .get_member(room_id, user_id) + .await; + + if membership.is_not_found() { + return Err!(Request(Unknown("No membership event was found, room was never joined"))); + } + + if left + || membership.is_ok_and(|member| { + member.membership == MembershipState::Leave + || member.membership == MembershipState::Ban + }) { + services.rooms.state_cache.forget(room_id, user_id); + } Ok(forget_room::v3::Response::new()) } From 408f5bd30cb461cec9472a51b87f0bb1ed6b7381 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:06:31 -0500 Subject: [PATCH 0704/1248] add val_size_hints on membership cfs (todo remove these anyways) Signed-off-by: June Clementine Strawberry --- src/database/maps.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/database/maps.rs b/src/database/maps.rs index b060ab8d..9af45159 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -181,6 +181,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomuserid_invitecount", + val_size_hint: Some(8), 
..descriptor::RANDOM_SMALL }, Descriptor { @@ -193,10 +194,12 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomuserid_leftcount", + val_size_hint: Some(8), ..descriptor::RANDOM }, Descriptor { name: "roomuserid_knockedcount", + val_size_hint: Some(8), ..descriptor::RANDOM_SMALL }, Descriptor { From 2c1ec3fb02a823515697b159e26d5464ebe29937 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 21:31:49 -0500 Subject: [PATCH 0705/1248] allow both lowercase and uppercase usernames to login Signed-off-by: June Clementine Strawberry --- src/api/client/session.rs | 54 +++++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 6db761af..ab67ee18 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -3,7 +3,7 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{Err, debug, err, info, utils::ReadyExt}; -use futures::StreamExt; +use futures::{StreamExt, TryFutureExt}; use ruma::{ UserId, api::client::{ @@ -86,29 +86,40 @@ pub(crate) async fn login_route( UserId::parse_with_server_name(user, &services.config.server_name) } else { return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") ))); } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + let lowercased_user_id = UserId::parse_with_server_name( + user_id.localpart().to_lowercase(), + &services.config.server_name, + )?; + assert!( services.globals.user_is_local(&user_id), "User ID does not belong to this homeserver" ); + assert!( + services.globals.user_is_local(&lowercased_user_id), + "User ID does not belong to this homeserver" + ); let hash = services .users .password_hash(&user_id) + .or_else(|_| 
services.users.password_hash(&lowercased_user_id)) .await + .inspect_err(|e| debug!("{e}")) .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; if hash.is_empty() { return Err!(Request(UserDeactivated("The user has been deactivated"))); } - if hash::verify_password(password, &hash).is_err() { - return Err!(Request(Forbidden("Wrong username or password."))); - } + hash::verify_password(password, &hash) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; user_id }, @@ -125,6 +136,11 @@ pub(crate) async fn login_route( user, }) => { debug!("Got appservice login type"); + + let Some(ref info) = body.appservice_info else { + return Err!(Request(MissingToken("Missing appservice token."))); + }; + let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { UserId::parse_with_server_name(user_id, &services.config.server_name) @@ -132,26 +148,30 @@ pub(crate) async fn login_route( UserId::parse_with_server_name(user, &services.config.server_name) } else { return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") ))); } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + let lowercased_user_id = UserId::parse_with_server_name( + user_id.localpart().to_lowercase(), + &services.config.server_name, + )?; + assert!( services.globals.user_is_local(&user_id), "User ID does not belong to this homeserver" ); + assert!( + services.globals.user_is_local(&lowercased_user_id), + "User ID does not belong to this homeserver" + ); - match body.appservice_info { - | Some(ref info) => - if !info.is_user_match(&user_id) && !emergency_mode_enabled { - return Err!(Request(Exclusive( - "Username is not in an appservice namespace." 
- ))); - }, - | _ => { - return Err!(Request(MissingToken("Missing appservice token."))); - }, + if !info.is_user_match(&user_id) + && !info.is_user_match(&lowercased_user_id) + && !emergency_mode_enabled + { + return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } user_id @@ -159,7 +179,7 @@ pub(crate) async fn login_route( | _ => { debug!("/login json_body: {:?}", &body.json_body); return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Invalid or unsupported login type") ))); }, }; From c10500f8aebcd52a219bdba4f2114b03d9474565 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:14:24 -0500 Subject: [PATCH 0706/1248] bump rust-rocksdb and ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 60 +++++++++---------------------- Cargo.toml | 3 +- deps/rust-rocksdb/Cargo.toml | 5 +-- flake.lock | 8 ++--- flake.nix | 2 +- src/api/router/auth.rs | 3 +- src/service/federation/execute.rs | 3 +- 7 files changed, 28 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec531994..d51bb966 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3489,7 +3489,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "assign", "js_int", @@ -3502,16 +3502,14 @@ dependencies = [ "ruma-identifiers-validation", "ruma-identity-service-api", "ruma-push-gateway-api", - "ruma-server-util", "ruma-signatures", - "ruma-state-res", "web-time 1.1.0", ] [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3523,7 +3521,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "assign", @@ -3546,7 +3544,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3576,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3603,10 +3601,12 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "bytes", + "headers", "http", + 
"http-auth", "httparse", "js_int", "memchr", @@ -3616,12 +3616,14 @@ dependencies = [ "ruma-events", "serde", "serde_json", + "thiserror 2.0.11", + "tracing", ] [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3632,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3642,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3657,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3664,23 +3666,10 @@ dependencies = [ "serde_json", ] -[[package]] -name = "ruma-server-util" -version = "0.3.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" -dependencies = [ - "headers", - "http", - "http-auth", - "ruma-common", - "thiserror 2.0.11", - "tracing", -] - [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3693,25 +3682,10 @@ dependencies = [ "thiserror 2.0.11", ] -[[package]] -name = "ruma-state-res" -version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" -dependencies = [ - "futures-util", - "js_int", - "ruma-common", - "ruma-events", - "serde", - "serde_json", - "thiserror 2.0.11", - "tracing", -] - [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index e2fe7021..7f08a21a 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "b40e76528660f6a389eacd19a83ef9060644ee8f" +rev = "bb42118bd85e731b652a6110896b6945085bf944" features = [ "compat", "rand", @@ -355,7 +355,6 @@ features = [ "federation-api", "markdown", "push-gateway-api-c", - "server-util", "unstable-exhaustive-types", "ring-compat", "compat-upload-signatures", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index f9069fc1..61bd2333 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -2,7 +2,7 @@ name = "rust-rocksdb-uwu" categories.workspace = true description = "dylib wrapper for rust-rocksdb" -edition = "2021" +edition = "2024" keywords.workspace = true license.workspace = true readme.workspace = true @@ -13,6 +13,7 @@ version = "0.0.1" default = ["lz4", "zstd", "zlib", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] +numa = ["rust-rocksdb/numa"] # unused by rocksdb for now valgrind = ["rust-rocksdb/valgrind"] lz4 = ["rust-rocksdb/lz4"] zstd = ["rust-rocksdb/zstd"] @@ -26,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +rev = "513133a3dc24b667f32933aa3247c6ec71a958f3" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index ba7fdcff..a1bd423f 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1739735940, - "narHash": "sha256-9AqKOWsYXy0sU2C+kB+3NLCDMZ2VsjfbHqvSiydUlcs=", + "lastModified": 1741234703, + "narHash": "sha256-sT5g/RM9vrwY6AmjSfl4RoJPGtcJCkZCsxiX3PFJgKQ=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "f8ad8cd72fd7e527171d35fa8dbca9a073b5b26c", + "rev": "185593ce4534091e57025e9f3571dbf681c04631", "type": 
"github" }, "original": { "owner": "girlbossceo", - "ref": "v9.10.0", + "ref": "v9.9.3", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 6702111f..04dee681 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 92b75cfa..5cd7b831 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -17,9 +17,8 @@ use ruma::{ }, voip::get_turn_server_info, }, - federation::openid::get_openid_userinfo, + federation::{authentication::XMatrix, openid::get_openid_userinfo}, }, - server_util::authorization::XMatrix, }; use service::{ Services, diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index d254486f..63f2ccfb 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -12,10 +12,9 @@ use ruma::{ CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, api::{ EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - client::error::Error as RumaError, + client::error::Error as RumaError, federation::authentication::XMatrix, }, serde::Base64, - server_util::authorization::XMatrix, }; use crate::resolver::actual::ActualDest; From 17b625a85b908d4c2cb3df308c2337be6e571ce2 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:14:49 -0500 Subject: [PATCH 0707/1248] reject device keys if they dont match user ID or device ID or are missing fields Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 13 +++++++++++++ 1 file changed, 
13 insertions(+) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 6f20153b..8a7eab7e 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -48,6 +48,19 @@ pub(crate) async fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { + let deser_device_keys = device_keys.deserialize()?; + + if deser_device_keys.user_id != sender_user { + return Err!(Request(Unknown( + "User ID in keys uploaded does not match your own user ID" + ))); + } + if deser_device_keys.device_id != sender_device { + return Err!(Request(Unknown( + "Device ID in keys uploaded does not match your own device ID" + ))); + } + // TODO: merge this and the existing event? // This check is needed to assure that signatures are kept if services From f4c51cd405f1a0695b16c085655eb0180637fe2d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:18:28 -0500 Subject: [PATCH 0708/1248] remove zlib as a default rocksdb compression option Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 2 +- deps/rust-rocksdb/Cargo.toml | 2 +- src/core/config/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 9b6f6ce0..541f062d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -821,7 +821,7 @@ # Type of RocksDB database compression to use. # -# Available options are "zstd", "zlib", "bz2", "lz4", or "none". +# Available options are "zstd", "bz2", "lz4", or "none". # # It is best to use ZSTD as an overall good balance between # speed/performance, storage, IO amplification, and CPU usage. 
For more diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 61bd2333..35f755b4 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -10,7 +10,7 @@ repository.workspace = true version = "0.0.1" [features] -default = ["lz4", "zstd", "zlib", "bzip2"] +default = ["lz4", "zstd", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] numa = ["rust-rocksdb/numa"] # unused by rocksdb for now diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 67c3b95c..5a4819e0 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -975,7 +975,7 @@ pub struct Config { /// Type of RocksDB database compression to use. /// - /// Available options are "zstd", "zlib", "bz2", "lz4", or "none". + /// Available options are "zstd", "bz2", "lz4", or "none". /// /// It is best to use ZSTD as an overall good balance between /// speed/performance, storage, IO amplification, and CPU usage. For more From 657e91fd4226d2521e9e7bb15d5982e62ad68624 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:34:17 -0500 Subject: [PATCH 0709/1248] dont send push notifications from ignored users PDUs Signed-off-by: June Clementine Strawberry --- src/service/rooms/timeline/mod.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 35c972fa..138340a4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -368,7 +368,7 @@ impl Service { .state_accessor .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") .await - .map_err(|_| err!(Database("invalid m.room.power_levels event"))) + .map_err(|e| err!(Database(warn!("invalid m.room.power_levels event: {e}")))) .unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); @@ -377,9 +377,10 @@ impl Service { .services .state_cache .active_local_users_in_room(&pdu.room_id) - // Don't 
notify the sender of their own events - .ready_filter(|user| user != &pdu.sender) .map(ToOwned::to_owned) + // Don't notify the sender of their own events, and dont send from ignored users + .ready_filter(|user| user != &pdu.sender) + .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, recipient_user).await).then_some(recipient_user) }) .collect() .await; From 931fd4c80215cee5cd709d42c86d1fefe0844fe1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:44:57 -0500 Subject: [PATCH 0710/1248] add missing target Signed-off-by: June Clementine Strawberry --- rust-toolchain.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 00fb6cee..97b4a789 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -24,5 +24,6 @@ targets = [ "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", "aarch64-unknown-linux-musl", + "aarch64-unknown-linux-gnu", #"aarch64-apple-darwin", ] From ecea0cff69d583439e4a84fba6bd2d5aaba8faee Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:51:13 -0500 Subject: [PATCH 0711/1248] fix TestFetchMessagesFromNonExistentRoom complement test Signed-off-by: June Clementine Strawberry --- src/api/client/message.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 571a238a..c755cc47 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Event, PduCount, PduEvent, Result, at, + Err, Event, PduCount, PduEvent, Result, at, utils::{ IterStream, ReadyExt, result::{FlatOk, LogErr}, @@ -68,6 +68,10 @@ pub(crate) async fn get_message_events_route( let room_id = &body.room_id; let filter = &body.filter; + if !services.rooms.metadata.exists(room_id).await { + return Err!(Request(Forbidden("Room does not exist to this server"))); + } + let from: PduCount = 
body .from .as_deref() From c92678ecbeb55cf323758da08e8c36e65496aa38 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 13:08:01 -0500 Subject: [PATCH 0712/1248] dont build with zlib in the nix flake Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 1 - deps/rust-rocksdb/Cargo.toml | 2 +- flake.nix | 9 +++++++-- src/service/rooms/timeline/mod.rs | 4 ++-- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d51bb966..2ade8b83 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 7f08a21a..5edcc60a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -389,7 +389,6 @@ features = [ "mt_static", "lz4", "zstd", - "zlib", "bzip2", ] diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 35f755b4..f6e0a54f 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" 
-rev = "513133a3dc24b667f32933aa3247c6ec71a958f3" +rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" #branch = "master" default-features = false diff --git a/flake.nix b/flake.nix index 04dee681..faff87d6 100644 --- a/flake.nix +++ b/flake.nix @@ -64,8 +64,10 @@ patches = []; cmakeFlags = pkgs.lib.subtractLists [ - # no real reason to have snappy, no one uses this + # no real reason to have snappy or zlib, no one uses this "-DWITH_SNAPPY=1" + "-DZLIB=1" + "-DWITH_ZLIB=1" # we dont need to use ldb or sst_dump (core_tools) "-DWITH_CORE_TOOLS=1" # we dont need to build rocksdb tests @@ -82,6 +84,8 @@ ++ [ # no real reason to have snappy, no one uses this "-DWITH_SNAPPY=0" + "-DZLIB=0" + "-DWITH_ZLIB=0" # we dont need to use ldb or sst_dump (core_tools) "-DWITH_CORE_TOOLS=0" # we dont need trace tools @@ -171,7 +175,8 @@ sccache ] # liburing is Linux-exclusive - ++ lib.optional stdenv.hostPlatform.isLinux liburing) + ++ lib.optional stdenv.hostPlatform.isLinux liburing + ++ lib.optional stdenv.hostPlatform.isLinux numactl) ++ scope.main.buildInputs ++ scope.main.propagatedBuildInputs ++ scope.main.nativeBuildInputs; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 138340a4..276b8b6a 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -379,8 +379,8 @@ impl Service { .active_local_users_in_room(&pdu.room_id) .map(ToOwned::to_owned) // Don't notify the sender of their own events, and dont send from ignored users - .ready_filter(|user| user != &pdu.sender) - .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, recipient_user).await).then_some(recipient_user) }) + .ready_filter(|user| *user != pdu.sender) + .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, &recipient_user).await).then_some(recipient_user) }) .collect() .await; From d80e61cbee21706454d1033ba46b51e4dcbb8679 Mon Sep 17 00:00:00 2001 From: June 
Clementine Strawberry Date: Thu, 6 Mar 2025 17:53:23 -0500 Subject: [PATCH 0713/1248] bump ring to 0.17.12 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ade8b83..3a57df7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2364,7 +2364,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -3474,9 +3474,9 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" [[package]] name = "ring" -version = "0.17.11" +version = "0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" +checksum = "ed9b823fa29b721a59671b41d6b06e66b29e0628e207e8b1c3ceeda701ec928d" dependencies = [ "cc", "cfg-if", From f34e0b21a3cbf7eaa737256fc57c13719b225507 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:12:54 -0500 Subject: [PATCH 0714/1248] remove rust-rocksdb dylib wrapper as we have a fork already Signed-off-by: June Clementine Strawberry --- Cargo.lock | 10 +----- Cargo.toml | 9 +++--- deps/rust-rocksdb/Cargo.toml | 42 ------------------------ deps/rust-rocksdb/lib.rs | 62 ------------------------------------ 4 files changed, 6 insertions(+), 117 deletions(-) delete mode 100644 deps/rust-rocksdb/Cargo.toml delete mode 100644 deps/rust-rocksdb/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 3a57df7b..9a46f008 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -848,7 +848,7 @@ dependencies = [ "log", "minicbor", "minicbor-serde", - "rust-rocksdb-uwu", + "rust-rocksdb", "serde", "serde_json", "tokio", @@ -3706,14 +3706,6 @@ source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca dependencies = [ "libc", 
"rust-librocksdb-sys", - "serde", -] - -[[package]] -name = "rust-rocksdb-uwu" -version = "0.0.1" -dependencies = [ - "rust-rocksdb", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 5edcc60a..62c90119 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -382,8 +382,9 @@ features = [ ] [workspace.dependencies.rust-rocksdb] -path = "deps/rust-rocksdb" -package = "rust-rocksdb-uwu" +git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" +rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" +default-features = false features = [ "multi-threaded-cf", "mt_static", @@ -683,7 +684,7 @@ inherits = "release" # To enable hot-reloading: # 1. Uncomment all of the rustflags here. -# 2. Uncomment crate-type=dylib in src/*/Cargo.toml and deps/rust-rocksdb/Cargo.toml +# 2. Uncomment crate-type=dylib in src/*/Cargo.toml # # opt-level, mir-opt-level, validate-mir are not known to interfere with reloading # and can be raised if build times are tolerable. @@ -751,7 +752,7 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,lazy', #] -[profile.dev.package.rust-rocksdb-uwu] +[profile.dev.package.rust-rocksdb] inherits = "dev" debug = 'limited' incremental = false diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml deleted file mode 100644 index f6e0a54f..00000000 --- a/deps/rust-rocksdb/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "rust-rocksdb-uwu" -categories.workspace = true -description = "dylib wrapper for rust-rocksdb" -edition = "2024" -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version = "0.0.1" - -[features] -default = ["lz4", "zstd", "bzip2"] -jemalloc = ["rust-rocksdb/jemalloc"] -io-uring = ["rust-rocksdb/io-uring"] -numa = ["rust-rocksdb/numa"] # unused by rocksdb for now -valgrind = ["rust-rocksdb/valgrind"] -lz4 = ["rust-rocksdb/lz4"] -zstd = ["rust-rocksdb/zstd"] -zlib = ["rust-rocksdb/zlib"] -bzip2 = ["rust-rocksdb/bzip2"] -rtti = ["rust-rocksdb/rtti"] -mt_static = 
["rust-rocksdb/mt_static"] -multi-threaded-cf = ["rust-rocksdb/multi-threaded-cf"] -serde1 = ["rust-rocksdb/serde1"] -malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] - -[dependencies.rust-rocksdb] -git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" -#branch = "master" -default-features = false - -[lib] -path = "lib.rs" -crate-type = [ - "rlib", -# "dylib" -] - -[lints] -workspace = true diff --git a/deps/rust-rocksdb/lib.rs b/deps/rust-rocksdb/lib.rs deleted file mode 100644 index 8dbbda98..00000000 --- a/deps/rust-rocksdb/lib.rs +++ /dev/null @@ -1,62 +0,0 @@ -pub use rust_rocksdb::*; - -#[cfg_attr(not(conduwuit_mods), link(name = "rocksdb"))] -#[cfg_attr(conduwuit_mods, link(name = "rocksdb", kind = "static"))] -unsafe extern "C" { - pub unsafe fn rocksdb_list_column_families(); - pub unsafe fn rocksdb_logger_create_stderr_logger(); - pub unsafe fn rocksdb_logger_create_callback_logger(); - pub unsafe fn rocksdb_options_set_info_log(); - pub unsafe fn rocksdb_get_options_from_string(); - pub unsafe fn rocksdb_writebatch_create(); - pub unsafe fn rocksdb_writebatch_destroy(); - pub unsafe fn rocksdb_writebatch_put_cf(); - pub unsafe fn rocksdb_writebatch_delete_cf(); - pub unsafe fn rocksdb_iter_value(); - pub unsafe fn rocksdb_iter_seek_to_last(); - pub unsafe fn rocksdb_iter_seek_for_prev(); - pub unsafe fn rocksdb_iter_seek_to_first(); - pub unsafe fn rocksdb_iter_next(); - pub unsafe fn rocksdb_iter_prev(); - pub unsafe fn rocksdb_iter_seek(); - pub unsafe fn rocksdb_iter_valid(); - pub unsafe fn rocksdb_iter_get_error(); - pub unsafe fn rocksdb_iter_key(); - pub unsafe fn rocksdb_iter_destroy(); - pub unsafe fn rocksdb_livefiles(); - pub unsafe fn rocksdb_livefiles_count(); - pub unsafe fn rocksdb_livefiles_destroy(); - pub unsafe fn rocksdb_livefiles_column_family_name(); - pub unsafe fn rocksdb_livefiles_name(); - pub unsafe fn rocksdb_livefiles_size(); - pub unsafe fn 
rocksdb_livefiles_level(); - pub unsafe fn rocksdb_livefiles_smallestkey(); - pub unsafe fn rocksdb_livefiles_largestkey(); - pub unsafe fn rocksdb_livefiles_entries(); - pub unsafe fn rocksdb_livefiles_deletions(); - pub unsafe fn rocksdb_put_cf(); - pub unsafe fn rocksdb_delete_cf(); - pub unsafe fn rocksdb_get_pinned_cf(); - pub unsafe fn rocksdb_create_column_family(); - pub unsafe fn rocksdb_get_latest_sequence_number(); - pub unsafe fn rocksdb_batched_multi_get_cf(); - pub unsafe fn rocksdb_cancel_all_background_work(); - pub unsafe fn rocksdb_repair_db(); - pub unsafe fn rocksdb_list_column_families_destroy(); - pub unsafe fn rocksdb_flush(); - pub unsafe fn rocksdb_flush_wal(); - pub unsafe fn rocksdb_open_column_families(); - pub unsafe fn rocksdb_open_for_read_only_column_families(); - pub unsafe fn rocksdb_open_as_secondary_column_families(); - pub unsafe fn rocksdb_open_column_families_with_ttl(); - pub unsafe fn rocksdb_open(); - pub unsafe fn rocksdb_open_for_read_only(); - pub unsafe fn rocksdb_open_with_ttl(); - pub unsafe fn rocksdb_open_as_secondary(); - pub unsafe fn rocksdb_write(); - pub unsafe fn rocksdb_create_iterator_cf(); - pub unsafe fn rocksdb_backup_engine_create_new_backup_flush(); - pub unsafe fn rocksdb_backup_engine_options_create(); - pub unsafe fn rocksdb_write_buffer_manager_destroy(); - pub unsafe fn rocksdb_options_set_ttl(); -} From fa71162c7dd943afdf78d10710914076ec2d3c85 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:45:14 -0500 Subject: [PATCH 0715/1248] bump rocksdb to v9.11.1 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- flake.lock | 8 ++++---- flake.nix | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a46f008..56ff3c6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3684,8 +3684,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.32.0+9.10.0" -source = 
"git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" +version = "0.33.0+9.11.1" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" dependencies = [ "bindgen", "bzip2-sys", @@ -3701,8 +3701,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" +version = "0.37.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 62c90119..43b2d55d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" +rev = "3f4c5357243defedc849ae6227490102a9f90bef" default-features = false features = [ "multi-threaded-cf", diff --git a/flake.lock b/flake.lock index a1bd423f..3a43c4cd 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1741234703, - "narHash": "sha256-sT5g/RM9vrwY6AmjSfl4RoJPGtcJCkZCsxiX3PFJgKQ=", + "lastModified": 1741303627, + "narHash": "sha256-7HpydEinYHvskC4vkl1Yie2kg2yShfZbREAyQMkvEUc=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "185593ce4534091e57025e9f3571dbf681c04631", + "rev": "cecee0e4fbff2b69e3edc6e9b5b751d8098a3ba1", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.11.1", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index faff87d6..8f08a7d9 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ 
flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 20dd1d148dd31948d9055c5a19ba8f8e13041363 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:45:48 -0500 Subject: [PATCH 0716/1248] add new complement test results Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 425 +++++++++++++++++- 1 file changed, 421 insertions(+), 4 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 11339049..fed43b48 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -1,5 +1,26 @@ {"Action":"pass","Test":"TestACLs"} +{"Action":"pass","Test":"TestAddAccountData"} +{"Action":"pass","Test":"TestAddAccountData/Can_add_global_account_data"} +{"Action":"pass","Test":"TestAddAccountData/Can_add_room_account_data"} +{"Action":"fail","Test":"TestArchivedRoomsHistory"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/incremental_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/initial_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty"} +{"Action":"skip","Test":"TestArchivedRoomsHistory/timeline_is_empty/incremental_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} +{"Action":"fail","Test":"TestAsyncUpload"} +{"Action":"fail","Test":"TestAsyncUpload/Cannot_upload_to_a_media_ID_that_has_already_been_uploaded_to"} 
+{"Action":"fail","Test":"TestAsyncUpload/Create_media"} +{"Action":"fail","Test":"TestAsyncUpload/Download_media"} +{"Action":"fail","Test":"TestAsyncUpload/Download_media_over__matrix/client/v1/media/download"} +{"Action":"fail","Test":"TestAsyncUpload/Not_yet_uploaded"} +{"Action":"fail","Test":"TestAsyncUpload/Upload_media"} +{"Action":"pass","Test":"TestAvatarUrlUpdate"} {"Action":"pass","Test":"TestBannedUserCannotSendJoin"} +{"Action":"skip","Test":"TestCanRegisterAdmin"} +{"Action":"pass","Test":"TestCannotKickLeftUser"} +{"Action":"fail","Test":"TestCannotKickNonPresentUser"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} @@ -42,30 +63,124 @@ {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/knock_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/regular_event"} +{"Action":"pass","Test":"TestChangePassword"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_a_different_session_no_longer_works_by_default"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can't_log_in_with_old_password"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can_log_in_with_new_password"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_different_sessions_can_optionally_be_kept"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_existing_session_still_works"} +{"Action":"fail","Test":"TestChangePasswordPushers"} +{"Action":"fail","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} 
+{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} +{"Action":"fail","Test":"TestClientSpacesSummary"} +{"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} +{"Action":"fail","Test":"TestClientSpacesSummary/pagination"} +{"Action":"fail","Test":"TestClientSpacesSummary/query_whole_graph"} +{"Action":"fail","Test":"TestClientSpacesSummary/redact_link"} +{"Action":"fail","Test":"TestClientSpacesSummary/suggested_only"} +{"Action":"fail","Test":"TestClientSpacesSummaryJoinRules"} +{"Action":"pass","Test":"TestContent"} +{"Action":"pass","Test":"TestContentCSAPIMediaV1"} {"Action":"pass","Test":"TestContentMediaV1"} +{"Action":"fail","Test":"TestCumulativeJoinLeaveJoinSync"} +{"Action":"pass","Test":"TestDeactivateAccount"} +{"Action":"pass","Test":"TestDeactivateAccount/After_deactivating_account,_can't_log_in_with_password"} +{"Action":"pass","Test":"TestDeactivateAccount/Can't_deactivate_account_with_wrong_password"} +{"Action":"pass","Test":"TestDeactivateAccount/Can_deactivate_account"} +{"Action":"pass","Test":"TestDeactivateAccount/Password_flow_is_available"} +{"Action":"fail","Test":"TestDelayedEvents"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_with_an_invalid_action"} +{"Action":"pass","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_delay_ID"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_request_body"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_an_action"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_events_are_empty_on_startup"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_message_events_are_sent_on_timeout"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_another_user"} 
+{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_the_same_user"} +{"Action":"skip","Test":"TestDelayedEvents/delayed_state_events_are_kept_on_server_restart"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_sent_on_timeout"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_cancelled"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_restarted"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_sent_on_request"} +{"Action":"pass","Test":"TestDelayedEvents/parallel"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_cancel_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_restart_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_send_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings"} +{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings/Deleting_a_user's_device_should_delete_any_local_notification_settings_entries_from_their_account_data"} +{"Action":"pass","Test":"TestDemotingUsersViaUsersDefault"} +{"Action":"fail","Test":"TestDeviceListUpdates"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_local_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_remote_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_local_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_remote_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_joins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_leaves_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_rejoins_a_room"} 
+{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_joins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_leaves_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_rejoins_a_room"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/good_connectivity"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/stopped_server"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederationOnRoomJoin"} +{"Action":"fail","Test":"TestDeviceManagement"} +{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}"} +{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}_gives_a_404_for_unknown_devices"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/devices"} +{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_gives_a_404_for_unknown_devices"} +{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_updates_device_fields"} +{"Action":"pass","Test":"TestDisplayNameUpdate"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestEvent"} +{"Action":"pass","Test":"TestEvent/Parallel"} +{"Action":"pass","Test":"TestEvent/Parallel/Large_Event"} +{"Action":"pass","Test":"TestEvent/Parallel/Large_State_Event"} 
{"Action":"pass","Test":"TestEventAuth"} {"Action":"pass","Test":"TestEventAuth/returns_auth_events_for_the_requested_event"} {"Action":"pass","Test":"TestEventAuth/returns_the_auth_chain_for_the_requested_event"} -{"Action":"pass","Test":"TestFederatedClientSpaces"} +{"Action":"fail","Test":"TestEventRelationships"} +{"Action":"fail","Test":"TestFederatedClientSpaces"} +{"Action":"fail","Test":"TestFederatedEventRelationships"} {"Action":"fail","Test":"TestFederationKeyUploadQuery"} {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST"} {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"} {"Action":"pass","Test":"TestFederationRedactSendsWithoutEvent"} {"Action":"pass","Test":"TestFederationRejectInvite"} -{"Action":"pass","Test":"TestFederationRoomsInvite"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel"} +{"Action":"fail","Test":"TestFederationRoomsInvite"} +{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} +{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_join_the_room_when_homeserver_is_already_participating_in_the_room"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_reject_invite_when_homeserver_is_already_participating_in_the_room"} 
{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata"} {"Action":"pass","Test":"TestFederationThumbnail"} +{"Action":"pass","Test":"TestFetchEvent"} +{"Action":"fail","Test":"TestFetchEventNonWorldReadable"} +{"Action":"pass","Test":"TestFetchEventWorldReadable"} +{"Action":"fail","Test":"TestFetchHistoricalInvitedEventFromBeforeInvite"} +{"Action":"pass","Test":"TestFetchHistoricalInvitedEventFromBetweenInvite"} +{"Action":"fail","Test":"TestFetchHistoricalJoinedEventDenied"} +{"Action":"pass","Test":"TestFetchHistoricalSharedEvent"} +{"Action":"pass","Test":"TestFetchMessagesFromNonExistentRoom"} +{"Action":"pass","Test":"TestFilter"} +{"Action":"fail","Test":"TestFilterMessagesByRelType"} +{"Action":"fail","Test":"TestGappedSyncLeaveSection"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/not_membership"} {"Action":"fail","Test":"TestGetMissingEventsGapFilling"} +{"Action":"pass","Test":"TestGetRoomMembers"} +{"Action":"fail","Test":"TestGetRoomMembersAtPoint"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility"} @@ -76,15 +191,41 @@ {"Action":"pass","Test":"TestInboundFederationProfile/Inbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"} {"Action":"fail","Test":"TestInboundFederationRejectsEventsWithRejectedAuthEvents"} +{"Action":"fail","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} {"Action":"pass","Test":"TestIsDirectFlagFederation"} 
{"Action":"pass","Test":"TestIsDirectFlagLocal"} {"Action":"pass","Test":"TestJoinFederatedRoomFailOver"} +{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser"} +{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser/join_remote_federated_room_as_application_service_user"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinViaRoomIDAndServerName"} +{"Action":"fail","Test":"TestJson"} +{"Action":"fail","Test":"TestJson/Parallel"} +{"Action":"fail","Test":"TestJson/Parallel/Invalid_JSON_special_values"} +{"Action":"fail","Test":"TestJson/Parallel/Invalid_numerical_values"} +{"Action":"fail","Test":"TestJumpToDateEndpoint"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/can_paginate_after_getting_remote_event_from_timestamp_to_event_endpoint"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_backwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_forwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} 
+{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/when_looking_backwards_before_the_room_was_created,_should_be_able_to_find_event_that_was_imported"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_after_given_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_before_given_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_after_given_timestmap_when_all_message_timestamps_are_the_same"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_before_given_timestamp_when_all_message_timestamps_are_the_same"} +{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_after_the_latest_timestmap"} +{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_before_the_earliest_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_private_room_you_are_not_a_member_of"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_public_room_you_are_not_a_member_of"} +{"Action":"fail","Test":"TestKeyChangesLocal"} +{"Action":"fail","Test":"TestKeyChangesLocal/New_login_should_create_a_device_lists.changed_entry"} +{"Action":"fail","Test":"TestKeyClaimOrdering"} +{"Action":"pass","Test":"TestKeysQueryWithDeviceIDAsObjectFails"} {"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectory"} {"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room"} {"Action":"fail","Test":"TestKnocking"} @@ -139,9 +280,35 @@ {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} 
+{"Action":"pass","Test":"TestLeakyTyping"} +{"Action":"fail","Test":"TestLeaveEventInviteRejection"} +{"Action":"fail","Test":"TestLeaveEventVisibility"} +{"Action":"fail","Test":"TestLeftRoomFixture"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_'m.room.name'_state_for_a_departed_room"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/members_for_a_departed_room"} +{"Action":"pass","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/messages_for_a_departed_room"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/state_for_a_departed_room"} +{"Action":"pass","Test":"TestLeftRoomFixture/Getting_messages_going_forward_is_limited_for_a_departed_room"} {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} +{"Action":"fail","Test":"TestLogin"} +{"Action":"fail","Test":"TestLogin/parallel"} +{"Action":"pass","Test":"TestLogin/parallel/GET_/login_yields_a_set_of_flows"} +{"Action":"fail","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_as_non-existing_user_is_rejected"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_log_in_as_a_user_with_just_the_local_part_of_the_id"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_login_as_user"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_returns_the_same_device_id_as_that_in_the_request"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_wrong_password_is_rejected"} +{"Action":"pass","Test":"TestLogout"} +{"Action":"pass","Test":"TestLogout/Can_logout_all_devices"} +{"Action":"pass","Test":"TestLogout/Can_logout_current_device"} +{"Action":"pass","Test":"TestLogout/Request_to_logout_with_invalid_an_access_token_is_rejected"} 
+{"Action":"pass","Test":"TestLogout/Request_to_logout_without_an_access_token_is_rejected"} +{"Action":"fail","Test":"TestMSC3757OwnedState"} +{"Action":"pass","Test":"TestMSC3967"} +{"Action":"pass","Test":"TestMediaConfig"} {"Action":"pass","Test":"TestMediaFilenames"} {"Action":"pass","Test":"TestMediaFilenames/Parallel"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII"} @@ -178,11 +345,74 @@ {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_locally"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_over_federation"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_upload_without_a_file_name"} +{"Action":"fail","Test":"TestMembersLocal"} +{"Action":"fail","Test":"TestMembersLocal/Parallel"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_join_events"} +{"Action":"fail","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_incremental_sync)"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_initial_sync)"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/New_room_members_see_their_own_join_event"} +{"Action":"fail","Test":"TestMembershipOnEvents"} {"Action":"fail","Test":"TestNetworkPartitionOrdering"} +{"Action":"pass","Test":"TestNotPresentUserCannotBanOthers"} +{"Action":"fail","Test":"TestOlderLeftRoomsNotInLeaveSection"} +{"Action":"fail","Test":"TestOutboundFederationEventSizeGetMissingEvents"} {"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} {"Action":"pass","Test":"TestOutboundFederationProfile"} {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} +{"Action":"fail","Test":"TestPollsLocalPushRules"} 
+{"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} +{"Action":"pass","Test":"TestPowerLevels"} +{"Action":"pass","Test":"TestPowerLevels/GET_/rooms/:room_id/state/m.room.power_levels_can_fetch_levels"} +{"Action":"pass","Test":"TestPowerLevels/PUT_/rooms/:room_id/state/m.room.power_levels_can_set_levels"} +{"Action":"pass","Test":"TestPowerLevels/PUT_power_levels_should_not_explode_if_the_old_power_levels_were_empty"} +{"Action":"fail","Test":"TestPresence"} +{"Action":"fail","Test":"TestPresence/GET_/presence/:user_id/status_fetches_initial_status"} +{"Action":"pass","Test":"TestPresence/PUT_/presence/:user_id/status_updates_my_presence"} +{"Action":"pass","Test":"TestPresence/Presence_can_be_set_from_sync"} +{"Action":"pass","Test":"TestPresence/Presence_changes_are_reported_to_local_room_members"} +{"Action":"pass","Test":"TestPresence/Presence_changes_to_UNAVAILABLE_are_reported_to_local_room_members"} +{"Action":"pass","Test":"TestPresenceSyncDifferentRooms"} +{"Action":"pass","Test":"TestProfileAvatarURL"} +{"Action":"pass","Test":"TestProfileAvatarURL/GET_/profile/:user_id/avatar_url_publicly_accessible"} +{"Action":"pass","Test":"TestProfileAvatarURL/PUT_/profile/:user_id/avatar_url_sets_my_avatar"} +{"Action":"pass","Test":"TestProfileDisplayName"} +{"Action":"pass","Test":"TestProfileDisplayName/GET_/profile/:user_id/displayname_publicly_accessible"} +{"Action":"pass","Test":"TestProfileDisplayName/PUT_/profile/:user_id/displayname_sets_my_name"} +{"Action":"pass","Test":"TestPushRuleCacheHealth"} +{"Action":"pass","Test":"TestPushSync"} +{"Action":"pass","Test":"TestPushSync/Adding_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Disabling_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Enabling_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Push_rules_come_down_in_an_initial_/sync"} 
+{"Action":"pass","Test":"TestPushSync/Setting_actions_for_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestRegistration"} +{"Action":"pass","Test":"TestRegistration/parallel"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_INVALID_USERNAME_for_invalid_user_name"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_USER_IN_USE_for_registered_user_name"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_available_for_unregistered_user_name"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_admin_with_shared_secret"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_disallows_symbols"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_downcases_capitals"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/-"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/."} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_//"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/3"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/="} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/_"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/q"} 
+{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_can_create_a_user"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_downcases_capitals_in_usernames"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_if_user_already_exists"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_usernames_with_special_characters"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_returns_the_same_device_id_as_that_in_the_request"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_{}_returns_a_set_of_flows"} +{"Action":"pass","Test":"TestRegistration/parallel/Registration_accepts_non-ascii_passwords"} +{"Action":"pass","Test":"TestRelations"} +{"Action":"fail","Test":"TestRelationsPagination"} +{"Action":"pass","Test":"TestRelationsPaginationSync"} {"Action":"pass","Test":"TestRemoteAliasRequestsUnderstandUnicode"} {"Action":"pass","Test":"TestRemotePngThumbnail"} {"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/client/v1/media_endpoint"} @@ -191,6 +421,13 @@ {"Action":"fail","Test":"TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"} {"Action":"pass","Test":"TestRemoteTyping"} +{"Action":"fail","Test":"TestRemovingAccountData"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_DELETE_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_PUT_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_account_data_via_PUT_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_data_via_DELETE_works"} +{"Action":"fail","Test":"TestRequestEncodingFails"} +{"Action":"fail","Test":"TestRequestEncodingFails/POST_rejects_invalid_utf-8_in_JSON"} {"Action":"fail","Test":"TestRestrictedRoomsLocalJoin"} 
{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"} @@ -221,12 +458,170 @@ {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsSpacesSummaryFederation"} {"Action":"fail","Test":"TestRestrictedRoomsSpacesSummaryLocal"} +{"Action":"pass","Test":"TestRoomAlias"} +{"Action":"pass","Test":"TestRoomAlias/Parallel"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/GET_/rooms/:room_id/aliases_lists_aliases"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/Only_room_members_can_list_aliases_of_a_room"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/PUT_/directory/room/:room_alias_creates_alias"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/Room_aliases_can_contain_Unicode"} +{"Action":"fail","Test":"TestRoomCanonicalAlias"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_alt_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases#01"} 
+{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_setting_rejects_deleted_aliases"} +{"Action":"pass","Test":"TestRoomCreate"} +{"Action":"pass","Test":"TestRoomCreate/Parallel"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/Can_/sync_newly_created_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_creates_a_room_with_the_given_version"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_ignores_attempts_to_set_the_room_version_via_creation_content"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room_with_invites"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_public_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_name"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_topic"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_numeric_versions"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_unknown_versions"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/Rooms_can_be_created_with_an_initial_invite_list_(SYN-205)"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Joining_room_twice_is_idempotent"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} 
+{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} +{"Action":"fail","Test":"TestRoomDeleteAlias"} +{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_alias_with_no_ops"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_canonical_alias_with_no_ops"} +{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Deleting_a_non-existent_alias_should_return_a_404"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_in_the_default_room_configuration"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.room.aliases_is_restricted"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_can't_delete_other's_aliases"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other's_aliases"} +{"Action":"fail","Test":"TestRoomForget"} +{"Action":"fail","Test":"TestRoomForget/Parallel"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can't_forget_room_you're_still_in"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_we_weren't_an_actual_member"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_you've_been_kicked_from"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_re-join_room_if_re-invited"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Forgetting_room_does_not_show_up_in_v2_initial_/sync"} +{"Action":"fail","Test":"TestRoomForget/Parallel/Forgotten_room_messages_cannot_be_paginated"} +{"Action":"fail","Test":"TestRoomForget/Parallel/Leave_for_forgotten_room_shows_up_in_v2_incremental_/sync"} +{"Action":"pass","Test":"TestRoomImageRoundtrip"} +{"Action":"fail","Test":"TestRoomMembers"} 
+{"Action":"fail","Test":"TestRoomMembers/Parallel"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room"} +{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room_with_custom_content"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room"} +{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room_with_custom_content"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/ban_can_ban_a_user"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/invite_can_send_an_invite"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/join_can_join_a_room"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/leave_can_leave_a_room"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} +{"Action":"pass","Test":"TestRoomMessagesLazyLoading"} +{"Action":"pass","Test":"TestRoomMessagesLazyLoadingLocalUser"} +{"Action":"pass","Test":"TestRoomReadMarkers"} +{"Action":"pass","Test":"TestRoomReceipts"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} 
+{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} +{"Action":"fail","Test":"TestRoomState"} +{"Action":"fail","Test":"TestRoomState/Parallel"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/directory/room/:room_alias_yields_room_ID"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/joined_rooms_lists_newly-created_room"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/publicRooms_lists_newly-created_room"} +{"Action":"fail","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_fetches_my_membership"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_is_forbidden_after_leaving_room"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id?format=event_fetches_my_membership_event"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id_fetches_my_membership"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.name_gets_name"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.power_levels_fetches_powerlevels"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.topic_gets_topic"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state_fetches_entire_room_state"} +{"Action":"pass","Test":"TestRoomState/Parallel/POST_/rooms/:room_id/state/m.room.name_sets_name"} +{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/createRoom_with_creation_content"} +{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/rooms/:room_id/state/m.room.topic_sets_topic"} +{"Action":"pass","Test":"TestRoomSummary"} +{"Action":"fail","Test":"TestRoomsInvite"} 
+{"Action":"fail","Test":"TestRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Can_invite_users_to_invite-only_rooms"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite"} +{"Action":"fail","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_see_room_metadata"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Uninvited_users_cannot_join_the_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_a_user_that_is_already_in_the_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_themselves_to_a_room"} +{"Action":"fail","Test":"TestSearch"} +{"Action":"fail","Test":"TestSearch/parallel"} +{"Action":"fail","Test":"TestSearch/parallel/Can_back-paginate_search_results"} +{"Action":"fail","Test":"TestSearch/parallel/Can_get_context_around_search_results"} +{"Action":"pass","Test":"TestSearch/parallel/Can_search_for_an_event_by_body"} +{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_rank_ordering_do_not_include_redacted_events"} +{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_recent_ordering_do_not_include_redacted_events"} +{"Action":"pass","Test":"TestSearch/parallel/Search_works_across_an_upgraded_room_and_its_predecessor"} +{"Action":"fail","Test":"TestSendAndFetchMessage"} {"Action":"skip","Test":"TestSendJoinPartialStateResponse"} +{"Action":"pass","Test":"TestSendMessageWithTxn"} +{"Action":"pass","Test":"TestServerCapabilities"} +{"Action":"skip","Test":"TestServerNotices"} +{"Action":"fail","Test":"TestSync"} +{"Action":"fail","Test":"TestSync/parallel"} +{"Action":"pass","Test":"TestSync/parallel/Can_sync_a_joined_room"} +{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking"} 
+{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking/User_is_correctly_listed_when_they_leave,_even_when_lazy_loading_is_enabled"} +{"Action":"pass","Test":"TestSync/parallel/Full_state_sync_includes_joined_rooms"} +{"Action":"fail","Test":"TestSync/parallel/Get_presence_for_newly_joined_members_in_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_has_correct_timeline_in_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_includes_presence_in_incremental_sync"} +{"Action":"pass","Test":"TestSync/parallel/Newly_joined_room_is_included_in_an_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} +{"Action":"pass","Test":"TestSyncFilter"} +{"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} +{"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} +{"Action":"fail","Test":"TestSyncLeaveSection"} +{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} +{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} +{"Action":"fail","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} {"Action":"pass","Test":"TestSyncOmitsStateChangeOnFilteredEvents"} +{"Action":"pass","Test":"TestSyncTimelineGap"} +{"Action":"pass","Test":"TestSyncTimelineGap/full"} +{"Action":"pass","Test":"TestSyncTimelineGap/incremental"} +{"Action":"fail","Test":"TestTentativeEventualJoiningAfterRejecting"} +{"Action":"fail","Test":"TestThreadReceiptsInSyncMSC4102"} +{"Action":"fail","Test":"TestThreadedReceipts"} +{"Action":"fail","Test":"TestThreadsEndpoint"} +{"Action":"pass","Test":"TestToDeviceMessages"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation"} {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/good_connectivity"} 
{"Action":"pass","Test":"TestToDeviceMessagesOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation/stopped_server"} +{"Action":"fail","Test":"TestTxnIdWithRefreshToken"} +{"Action":"fail","Test":"TestTxnIdempotency"} +{"Action":"pass","Test":"TestTxnIdempotencyScopedToDevice"} +{"Action":"pass","Test":"TestTxnInEvent"} +{"Action":"pass","Test":"TestTxnScopeOnLocalEcho"} +{"Action":"pass","Test":"TestTyping"} +{"Action":"pass","Test":"TestTyping/Typing_can_be_explicitly_stopped"} +{"Action":"pass","Test":"TestTyping/Typing_notification_sent_to_local_room_members"} +{"Action":"fail","Test":"TestUnbanViaInvite"} {"Action":"fail","Test":"TestUnknownEndpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} {"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} @@ -234,5 +629,27 @@ {"Action":"pass","Test":"TestUnknownEndpoints/Server-server_endpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Unknown_prefix"} {"Action":"fail","Test":"TestUnrejectRejectedEvents"} +{"Action":"fail","Test":"TestUploadKey"} +{"Action":"fail","Test":"TestUploadKey/Parallel"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Can_claim_one_time_key_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_device_keys_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_specific_device_keys_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_upload_device_keys"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} +{"Action":"pass","Test":"TestUploadKey/Parallel/query_for_user_with_no_keys_returns_empty_key_dict"} +{"Action":"pass","Test":"TestUploadKeyIdempotency"} +{"Action":"pass","Test":"TestUploadKeyIdempotencyOverlap"} +{"Action":"fail","Test":"TestUrlPreview"} 
{"Action":"pass","Test":"TestUserAppearsInChangedDeviceListOnJoinOverFederation"} +{"Action":"pass","Test":"TestVersionStructure"} +{"Action":"pass","Test":"TestVersionStructure/Version_responds_200_OK_with_valid_structure"} +{"Action":"pass","Test":"TestWithoutOwnedState"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_a_non-member_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_suffixed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_malformed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_their_own_suffixed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/user_can_set_state_with_their_own_user_ID_as_state_key"} {"Action":"pass","Test":"TestWriteMDirectAccountData"} From 5ad1100e0fdf41a380b445154b42bc09f38a64b5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 19:48:06 -0500 Subject: [PATCH 0717/1248] bump our rocksdb fork Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- flake.lock | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56ff3c6b..7dd24e2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" +source = 
"git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 43b2d55d..a9f1abb3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "3f4c5357243defedc849ae6227490102a9f90bef" +rev = "d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" default-features = false features = [ "multi-threaded-cf", diff --git a/flake.lock b/flake.lock index 3a43c4cd..c3292cbc 100644 --- a/flake.lock +++ b/flake.lock @@ -567,11 +567,11 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1741303627, - "narHash": "sha256-7HpydEinYHvskC4vkl1Yie2kg2yShfZbREAyQMkvEUc=", + "lastModified": 1741308171, + "narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "cecee0e4fbff2b69e3edc6e9b5b751d8098a3ba1", + "rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986", "type": "github" }, "original": { From fe65648296b1827841c3e2a602cc78bd1af0a9b5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 20:10:32 -0500 Subject: [PATCH 0718/1248] remove unnecessary map_err Signed-off-by: June Clementine Strawberry --- src/service/rooms/timeline/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 
276b8b6a..826a1dae 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -344,7 +344,7 @@ impl Service { let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; - let count1 = self.services.globals.next_count()?; + let count1 = self.services.globals.next_count().unwrap(); // Mark as read first so the sending client doesn't get a notification even if // appending fails self.services @@ -362,13 +362,12 @@ impl Service { drop(insert_lock); - // See if the event matches any known pushers + // See if the event matches any known pushers via power level let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") .await - .map_err(|e| err!(Database(warn!("invalid m.room.power_levels event: {e}")))) .unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); From 2c58a6efda4f0ae7fa7b5ad05758489b5ff2e5f5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 00:54:30 -0500 Subject: [PATCH 0719/1248] allow broken no-op deny+allow room server ACL keys Signed-off-by: June Clementine Strawberry --- src/service/rooms/event_handler/acl_check.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index 6b432a4b..f847015b 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ b/src/service/rooms/event_handler/acl_check.rs @@ -14,14 +14,21 @@ pub async fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Res .room_state_get_content(room_id, &StateEventType::RoomServerAcl, "") .await .map(|c: RoomServerAclEventContent| c) - .inspect(|acl| trace!("ACL content found: {acl:?}")) - .inspect_err(|e| trace!("No ACL content found: {e:?}")) + .inspect(|acl| trace!(%room_id, "ACL content found: {acl:?}")) + .inspect_err(|e| trace!(%room_id, "No ACL content found: {e:?}")) else { return 
Ok(()); }; if acl_event_content.allow.is_empty() { - warn!("Ignoring broken ACL event (allow key is empty)"); + warn!(%room_id, "Ignoring broken ACL event (allow key is empty)"); + return Ok(()); + } + + if acl_event_content.deny.contains(&String::from("*")) + && acl_event_content.allow.contains(&String::from("*")) + { + warn!(%room_id, "Ignoring broken ACL event (allow key and deny key both contain wildcard \"*\""); return Ok(()); } From 4f882c3bd8adfa86edc504396f6cd45b56fd8b62 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 00:57:39 -0500 Subject: [PATCH 0720/1248] add some ACL paw-gun checks, better `PUT` state event validation Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 23 +++- src/api/client/state.rs | 253 +++++++++++++++++++++++++--------------- 2 files changed, 178 insertions(+), 98 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 8a7eab7e..4c1c986a 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{Err, Error, Result, debug, err, info, result::NotFound, utils}; +use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, @@ -41,6 +41,20 @@ pub(crate) async fn upload_keys_route( let (sender_user, sender_device) = body.sender(); for (key_id, one_time_key) in &body.one_time_keys { + if one_time_key + .deserialize() + .inspect_err(|e| { + debug_warn!( + ?key_id, + ?one_time_key, + "Invalid one time key JSON submitted by client, skipping: {e}" + ) + }) + .is_err() + { + continue; + } + services .users .add_one_time_key(sender_user, sender_device, key_id, one_time_key) @@ -48,7 +62,12 @@ pub(crate) async fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { - let deser_device_keys = 
device_keys.deserialize()?; + let deser_device_keys = device_keys.deserialize().map_err(|e| { + err!(Request(BadJson(debug_warn!( + ?device_keys, + "Invalid device keys JSON uploaded by client: {e}" + )))) + })?; if deser_device_keys.user_id != sender_user { return Err!(Request(Unknown( diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 6353fe1c..c92091eb 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -11,6 +11,7 @@ use ruma::{ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, + server_acl::RoomServerAclEventContent, }, }, serde::Raw, @@ -194,134 +195,194 @@ async fn allowed_to_send_state_event( ) -> Result { match event_type { | StateEventType::RoomCreate => { - return Err!(Request(BadJson( + return Err!(Request(BadJson(debug_warn!( + ?room_id, "You cannot update m.room.create after a room has been created." - ))); + )))); + }, + | StateEventType::RoomServerAcl => { + // prevents common ACL paw-guns as ACL management is difficult and prone to + // irreversible mistakes + match json.deserialize_as::() { + | Ok(acl_content) => { + if acl_content.allow.is_empty() { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with an empty allow key will permanently \ + brick the room for non-conduwuit's as this equates to no servers \ + being allowed to participate in this room." + )))); + } + + if acl_content.deny.contains(&String::from("*")) + && acl_content.allow.contains(&String::from("*")) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with a deny and allow key value of \"*\" will \ + permanently brick the room for non-conduwuit's as this equates to \ + no servers being allowed to participate in this room." 
+ )))); + } + + if acl_content.deny.contains(&String::from("*")) + && !acl_content.is_allowed(services.globals.server_name()) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with a deny key value of \"*\" and without \ + your own server name in the allow key will result in you being \ + unable to participate in this room." + )))); + } + + if !acl_content.allow.contains(&String::from("*")) + && !acl_content.is_allowed(services.globals.server_name()) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event for an allow key without \"*\" and without \ + your own server name in the allow key will result in you being \ + unable to participate in this room." + )))); + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room server ACL event is invalid: {e}" + )))); + }, + }; }, - // Forbid m.room.encryption if encryption is disabled | StateEventType::RoomEncryption => - if !services.globals.allow_encryption() { + // Forbid m.room.encryption if encryption is disabled + if !services.config.allow_encryption { return Err!(Request(Forbidden("Encryption is disabled on this homeserver."))); }, - // admin room is a sensitive room, it should not ever be made public | StateEventType::RoomJoinRules => { + // admin room is a sensitive room, it should not ever be made public if let Ok(admin_room_id) = services.admin.get_admin_room().await { if admin_room_id == room_id { - if let Ok(join_rule) = - serde_json::from_str::(json.json().get()) - { - if join_rule.join_rule == JoinRule::Public { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made public" - ))); - } + match json.deserialize_as::() { + | Ok(join_rule) => + if join_rule.join_rule == JoinRule::Public { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made public" + ))); + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room join rules event is invalid: {e}" + )))); + }, 
} } } }, - // admin room is a sensitive room, it should not ever be made world readable | StateEventType::RoomHistoryVisibility => { - if let Ok(visibility_content) = - serde_json::from_str::(json.json().get()) - { - if let Ok(admin_room_id) = services.admin.get_admin_room().await { - if admin_room_id == room_id - && visibility_content.history_visibility - == HistoryVisibility::WorldReadable - { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made world readable \ - (public room history)." - ))); - } + // admin room is a sensitive room, it should not ever be made world readable + if let Ok(admin_room_id) = services.admin.get_admin_room().await { + match json.deserialize_as::() { + | Ok(visibility_content) => { + if admin_room_id == room_id + && visibility_content.history_visibility + == HistoryVisibility::WorldReadable + { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made world \ + readable (public room history)." + ))); + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room history visibility event is invalid: {e}" + )))); + }, } } }, | StateEventType::RoomCanonicalAlias => { - if let Ok(canonical_alias) = - serde_json::from_str::(json.json().get()) - { - let mut aliases = canonical_alias.alt_aliases.clone(); + match json.deserialize_as::() { + | Ok(canonical_alias_content) => { + let mut aliases = canonical_alias_content.alt_aliases.clone(); - if let Some(alias) = canonical_alias.alias { - aliases.push(alias); - } + if let Some(alias) = canonical_alias_content.alias { + aliases.push(alias); + } - for alias in aliases { - if !services.globals.server_is_ours(alias.server_name()) { - return Err!(Request(Forbidden( - "canonical_alias must be for this server" + for alias in aliases { + let (alias_room_id, _servers) = + services.rooms.alias.resolve_alias(&alias, None).await?; + + if alias_room_id != room_id { + return Err!(Request(Forbidden( + "Room alias {alias} does not belong 
to room {room_id}" + ))); + } + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room canonical alias event is invalid: {e}" + )))); + }, + } + }, + | StateEventType::RoomMember => match json.deserialize_as::() { + | Ok(membership_content) => { + let Ok(state_key) = UserId::parse(state_key) else { + return Err!(Request(BadJson( + "Membership event has invalid or non-existent state key" + ))); + }; + + if let Some(authorising_user) = + membership_content.join_authorized_via_users_server + { + if membership_content.membership != MembershipState::Join { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } + + if services + .rooms + .state_cache + .is_joined(state_key, room_id) + .await + { + return Err!(Request(InvalidParam( + "{state_key} is already joined, an authorising user is not required." + ))); + } + + if !services.globals.user_is_local(&authorising_user) { + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} does not belong to this \ + homeserver" ))); } if !services .rooms - .alias - .resolve_local_alias(&alias) + .state_cache + .is_joined(&authorising_user, room_id) .await - .is_ok_and(|room| room == room_id) - // Make sure it's the right room { - return Err!(Request(Forbidden( - "You are only allowed to send canonical_alias events when its \ - aliases already exist" + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} is not in the room, they \ + cannot authorise the join." ))); } } - } - }, - | StateEventType::RoomMember => { - let Ok(membership_content) = - serde_json::from_str::(json.json().get()) - else { + }, + | Err(e) => { return Err!(Request(BadJson( "Membership content must have a valid JSON body with at least a valid \ - membership state." 
+ membership state: {e}" ))); - }; - - let Ok(state_key) = UserId::parse(state_key) else { - return Err!(Request(BadJson( - "Membership event has invalid or non-existent state key" - ))); - }; - - if let Some(authorising_user) = membership_content.join_authorized_via_users_server { - if membership_content.membership != MembershipState::Join { - return Err!(Request(BadJson( - "join_authorised_via_users_server is only for member joins" - ))); - } - - if services - .rooms - .state_cache - .is_joined(state_key, room_id) - .await - { - return Err!(Request(InvalidParam( - "{state_key} is already joined, an authorising user is not required." - ))); - } - - if !services.globals.user_is_local(&authorising_user) { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} does not belong to this homeserver" - ))); - } - - if !services - .rooms - .state_cache - .is_joined(&authorising_user, room_id) - .await - { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} is not in the room, they cannot \ - authorise the join." 
- ))); - } - } + }, }, | _ => (), } From 8b3f62919831650a8198ca751dd1892e9889a51d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 00:57:47 -0500 Subject: [PATCH 0721/1248] bump rust-rocksdb Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7dd24e2e..a224ad0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index a9f1abb3..0b08cd8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +rev = "2e692ae026881fc385f111fdcfba38bee98f1e47" default-features = false features = [ "multi-threaded-cf", From 6052c0c8a2c5722a5ca057576ba174f8f72ab9e0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 01:04:53 -0500 Subject: [PATCH 0722/1248] ci: allow ourselves to write to the public docs directory Signed-off-by: 
June Clementine Strawberry --- .github/workflows/documentation.yml | 1 + conduwuit-example.toml | 2 +- src/api/client/keys.rs | 2 +- src/core/config/mod.rs | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 88e7bbe1..b5b4ff46 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -81,6 +81,7 @@ jobs: bin/nix-build-and-cache just .#book cp -r --dereference result public + chmod u+w -R public - name: Upload generated documentation (book) as normal artifact uses: actions/upload-artifact@v4 diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 541f062d..3d4b15bc 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -593,7 +593,7 @@ # Currently, conduwuit doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. # -# example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] +# example: ["matrix.org", "envs.net", "tchncs.de"] # #trusted_servers = ["matrix.org"] diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 4c1c986a..9cd50e85 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -48,7 +48,7 @@ pub(crate) async fn upload_keys_route( ?key_id, ?one_time_key, "Invalid one time key JSON submitted by client, skipping: {e}" - ) + ); }) .is_err() { diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5a4819e0..a82f5f53 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -713,7 +713,7 @@ pub struct Config { /// Currently, conduwuit doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. 
/// - /// example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] + /// example: ["matrix.org", "envs.net", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] From 298b58c069534833cfd027510ad7683e18d71e7a Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Mar 2025 21:44:33 -0500 Subject: [PATCH 0723/1248] set file_shape for roomsynctoken_shortstatehash to 3, remove rust-rocksdb package spec Signed-off-by: strawberry --- Cargo.toml | 21 --------------------- src/database/maps.rs | 1 + 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0b08cd8f..c48be06a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -752,27 +752,6 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,lazy', #] -[profile.dev.package.rust-rocksdb] -inherits = "dev" -debug = 'limited' -incremental = false -codegen-units = 1 -opt-level = 'z' -#rustflags = [ -# '--cfg', 'conduwuit_mods', -# '-Ztls-model=initial-exec', -# '-Cprefer-dynamic=true', -# '-Zstaticlib-prefer-dynamic=true', -# '-Zstaticlib-allow-rdylib-deps=true', -# '-Zpacked-bundled-libs=true', -# '-Zplt=true', -# '-Clink-arg=-Wl,--no-as-needed', -# '-Clink-arg=-Wl,--allow-shlib-undefined', -# '-Clink-arg=-Wl,-z,lazy', -# '-Clink-arg=-Wl,-z,nodlopen', -# '-Clink-arg=-Wl,-z,nodelete', -#] - [profile.dev.package.'*'] inherits = "dev" debug = 'limited' diff --git a/src/database/maps.rs b/src/database/maps.rs index 9af45159..138bb038 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -169,6 +169,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomsynctoken_shortstatehash", + file_shape: 3, val_size_hint: Some(8), block_size: 512, compression_level: 3, From 51d29bc1cbca84c001c3b4efbfca9c34a9b94f37 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Mar 2025 21:44:52 -0500 Subject: [PATCH 0724/1248] bump complement Signed-off-by: strawberry --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/flake.lock b/flake.lock index c3292cbc..03fc205c 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1740291865, - "narHash": "sha256-wl1+yCTEtvIH8vgXygnxPkaSgg4MYNKs+c9tzVytr20=", + "lastModified": 1741378155, + "narHash": "sha256-rJSfqf3q4oWxcAwENtAowLZeCi8lktwKVH9XQvvZR64=", "owner": "girlbossceo", "repo": "complement", - "rev": "35ad9d9051498fbac8ea4abff8ab7d8b1844f87b", + "rev": "1502a00d8551d0f6e8954a23e43868877c3e57d9", "type": "github" }, "original": { From 90fee4f50eb5a0f81390e088f60265ab4974370e Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 00:15:13 -0500 Subject: [PATCH 0725/1248] add gotestfmt log output to complement script and CI output Signed-off-by: strawberry --- .github/workflows/ci.yml | 30 +++++++++++++++++++----------- bin/complement | 22 +++++++++++++++++----- flake.nix | 1 + 3 files changed, 37 insertions(+), 16 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0425873..c8fef47f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -176,6 +176,13 @@ jobs: path: complement_test_results.jsonl if-no-files-found: error + - name: Upload Complement logs (gotestfmt) + uses: actions/upload-artifact@v4 + with: + name: complement_test_logs_gotestfmt.log + path: complement_test_logs_gotestfmt.log + if-no-files-found: error + - name: Diff Complement results with checked-in repo results run: | diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log) @@ -186,22 +193,23 @@ jobs: if: success() || failure() run: | if [ ${GH_JOB_STATUS} == 'success' ]; then - echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY + echo '# ✅ CI completed suwuccessfully' >> $GITHUB_STEP_SUMMARY else - echo '# CI failure' >> $GITHUB_STEP_SUMMARY + echo '# ❌ CI failed (last 100 lines of output)' >> $GITHUB_STEP_SUMMARY echo '```' >> 
$GITHUB_STEP_SUMMARY - tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY fi - - name: Run cargo clean test artifacts to free up space - run: | - cargo clean --profile test + echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY build: name: Build diff --git a/bin/complement b/bin/complement index 9960299c..aec27c5b 100755 --- a/bin/complement +++ b/bin/complement @@ -10,15 +10,15 @@ set -euo pipefail COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}" # A `.jsonl` file to write test logs to -LOG_FILE="$2" +LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to -RESULTS_FILE="$3" +RESULTS_FILE="${3:-complement_test_results.jsonl}" OCI_IMAGE="complement-conduwuit:main" -# Complement tests that are skipped due to flakiness/reliability issues -SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' +# Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time +#SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then @@ -34,6 +34,7 @@ toplevel="$(git rev-parse 
--show-toplevel)" pushd "$toplevel" > /dev/null +# if using macOS, use linux-complement #bin/nix-build-and-cache just .#linux-complement bin/nix-build-and-cache just .#complement @@ -45,7 +46,8 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution + go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results @@ -55,3 +57,13 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' and .Test != null ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" + +grep '^{"Time":' "$LOG_FILE" | gotestfmt > "${LOG_FILE}_gotestfmt.log" + +echo "" +echo "" +echo "complement logs saved at $LOG_FILE" +echo "complement results saved at $RESULTS_FILE" +echo "complement logs in gotestfmt pretty format outputted at ${LOG_FILE}_gotestfmt.log (use an editor/terminal that interprets ANSI colours)" +echo "" +echo "" diff --git a/flake.nix b/flake.nix index 8f08a7d9..544cdd4a 100644 --- a/flake.nix +++ b/flake.nix @@ -161,6 +161,7 @@ # Needed for our script for Complement jq + gotestfmt # Needed for finding broken markdown links lychee From 5a3264980aee8f5869eb953e82c01b62c2ac5bed Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 01:35:26 -0500 Subject: [PATCH 0726/1248] adjust complement script to allow using your own hs OCI image without nix Signed-off-by: strawberry --- bin/complement | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/bin/complement b/bin/complement index aec27c5b..47c02843 100755 --- a/bin/complement +++ b/bin/complement @@ -15,7 +15,7 @@ 
LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to RESULTS_FILE="${3:-complement_test_results.jsonl}" -OCI_IMAGE="complement-conduwuit:main" +COMPLEMENT_OCI_IMAGE="${COMPLEMENT_OCI_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time #SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' @@ -34,18 +34,38 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null -# if using macOS, use linux-complement -#bin/nix-build-and-cache just .#linux-complement -bin/nix-build-and-cache just .#complement +if [ ! -f "complement_oci_image.tar.gz" ]; then + echo "building complement conduwuit image" -docker load < result -popd > /dev/null + # if using macOS, use linux-complement + #bin/nix-build-and-cache just .#linux-complement + bin/nix-build-and-cache just .#complement + + echo "complement conduwuit image tar.gz built at \"result\"" + + echo "loading into docker" + docker load < result + popd > /dev/null +else + echo "skipping building a complement conduwuit image as complement_oci_image.tar.gz was already found, loading this" + + docker load < complement_oci_image.tar.gz + popd > /dev/null +fi + +echo "" +echo "running go test with:" +echo "\$COMPLEMENT_SRC: $COMPLEMENT_SRC" +echo "\$COMPLEMENT_BASE_IMAGE: $COMPLEMENT_BASE_IMAGE" +echo "\$RESULTS_FILE: $RESULTS_FILE" +echo "\$LOG_FILE: $LOG_FILE" +echo "" # It's okay (likely, even) that `go test` exits nonzero set +o pipefail env \ -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ + COMPLEMENT_BASE_IMAGE="$COMPLEMENT_OCI_IMAGE" \ COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail From bb0b57efb8d8d89fce0392e7c6c34c169ba054b8 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 02:30:58 -0500 Subject: [PATCH 0727/1248] bump rust-rocksdb Signed-off-by: strawberry --- Cargo.lock | 26 ++++++++++++++++++++++---- Cargo.toml | 2 +- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a224ad0f..8d4688f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,7 +236,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" dependencies = [ - "bindgen", + "bindgen 0.69.5", "cc", "cmake", "dunce", @@ -431,6 +431,24 @@ dependencies = [ "which", ] +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags 2.9.0", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "proc-macro2", + "quote", + "regex", + "rustc-hash 2.1.1", + "shlex", + "syn 2.0.98", +] + [[package]] name = "bit_field" version = "0.10.2" @@ -3685,9 +3703,9 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" dependencies = [ - "bindgen", + "bindgen 0.71.1", "bzip2-sys", "cc", "glob", @@ -3702,7 +3720,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" +source = 
"git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index c48be06a..de90e63e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2e692ae026881fc385f111fdcfba38bee98f1e47" +rev = "1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" default-features = false features = [ "multi-threaded-cf", From c8a730c29e3ec5c9d38028b89f3fd26ed546ef8f Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 03:07:42 -0500 Subject: [PATCH 0728/1248] implement MSC4267 automatically forgetting room on leave Signed-off-by: strawberry --- conduwuit-example.toml | 11 ++++++++++- src/api/client/capabilities.rs | 7 +++++++ src/core/config/mod.rs | 10 ++++++++++ src/service/rooms/state_cache/mod.rs | 8 ++++++-- 4 files changed, 33 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3d4b15bc..15e6dd37 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -445,10 +445,19 @@ # #allow_federation = true -# This item is undocumented. Please contribute documentation for it. +# Allows federation requests to be made to itself +# +# This isn't intended and is very likely a bug if federation requests are +# being sent to yourself. This currently mainly exists for development +# purposes. # #federation_loopback = false +# Always calls /forget on behalf of the user if leaving a room. This is a +# part of MSC4267 "Automatically forgetting rooms on leave" +# +#forget_forced_upon_leave = false + # Set this to true to require authentication on the normally # unauthenticated profile retrieval endpoints (GET) # "/_matrix/client/v3/profile/{userId}". 
diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index e20af21b..470ff6ab 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -42,5 +42,12 @@ pub(crate) async fn get_capabilities_route( .set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true})) .expect("this is valid JSON we created"); + capabilities + .set( + "org.matrix.msc4267.forget_forced_upon_leave", + json!({"enabled": services.config.forget_forced_upon_leave}), + ) + .expect("valid JSON we created"); + Ok(get_capabilities::v3::Response { capabilities }) } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a82f5f53..e69a56b9 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -558,9 +558,19 @@ pub struct Config { #[serde(default = "true_fn")] pub allow_federation: bool, + /// Allows federation requests to be made to itself + /// + /// This isn't intended and is very likely a bug if federation requests are + /// being sent to yourself. This currently mainly exists for development + /// purposes. #[serde(default)] pub federation_loopback: bool, + /// Always calls /forget on behalf of the user if leaving a room. This is a + /// part of MSC4267 "Automatically forgetting rooms on leave" + #[serde(default)] + pub forget_forced_upon_leave: bool, + /// Set this to true to require authentication on the normally /// unauthenticated profile retrieval endpoints (GET) /// "/_matrix/client/v3/profile/{userId}". 
diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index f406eb69..23ba0520 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -28,7 +28,7 @@ use ruma::{ serde::Raw, }; -use crate::{Dep, account_data, appservice::RegistrationInfo, globals, rooms, users}; +use crate::{Dep, account_data, appservice::RegistrationInfo, config, globals, rooms, users}; pub struct Service { appservice_in_room_cache: AppServiceInRoomCache, @@ -38,6 +38,7 @@ pub struct Service { struct Services { account_data: Dep, + config: Dep, globals: Dep, state_accessor: Dep, users: Dep, @@ -70,6 +71,7 @@ impl crate::Service for Service { appservice_in_room_cache: RwLock::new(HashMap::new()), services: Services { account_data: args.depend::("account_data"), + config: args.depend::("config"), globals: args.depend::("globals"), state_accessor: args .depend::("rooms::state_accessor"), @@ -268,7 +270,9 @@ impl Service { | MembershipState::Leave | MembershipState::Ban => { self.mark_as_left(user_id, room_id); - if self.services.globals.user_is_local(user_id) { + if self.services.globals.user_is_local(user_id) + && self.services.config.forget_forced_upon_leave + { self.forget(room_id, user_id); } }, From ef96e7afac81ffa6e3335144644277e4ac28658b Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 13:52:56 -0500 Subject: [PATCH 0729/1248] add cargo auditable for future use, ignore paste dependency being unmaintained for now Signed-off-by: strawberry Signed-off-by: June Clementine Strawberry --- .cargo/audit.toml | 27 +++++++++++++++++++++++++++ engage.toml | 2 +- flake.nix | 8 +++++--- 3 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 .cargo/audit.toml diff --git a/.cargo/audit.toml b/.cargo/audit.toml new file mode 100644 index 00000000..bf44fbd6 --- /dev/null +++ b/.cargo/audit.toml @@ -0,0 +1,27 @@ +[advisories] +ignore = ["RUSTSEC-2024-0436"] # advisory IDs to ignore e.g. 
["RUSTSEC-2019-0001", ...] +informational_warnings = [] # warn for categories of informational advisories +severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical") + +# Advisory Database Configuration +[database] +path = "~/.cargo/advisory-db" # Path where advisory git repo will be cloned +url = "https://github.com/RustSec/advisory-db.git" # URL to git repo +fetch = true # Perform a `git fetch` before auditing (default: true) +stale = false # Allow stale advisory DB (i.e. no commits for 90 days, default: false) + +# Output Configuration +[output] +deny = ["warnings", "unmaintained", "unsound", "yanked"] # exit on error if unmaintained dependencies are found +format = "terminal" # "terminal" (human readable report) or "json" +quiet = false # Only print information on error +show_tree = true # Show inverse dependency trees along with advisories (default: true) + +# Target Configuration +[target] +arch = ["x86_64", "aarch64"] # Ignore advisories for CPU architectures other than these +os = ["linux", "windows", "macos"] # Ignore advisories for operating systems other than these + +[yanked] +enabled = true # Warn for yanked crates in Cargo.lock (default: true) +update_index = true # Auto-update the crates.io index (default: true) diff --git a/engage.toml b/engage.toml index 71366532..0a857b5a 100644 --- a/engage.toml +++ b/engage.toml @@ -63,7 +63,7 @@ script = "markdownlint --version" [[task]] name = "cargo-audit" group = "security" -script = "cargo audit -D warnings -D unmaintained -D unsound -D yanked" +script = "cargo audit --color=always -D warnings -D unmaintained -D unsound -D yanked" [[task]] name = "cargo-fmt" diff --git a/flake.nix b/flake.nix index 544cdd4a..9db2e90a 100644 --- a/flake.nix +++ b/flake.nix @@ -144,18 +144,20 @@ toolchain ] ++ (with pkgsHost.pkgs; [ - engage - cargo-audit - # Required by hardened-malloc.rs dep binutils + cargo-audit + cargo-auditable + # Needed for producing Debian packages cargo-deb # Needed 
for CI to check validity of produced Debian packages (dpkg-deb) dpkg + engage + # Needed for Complement go From 5efe804a207420482dc5c57b8db044c5818d5037 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 8 Mar 2025 15:48:23 -0500 Subject: [PATCH 0730/1248] always disable fed, evict admins, and forget the room when banning a room Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 20 +-- bin/complement | 18 +- nix/pkgs/main/default.nix | 2 +- src/admin/room/moderation.rs | 328 +++++++++-------------------------- 4 files changed, 109 insertions(+), 259 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c8fef47f..9a1366f1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -199,18 +199,18 @@ jobs: echo '```' >> $GITHUB_STEP_SUMMARY tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY fi - echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - build: name: Build runs-on: 
self-hosted diff --git a/bin/complement b/bin/complement index 47c02843..b869bad6 100755 --- a/bin/complement +++ b/bin/complement @@ -15,7 +15,7 @@ LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to RESULTS_FILE="${3:-complement_test_results.jsonl}" -COMPLEMENT_OCI_IMAGE="${COMPLEMENT_OCI_IMAGE:-complement-conduwuit:main}" +COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time #SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' @@ -62,12 +62,13 @@ echo "\$LOG_FILE: $LOG_FILE" echo "" # It's okay (likely, even) that `go test` exits nonzero +# `COMPLEMENT_ENABLE_DIRTY_RUNS=1` reuses the same complement container for faster complement, at the possible expense of test environment pollution set +o pipefail env \ -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$COMPLEMENT_OCI_IMAGE" \ - COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution - go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ + COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ + go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results @@ -78,12 +79,17 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" -grep '^{"Time":' "$LOG_FILE" | gotestfmt > "${LOG_FILE}_gotestfmt.log" +if command -v gotestfmt &> /dev/null; then + echo "using gotestfmt on $LOG_FILE" + grep '^{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" +fi echo "" echo "" echo "complement logs saved at $LOG_FILE" echo "complement results saved at $RESULTS_FILE" -echo "complement logs in gotestfmt pretty format outputted at ${LOG_FILE}_gotestfmt.log (use an editor/terminal that interprets ANSI colours)" +if command -v gotestfmt &> /dev/null; then + echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" +fi echo "" echo "" diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 5dfb32ec..9c8038a7 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -155,9 +155,9 @@ commonAttrs = { # Keep sorted include = [ + ".cargo" "Cargo.lock" "Cargo.toml" - "deps" "src" ]; }; diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index 444dfa2f..dd5ea627 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,7 +1,7 @@ use api::client::leave_room; use clap::Subcommand; use conduwuit::{ - Result, debug, error, info, + Result, debug, utils::{IterStream, ReadyExt}, warn, }; @@ -17,51 +17,23 @@ use crate::{admin_command, admin_command_dispatch, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomModerationCommand { /// - Bans a room from local users joining and evicts all our local users + /// (including server + /// admins) /// from the room. Also blocks any invites (local and remote) for the - /// banned room. 
- /// - /// Server admins (users in the conduwuit admin room) will not be evicted - /// and server admins can still join the room. To evict admins too, use - /// --force (also ignores errors) To disable incoming federation of the - /// room, use --disable-federation + /// banned room, and disables federation entirely with it. BanRoom { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` room: Box, }, /// - Bans a list of rooms (room IDs and room aliases) from a newline - /// delimited codeblock similar to `user deactivate-all` - BanListOfRooms { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - }, + /// delimited codeblock similar to `user deactivate-all`. 
Applies the same + /// steps as ban-room + BanListOfRooms, /// - Unbans a room to allow local users to join again - /// - /// To re-enable incoming federation of the room, use --enable-federation UnbanRoom { - #[arg(long)] - /// Enables incoming federation of the room after unbanning - enable_federation: bool, - /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` room: Box, @@ -77,12 +49,7 @@ pub(crate) enum RoomModerationCommand { } #[admin_command] -async fn ban_room( - &self, - force: bool, - disable_federation: bool, - room: Box, -) -> Result { +async fn ban_room(&self, room: Box) -> Result { debug!("Got room alias or ID: {}", room); let admin_room_alias = &self.services.globals.admin_alias; @@ -175,98 +142,56 @@ async fn ban_room( )); }; - debug!("Making all users leave the room {}", &room); - if force { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); + debug!("Making all users leave the room {room_id} and forgetting it"); + let mut users = self + .services + .rooms + .state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); - while let Some(local_user) = users.next().await { - debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all \ - errors, evicting admins too)", - ); + while let Some(ref user_id) = users.next().await { + debug!( + "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ + evicting admins too)", + ); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - warn!(%e, "Failed to leave room"); - } + if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + warn!("Failed to leave room: {e}"); } - } else { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) 
- .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - while let Some(local_user) = users.next().await { - if self.services.users.is_admin(local_user).await { - continue; - } - - debug!("Attempting leave for user {} in room {}", &local_user, &room_id); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - error!( - "Error attempting to make local user {} leave room {} during room banning: \ - {}", - &local_user, &room_id, e - ); - return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room banning \ - (room is still banned but not removing any more users): {}\nIf you would \ - like to ignore errors, use --force", - &local_user, &room_id, e - ))); - } - } + self.services.rooms.state_cache.forget(&room_id, user_id); } - // remove any local aliases, ignore errors - for local_alias in &self - .services + self.services .rooms .alias .local_aliases_for_room(&room_id) .map(ToOwned::to_owned) - .collect::>() - .await - { - _ = self - .services - .rooms - .alias - .remove_alias(local_alias, &self.services.globals.server_user) - .await; - } + .for_each(|local_alias| async move { + self.services + .rooms + .alias + .remove_alias(&local_alias, &self.services.globals.server_user) + .await + .ok(); + }) + .await; - // unpublish from room directory, ignore errors + // unpublish from room directory self.services.rooms.directory.set_not_public(&room_id); - if disable_federation { - self.services.rooms.metadata.disable_room(&room_id, true); - return Ok(RoomMessageEventContent::text_plain( - "Room banned, removed all our local users, and disabled incoming federation with \ - room.", - )); - } + self.services.rooms.metadata.disable_room(&room_id, true); Ok(RoomMessageEventContent::text_plain( - "Room banned and removed all our local users, use `!admin federation disable-room` to \ - stop receiving new inbound federation events as well if needed.", + "Room banned, removed 
all our local users, and disabled incoming federation with room.", )) } #[admin_command] -async fn ban_list_of_rooms( - &self, - force: bool, - disable_federation: bool, -) -> Result { +async fn ban_list_of_rooms(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" @@ -293,7 +218,7 @@ async fn ban_list_of_rooms( if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) { - info!("User specified admin room in bulk ban list, ignoring"); + warn!("User specified admin room in bulk ban list, ignoring"); continue; } } @@ -302,19 +227,12 @@ async fn ban_list_of_rooms( let room_id = match RoomId::parse(room_alias_or_id) { | Ok(room_id) => room_id, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force banning - warn!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the \ - list and try again: {e}" - ))); + // ignore rooms we failed to parse + warn!( + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" + ); + continue; }, }; @@ -355,21 +273,11 @@ async fn ban_list_of_rooms( room_id }, | Err(e) => { - // don't fail if force blocking - if force { - warn!( - "Failed to resolve room alias {room} to a \ - room ID: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain( - format!( - "Failed to resolve room alias {room} to a \ - room ID: {e}" - ), - )); + warn!( + "Failed to resolve room alias {room} to a room \ + ID: {e}" + ); + continue; }, } }, @@ -378,37 +286,21 @@ async fn ban_list_of_rooms( room_ids.push(room_id); }, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force deleting - error!( - "Error 
parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the \ - list and try again: {e}" - ))); + warn!( + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" + ); + continue; }, } } }, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force deleting - error!( - "Error parsing room \"{room}\" during bulk room banning, ignoring error \ - and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the list and try \ - again: {e}" - ))); + warn!( + "Error parsing room \"{room}\" during bulk room banning, ignoring error and \ + logging here: {e}" + ); + continue; }, } } @@ -419,56 +311,27 @@ async fn ban_list_of_rooms( debug!("Banned {room_id} successfully"); room_ban_count = room_ban_count.saturating_add(1); - debug!("Making all users leave the room {}", &room_id); - if force { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); + debug!("Making all users leave the room {room_id} and forgetting it"); + let mut users = self + .services + .rooms + .state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); - while let Some(local_user) = users.next().await { - debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring \ - all errors, evicting admins too)", - ); + while let Some(ref user_id) = users.next().await { + debug!( + "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ + evicting admins too)", + ); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - 
warn!(%e, "Failed to leave room"); - } + if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + warn!("Failed to leave room: {e}"); } - } else { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - while let Some(local_user) = users.next().await { - if self.services.users.is_admin(local_user).await { - continue; - } - - debug!("Attempting leave for user {local_user} in room {room_id}"); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - error!( - "Error attempting to make local user {local_user} leave room {room_id} \ - during bulk room banning: {e}", - ); - - return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room \ - banning (room is still banned but not removing any more users and not \ - banning any more rooms): {}\nIf you would like to ignore errors, use \ - --force", - &local_user, &room_id, e - ))); - } - } + self.services.rooms.state_cache.forget(&room_id, user_id); } // remove any local aliases, ignore errors @@ -490,29 +353,17 @@ async fn ban_list_of_rooms( // unpublish from room directory, ignore errors self.services.rooms.directory.set_not_public(&room_id); - if disable_federation { - self.services.rooms.metadata.disable_room(&room_id, true); - } + self.services.rooms.metadata.disable_room(&room_id, true); } - if disable_federation { - Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, \ - and disabled incoming federation with the room." - ))) - } else { - Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms and evicted all users." 
- ))) - } + Ok(RoomMessageEventContent::text_plain(format!( + "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \ + disabled incoming federation with the room." + ))) } #[admin_command] -async fn unban_room( - &self, - enable_federation: bool, - room: Box, -) -> Result { +async fn unban_room(&self, room: Box) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, @@ -595,15 +446,8 @@ async fn unban_room( )); }; - if enable_federation { - self.services.rooms.metadata.disable_room(&room_id, false); - return Ok(RoomMessageEventContent::text_plain("Room unbanned.")); - } - - Ok(RoomMessageEventContent::text_plain( - "Room unbanned, you may need to re-enable federation with the room using enable-room if \ - this is a remote room to make it fully functional.", - )) + self.services.rooms.metadata.disable_room(&room_id, false); + Ok(RoomMessageEventContent::text_plain("Room unbanned and federation re-enabled.")) } #[admin_command] From 0b012b529f2c925f2bc20aee2381e2d30f116c46 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 8 Mar 2025 18:59:51 -0500 Subject: [PATCH 0731/1248] comment gotestfmt for now Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 12 ------------ bin/complement | 15 +++++++-------- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9a1366f1..cd7d2484 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -176,13 +176,6 @@ jobs: path: complement_test_results.jsonl if-no-files-found: error - - name: Upload Complement logs (gotestfmt) - uses: actions/upload-artifact@v4 - with: - name: complement_test_logs_gotestfmt.log - path: complement_test_logs_gotestfmt.log - if-no-files-found: error - - name: Diff Complement results with checked-in repo results run: | diff -u --color=always tests/test_results/complement/test_results.jsonl 
complement_test_results.jsonl > >(tee -a complement_diff_output.log) @@ -204,11 +197,6 @@ jobs: echo '```diff' >> $GITHUB_STEP_SUMMARY tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY fi build: diff --git a/bin/complement b/bin/complement index b869bad6..89521796 100755 --- a/bin/complement +++ b/bin/complement @@ -67,7 +67,6 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail @@ -79,17 +78,17 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" -if command -v gotestfmt &> /dev/null; then - echo "using gotestfmt on $LOG_FILE" - grep '^{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" -fi +#if command -v gotestfmt &> /dev/null; then +# echo "using gotestfmt on $LOG_FILE" +# grep '{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" +#fi echo "" echo "" echo "complement logs saved at $LOG_FILE" echo "complement results saved at $RESULTS_FILE" -if command -v gotestfmt &> /dev/null; then - echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" -fi +#if command -v gotestfmt &> /dev/null; then +# echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" +#fi echo "" echo "" From 06f2039eeeec2d5adf51e8ffbb470f01a8d9e868 Mon Sep 17 
00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 00:44:56 -0500 Subject: [PATCH 0732/1248] bump ruwuma to maybe fix rare device key upload issues Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 4 +++- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d4688f5..f768eae1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index de90e63e..2bc1d20f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "bb42118bd85e731b652a6110896b6945085bf944" +rev = "d577100f5480c6c528e7a8ff59cd08d95a3a16e7" features = [ "compat", "rand", @@ -371,7 +371,9 @@ features = [ "unstable-msc3381", # polls "unstable-msc3489", # beacon / live location "unstable-msc3575", + "unstable-msc3930", # polls push rules "unstable-msc4075", + "unstable-msc4095", "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", From d0c767c23c1dff11400388c5a8dd9e43f68705f1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 01:43:49 -0500 Subject: [PATCH 0733/1248] fix a few things to 
make some complement tests pass Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 4 +- src/api/client/room/create.rs | 8 +--- src/api/client/session.rs | 79 +++++++++++++++++++---------------- src/service/media/preview.rs | 23 ++++++---- src/service/users/mod.rs | 4 +- 5 files changed, 60 insertions(+), 58 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 940c8639..3f77e69e 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -517,9 +517,7 @@ pub(crate) async fn invite_user_route( join!(sender_ignored_recipient, recipient_ignored_by_sender); if sender_ignored_recipient { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + return Ok(invite_user::v3::Response {}); } if let Ok(target_user_membership) = services diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 1b8294a5..bb06e966 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -239,9 +239,7 @@ pub(crate) async fn create_room_route( if preset == RoomPreset::TrustedPrivateChat { for invite in &body.invite { if services.users.user_is_ignored(sender_user, invite).await { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + continue; } else if services.users.user_is_ignored(invite, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked @@ -420,9 +418,7 @@ pub(crate) async fn create_room_route( drop(state_lock); for user_id in &body.invite { if services.users.user_is_ignored(sender_user, user_id).await { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." 
- ))); + continue; } else if services.users.user_is_ignored(user_id, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked diff --git a/src/api/client/session.rs b/src/api/client/session.rs index ab67ee18..3de625e4 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -3,7 +3,7 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{Err, debug, err, info, utils::ReadyExt}; -use futures::{StreamExt, TryFutureExt}; +use futures::StreamExt; use ruma::{ UserId, api::client::{ @@ -96,32 +96,50 @@ pub(crate) async fn login_route( &services.config.server_name, )?; - assert!( - services.globals.user_is_local(&user_id), - "User ID does not belong to this homeserver" - ); - assert!( - services.globals.user_is_local(&lowercased_user_id), - "User ID does not belong to this homeserver" - ); + if !services.globals.user_is_local(&user_id) + || !services.globals.user_is_local(&lowercased_user_id) + { + return Err!(Request(Unknown("User ID does not belong to this homeserver"))); + } + // first try the username as-is let hash = services .users .password_hash(&user_id) - .or_else(|_| services.users.password_hash(&lowercased_user_id)) .await - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + .inspect_err(|e| debug!("{e}")); - if hash.is_empty() { - return Err!(Request(UserDeactivated("The user has been deactivated"))); + match hash { + | Ok(hash) => { + if hash.is_empty() { + return Err!(Request(UserDeactivated("The user has been deactivated"))); + } + + hash::verify_password(password, &hash) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + user_id + }, + | Err(_e) => { + let hash_lowercased_user_id = services + .users + .password_hash(&lowercased_user_id) + .await + .inspect_err(|e| debug!("{e}")) + .map_err(|_| 
err!(Request(Forbidden("Wrong username or password."))))?; + + if hash_lowercased_user_id.is_empty() { + return Err!(Request(UserDeactivated("The user has been deactivated"))); + } + + hash::verify_password(password, &hash_lowercased_user_id) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + lowercased_user_id + }, } - - hash::verify_password(password, &hash) - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; - - user_id }, | login::v3::LoginInfo::Token(login::v3::Token { token }) => { debug!("Got token login type"); @@ -153,24 +171,11 @@ pub(crate) async fn login_route( } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; - let lowercased_user_id = UserId::parse_with_server_name( - user_id.localpart().to_lowercase(), - &services.config.server_name, - )?; + if !services.globals.user_is_local(&user_id) { + return Err!(Request(Unknown("User ID does not belong to this homeserver"))); + } - assert!( - services.globals.user_is_local(&user_id), - "User ID does not belong to this homeserver" - ); - assert!( - services.globals.user_is_local(&lowercased_user_id), - "User ID does not belong to this homeserver" - ); - - if !info.is_user_match(&user_id) - && !info.is_user_match(&lowercased_user_id) - && !emergency_mode_enabled - { + if !info.is_user_match(&user_id) && !emergency_mode_enabled { return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index 17216869..ba5be7d4 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -7,7 +7,7 @@ use std::time::SystemTime; -use conduwuit::{Err, Result, debug}; +use conduwuit::{Err, Result, debug, err}; use conduwuit_core::implement; use ipaddress::IPAddress; use serde::Serialize; @@ -64,28 +64,33 @@ pub async fn get_url_preview(&self, url: &Url) -> Result { async 
fn request_url_preview(&self, url: &Url) -> Result { if let Ok(ip) = IPAddress::parse(url.host_str().expect("URL previously validated")) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Requesting from this address is forbidden")); + return Err!(Request(Forbidden("Requesting from this address is forbidden"))); } } let client = &self.services.client.url_preview; let response = client.head(url.as_str()).send().await?; + debug!(?url, "URL preview response headers: {:?}", response.headers()); + if let Some(remote_addr) = response.remote_addr() { + debug!(?url, "URL preview response remote address: {:?}", remote_addr); + if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Requesting from this address is forbidden")); + return Err!(Request(Forbidden("Requesting from this address is forbidden"))); } } } - let Some(content_type) = response - .headers() - .get(reqwest::header::CONTENT_TYPE) - .and_then(|x| x.to_str().ok()) - else { - return Err!(Request(Unknown("Unknown Content-Type"))); + let Some(content_type) = response.headers().get(reqwest::header::CONTENT_TYPE) else { + return Err!(Request(Unknown("Unknown or invalid Content-Type header"))); }; + + let content_type = content_type + .to_str() + .map_err(|e| err!(Request(Unknown("Unknown or invalid Content-Type header: {e}"))))?; + let data = match content_type { | html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, | img if img.starts_with("image/") => self.download_image(url.as_str()).await?, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b3f5db88..5265e64b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -278,11 +278,9 @@ impl Service { initial_device_display_name: Option, client_ip: Option, ) -> Result<()> { - // This method should never be called for nonexistent users. We shouldn't assert - // though... 
if !self.exists(user_id).await { return Err!(Request(InvalidParam(error!( - "Called create_device for non-existent {user_id}" + "Called create_device for non-existent user {user_id}" )))); } From 47ff91243d0da2088806351c040ac1386c92c63d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 03:33:29 -0400 Subject: [PATCH 0734/1248] update complement results Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 112 ++++++++++++++---- 1 file changed, 89 insertions(+), 23 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index fed43b48..7b06510b 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -6,9 +6,9 @@ {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events"} {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/incremental_sync"} {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/initial_sync"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty"} +{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty"} {"Action":"skip","Test":"TestArchivedRoomsHistory/timeline_is_empty/incremental_sync"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} +{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} {"Action":"fail","Test":"TestAsyncUpload"} {"Action":"fail","Test":"TestAsyncUpload/Cannot_upload_to_a_media_ID_that_has_already_been_uploaded_to"} {"Action":"fail","Test":"TestAsyncUpload/Create_media"} @@ -82,7 +82,7 @@ {"Action":"pass","Test":"TestContent"} {"Action":"pass","Test":"TestContentCSAPIMediaV1"} {"Action":"pass","Test":"TestContentMediaV1"} -{"Action":"fail","Test":"TestCumulativeJoinLeaveJoinSync"} +{"Action":"pass","Test":"TestCumulativeJoinLeaveJoinSync"} {"Action":"pass","Test":"TestDeactivateAccount"} 
{"Action":"pass","Test":"TestDeactivateAccount/After_deactivating_account,_can't_log_in_with_password"} {"Action":"pass","Test":"TestDeactivateAccount/Can't_deactivate_account_with_wrong_password"} @@ -153,10 +153,10 @@ {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"} {"Action":"pass","Test":"TestFederationRedactSendsWithoutEvent"} {"Action":"pass","Test":"TestFederationRejectInvite"} -{"Action":"fail","Test":"TestFederationRoomsInvite"} -{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestFederationRoomsInvite"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"} -{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_join_the_room_when_homeserver_is_already_participating_in_the_room"} @@ -173,7 +173,7 @@ {"Action":"pass","Test":"TestFetchMessagesFromNonExistentRoom"} {"Action":"pass","Test":"TestFilter"} {"Action":"fail","Test":"TestFilterMessagesByRelType"} -{"Action":"fail","Test":"TestGappedSyncLeaveSection"} +{"Action":"pass","Test":"TestGappedSyncLeaveSection"} {"Action":"fail","Test":"TestGetFilteredRoomMembers"} {"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} {"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} @@ -191,7 +191,7 @@ 
{"Action":"pass","Test":"TestInboundFederationProfile/Inbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"} {"Action":"fail","Test":"TestInboundFederationRejectsEventsWithRejectedAuthEvents"} -{"Action":"fail","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} +{"Action":"pass","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} {"Action":"pass","Test":"TestIsDirectFlagFederation"} {"Action":"pass","Test":"TestIsDirectFlagLocal"} {"Action":"pass","Test":"TestJoinFederatedRoomFailOver"} @@ -281,7 +281,7 @@ {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} {"Action":"pass","Test":"TestLeakyTyping"} -{"Action":"fail","Test":"TestLeaveEventInviteRejection"} +{"Action":"pass","Test":"TestLeaveEventInviteRejection"} {"Action":"fail","Test":"TestLeaveEventVisibility"} {"Action":"fail","Test":"TestLeftRoomFixture"} {"Action":"fail","Test":"TestLeftRoomFixture/Can_get_'m.room.name'_state_for_a_departed_room"} @@ -292,10 +292,10 @@ {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} -{"Action":"fail","Test":"TestLogin"} -{"Action":"fail","Test":"TestLogin/parallel"} +{"Action":"pass","Test":"TestLogin"} +{"Action":"pass","Test":"TestLogin/parallel"} {"Action":"pass","Test":"TestLogin/parallel/GET_/login_yields_a_set_of_flows"} -{"Action":"fail","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} +{"Action":"pass","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} 
{"Action":"pass","Test":"TestLogin/parallel/POST_/login_as_non-existing_user_is_rejected"} {"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_log_in_as_a_user_with_just_the_local_part_of_the_id"} {"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_login_as_user"} @@ -354,12 +354,78 @@ {"Action":"fail","Test":"TestMembershipOnEvents"} {"Action":"fail","Test":"TestNetworkPartitionOrdering"} {"Action":"pass","Test":"TestNotPresentUserCannotBanOthers"} -{"Action":"fail","Test":"TestOlderLeftRoomsNotInLeaveSection"} +{"Action":"pass","Test":"TestOlderLeftRoomsNotInLeaveSection"} {"Action":"fail","Test":"TestOutboundFederationEventSizeGetMissingEvents"} {"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} {"Action":"pass","Test":"TestOutboundFederationProfile"} {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} +{"Action":"fail","Test":"TestPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanFastJoinDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanLazyLoadingSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveDeviceListUpdateDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingGrandparentsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingParentsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithMissingParentsDuringPartialStateJoin"} +{"Action":"skip","Test":"TestPartialStateJoin/CanReceivePresenceDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveReceiptDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveSigningKeyUpdateDuringPartialStateJoin"} 
+{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveToDeviceDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveTypingDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanSendEventsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/Can_change_display_name_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_for_user_incorrectly_believed_to_be_in_room"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_failing_to_complete_partial_state_join"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_leaving_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_new_member_leaves_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracked_for_new_members_in_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_pre-existing_members_in_partial_state_room"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_join_another_shared_room_before_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_after_partial_state_join_completes"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_before_partial_state_join_completes"} 
+{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_when_pre-existing_members_in_partial_state_room_join_another_shared_room"} +{"Action":"fail","Test":"TestPartialStateJoin/EagerIncrementalSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/EagerInitialSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/EagerLongPollingSyncWokenWhenResyncCompletes"} +{"Action":"fail","Test":"TestPartialStateJoin/GappySyncAfterPartialStateSynced"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_gappy_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_incremental_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_initial_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_ban"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_kick"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/does_not_wait_for_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/is_seen_after_the_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_another_user_can_join_without_resync_completing"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_rejoin_succeeds_without_resync_completing"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/works_after_a_second_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/MembersRequestBlocksDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates"} 
+{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_no_longer_reach_departed_servers_after_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_all_servers_in_partial_state_rooms"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_newly_joined_servers_in_partial_state_rooms"} +{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinContinuesAfterRestart"} +{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinSyncsUsingOtherHomeservers"} +{"Action":"skip","Test":"TestPartialStateJoin/Purge_during_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejected_events_remain_rejected_after_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_join_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_knock_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_join_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_knock_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Resync_completes_even_when_events_arrive_before_their_prev_events"} 
+{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_deleted_during_a_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_queried_during_a_resync"} +{"Action":"skip","Test":"TestPartialStateJoin/Room_stats_are_correctly_updated_once_state_re-sync_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/State_accepted_incorrectly"} +{"Action":"fail","Test":"TestPartialStateJoin/State_rejected_incorrectly"} +{"Action":"fail","Test":"TestPartialStateJoin/User_directory_is_correctly_updated_once_state_re-sync_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/joined_members_blocks_during_partial_state_join"} {"Action":"fail","Test":"TestPollsLocalPushRules"} {"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} {"Action":"pass","Test":"TestPowerLevels"} @@ -559,11 +625,11 @@ {"Action":"pass","Test":"TestRoomState/Parallel/PUT_/createRoom_with_creation_content"} {"Action":"pass","Test":"TestRoomState/Parallel/PUT_/rooms/:room_id/state/m.room.topic_sets_topic"} {"Action":"pass","Test":"TestRoomSummary"} -{"Action":"fail","Test":"TestRoomsInvite"} -{"Action":"fail","Test":"TestRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestRoomsInvite"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Can_invite_users_to_invite-only_rooms"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite"} -{"Action":"fail","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_see_room_metadata"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Uninvited_users_cannot_join_the_room"} @@ -596,15 +662,15 @@ 
{"Action":"pass","Test":"TestSyncFilter"} {"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} {"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} -{"Action":"fail","Test":"TestSyncLeaveSection"} -{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} -{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} -{"Action":"fail","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection"} +{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} {"Action":"pass","Test":"TestSyncOmitsStateChangeOnFilteredEvents"} {"Action":"pass","Test":"TestSyncTimelineGap"} {"Action":"pass","Test":"TestSyncTimelineGap/full"} {"Action":"pass","Test":"TestSyncTimelineGap/incremental"} -{"Action":"fail","Test":"TestTentativeEventualJoiningAfterRejecting"} +{"Action":"pass","Test":"TestTentativeEventualJoiningAfterRejecting"} {"Action":"fail","Test":"TestThreadReceiptsInSyncMSC4102"} {"Action":"fail","Test":"TestThreadedReceipts"} {"Action":"fail","Test":"TestThreadsEndpoint"} @@ -635,8 +701,8 @@ {"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_device_keys_using_POST"} {"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_specific_device_keys_using_POST"} {"Action":"pass","Test":"TestUploadKey/Parallel/Can_upload_device_keys"} -{"Action":"fail","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} -{"Action":"fail","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} 
+{"Action":"pass","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} {"Action":"pass","Test":"TestUploadKey/Parallel/query_for_user_with_no_keys_returns_empty_key_dict"} {"Action":"pass","Test":"TestUploadKeyIdempotency"} {"Action":"pass","Test":"TestUploadKeyIdempotencyOverlap"} From 0e342aab7f2a173638fa723a9d36ae16fe9396d1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 13:44:57 -0400 Subject: [PATCH 0735/1248] fix a few error codes Signed-off-by: June Clementine Strawberry --- src/api/client/alias.rs | 2 +- src/api/client/context.rs | 18 ++++++++++++------ src/api/client/state.rs | 12 +++++++----- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 319e5141..9cd7e0c5 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -92,7 +92,7 @@ pub(crate) async fn get_alias_route( let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await else { - return Err!(Request(NotFound("Room with alias not found."))); + return Err!(Request(Unknown("Room with alias not found."))); }; let servers = room_available_servers(&services, &room_id, &room_alias, servers).await; diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 3f16c850..cb95dfef 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, err, ref_at, + Err, PduEvent, Result, at, debug_warn, err, ref_at, utils::{ IterStream, future::TryExtExt, @@ -35,8 +35,13 @@ pub(crate) async fn get_context_route( let sender = body.sender(); let (sender_user, sender_device) = sender; let room_id = &body.room_id; + let event_id = &body.event_id; let filter = &body.filter; + if !services.rooms.metadata.exists(room_id).await { + return Err!(Request(Forbidden("Room does not exist to this server"))); + } + // Use limit or else 10, with 
maximum 100 let limit: usize = body .limit @@ -47,29 +52,30 @@ pub(crate) async fn get_context_route( let base_id = services .rooms .timeline - .get_pdu_id(&body.event_id) + .get_pdu_id(event_id) .map_err(|_| err!(Request(NotFound("Event not found.")))); let base_pdu = services .rooms .timeline - .get_pdu(&body.event_id) + .get_pdu(event_id) .map_err(|_| err!(Request(NotFound("Base event not found.")))); let visible = services .rooms .state_accessor - .user_can_see_event(sender_user, &body.room_id, &body.event_id) + .user_can_see_event(sender_user, room_id, event_id) .map(Ok); let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?; - if base_pdu.room_id != body.room_id || base_pdu.event_id != body.event_id { + if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id { return Err!(Request(NotFound("Base event not found."))); } if !visible { - return Err!(Request(Forbidden("You don't have permission to view this event."))); + debug_warn!(req_evt = ?event_id, ?base_id, ?room_id, "Event requested by {sender_user} but is not allowed to see it, returning 404"); + return Err!(Request(NotFound("Event not found."))); } let base_count = base_id.pdu_count(); diff --git a/src/api/client/state.rs b/src/api/client/state.rs index c92091eb..d04aac35 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -27,7 +27,7 @@ pub(crate) async fn send_state_event_for_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); Ok(send_state_event::v3::Response { event_id: send_state_event_for_key_helper( @@ -103,7 +103,7 @@ pub(crate) async fn get_state_events_for_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services .rooms @@ -111,7 +111,9 @@ pub(crate) async fn 
get_state_events_for_key_route( .user_can_see_state_events(sender_user, &body.room_id) .await { - return Err!(Request(Forbidden("You don't have permission to view the room state."))); + return Err!(Request(NotFound(debug_warn!( + "You don't have permission to view the room state." + )))); } let event = services @@ -316,14 +318,14 @@ async fn allowed_to_send_state_event( services.rooms.alias.resolve_alias(&alias, None).await?; if alias_room_id != room_id { - return Err!(Request(Forbidden( + return Err!(Request(Unknown( "Room alias {alias} does not belong to room {room_id}" ))); } } }, | Err(e) => { - return Err!(Request(BadJson(debug_warn!( + return Err!(Request(InvalidParam(debug_warn!( "Room canonical alias event is invalid: {e}" )))); }, From 0e2ca7d7192684a945ac49aa53066c488dd40886 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 21:55:07 -0400 Subject: [PATCH 0736/1248] implement disable TLS validation config option Signed-off-by: June Clementine Strawberry --- nix/pkgs/complement/config.toml | 2 ++ src/core/config/check.rs | 4 ++++ src/core/config/mod.rs | 12 +++++++++++- src/service/client/mod.rs | 3 ++- 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 99c151c5..4d7637db 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -32,6 +32,8 @@ allow_legacy_media = true startup_netburst = true startup_netburst_keep = -1 +allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true + # valgrind makes things so slow dns_timeout = 60 dns_attempts = 20 diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 98223be4..f9d51eeb 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -28,6 +28,10 @@ pub fn check(config: &Config) -> Result { warn!("Note: conduwuit was built without optimisations (i.e. 
debug build)"); } + if config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure { + warn!("\n\nWARNING: \n\nTLS CERTIFICATE VALIDATION IS DISABLED, THIS IS HIGHLY INSECURE AND SHOULD NOT BE USED IN PRODUCTION.\n\n"); + } + warn_deprecated(config); warn_unknown_key(config); diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e69a56b9..6b669ad3 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -52,7 +52,7 @@ use crate::{Result, err, error::Error, utils::sys}; ### For more information, see: ### https://conduwuit.puppyirl.gay/configuration.html "#, - ignore = "catchall well_known tls blurhashing" + ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure" )] pub struct Config { /// The server_name is the pretty name of this server. It is used as a @@ -1806,6 +1806,16 @@ pub struct Config { #[serde(default = "true_fn")] pub config_reload_signal: bool, + /// Toggles ignore checking/validating TLS certificates + /// + /// This applies to everything, including URL previews, federation requests, + /// etc. This is a hidden argument that should NOT be used in production as + /// it is highly insecure and I will personally yell at you if I catch you + /// using this. 
+ #[serde(default)] + pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure: + bool, + // external structure; separate section #[serde(default)] pub blurhashing: BlurhashConfig, diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index d5008491..d51e5721 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -128,7 +128,8 @@ fn base(config: &Config) -> Result { .pool_max_idle_per_host(config.request_idle_per_host.into()) .user_agent(conduwuit::version::user_agent()) .redirect(redirect::Policy::limited(6)) - .connection_verbose(true); + .danger_accept_invalid_certs(config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure) + .connection_verbose(cfg!(debug_assertions)); #[cfg(feature = "gzip_compression")] { From df1edcf498ac58e27e6ff261b0d53a773d82f69f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 10:32:11 -0400 Subject: [PATCH 0737/1248] adjust complement cert generation Signed-off-by: June Clementine Strawberry --- bin/complement | 1 + nix/pkgs/complement/config.toml | 2 -- nix/pkgs/complement/default.nix | 22 +++++++------------ nix/pkgs/complement/private_key.key | 28 +++++++++++++++++++++++++ nix/pkgs/complement/signing_request.csr | 16 ++++++++++++++ 5 files changed, 53 insertions(+), 16 deletions(-) create mode 100644 nix/pkgs/complement/private_key.key create mode 100644 nix/pkgs/complement/signing_request.csr diff --git a/bin/complement b/bin/complement index 89521796..92539f97 100755 --- a/bin/complement +++ b/bin/complement @@ -40,6 +40,7 @@ if [ ! 
-f "complement_oci_image.tar.gz" ]; then # if using macOS, use linux-complement #bin/nix-build-and-cache just .#linux-complement bin/nix-build-and-cache just .#complement + #nix build -L .#complement echo "complement conduwuit image tar.gz built at \"result\"" diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 4d7637db..759f8d78 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -47,6 +47,4 @@ sender_idle_timeout = 300 sender_retry_backoff_limit = 300 [global.tls] -certs = "/certificate.crt" dual_protocol = true -key = "/private_key.key" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index d9af0779..bbd1bd74 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -42,25 +42,18 @@ let start = writeShellScriptBin "start" '' set -euxo pipefail - ${lib.getExe openssl} genrsa -out private_key.key 2048 - ${lib.getExe openssl} req \ - -new \ - -sha256 \ - -key private_key.key \ - -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" \ - -out signing_request.csr - cp ${./v3.ext} v3.ext - echo "DNS.1 = $SERVER_NAME" >> v3.ext + cp ${./v3.ext} /complement/v3.ext + echo "DNS.1 = $SERVER_NAME" >> /complement/v3.ext echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \ - >> v3.ext + >> /complement/v3.ext ${lib.getExe openssl} x509 \ -req \ - -extfile v3.ext \ - -in signing_request.csr \ + -extfile /complement/v3.ext \ + -in ${./signing_request.csr} \ -CA /complement/ca/ca.crt \ -CAkey /complement/ca/ca.key \ -CAcreateserial \ - -out certificate.crt \ + -out /complement/certificate.crt \ -days 1 \ -sha256 @@ -99,7 +92,8 @@ dockerTools.buildImage { else []; Env = [ - "SSL_CERT_FILE=/complement/ca/ca.crt" + "CONDUWUIT_TLS__KEY=${./private_key.key}" + "CONDUWUIT_TLS__CERTS=/complement/certificate.crt" "CONDUWUIT_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/nix/pkgs/complement/private_key.key b/nix/pkgs/complement/private_key.key 
new file mode 100644 index 00000000..5b9d4d4f --- /dev/null +++ b/nix/pkgs/complement/private_key.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb +iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT +LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a +09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc +ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga +Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO +/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu +WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB +DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb +piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN +D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ +8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+ +3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq +/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90 +FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q +td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M +Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A +91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV +8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh +VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW +UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K +kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz +KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7 +IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh +tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM +9MVtdgSkuh2gwkD/mMoAJXM= +-----END PRIVATE KEY----- diff --git a/nix/pkgs/complement/signing_request.csr b/nix/pkgs/complement/signing_request.csr new file mode 100644 index 00000000..707e73b4 --- 
/dev/null +++ b/nix/pkgs/complement/signing_request.csr @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICkTCCAXkCAQAwTDELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRYwFAYDVQQK +DA13b29mZXJzLCBpbmMuMRgwFgYDVQQDDA9jb21wbGVtZW50LW9ubHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS/odmZivxajebiyT7SMuhXqnMm+hF ++zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnTLvGEvNNx0px5M54H ++FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a09CphCFswO4PpxUU +ORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5uccebGMmCoO660hROST +BaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUgaQs/2tdT4kBzBH6kZ +OiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO/Ncsro/fAgMBAAGg +ADANBgkqhkiG9w0BAQsFAAOCAQEAjW+aD4E0phtRT5b2RyedY1uiSe7LQECsQnIO +wUSyGGG1GXYlJscyxxyzE9W9+QIALrxZkmc/+e02u+bFb1zQXW/uB/7u7FgXzrj6 +2YSDiWYXiYKvgGWEfCi3lpcTJK9x6WWkR+iREaoKRjcl0ynhhGuR7YwP38TNyu+z +FN6B1Lo398fvJkaTCiiHngWiwztXZ2d0MxkicuwZ1LJhIQA72OTl3QoRb5uiqbze +T9QJfU6W3v8cB8c8PuKMv5gl1QsGNtlfyQB56/X0cMxWl25vWXd2ankLkAGRTDJ8 +9YZHxP1ki4/yh75AknFq02nCOsmxYrAazCYgP2TzIPhQwBurKQ== +-----END CERTIFICATE REQUEST----- From 5ba0c02d526d77b9d983335af76585cd49be12c1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 12:29:54 -0400 Subject: [PATCH 0738/1248] bump ruwuma to fix a threads issue, fix more error codes, delete legacy sytest cruft Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +- Cargo.toml | 2 +- src/api/client/alias.rs | 2 +- src/api/client/state.rs | 12 +- tests/sytest/are-we-synapse-yet.list | 866 ----------------------- tests/sytest/are-we-synapse-yet.py | 266 ------- tests/sytest/show-expected-fail-tests.sh | 105 --- tests/sytest/sytest-blacklist | 7 - tests/sytest/sytest-whitelist | 516 -------------- 9 files changed, 22 insertions(+), 1776 deletions(-) delete mode 100644 tests/sytest/are-we-synapse-yet.list delete mode 100755 tests/sytest/are-we-synapse-yet.py delete mode 100755 tests/sytest/show-expected-fail-tests.sh delete mode 100644 tests/sytest/sytest-blacklist delete mode 
100644 tests/sytest/sytest-whitelist diff --git a/Cargo.lock b/Cargo.lock index f768eae1..65e8eca1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "cfg-if", 
"proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 2bc1d20f..d611c08e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +rev = "f5ab6302aaa55a14827a9cb5b40e980dd135fe14" features = [ "compat", "rand", diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 9cd7e0c5..319e5141 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -92,7 +92,7 @@ pub(crate) async fn get_alias_route( let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await else { - return Err!(Request(Unknown("Room with alias not found."))); + return Err!(Request(NotFound("Room with alias not found."))); }; let servers = room_available_servers(&services, &room_id, &room_alias, servers).await; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index d04aac35..db79735f 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -314,11 +314,17 @@ async fn allowed_to_send_state_event( } for alias in 
aliases { - let (alias_room_id, _servers) = - services.rooms.alias.resolve_alias(&alias, None).await?; + let (alias_room_id, _servers) = services + .rooms + .alias + .resolve_alias(&alias, None) + .await + .map_err(|e| { + err!(Request(Unknown("Failed resolving alias \"{alias}\": {e}"))) + })?; if alias_room_id != room_id { - return Err!(Request(Unknown( + return Err!(Request(BadAlias( "Room alias {alias} does not belong to room {room_id}" ))); } diff --git a/tests/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list deleted file mode 100644 index 99091989..00000000 --- a/tests/sytest/are-we-synapse-yet.list +++ /dev/null @@ -1,866 +0,0 @@ -reg GET /register yields a set of flows -reg POST /register can create a user -reg POST /register downcases capitals in usernames -reg POST /register returns the same device_id as that in the request -reg POST /register rejects registration of usernames with '!' -reg POST /register rejects registration of usernames with '"' -reg POST /register rejects registration of usernames with ':' -reg POST /register rejects registration of usernames with '?' 
-reg POST /register rejects registration of usernames with '\' -reg POST /register rejects registration of usernames with '@' -reg POST /register rejects registration of usernames with '[' -reg POST /register rejects registration of usernames with ']' -reg POST /register rejects registration of usernames with '{' -reg POST /register rejects registration of usernames with '|' -reg POST /register rejects registration of usernames with '}' -reg POST /register rejects registration of usernames with '£' -reg POST /register rejects registration of usernames with 'é' -reg POST /register rejects registration of usernames with '\n' -reg POST /register rejects registration of usernames with ''' -reg POST /r0/admin/register with shared secret -reg POST /r0/admin/register admin with shared secret -reg POST /r0/admin/register with shared secret downcases capitals -reg POST /r0/admin/register with shared secret disallows symbols -reg POST rejects invalid utf-8 in JSON -log GET /login yields a set of flows -log POST /login can log in as a user -log POST /login returns the same device_id as that in the request -log POST /login can log in as a user with just the local part of the id -log POST /login as non-existing user is rejected -log POST /login wrong password is rejected -log Interactive authentication types include SSO -log Can perform interactive authentication with SSO -log The user must be consistent through an interactive authentication session with SSO -log The operation must be consistent through an interactive authentication session -v1s GET /events initially -v1s GET /initialSync initially -csa Version responds 200 OK with valid structure -pro PUT /profile/:user_id/displayname sets my name -pro GET /profile/:user_id/displayname publicly accessible -pro PUT /profile/:user_id/avatar_url sets my avatar -pro GET /profile/:user_id/avatar_url publicly accessible -dev GET /device/{deviceId} -dev GET /device/{deviceId} gives a 404 for unknown devices -dev GET /devices -dev PUT 
/device/{deviceId} updates device fields -dev PUT /device/{deviceId} gives a 404 for unknown devices -dev DELETE /device/{deviceId} -dev DELETE /device/{deviceId} requires UI auth user to match device owner -dev DELETE /device/{deviceId} with no body gives a 401 -dev The deleted device must be consistent through an interactive auth session -dev Users receive device_list updates for their own devices -pre GET /presence/:user_id/status fetches initial status -pre PUT /presence/:user_id/status updates my presence -crm POST /createRoom makes a public room -crm POST /createRoom makes a private room -crm POST /createRoom makes a private room with invites -crm POST /createRoom makes a room with a name -crm POST /createRoom makes a room with a topic -syn Can /sync newly created room -crm POST /createRoom creates a room with the given version -crm POST /createRoom rejects attempts to create rooms with numeric versions -crm POST /createRoom rejects attempts to create rooms with unknown versions -crm POST /createRoom ignores attempts to set the room version via creation_content -mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -mem GET /rooms/:room_id/joined_members fetches my membership -v1s GET /rooms/:room_id/initialSync fetches initial sync state -pub GET /publicRooms lists newly-created room -ali GET /directory/room/:room_alias yields room ID -mem GET /joined_rooms lists newly-created room -rst POST /rooms/:room_id/state/m.room.name sets name -rst GET /rooms/:room_id/state/m.room.name gets name -rst POST /rooms/:room_id/state/m.room.topic sets topic -rst GET /rooms/:room_id/state/m.room.topic gets topic -rst GET /rooms/:room_id/state fetches entire room state -crm POST /createRoom with creation content -ali PUT /directory/room/:room_alias creates alias -nsp GET 
/rooms/:room_id/aliases lists aliases -jon POST /rooms/:room_id/join can join a room -jon POST /join/:room_alias can join a room -jon POST /join/:room_id can join a room -jon POST /join/:room_id can join a room with custom content -jon POST /join/:room_alias can join a room with custom content -lev POST /rooms/:room_id/leave can leave a room -inv POST /rooms/:room_id/invite can send an invite -ban POST /rooms/:room_id/ban can ban a user -snd POST /rooms/:room_id/send/:event_type sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -get GET /rooms/:room_id/messages returns a message -get GET /rooms/:room_id/messages lazy loads members correctly -typ PUT /rooms/:room_id/typing/:user_id sets typing notification -typ Typing notifications don't leak (3 subtests) -rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels -rst PUT /rooms/:room_id/state/m.room.power_levels can set levels -rst PUT power_levels should not explode if the old power levels were empty -rst Both GET and PUT work -rct POST /rooms/:room_id/receipt can create receipts -red POST /rooms/:room_id/read_markers can create read marker -med POST /media/r0/upload can create an upload -med GET /media/r0/download can fetch the value again -cap GET /capabilities is present and well formed for registered user -cap GET /r0/capabilities is not public -reg Register with a recaptcha -reg registration is idempotent, without username specified -reg registration is idempotent, with username specified -reg registration remembers parameters -reg registration accepts non-ascii passwords -reg registration with inhibit_login inhibits login -reg User signups are forbidden from starting with '_' -reg Can register using an email address -log Can login with 3pid and password using m.login.password -log login types include SSO -log /login/cas/redirect redirects if the old m.login.cas login type is listed -log Can 
login with new user via CAS -lox Can logout current device -lox Can logout all devices -lox Request to logout with invalid an access token is rejected -lox Request to logout without an access token is rejected -log After changing password, can't log in with old password -log After changing password, can log in with new password -log After changing password, existing session still works -log After changing password, a different session no longer works by default -log After changing password, different sessions can optionally be kept -psh Pushers created with a different access token are deleted on password change -psh Pushers created with a the same access token are not deleted on password change -acc Can deactivate account -acc Can't deactivate account with wrong password -acc After deactivating account, can't log in with password -acc After deactivating account, can't log in with an email -v1s initialSync sees my presence status -pre Presence change reports an event to myself -pre Friends presence changes reports events -crm Room creation reports m.room.create to myself -crm Room creation reports m.room.member to myself -rst Setting room topic reports m.room.topic to myself -v1s Global initialSync -v1s Global initialSync with limit=0 gives no messages -v1s Room initialSync -v1s Room initialSync with limit=0 gives no messages -rst Setting state twice is idempotent -jon Joining room twice is idempotent -syn New room members see their own join event -v1s New room members see existing users' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new members' presence -v1s All room members see all room members' presence in global initialSync -f,jon Remote users can join room by alias -syn New room members see their own join event -v1s New room members see existing members' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new member's presence -v1s New room 
members see first user's profile information in global initialSync -v1s New room members see first user's profile information in per-room initialSync -f,jon Remote users may not join unfederated rooms -syn Local room members see posted message events -v1s Fetching eventstream a second time doesn't yield the message again -syn Local non-members don't see posted message events -get Local room members can get room messages -f,syn Remote room members also see posted message events -f,get Remote room members can get room messages -get Message history can be paginated -f,get Message history can be paginated over federation -eph Ephemeral messages received from clients are correctly expired -ali Room aliases can contain Unicode -f,ali Remote room alias queries can handle Unicode -ali Canonical alias can be set -ali Canonical alias can include alt_aliases -ali Regular users can add and delete aliases in the default room configuration -ali Regular users can add and delete aliases when m.room.aliases is restricted -ali Deleting a non-existent alias should return a 404 -ali Users can't delete other's aliases -ali Users with sufficient power-level can delete other's aliases -ali Can delete canonical alias -ali Alias creators can delete alias with no ops -ali Alias creators can delete canonical alias with no ops -ali Only room members can list aliases of a room -inv Can invite users to invite-only rooms -inv Uninvited users cannot join the room -inv Invited user can reject invite -f,inv Invited user can reject invite over federation -f,inv Invited user can reject invite over federation several times -inv Invited user can reject invite for empty room -f,inv Invited user can reject invite over federation for empty room -inv Invited user can reject local invite after originator leaves -inv Invited user can see room metadata -f,inv Remote invited user can see room metadata -inv Users cannot invite themselves to a room -inv Users cannot invite a user that is already in the room -ban 
Banned user is kicked and may not rejoin until unbanned -f,ban Remote banned user is kicked and may not rejoin until unbanned -ban 'ban' event respects room powerlevel -plv setting 'm.room.name' respects room powerlevel -plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) -plv Unprivileged users can set m.room.topic if it only needs level 0 -plv Users cannot set ban powerlevel higher than their own (2 subtests) -plv Users cannot set kick powerlevel higher than their own (2 subtests) -plv Users cannot set redact powerlevel higher than their own (2 subtests) -v1s Check that event streams started after a client joined a room work (SYT-1) -v1s Event stream catches up fully after many messages -xxx POST /rooms/:room_id/redact/:event_id as power user redacts message -xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message -xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message -xxx POST /redact disallows redaction of event in different room -xxx Redaction of a redaction redacts the redaction reason -v1s A departed room is still included in /initialSync (SPEC-216) -v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) -rst Can get rooms/{roomId}/state for a departed room (SPEC-216) -mem Can get rooms/{roomId}/members for a departed room (SPEC-216) -get Can get rooms/{roomId}/messages for a departed room (SPEC-216) -rst Can get 'm.room.name' state for a departed room (SPEC-216) -syn Getting messages going forward is limited for a departed room (SPEC-216) -3pd Can invite existing 3pid -3pd Can invite existing 3pid with no ops into a private room -3pd Can invite existing 3pid in createRoom -3pd Can invite unbound 3pid -f,3pd Can invite unbound 3pid over federation -3pd Can invite unbound 3pid with no ops into a private room -f,3pd Can invite unbound 3pid over federation with no ops into a private room -f,3pd Can invite unbound 3pid over federation with users from both servers -3pd Can 
accept unbound 3pid invite after inviter leaves -3pd Can accept third party invite with /join -3pd 3pid invite join with wrong but valid signature are rejected -3pd 3pid invite join valid signature but revoked keys are rejected -3pd 3pid invite join valid signature but unreachable ID server are rejected -gst Guest user cannot call /events globally -gst Guest users can join guest_access rooms -gst Guest users can send messages to guest_access rooms if joined -gst Guest user calling /events doesn't tightloop -gst Guest users are kicked from guest_access rooms on revocation of guest_access -gst Guest user can set display names -gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation -gst Guest user can upgrade to fully featured user -gst Guest user cannot upgrade other users -pub GET /publicRooms lists rooms -pub GET /publicRooms includes avatar URLs -gst Guest users can accept invites to private rooms over federation -gst Guest users denied access over federation if guest access prohibited -mem Room members can override their displayname on a room-specific basis -mem Room members can join a room with an overridden displayname -mem Users cannot kick users from a room they are not in -mem Users cannot kick users who have already left a room -typ Typing notification sent to local room members -f,typ Typing notifications also sent to remote room members -typ Typing can be explicitly stopped -rct Read receipts are visible to /initialSync -rct Read receipts are sent as events -rct Receipts must be m.read -pro displayname updates affect room member events -pro avatar_url updates affect room member events -gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "joined" allows/forbids 
appropriately for Guest users -gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users -gst Guest non-joined user cannot call /events on shared room -gst Guest non-joined user cannot call /events on invited room -gst Guest non-joined user cannot call /events on joined room -gst Guest non-joined user cannot call /events on default room -gst Guest non-joined user can call /events on world_readable room -gst Guest non-joined users can get state for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms -gst Guest non-joined users cannot room initalSync for non-world_readable rooms -gst Guest non-joined users can room initialSync for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms after leaving -gst Guest non-joined users cannot send messages to guest_access rooms if not joined -gst Guest users can sync from world_readable guest_access rooms if joined -gst Guest users can sync from shared guest_access rooms if joined -gst Guest users can sync from invited guest_access rooms if joined -gst Guest users can sync from joined guest_access rooms if joined -gst Guest users can sync from default guest_access rooms if joined -ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users -ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users -ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users -ath m.room.history_visibility == "default" allows/forbids appropriately for Real users -ath Real non-joined user cannot call /events on shared room -ath Real non-joined user cannot call /events on invited room -ath Real non-joined user cannot call /events on joined room -ath Real non-joined user cannot call /events on default room -ath Real non-joined user can call /events on 
world_readable room -ath Real non-joined users can get state for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms -ath Real non-joined users cannot room initalSync for non-world_readable rooms -ath Real non-joined users can room initialSync for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms after leaving -ath Real non-joined users cannot send messages to guest_access rooms if not joined -ath Real users can sync from world_readable guest_access rooms if joined -ath Real users can sync from shared guest_access rooms if joined -ath Real users can sync from invited guest_access rooms if joined -ath Real users can sync from joined guest_access rooms if joined -ath Real users can sync from default guest_access rooms if joined -ath Only see history_visibility changes on boundaries -f,ath Backfill works correctly with history visibility set to joined -fgt Forgotten room messages cannot be paginated -fgt Forgetting room does not show up in v2 /sync -fgt Can forget room you've been kicked from -fgt Can't forget room you're still in -fgt Can re-join room if re-invited -ath Only original members of the room can see messages from erased users -mem /joined_rooms returns only joined rooms -mem /joined_members return joined members -ctx /context/ on joined room works -ctx /context/ on non world readable room does not work -ctx /context/ returns correct number of events -ctx /context/ with lazy_load_members filter works -get /event/ on joined room works -get /event/ on non world readable room does not work -get /event/ does not allow access to events before the user joined -mem Can get rooms/{roomId}/members -mem Can get rooms/{roomId}/members at a given point -mem Can filter rooms/{roomId}/members -upg /upgrade creates a new room -upg /upgrade should preserve room visibility for public rooms -upg /upgrade should preserve room visibility for private rooms -upg /upgrade copies >100 
power levels to the new room -upg /upgrade copies the power levels to the new room -upg /upgrade preserves the power level of the upgrading user in old and new rooms -upg /upgrade copies important state to the new room -upg /upgrade copies ban events to the new room -upg local user has push rules copied to upgraded room -f,upg remote user has push rules copied to upgraded room -upg /upgrade moves aliases to the new room -upg /upgrade moves remote aliases to the new room -upg /upgrade preserves direct room state -upg /upgrade preserves room federation ability -upg /upgrade restricts power levels in the old room -upg /upgrade restricts power levels in the old room when the old PLs are unusual -upg /upgrade to an unknown version is rejected -upg /upgrade is rejected if the user can't send state events -upg /upgrade of a bogus room fails gracefully -upg Cannot send tombstone event that points to the same room -f,upg Local and remote users' homeservers remove a room from their public directory on upgrade -rst Name/topic keys are correct -f,pub Can get remote public room list -pub Can paginate public room list -pub Can search public room list -syn Can create filter -syn Can download filter -syn Can sync -syn Can sync a joined room -syn Full state sync includes joined rooms -syn Newly joined room is included in an incremental sync -syn Newly joined room has correct timeline in incremental sync -syn Newly joined room includes presence in incremental sync -syn Get presence for newly joined members in incremental sync -syn Can sync a room with a single message -syn Can sync a room with a message with a transaction id -syn A message sent after an initial sync appears in the timeline of an incremental sync. 
-syn A filtered timeline reaches its limit -syn Syncing a new room with a large timeline limit isn't limited -syn A full_state incremental update returns only recent timeline -syn A prev_batch token can be used in the v1 messages API -syn A next_batch token can be used in the v1 messages API -syn User sees their own presence in a sync -syn User is offline if they set_presence=offline in their sync -syn User sees updates to presence from other users in the incremental sync. -syn State is included in the timeline in the initial sync -f,syn State from remote users is included in the state in the initial sync -syn Changes to state are included in an incremental sync -syn Changes to state are included in an gapped incremental sync -f,syn State from remote users is included in the timeline in an incremental sync -syn A full_state incremental update returns all state -syn When user joins a room the state is included in the next sync -syn A change to displayname should not result in a full state sync -syn A change to displayname should appear in incremental /sync -syn When user joins a room the state is included in a gapped sync -syn When user joins and leaves a room in the same batch, the full state is still included in the next sync -syn Current state appears in timeline in private history -syn Current state appears in timeline in private history with many messages before -syn Current state appears in timeline in private history with many messages after -syn Rooms a user is invited to appear in an initial sync -syn Rooms a user is invited to appear in an incremental sync -syn Newly joined room is included in an incremental sync after invite -syn Sync can be polled for updates -syn Sync is woken up for leaves -syn Left rooms appear in the leave section of sync -syn Newly left rooms appear in the leave section of incremental sync -syn We should see our own leave event, even if history_visibility is restricted (SYN-662) -syn We should see our own leave event when rejecting 
an invite, even if history_visibility is restricted (riot-web/3462) -syn Newly left rooms appear in the leave section of gapped sync -syn Previously left rooms don't appear in the leave section of sync -syn Left rooms appear in the leave section of full state sync -syn Archived rooms only contain history from before the user left -syn Banned rooms appear in the leave section of sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Typing events appear in initial sync -syn Typing events appear in incremental sync -syn Typing events appear in gapped sync -syn Read receipts appear in initial v2 /sync -syn New read receipts appear in incremental v2 /sync -syn Can pass a JSON filter as a query parameter -syn Can request federation format via the filter -syn Read markers appear in incremental v2 /sync -syn Read markers appear in initial v2 /sync -syn Read markers can be updated -syn Lazy loading parameters in the filter are strictly boolean -syn The only membership state included in an initial sync is for all the senders in the timeline -syn The only membership state included in an incremental sync is for senders in the timeline -syn The only membership state included in a gapped incremental sync is for senders in the timeline -syn Gapped incremental syncs include all state changes -syn Old leaves are present in gapped incremental syncs -syn Leaves are present in non-gapped incremental syncs -syn Old members are included in gappy incr LL sync if they start speaking -syn Members from the gap are included in gappy incr LL sync -syn We don't send redundant membership state across incremental syncs by default -syn We do send redundant membership state across incremental syncs if asked -syn Unnamed room comes with a name summary -syn Named room comes with just joined member count summary -syn Room summary only has 5 heroes -syn Room summary counts change when membership changes 
-rmv User can create and send/receive messages in a room with version 1 -rmv User can create and send/receive messages in a room with version 1 (2 subtests) -rmv local user can join room with version 1 -rmv User can invite local user to room with version 1 -rmv remote user can join room with version 1 -rmv User can invite remote user to room with version 1 -rmv Remote user can backfill in a room with version 1 -rmv Can reject invites over federation for rooms with version 1 -rmv Can receive redactions from regular users over federation in room version 1 -rmv User can create and send/receive messages in a room with version 2 -rmv User can create and send/receive messages in a room with version 2 (2 subtests) -rmv local user can join room with version 2 -rmv User can invite local user to room with version 2 -rmv remote user can join room with version 2 -rmv User can invite remote user to room with version 2 -rmv Remote user can backfill in a room with version 2 -rmv Can reject invites over federation for rooms with version 2 -rmv Can receive redactions from regular users over federation in room version 2 -rmv User can create and send/receive messages in a room with version 3 -rmv User can create and send/receive messages in a room with version 3 (2 subtests) -rmv local user can join room with version 3 -rmv User can invite local user to room with version 3 -rmv remote user can join room with version 3 -rmv User can invite remote user to room with version 3 -rmv Remote user can backfill in a room with version 3 -rmv Can reject invites over federation for rooms with version 3 -rmv Can receive redactions from regular users over federation in room version 3 -rmv User can create and send/receive messages in a room with version 4 -rmv User can create and send/receive messages in a room with version 4 (2 subtests) -rmv local user can join room with version 4 -rmv User can invite local user to room with version 4 -rmv remote user can join room with version 4 -rmv User can 
invite remote user to room with version 4 -rmv Remote user can backfill in a room with version 4 -rmv Can reject invites over federation for rooms with version 4 -rmv Can receive redactions from regular users over federation in room version 4 -rmv User can create and send/receive messages in a room with version 5 -rmv User can create and send/receive messages in a room with version 5 (2 subtests) -rmv local user can join room with version 5 -rmv User can invite local user to room with version 5 -rmv remote user can join room with version 5 -rmv User can invite remote user to room with version 5 -rmv Remote user can backfill in a room with version 5 -rmv Can reject invites over federation for rooms with version 5 -rmv Can receive redactions from regular users over federation in room version 5 -rmv User can create and send/receive messages in a room with version 6 -rmv User can create and send/receive messages in a room with version 6 (2 subtests) -rmv local user can join room with version 6 -rmv User can invite local user to room with version 6 -rmv remote user can join room with version 6 -rmv User can invite remote user to room with version 6 -rmv Remote user can backfill in a room with version 6 -rmv Can reject invites over federation for rooms with version 6 -rmv Can receive redactions from regular users over federation in room version 6 -rmv Inbound federation rejects invites which include invalid JSON for room version 6 -rmv Outbound federation rejects invite response which include invalid JSON for room version 6 -rmv Inbound federation rejects invite rejections which include invalid JSON for room version 6 -rmv Server rejects invalid JSON in a version 6 room -pre Presence changes are reported to local room members -f,pre Presence changes are also reported to remote room members -pre Presence changes to UNAVAILABLE are reported to local room members -f,pre Presence changes to UNAVAILABLE are reported to remote room members -v1s Newly created users see their 
own presence in /initialSync (SYT-34) -dvk Can upload device keys -dvk Should reject keys claiming to belong to a different user -dvk Can query device keys using POST -dvk Can query specific device keys using POST -dvk query for user with no keys returns empty key dict -dvk Can claim one time key using POST -f,dvk Can query remote device keys using POST -f,dvk Can claim remote one time key using POST -dvk Local device key changes appear in v2 /sync -dvk Local new device changes appear in v2 /sync -dvk Local delete device changes appear in v2 /sync -dvk Local update device changes appear in v2 /sync -dvk Can query remote device keys using POST after notification -f,dev Device deletion propagates over federation -f,dev If remote user leaves room, changes device and rejoins we see update in sync -f,dev If remote user leaves room we no longer receive device updates -dvk Local device key changes appear in /keys/changes -dvk New users appear in /keys/changes -f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes -dvk Get left notifs in sync and /keys/changes when other user leaves -dvk Get left notifs for other users in sync and /keys/changes when user leaves -f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes -dkb Can create backup version -dkb Can update backup version -dkb Responds correctly when backup is empty -dkb Can backup keys -dkb Can update keys with better versions -dkb Will not update keys with worse versions -dkb Will not back up to an old backup version -dkb Can delete backup -dkb Deleted & recreated backups are empty -dkb Can create more than 10 backup versions -xsk Can upload self-signing keys -xsk Fails to upload self-signing keys with no auth -xsk Fails to upload self-signing key without master key -xsk Changing master key notifies local users -xsk Changing user-signing key notifies local users -f,xsk can fetch self-signing keys over federation -f,xsk 
uploading self-signing key notifies over federation -f,xsk uploading signed devices gets propagated over federation -tag Can add tag -tag Can remove tag -tag Can list tags for a room -v1s Tags appear in the v1 /events stream -v1s Tags appear in the v1 /initalSync -v1s Tags appear in the v1 room initial sync -tag Tags appear in an initial v2 /sync -tag Newly updated tags appear in an incremental v2 /sync -tag Deleted tags appear in an incremental v2 /sync -tag local user has tags copied to the new room -f,tag remote user has tags copied to the new room -sch Can search for an event by body -sch Can get context around search results -sch Can back-paginate search results -sch Search works across an upgraded room and its predecessor -sch Search results with rank ordering do not include redacted events -sch Search results with recent ordering do not include redacted events -acc Can add account data -acc Can add account data to room -acc Can get account data without syncing -acc Can get room account data without syncing -v1s Latest account data comes down in /initialSync -v1s Latest account data comes down in room initialSync -v1s Account data appears in v1 /events stream -v1s Room account data appears in v1 /events stream -acc Latest account data appears in v2 /sync -acc New account data appears in incremental v2 /sync -oid Can generate a openid access_token that can be exchanged for information about a user -oid Invalid openid access tokens are rejected -oid Requests to userinfo without access tokens are rejected -std Can send a message directly to a device using PUT /sendToDevice -std Can recv a device message using /sync -std Can recv device messages until they are acknowledged -std Device messages with the same txn_id are deduplicated -std Device messages wake up /sync -std Can recv device messages over federation -fsd Device messages over federation wake up /sync -std Can send messages with a wildcard device id -std Can send messages with a wildcard device id to two 
devices -std Wildcard device messages wake up /sync -fsd Wildcard device messages over federation wake up /sync -adm /whois -nsp /purge_history -nsp /purge_history by ts -nsp Can backfill purged history -nsp Shutdown room -ign Ignore user in existing room -ign Ignore invite in full sync -ign Ignore invite in incremental sync -fky Checking local federation server -fky Federation key API allows unsigned requests for keys -fky Federation key API can act as a notary server via a GET request -fky Federation key API can act as a notary server via a POST request -fky Key notary server should return an expired key if it can't find any others -fky Key notary server must not overwrite a valid key with a spurious result from the origin server -fqu Non-numeric ports in server names are rejected -fqu Outbound federation can query profile data -fqu Inbound federation can query profile data -fqu Outbound federation can query room alias directory -fqu Inbound federation can query room alias directory -fsj Outbound federation can query v1 /send_join -fsj Outbound federation can query v2 /send_join -fmj Outbound federation passes make_join failures through to the client -fsj Inbound federation can receive v1 /send_join -fsj Inbound federation can receive v2 /send_join -fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms -fsj Inbound /v1/send_join rejects incorrectly-signed joins -fsj Inbound /v1/send_join rejects joins from other servers -fau Inbound federation rejects remote attempts to kick local users to rooms -frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support -frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support -frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -frv Inbound federation accepts attempts to join v2 rooms from servers with support -frv Outbound federation correctly handles unsupported room versions -frv A pair of servers 
can establish a join in a v2 room -fsj Outbound federation rejects send_join responses with no m.room.create event -frv Outbound federation rejects m.room.create events with an unknown room version -fsj Event with an invalid signature in the send_join response should not cause room join to fail -fsj Inbound: send_join rejects invalid JSON for room version 6 -fed Outbound federation can send events -fed Inbound federation can receive events -fed Inbound federation can receive redacted events -fed Ephemeral messages received from servers are correctly expired -fed Events whose auth_events are in the wrong room do not mess up the room state -fed Inbound federation can return events -fed Inbound federation redacts events from erased users -fme Outbound federation can request missing events -fme Inbound federation can return missing events for world_readable visibility -fme Inbound federation can return missing events for shared visibility -fme Inbound federation can return missing events for invite visibility -fme Inbound federation can return missing events for joined visibility -fme outliers whose auth_events are in a different room are correctly rejected -fbk Outbound federation can backfill events -fbk Inbound federation can backfill events -fbk Backfill checks the events requested belong to the room -fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -fiv Outbound federation can send invites via v1 API -fiv Outbound federation can send invites via v2 API -fiv Inbound federation can receive invites via v1 API -fiv Inbound federation can receive invites via v2 API -fiv Inbound federation can receive invite and reject when remote replies with a 403 -fiv Inbound federation can receive invite and reject when remote replies with a 500 -fiv Inbound federation can receive invite and reject when remote is unreachable -fiv Inbound federation rejects invites which are not signed by the sender -fiv Inbound federation can 
receive invite rejections -fiv Inbound federation rejects incorrectly-signed invite rejections -fsl Inbound /v1/send_leave rejects leaves from other servers -fst Inbound federation can get state for a room -fst Inbound federation of state requires event_id as a mandatory paramater -fst Inbound federation can get state_ids for a room -fst Inbound federation of state_ids requires event_id as a mandatory paramater -fst Federation rejects inbound events where the prev_events cannot be found -fst Room state at a rejected message event is the same as its predecessor -fst Room state at a rejected state event is the same as its predecessor -fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state -fst Federation handles empty auth_events in state_ids sanely -fst Getting state checks the events requested belong to the room -fst Getting state IDs checks the events requested belong to the room -fst Should not be able to take over the room by pretending there is no PL event -fpb Inbound federation can get public room list -fed Outbound federation sends receipts -fed Inbound federation rejects receipts from wrong remote -fed Inbound federation ignores redactions from invalid servers room > v3 -fed An event which redacts an event in a different room should be ignored -fed An event which redacts itself should be ignored -fed A pair of events which redact each other should be ignored -fdk Local device key changes get to remote servers -fdk Server correctly handles incoming m.device_list_update -fdk Server correctly resyncs when client query keys and there is no remote cache -fdk Server correctly resyncs when server leaves and rejoins a room -fdk Local device key changes get to remote servers with correct prev_id -fdk Device list doesn't change if remote server is down -fdk If a device list update goes missing, the server resyncs on the next one -fst Name/topic keys are correct -fau Remote servers cannot set power levels in rooms 
without existing powerlevels -fau Remote servers should reject attempts by non-creators to set the power levels -fau Inbound federation rejects typing notifications from wrong remote -fau Users cannot set notifications powerlevel higher than their own -fed Forward extremities remain so even after the next events are populated as outliers -fau Banned servers cannot send events -fau Banned servers cannot /make_join -fau Banned servers cannot /send_join -fau Banned servers cannot /make_leave -fau Banned servers cannot /send_leave -fau Banned servers cannot /invite -fau Banned servers cannot get room state -fau Banned servers cannot get room state ids -fau Banned servers cannot backfill -fau Banned servers cannot /event_auth -fau Banned servers cannot get missing events -fau Server correctly handles transactions that break edu limits -fau Inbound federation correctly soft fails events -fau Inbound federation accepts a second soft-failed event -fau Inbound federation correctly handles soft failed events as extremities -med Can upload with Unicode file name -med Can download with Unicode file name locally -f,med Can download with Unicode file name over federation -med Alternative server names do not cause a routing loop -med Can download specifying a different Unicode file name -med Can upload without a file name -med Can download without a file name locally -f,med Can download without a file name over federation -med Can upload with ASCII file name -med Can download file 'ascii' -med Can download file 'name with spaces' -med Can download file 'name;with;semicolons' -med Can download specifying a different ASCII file name -med Can send image in room message -med Can fetch images in room -med POSTed media can be thumbnailed -f,med Remote media can be thumbnailed -med Test URL preview -med Can read configuration endpoint -nsp Can quarantine media in rooms -udr User appears in user directory -udr User in private room doesn't appear in user directory -udr User joining then 
leaving public room appears and dissappears from directory -udr Users appear/disappear from directory when join_rules are changed -udr Users appear/disappear from directory when history_visibility are changed -udr Users stay in directory when join_rules are changed but history_visibility is world_readable -f,udr User in remote room doesn't appear in user directory after server left room -udr User directory correctly update on display name change -udr User in shared private room does appear in user directory -udr User in shared private room does appear in user directory until leave -udr User in dir while user still shares private rooms -nsp Create group -nsp Add group rooms -nsp Remove group rooms -nsp Get local group profile -nsp Get local group users -nsp Add/remove local group rooms -nsp Get local group summary -nsp Get remote group profile -nsp Get remote group users -nsp Add/remove remote group rooms -nsp Get remote group summary -nsp Add local group users -nsp Remove self from local group -nsp Remove other from local group -nsp Add remote group users -nsp Remove self from remote group -nsp Listing invited users of a remote group when not a member returns a 403 -nsp Add group category -nsp Remove group category -nsp Get group categories -nsp Add group role -nsp Remove group role -nsp Get group roles -nsp Add room to group summary -nsp Adding room to group summary keeps room_id when fetching rooms in group -nsp Adding multiple rooms to group summary have correct order -nsp Remove room from group summary -nsp Add room to group summary with category -nsp Remove room from group summary with category -nsp Add user to group summary -nsp Adding multiple users to group summary have correct order -nsp Remove user from group summary -nsp Add user to group summary with role -nsp Remove user from group summary with role -nsp Local group invites come down sync -nsp Group creator sees group in sync -nsp Group creator sees group in initial sync -nsp Get/set local group 
publicity -nsp Bulk get group publicity -nsp Joinability comes down summary -nsp Set group joinable and join it -nsp Group is not joinable by default -nsp Group is joinable over federation -nsp Room is transitioned on local and remote groups upon room upgrade -3pd Can bind 3PID via home server -3pd Can bind and unbind 3PID via homeserver -3pd Can unbind 3PID via homeserver when bound out of band -3pd 3PIDs are unbound after account deactivation -3pd Can bind and unbind 3PID via /unbind by specifying the identity server -3pd Can bind and unbind 3PID via /unbind without specifying the identity server -app AS can create a user -app AS can create a user with an underscore -app AS can create a user with inhibit_login -app AS cannot create users outside its own namespace -app Regular users cannot register within the AS namespace -app AS can make room aliases -app Regular users cannot create room aliases within the AS namespace -app AS-ghosted users can use rooms via AS -app AS-ghosted users can use rooms themselves -app Ghost user must register before joining room -app AS can set avatar for ghosted users -app AS can set displayname for ghosted users -app AS can't set displayname for random users -app Inviting an AS-hosted user asks the AS server -app Accesing an AS-hosted room alias asks the AS server -app Events in rooms with AS-hosted room aliases are sent to AS server -app AS user (not ghost) can join room without registering -app AS user (not ghost) can join room without registering, with user_id query param -app HS provides query metadata -app HS can provide query metadata on a single protocol -app HS will proxy request for 3PU mapping -app HS will proxy request for 3PL mapping -app AS can publish rooms in their own list -app AS and main public room lists are separate -app AS can deactivate a user -psh Test that a message is pushed -psh Invites are pushed -psh Rooms with names are correctly named in pushed -psh Rooms with canonical alias are correctly named in 
pushed -psh Rooms with many users are correctly pushed -psh Don't get pushed for rooms you've muted -psh Rejected events are not pushed -psh Can add global push rule for room -psh Can add global push rule for sender -psh Can add global push rule for content -psh Can add global push rule for override -psh Can add global push rule for underride -psh Can add global push rule for content -psh New rules appear before old rules by default -psh Can add global push rule before an existing rule -psh Can add global push rule after an existing rule -psh Can delete a push rule -psh Can disable a push rule -psh Adding the same push rule twice is idempotent -psh Messages that notify from another user increment unread notification count -psh Messages that highlight from another user increment unread highlight count -psh Can change the actions of default rules -psh Changing the actions of an unknown default rule fails with 404 -psh Can change the actions of a user specified rule -psh Changing the actions of an unknown rule fails with 404 -psh Can fetch a user's pushers -psh Push rules come down in an initial /sync -psh Adding a push rule wakes up an incremental /sync -psh Disabling a push rule wakes up an incremental /sync -psh Enabling a push rule wakes up an incremental /sync -psh Setting actions for a push rule wakes up an incremental /sync -psh Can enable/disable default rules -psh Enabling an unknown default rule fails with 404 -psh Test that rejected pushers are removed. 
-psh Notifications can be viewed with GET /notifications -psh Trying to add push rule with no scope fails with 400 -psh Trying to add push rule with invalid scope fails with 400 -psh Trying to add push rule with missing template fails with 400 -psh Trying to add push rule with missing rule_id fails with 400 -psh Trying to add push rule with empty rule_id fails with 400 -psh Trying to add push rule with invalid template fails with 400 -psh Trying to add push rule with rule_id with slashes fails with 400 -psh Trying to add push rule with override rule without conditions fails with 400 -psh Trying to add push rule with underride rule without conditions fails with 400 -psh Trying to add push rule with condition without kind fails with 400 -psh Trying to add push rule with content rule without pattern fails with 400 -psh Trying to add push rule with no actions fails with 400 -psh Trying to add push rule with invalid action fails with 400 -psh Trying to add push rule with invalid attr fails with 400 -psh Trying to add push rule with invalid value for enabled fails with 400 -psh Trying to get push rules with no trailing slash fails with 400 -psh Trying to get push rules with scope without trailing slash fails with 400 -psh Trying to get push rules with template without tailing slash fails with 400 -psh Trying to get push rules with unknown scope fails with 400 -psh Trying to get push rules with unknown template fails with 400 -psh Trying to get push rules with unknown attribute fails with 400 -psh Trying to get push rules with unknown rule_id fails with 404 -psh Rooms with names are correctly named in pushes -v1s GET /initialSync with non-numeric 'limit' -v1s GET /events with non-numeric 'limit' -v1s GET /events with negative 'limit' -v1s GET /events with non-numeric 'timeout' -ath Event size limits -syn Check creating invalid filters returns 4xx -f,pre New federated private chats get full presence information (SYN-115) -pre Left room members do not cause problems for 
presence -crm Rooms can be created with an initial invite list (SYN-205) (1 subtests) -typ Typing notifications don't leak -ban Non-present room members cannot ban others -psh Getting push rules doesn't corrupt the cache SYN-390 -inv Test that we can be reinvited to a room we created -syn Multiple calls to /sync should not cause 500 errors -gst Guest user can call /events on another world_readable room (SYN-606) -gst Real user can call /events on another world_readable room (SYN-606) -gst Events come down the correct room -pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -std Can send a to-device message to two users which both receive it using /sync -fme Outbound federation will ignore a missing event with bad JSON for room version 6 -fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6 -jso Invalid JSON integers -jso Invalid JSON floats -jso Invalid JSON special values -inv Can invite users to invite-only rooms (2 subtests) -plv setting 'm.room.name' respects room powerlevel (2 subtests) -psh Messages that notify from another user increment notification_count -psh Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count -dvk Can claim one time key using POST (2 subtests) -fdk Can query remote device keys using POST (1 subtests) -fdk Can claim remote one time key using POST (2 subtests) -fmj Inbound /make_join rejects attempts to join rooms where all users have left \ No newline at end of file diff --git a/tests/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py deleted file mode 100755 index 3d21fa41..00000000 --- a/tests/sytest/are-we-synapse-yet.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import division -import argparse -import re -import sys - -# Usage: $ ./are-we-synapse-yet.py [-v] results.tap -# This script scans a results.tap file from Dendrite's CI process and 
spits out -# a rating of how close we are to Synapse parity, based purely on SyTests. -# The main complexity is grouping tests sensibly into features like 'Registration' -# and 'Federation'. Then it just checks the ones which are passing and calculates -# percentages for each group. Produces results like: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# -# or in verbose mode: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -# -# You can also tack `-v` on to see exactly which tests each category falls under. - -test_mappings = { - "nsp": "Non-Spec API", - "unk": "Unknown API (no group specified)", - "app": "Application Services API", - "f": "Federation", # flag to mark test involves federation - - "federation_apis": { - "fky": "Key API", - "fsj": "send_join API", - "fmj": "make_join API", - "fsl": "send_leave API", - "fiv": "Invite API", - "fqu": "Query API", - "frv": "room versions", - "fau": "Auth", - "fbk": "Backfill API", - "fme": "get_missing_events API", - "fst": "State APIs", - "fpb": "Public Room API", - "fdk": "Device Key APIs", - "fed": "Federation API", - "fsd": "Send-to-Device APIs", - }, - - "client_apis": { - "reg": "Registration", - "log": "Login", - "lox": "Logout", - "v1s": "V1 CS APIs", - "csa": "Misc CS APIs", - "pro": "Profile", - "dev": "Devices", - "dvk": "Device Keys", - "dkb": "Device Key Backup", - "xsk": "Cross-signing Keys", - "pre": "Presence", - "crm": "Create Room", - "syn": "Sync API", - "rmv": "Room Versions", - "rst": "Room State APIs", - "pub": "Public Room APIs", - "mem": "Room Membership", - "ali": "Room Aliases", - "jon": "Joining Rooms", - "lev": "Leaving Rooms", - "inv": "Inviting users to Rooms", - "ban": 
"Banning users", - "snd": "Sending events", - "get": "Getting events for Rooms", - "rct": "Receipts", - "red": "Read markers", - "med": "Media APIs", - "cap": "Capabilities API", - "typ": "Typing API", - "psh": "Push APIs", - "acc": "Account APIs", - "eph": "Ephemeral Events", - "plv": "Power Levels", - "xxx": "Redaction", - "3pd": "Third-Party ID APIs", - "gst": "Guest APIs", - "ath": "Room Auth", - "fgt": "Forget APIs", - "ctx": "Context APIs", - "upg": "Room Upgrade APIs", - "tag": "Tagging APIs", - "sch": "Search APIs", - "oid": "OpenID API", - "std": "Send-to-Device APIs", - "adm": "Server Admin API", - "ign": "Ignore Users", - "udr": "User Directory APIs", - "jso": "Enforced canonical JSON", - }, -} - -# optional 'not ' with test number then anything but '#' -re_testname = re.compile(r"^(not )?ok [0-9]+ ([^#]+)") - -# Parses lines like the following: -# -# SUCCESS: ok 3 POST /register downcases capitals in usernames -# FAIL: not ok 54 (expected fail) POST /createRoom creates a room with the given version -# SKIP: ok 821 Multiple calls to /sync should not cause 500 errors # skip lack of can_post_room_receipts -# EXPECT FAIL: not ok 822 (expected fail) Guest user can call /events on another world_readable room (SYN-606) # TODO expected fail -# -# Only SUCCESS lines are treated as success, the rest are not implemented. -# -# Returns a dict like: -# { name: "...", ok: True } -def parse_test_line(line): - if not line.startswith("ok ") and not line.startswith("not ok "): - return - re_match = re_testname.match(line) - test_name = re_match.groups()[1].replace("(expected fail) ", "").strip() - test_pass = False - if line.startswith("ok ") and not "# skip " in line: - test_pass = True - return { - "name": test_name, - "ok": test_pass, - } - -# Prints the stats for a complete section. 
-# header_name => "Client-Server APIs" -# gid_to_tests => { gid: { : True|False }} -# gid_to_name => { gid: "Group Name" } -# verbose => True|False -# Produces: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# or in verbose mode: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -def print_stats(header_name, gid_to_tests, gid_to_name, verbose): - subsections = [] # Registration: 100% (13/13 tests) - subsection_test_names = {} # 'subsection name': ["✓ Test 1", "✓ Test 2", "× Test 3"] - total_passing = 0 - total_tests = 0 - for gid, tests in gid_to_tests.items(): - group_total = len(tests) - if group_total == 0: - continue - group_passing = 0 - test_names_and_marks = [] - for name, passing in tests.items(): - if passing: - group_passing += 1 - test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") - - total_tests += group_total - total_passing += group_passing - pct = "{0:.0f}%".format(group_passing/group_total * 100) - line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) - subsections.append(line) - subsection_test_names[line] = test_names_and_marks - - pct = "{0:.0f}%".format(total_passing/total_tests * 100) - print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) - print("-" * (len(header_name)+1)) - for line in subsections: - print(" %s" % (line,)) - if verbose: - for test_name_and_pass_mark in subsection_test_names[line]: - print(" %s" % (test_name_and_pass_mark,)) - print("") - print("") - -def main(results_tap_path, verbose): - # Load up test mappings - test_name_to_group_id = {} - fed_tests = set() - client_tests = set() - with 
open("./are-we-synapse-yet.list", "r") as f: - for line in f.readlines(): - test_name = " ".join(line.split(" ")[1:]).strip() - groups = line.split(" ")[0].split(",") - for gid in groups: - if gid == "f" or gid in test_mappings["federation_apis"]: - fed_tests.add(test_name) - else: - client_tests.add(test_name) - if gid == "f": - continue # we expect another group ID - test_name_to_group_id[test_name] = gid - - # parse results.tap - summary = { - "client": { - # gid: { - # test_name: OK - # } - }, - "federation": { - # gid: { - # test_name: OK - # } - }, - "appservice": { - "app": {}, - }, - "nonspec": { - "nsp": {}, - "unk": {} - }, - } - with open(results_tap_path, "r") as f: - for line in f.readlines(): - test_result = parse_test_line(line) - if not test_result: - continue - name = test_result["name"] - group_id = test_name_to_group_id.get(name) - if not group_id: - summary["nonspec"]["unk"][name] = test_result["ok"] - if group_id == "nsp": - summary["nonspec"]["nsp"][name] = test_result["ok"] - elif group_id == "app": - summary["appservice"]["app"][name] = test_result["ok"] - elif group_id in test_mappings["federation_apis"]: - group = summary["federation"].get(group_id, {}) - group[name] = test_result["ok"] - summary["federation"][group_id] = group - elif group_id in test_mappings["client_apis"]: - group = summary["client"].get(group_id, {}) - group[name] = test_result["ok"] - summary["client"][group_id] = group - - print("Are We Synapse Yet?") - print("===================") - print("") - print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) - print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) - print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) - print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose) - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("tap_file", help="path to 
results.tap") - parser.add_argument("-v", action="store_true", help="show individual test names in output") - args = parser.parse_args() - main(args.tap_file, args.v) \ No newline at end of file diff --git a/tests/sytest/show-expected-fail-tests.sh b/tests/sytest/show-expected-fail-tests.sh deleted file mode 100755 index 320d4ebd..00000000 --- a/tests/sytest/show-expected-fail-tests.sh +++ /dev/null @@ -1,105 +0,0 @@ -#! /bin/bash -# -# Parses a results.tap file from SyTest output and a file containing test names (a test whitelist) -# and checks whether a test name that exists in the whitelist (that should pass), failed or not. -# -# An optional blacklist file can be added, also containing test names, where if a test name is -# present, the script will not error even if the test is in the whitelist file and failed -# -# For each of these files, lines starting with '#' are ignored. -# -# Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist] - -results_file=$1 -whitelist_file=$2 -blacklist_file=$3 - -fail_build=0 - -if [ $# -lt 2 ]; then - echo "Usage: $0 results.tap whitelist [blacklist]" - exit 1 -fi - -if [ ! -f "$results_file" ]; then - echo "ERROR: Specified results file '${results_file}' doesn't exist." - fail_build=1 -fi - -if [ ! -f "$whitelist_file" ]; then - echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist." - fail_build=1 -fi - -blacklisted_tests=() - -# Check if a blacklist file was provided -if [ $# -eq 3 ]; then - # Read test blacklist file - if [ ! -f "$blacklist_file" ]; then - echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist." 
- fail_build=1 - fi - - # Read each line, ignoring those that start with '#' - blacklisted_tests="" - search_non_comments=$(grep -v '^#' ${blacklist_file}) - while read -r line ; do - # Record the blacklisted test name - blacklisted_tests+=("${line}") - done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop -fi - -[ "$fail_build" = 0 ] || exit 1 - -passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? # TODO passed but expected fail$//') -tests_to_add="" -already_in_whitelist="" - -while read -r test_name; do - # Ignore empty lines - [ "${test_name}" = "" ] && continue - - grep "^${test_name}" "${whitelist_file}" > /dev/null 2>&1 - if [ "$?" != "0" ]; then - # Check if this test name is blacklisted - if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then - # Don't notify about this test - continue - fi - - # Append this test_name to the existing list - tests_to_add="${tests_to_add}${test_name}\n" - fail_build=1 - else - already_in_whitelist="${already_in_whitelist}${test_name}\n" - fi -done <<< "${passed_but_expected_fail}" - -# TODO: Check that the same test doesn't exist in both the whitelist and blacklist -# TODO: Check that the same test doesn't appear twice in the whitelist|blacklist - -# Trim test output strings -tests_to_add=$(IFS=$'\n' echo "${tests_to_add[*]%%'\n'}") -already_in_whitelist=$(IFS=$'\n' echo "${already_in_whitelist[*]%%'\n'}") - -# Format output with markdown for buildkite annotation rendering purposes -if [ -n "${tests_to_add}" ] && [ -n "${already_in_whitelist}" ]; then - echo "### 📜 SyTest Whitelist Maintenance" -fi - -if [ -n "${tests_to_add}" ]; then - echo "**ERROR**: The following tests passed but are not present in \`$2\`. 
Please append them to the file:" - echo "\`\`\`" - echo -e "${tests_to_add}" - echo "\`\`\`" -fi - -if [ -n "${already_in_whitelist}" ]; then - echo "**WARN**: Tests in the whitelist still marked as **expected fail**:" - echo "\`\`\`" - echo -e "${already_in_whitelist}" - echo "\`\`\`" -fi - -exit ${fail_build} diff --git a/tests/sytest/sytest-blacklist b/tests/sytest/sytest-blacklist deleted file mode 100644 index 009de225..00000000 --- a/tests/sytest/sytest-blacklist +++ /dev/null @@ -1,7 +0,0 @@ -# This test checks for a room-alias key in the response which is not in the spec, we must add it back in whitelist when https://github.com/matrix-org/sytest/pull/880 is merged -POST /createRoom makes a public room -# These fails because they use a endpoint which is not in the spec, we must add them back in whitelist when https://github.com/matrix-org/sytest/issues/878 is closed -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -Can /sync newly created room -POST /createRoom ignores attempts to set the room version via creation_content \ No newline at end of file diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist deleted file mode 100644 index 1c969dba..00000000 --- a/tests/sytest/sytest-whitelist +++ /dev/null @@ -1,516 +0,0 @@ -/event/ does not allow access to events before the user joined -/event/ on joined room works -/event/ on non world readable room does not work -/joined_members return joined members -/joined_rooms returns only joined rooms -/whois -3pid invite join valid signature but revoked keys are rejected -3pid invite join valid signature but unreachable ID server are rejected -3pid invite join with wrong but valid signature are rejected -A change to displayname should appear in incremental /sync -A full_state incremental update returns all state -A full_state incremental update returns only recent timeline -A message sent after an initial sync appears in the timeline of an incremental sync. 
-A next_batch token can be used in the v1 messages API -A pair of events which redact each other should be ignored -A pair of servers can establish a join in a v2 room -A prev_batch token can be used in the v1 messages API -AS can create a user -AS can create a user with an underscore -AS can create a user with inhibit_login -AS can set avatar for ghosted users -AS can set displayname for ghosted users -AS can't set displayname for random users -AS cannot create users outside its own namespace -AS user (not ghost) can join room without registering -AS user (not ghost) can join room without registering, with user_id query param -After changing password, a different session no longer works by default -After changing password, can log in with new password -After changing password, can't log in with old password -After changing password, different sessions can optionally be kept -After changing password, existing session still works -After deactivating account, can't log in with an email -After deactivating account, can't log in with password -Alias creators can delete alias with no ops -Alias creators can delete canonical alias with no ops -Alternative server names do not cause a routing loop -An event which redacts an event in a different room should be ignored -An event which redacts itself should be ignored -Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -Backfill checks the events requested belong to the room -Backfill works correctly with history visibility set to joined -Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -Banned servers cannot /event_auth -Banned servers cannot /invite -Banned servers cannot /make_join -Banned servers cannot /make_leave -Banned servers cannot /send_join -Banned servers cannot /send_leave -Banned servers cannot backfill -Banned servers cannot get missing events -Banned servers cannot get room state -Banned servers cannot get room 
state ids -Banned servers cannot send events -Banned user is kicked and may not rejoin until unbanned -Both GET and PUT work -Can /sync newly created room -Can add account data -Can add account data to room -Can add tag -Can claim one time key using POST -Can claim remote one time key using POST -Can create filter -Can deactivate account -Can delete canonical alias -Can download file 'ascii' -Can download file 'name with spaces' -Can download file 'name;with;semicolons' -Can download filter -Can download specifying a different ASCII file name -Can download specifying a different Unicode file name -Can download with Unicode file name locally -Can download with Unicode file name over federation -Can download without a file name locally -Can download without a file name over federation -Can forget room you've been kicked from -Can get 'm.room.name' state for a departed room (SPEC-216) -Can get account data without syncing -Can get remote public room list -Can get room account data without syncing -Can get rooms/{roomId}/members -Can get rooms/{roomId}/members for a departed room (SPEC-216) -Can get rooms/{roomId}/state for a departed room (SPEC-216) -Can invite users to invite-only rooms -Can list tags for a room -Can logout all devices -Can logout current device -Can paginate public room list -Can pass a JSON filter as a query parameter -Can query device keys using POST -Can query remote device keys using POST -Can query specific device keys using POST -Can re-join room if re-invited -Can read configuration endpoint -Can receive redactions from regular users over federation in room version 1 -Can receive redactions from regular users over federation in room version 2 -Can receive redactions from regular users over federation in room version 3 -Can receive redactions from regular users over federation in room version 4 -Can receive redactions from regular users over federation in room version 5 -Can receive redactions from regular users over federation in room version 
6 -Can recv a device message using /sync -Can recv a device message using /sync -Can recv device messages over federation -Can recv device messages until they are acknowledged -Can recv device messages until they are acknowledged -Can reject invites over federation for rooms with version 1 -Can reject invites over federation for rooms with version 2 -Can reject invites over federation for rooms with version 3 -Can reject invites over federation for rooms with version 4 -Can reject invites over federation for rooms with version 5 -Can reject invites over federation for rooms with version 6 -Can remove tag -Can search public room list -Can send a message directly to a device using PUT /sendToDevice -Can send a message directly to a device using PUT /sendToDevice -Can send a to-device message to two users which both receive it using /sync -Can send image in room message -Can send messages with a wildcard device id -Can send messages with a wildcard device id -Can send messages with a wildcard device id to two devices -Can send messages with a wildcard device id to two devices -Can sync -Can sync a joined room -Can sync a room with a message with a transaction id -Can sync a room with a single message -Can upload device keys -Can upload with ASCII file name -Can upload with Unicode file name -Can upload without a file name -Can't deactivate account with wrong password -Can't forget room you're still in -Changes to state are included in an gapped incremental sync -Changes to state are included in an incremental sync -Changing the actions of an unknown default rule fails with 404 -Changing the actions of an unknown rule fails with 404 -Checking local federation server -Creators can delete alias -Current state appears in timeline in private history -Current state appears in timeline in private history with many messages before -DELETE /device/{deviceId} -DELETE /device/{deviceId} requires UI auth user to match device owner -DELETE /device/{deviceId} with no body gives a 
401 -Deleted tags appear in an incremental v2 /sync -Deleting a non-existent alias should return a 404 -Device list doesn't change if remote server is down -Device messages over federation wake up /sync -Device messages wake up /sync -Device messages wake up /sync -Device messages with the same txn_id are deduplicated -Device messages with the same txn_id are deduplicated -Enabling an unknown default rule fails with 404 -Event size limits -Event with an invalid signature in the send_join response should not cause room join to fail -Events come down the correct room -Events whose auth_events are in the wrong room do not mess up the room state -Existing members see new members' join events -Federation key API allows unsigned requests for keys -Federation key API can act as a notary server via a GET request -Federation key API can act as a notary server via a POST request -Federation rejects inbound events where the prev_events cannot be found -Fetching eventstream a second time doesn't yield the message again -Forgetting room does not show up in v2 /sync -Full state sync includes joined rooms -GET /capabilities is present and well formed for registered user -GET /device/{deviceId} -GET /device/{deviceId} gives a 404 for unknown devices -GET /devices -GET /directory/room/:room_alias yields room ID -GET /events initially -GET /events with negative 'limit' -GET /events with non-numeric 'limit' -GET /events with non-numeric 'timeout' -GET /initialSync initially -GET /joined_rooms lists newly-created room -GET /login yields a set of flows -GET /media/r0/download can fetch the value again -GET /profile/:user_id/avatar_url publicly accessible -GET /profile/:user_id/displayname publicly accessible -GET /publicRooms includes avatar URLs -GET /publicRooms lists newly-created room -GET /publicRooms lists rooms -GET /r0/capabilities is not public -GET /register yields a set of flows -GET /rooms/:room_id/joined_members fetches my membership -GET /rooms/:room_id/messages returns a 
message -GET /rooms/:room_id/state fetches entire room state -GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -GET /rooms/:room_id/state/m.room.name gets name -GET /rooms/:room_id/state/m.room.power_levels can fetch levels -GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -GET /rooms/:room_id/state/m.room.topic gets topic -Get left notifs for other users in sync and /keys/changes when user leaves -Getting messages going forward is limited for a departed room (SPEC-216) -Getting push rules doesn't corrupt the cache SYN-390 -Getting state IDs checks the events requested belong to the room -Getting state checks the events requested belong to the room -Ghost user must register before joining room -Guest non-joined user cannot call /events on default room -Guest non-joined user cannot call /events on invited room -Guest non-joined user cannot call /events on joined room -Guest non-joined user cannot call /events on shared room -Guest non-joined users can get individual state for world_readable rooms -Guest non-joined users can get individual state for world_readable rooms after leaving -Guest non-joined users can get state for world_readable rooms -Guest non-joined users cannot room initalSync for non-world_readable rooms -Guest non-joined users cannot send messages to guest_access rooms if not joined -Guest user can set display names -Guest user cannot call /events globally -Guest user cannot upgrade other users -Guest users can accept invites to private rooms over federation -Guest users can join guest_access rooms -Guest users can send messages to guest_access rooms if joined -If a device list update goes missing, the server resyncs on the next one -If remote user leaves room we no longer receive device updates -If remote user leaves room, changes device and rejoins we see update in /keys/changes -If remote user leaves room, changes 
device and rejoins we see update in sync -Inbound /make_join rejects attempts to join rooms where all users have left -Inbound /v1/make_join rejects remote attempts to join local users to rooms -Inbound /v1/send_join rejects incorrectly-signed joins -Inbound /v1/send_join rejects joins from other servers -Inbound /v1/send_leave rejects leaves from other servers -Inbound federation accepts a second soft-failed event -Inbound federation accepts attempts to join v2 rooms from servers with support -Inbound federation can backfill events -Inbound federation can get public room list -Inbound federation can get state for a room -Inbound federation can get state_ids for a room -Inbound federation can query profile data -Inbound federation can query room alias directory -Inbound federation can receive events -Inbound federation can receive invites via v1 API -Inbound federation can receive invites via v2 API -Inbound federation can receive redacted events -Inbound federation can receive v1 /send_join -Inbound federation can receive v2 /send_join -Inbound federation can return events -Inbound federation can return missing events for invite visibility -Inbound federation can return missing events for world_readable visibility -Inbound federation correctly soft fails events -Inbound federation of state requires event_id as a mandatory paramater -Inbound federation of state_ids requires event_id as a mandatory paramater -Inbound federation rejects attempts to join v1 rooms from servers without v1 support -Inbound federation rejects attempts to join v2 rooms from servers lacking version support -Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -Inbound federation rejects invite rejections which include invalid JSON for room version 6 -Inbound federation rejects invites which include invalid JSON for room version 6 -Inbound federation rejects receipts from wrong remote -Inbound federation rejects remote attempts to join local users to rooms 
-Inbound federation rejects remote attempts to kick local users to rooms -Inbound federation rejects typing notifications from wrong remote -Inbound: send_join rejects invalid JSON for room version 6 -Invalid JSON floats -Invalid JSON integers -Invalid JSON special values -Invited user can reject invite -Invited user can reject invite over federation -Invited user can reject invite over federation for empty room -Invited user can reject invite over federation several times -Invited user can see room metadata -Inviting an AS-hosted user asks the AS server -Lazy loading parameters in the filter are strictly boolean -Left rooms appear in the leave section of full state sync -Local delete device changes appear in v2 /sync -Local device key changes appear in /keys/changes -Local device key changes appear in v2 /sync -Local device key changes get to remote servers -Local new device changes appear in v2 /sync -Local non-members don't see posted message events -Local room members can get room messages -Local room members see posted message events -Local update device changes appear in v2 /sync -Local users can peek by room alias -Local users can peek into world_readable rooms by room ID -Message history can be paginated -Message history can be paginated over federation -Name/topic keys are correct -New account data appears in incremental v2 /sync -New read receipts appear in incremental v2 /sync -New room members see their own join event -New users appear in /keys/changes -Newly banned rooms appear in the leave section of incremental sync -Newly joined room is included in an incremental sync -Newly joined room is included in an incremental sync after invite -Newly left rooms appear in the leave section of gapped sync -Newly left rooms appear in the leave section of incremental sync -Newly updated tags appear in an incremental v2 /sync -Non-numeric ports in server names are rejected -Outbound federation can backfill events -Outbound federation can query profile data 
-Outbound federation can query room alias directory -Outbound federation can query v1 /send_join -Outbound federation can query v2 /send_join -Outbound federation can request missing events -Outbound federation can send events -Outbound federation can send invites via v1 API -Outbound federation can send invites via v2 API -Outbound federation can send room-join requests -Outbound federation correctly handles unsupported room versions -Outbound federation passes make_join failures through to the client -Outbound federation rejects backfill containing invalid JSON for events in room version 6 -Outbound federation rejects m.room.create events with an unknown room version -Outbound federation rejects send_join responses with no m.room.create event -Outbound federation sends receipts -Outbound federation will ignore a missing event with bad JSON for room version 6 -POST /createRoom creates a room with the given version -POST /createRoom ignores attempts to set the room version via creation_content -POST /createRoom makes a private room -POST /createRoom makes a private room with invites -POST /createRoom makes a public room -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -POST /createRoom rejects attempts to create rooms with numeric versions -POST /createRoom rejects attempts to create rooms with unknown versions -POST /createRoom with creation content -POST /join/:room_alias can join a room -POST /join/:room_alias can join a room with custom content -POST /join/:room_id can join a room -POST /join/:room_id can join a room with custom content -POST /login as non-existing user is rejected -POST /login can log in as a user -POST /login can log in as a user with just the local part of the id -POST /login returns the same device_id as that in the request -POST /login wrong password is rejected -POST /media/r0/upload can create an upload -POST /redact disallows redaction of event in different room -POST /register allows registration 
of usernames with '-' -POST /register allows registration of usernames with '.' -POST /register allows registration of usernames with '/' -POST /register allows registration of usernames with '3' -POST /register allows registration of usernames with '=' -POST /register allows registration of usernames with '_' -POST /register allows registration of usernames with 'q' -POST /register can create a user -POST /register downcases capitals in usernames -POST /register rejects registration of usernames with '!' -POST /register rejects registration of usernames with '"' -POST /register rejects registration of usernames with ''' -POST /register rejects registration of usernames with ':' -POST /register rejects registration of usernames with '?' -POST /register rejects registration of usernames with '@' -POST /register rejects registration of usernames with '[' -POST /register rejects registration of usernames with '\' -POST /register rejects registration of usernames with '\n' -POST /register rejects registration of usernames with ']' -POST /register rejects registration of usernames with '{' -POST /register rejects registration of usernames with '|' -POST /register rejects registration of usernames with '}' -POST /register rejects registration of usernames with '£' -POST /register rejects registration of usernames with 'é' -POST /register returns the same device_id as that in the request -POST /rooms/:room_id/ban can ban a user -POST /rooms/:room_id/invite can send an invite -POST /rooms/:room_id/join can join a room -POST /rooms/:room_id/leave can leave a room -POST /rooms/:room_id/read_markers can create read marker -POST /rooms/:room_id/receipt can create receipts -POST /rooms/:room_id/redact/:event_id as original message sender redacts message -POST /rooms/:room_id/redact/:event_id as power user redacts message -POST /rooms/:room_id/redact/:event_id as random user does not redact message -POST /rooms/:room_id/send/:event_type sends a message -POST 
/rooms/:room_id/state/m.room.name sets name -POST /rooms/:room_id/state/m.room.topic sets topic -POST /rooms/:room_id/upgrade can upgrade a room version -POST rejects invalid utf-8 in JSON -POSTed media can be thumbnailed -PUT /device/{deviceId} gives a 404 for unknown devices -PUT /device/{deviceId} updates device fields -PUT /directory/room/:room_alias creates alias -PUT /profile/:user_id/avatar_url sets my avatar -PUT /profile/:user_id/displayname sets my name -PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -PUT /rooms/:room_id/state/m.room.power_levels can set levels -PUT /rooms/:room_id/typing/:user_id sets typing notification -PUT power_levels should not explode if the old power levels were empty -Peeked rooms only turn up in the sync for the device who peeked them -Previously left rooms don't appear in the leave section of sync -Push rules come down in an initial /sync -Read markers appear in incremental v2 /sync -Read markers appear in initial v2 /sync -Read markers can be updated -Read receipts appear in initial v2 /sync -Real non-joined user cannot call /events on default room -Real non-joined user cannot call /events on invited room -Real non-joined user cannot call /events on joined room -Real non-joined user cannot call /events on shared room -Real non-joined users can get individual state for world_readable rooms -Real non-joined users can get individual state for world_readable rooms after leaving -Real non-joined users can get state for world_readable rooms -Real non-joined users cannot room initalSync for non-world_readable rooms -Real non-joined users cannot send messages to guest_access rooms if not joined -Receipts must be m.read -Redaction of a redaction redacts the redaction reason -Regular users can add and delete aliases in the default room configuration -Regular users can add and delete aliases when m.room.aliases is restricted -Regular users cannot 
create room aliases within the AS namespace -Regular users cannot register within the AS namespace -Remote media can be thumbnailed -Remote room alias queries can handle Unicode -Remote room members also see posted message events -Remote room members can get room messages -Remote user can backfill in a room with version 1 -Remote user can backfill in a room with version 2 -Remote user can backfill in a room with version 3 -Remote user can backfill in a room with version 4 -Remote user can backfill in a room with version 5 -Remote user can backfill in a room with version 6 -Remote users can join room by alias -Remote users may not join unfederated rooms -Request to logout with invalid an access token is rejected -Request to logout without an access token is rejected -Room aliases can contain Unicode -Room creation reports m.room.create to myself -Room creation reports m.room.member to myself -Room members can join a room with an overridden displayname -Room members can override their displayname on a room-specific basis -Room state at a rejected message event is the same as its predecessor -Room state at a rejected state event is the same as its predecessor -Rooms a user is invited to appear in an incremental sync -Rooms a user is invited to appear in an initial sync -Rooms can be created with an initial invite list (SYN-205) -Server correctly handles incoming m.device_list_update -Server correctly handles transactions that break edu limits -Server correctly resyncs when client query keys and there is no remote cache -Server correctly resyncs when server leaves and rejoins a room -Server rejects invalid JSON in a version 6 room -Setting room topic reports m.room.topic to myself -Should not be able to take over the room by pretending there is no PL event -Should reject keys claiming to belong to a different user -State from remote users is included in the state in the initial sync -State from remote users is included in the timeline in an incremental sync -State is 
included in the timeline in the initial sync -Sync can be polled for updates -Sync is woken up for leaves -Syncing a new room with a large timeline limit isn't limited -Tags appear in an initial v2 /sync -Trying to get push rules with unknown rule_id fails with 404 -Typing can be explicitly stopped -Typing events appear in gapped sync -Typing events appear in incremental sync -Typing events appear in initial sync -Typing notification sent to local room members -Typing notifications also sent to remote room members -Typing notifications don't leak -Uninvited users cannot join the room -Unprivileged users can set m.room.topic if it only needs level 0 -User appears in user directory -User in private room doesn't appear in user directory -User joining then leaving public room appears and dissappears from directory -User in shared private room does appear in user directory until leave -User can create and send/receive messages in a room with version 1 -User can create and send/receive messages in a room with version 2 -User can create and send/receive messages in a room with version 3 -User can create and send/receive messages in a room with version 4 -User can create and send/receive messages in a room with version 5 -User can create and send/receive messages in a room with version 6 -User can invite local user to room with version 1 -User can invite local user to room with version 2 -User can invite local user to room with version 3 -User can invite local user to room with version 4 -User can invite local user to room with version 5 -User can invite local user to room with version 6 -User can invite remote user to room with version 1 -User can invite remote user to room with version 2 -User can invite remote user to room with version 3 -User can invite remote user to room with version 4 -User can invite remote user to room with version 5 -User can invite remote user to room with version 6 -User directory correctly update on display name change -User in dir while user 
still shares private rooms -User in shared private room does appear in user directory -User is offline if they set_presence=offline in their sync -User signups are forbidden from starting with '_' -Users can't delete other's aliases -Users cannot invite a user that is already in the room -Users cannot invite themselves to a room -Users cannot kick users from a room they are not in -Users cannot kick users who have already left a room -Users cannot set ban powerlevel higher than their own -Users cannot set kick powerlevel higher than their own -Users cannot set notifications powerlevel higher than their own -Users cannot set redact powerlevel higher than their own -Users receive device_list updates for their own devices -Users with sufficient power-level can delete other's aliases -Version responds 200 OK with valid structure -We can't peek into rooms with invited history_visibility -We can't peek into rooms with joined history_visibility -We can't peek into rooms with shared history_visibility -We don't send redundant membership state across incremental syncs by default -We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) -We should see our own leave event, even if history_visibility is restricted (SYN-662) -Wildcard device messages over federation wake up /sync -Wildcard device messages wake up /sync -Wildcard device messages wake up /sync -avatar_url updates affect room member events -displayname updates affect room member events -local user can join room with version 1 -local user can join room with version 2 -local user can join room with version 3 -local user can join room with version 4 -local user can join room with version 5 -local user can join room with version 6 -m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -m.room.history_visibility == "joined" allows/forbids appropriately for Real users -m.room.history_visibility == "world_readable" allows/forbids 
appropriately for Guest users -m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -query for user with no keys returns empty key dict -remote user can join room with version 1 -remote user can join room with version 2 -remote user can join room with version 3 -remote user can join room with version 4 -remote user can join room with version 5 -remote user can join room with version 6 -setting 'm.room.name' respects room powerlevel -setting 'm.room.power_levels' respects room powerlevel -Federation publicRoom Name/topic keys are correct From e704bbaf1166d0082a7aac27fdbd72e37d8fd664 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 12:30:39 -0400 Subject: [PATCH 0739/1248] update complement test results Signed-off-by: June Clementine Strawberry --- tests/test_results/complement/test_results.jsonl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 7b06510b..5fb850f1 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -534,10 +534,10 @@ {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel"} {"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_aliases"} {"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_alt_aliases"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} 
+{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases#01"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_setting_rejects_deleted_aliases"} From 889fb3cf262d433bf2da461a7482a3e7400fc41f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 13:36:56 -0400 Subject: [PATCH 0740/1248] add download-artifact pattern for OCI images only Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd7d2484..3fd834e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -550,6 +550,8 @@ jobs: - name: Download artifacts uses: actions/download-artifact@v4 + with: + pattern: "oci*" - name: Move OCI images into position run: | From 56dba8acb7b873c890313991630ebd23bbb47376 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 17:15:21 -0400 Subject: [PATCH 0741/1248] misc docs updates Signed-off-by: June Clementine Strawberry --- README.md | 62 +++-- arch/conduwuit.service | 1 + book.toml | 5 +- debian/conduwuit.service | 16 +- docs/SUMMARY.md | 1 - docs/assets/conduwuit_logo.svg | 36 +++ docs/assets/gay dog anarchists.png | Bin 0 -> 11533 bytes docs/deploying/generic.md | 22 -- docs/development/hot_reload.md | 3 + 
docs/development/testing.md | 19 +- docs/differences.md | 379 ----------------------------- docs/introduction.md | 4 - 12 files changed, 107 insertions(+), 441 deletions(-) create mode 100644 docs/assets/conduwuit_logo.svg create mode 100644 docs/assets/gay dog anarchists.png delete mode 100644 docs/differences.md diff --git a/README.md b/README.md index 13a1c67f..d8f99d45 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,16 @@ # conduwuit -[![conduwuit main room](https://img.shields.io/matrix/conduwuit%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit:puppygock.gay) [![conduwuit space](https://img.shields.io/matrix/conduwuit-space%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit-space%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit-space:puppygock.gay) [![CI and Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) +[![conduwuit main room](https://img.shields.io/matrix/conduwuit%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit:puppygock.gay) [![conduwuit space](https://img.shields.io/matrix/conduwuit-space%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit-space%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit-space:puppygock.gay) + +[![CI and Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) + +![GitHub Repo 
stars](https://img.shields.io/github/stars/girlbossceo/conduwuit?style=flat&color=%23fcba03&link=https%3A%2F%2Fgithub.com%2Fgirlbossceo%2Fconduwuit) ![GitHub commit activity](https://img.shields.io/github/commit-activity/m/girlbossceo/conduwuit?style=flat&color=%2303fcb1&link=https%3A%2F%2Fgithub.com%2Fgirlbossceo%2Fconduwuit%2Fpulse%2Fmonthly) ![GitHub Created At](https://img.shields.io/github/created-at/girlbossceo/conduwuit) ![GitHub Sponsors](https://img.shields.io/github/sponsors/girlbossceo?color=%23fc03ba&link=https%3A%2F%2Fgithub.com%2Fsponsors%2Fgirlbossceo) ![GitHub License](https://img.shields.io/github/license/girlbossceo/conduwuit) + + + +![Docker Image Size (tag)](https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest?label=image%20size%20(latest)&link=https%3A%2F%2Fhub.docker.com%2Frepository%2Fdocker%2Fgirlbossceo%2Fconduwuit%2Ftags%3Fname%3Dlatest) ![Docker Image Size (tag)](https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main?label=image%20size%20(main)&link=https%3A%2F%2Fhub.docker.com%2Frepository%2Fdocker%2Fgirlbossceo%2Fconduwuit%2Ftags%3Fname%3Dmain) + + @@ -53,6 +63,19 @@ A lot of critical stability and performance issues have been fixed, and a lot of necessary groundwork has finished; making this project way better than it was back in the start at ~early 2024. +#### Where is the differences page? + +conduwuit historically had a "differences" page that listed each and every single +different thing about conduwuit from Conduit, as a way to promote and advertise +conduwuit by showing significant amounts of work done. While this was feasible to +maintain back when the project was new in early-2024, this became impossible +very quickly and has unfortunately became heavily outdated, missing tons of things, etc. + +It's difficult to list out what we do differently, what are our notable features, etc +when there's so many things and features and bug fixes and performance optimisations, +the list goes on. 
We simply recommend folks to just try out conduwuit, or ask us +what features you are looking for and if they're implemented in conduwuit. + #### How is conduwuit funded? Is conduwuit sustainable? conduwuit has no external funding. This is made possible purely in my freetime with @@ -64,17 +87,15 @@ and we have no plans in stopping or slowing down any time soon! #### Can I migrate or switch from Conduit? -conduwuit is a complete drop-in replacement for Conduit. As long as you are using RocksDB, -the only "migration" you need to do is replace the binary or container image. There -is no harm or additional steps required for using conduwuit. See the -[Migrating from Conduit](https://conduwuit.puppyirl.gay/deploying/generic.html#migrating-from-conduit) section -on the generic deploying guide. +conduwuit had drop-in migration/replacement support for Conduit for about 12 months before +bugs somewhere along the line broke it. Maintaining this has been difficult and +the majority of Conduit users have already migrated, additionally debugging Conduit +is not one of our interests, and so Conduit migration no longer works. We also +feel that 12 months has been plenty of time for people to seamlessly migrate. -Note that as of conduwuit version 0.5.0, backwards compatibility with Conduit is -no longer supported. We only support migrating *from* Conduit, not back to -Conduit like before. If you are truly finding yourself wanting to migrate back -to Conduit, we would appreciate all your feedback and if we can assist with -any issues or concerns. +If you are a Conduit user looking to migrate, you will have to wipe and reset +your database. We may fix seamless migration support at some point, but it's not an interest +from us. #### Can I migrate from Synapse or Dendrite? @@ -98,9 +119,10 @@ is the official project Matrix room. You can get support here, ask questions or concerns, get assistance setting up conduwuit, etc. This room should stay relevant and focused on conduwuit. 
An offtopic general -chatter room can be found there as well. +chatter room can be found in the room topic there as well. + +Please keep the issue trackers focused on *actual* bug reports and enhancement requests. -Please keep the issue trackers focused on bug reports and enhancement requests. General support is extremely difficult to be offered over an issue tracker, and simple questions should be asked directly in an interactive platform like our Matrix room above as they can turn into a relevant discussion and/or may not be @@ -108,24 +130,34 @@ simple to answer. If you're not sure, just ask in the Matrix room. If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) +If you need to contact the primary maintainer, my contact methods are on my website: https://girlboss.ceo + #### Donate conduwuit development is purely made possible by myself and contributors. I do not get paid to work on this, and I work on it in my free time. Donations are heavily appreciated! 💜🥺 -- Liberapay (preferred): -- GitHub Sponsors (preferred): +- Liberapay: +- GitHub Sponsors: - Ko-fi: I do not and will not accept cryptocurrency donations, including things related. +Note that donations will NOT guarantee you or give you any kind of tangible product, +feature prioritisation, etc. By donating, you are agreeing that conduwuit is NOT +going to provide you any goods or services as part of your donation, and this +donation is purely a generous donation. We will not provide things like paid +personal/direct support, feature request priority, merchandise, etc. + #### Logo Original repo and Matrix room picture was from bran (<3). Current banner image and logo is directly from [this cohost post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). +An SVG logo made by [@nktnet1](https://github.com/nktnet1) is available here: + #### Is it conduwuit or Conduwuit? 
Both, but I prefer conduwuit. diff --git a/arch/conduwuit.service b/arch/conduwuit.service index fa3616d8..4f45ddc0 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -4,6 +4,7 @@ Wants=network-online.target After=network-online.target Documentation=https://conduwuit.puppyirl.gay/ RequiresMountsFor=/var/lib/private/conduwuit +Alias=matrix-conduwuit.service [Service] DynamicUser=yes diff --git a/book.toml b/book.toml index 1d32c766..7eb1983b 100644 --- a/book.toml +++ b/book.toml @@ -13,12 +13,15 @@ create-missing = true extra-watch-dirs = ["debian", "docs"] [rust] -edition = "2021" +edition = "2024" [output.html] git-repository-url = "https://github.com/girlbossceo/conduwuit" edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}" git-repository-icon = "fa-github-square" +[output.html.redirect] +"/differences.html" = "https://conduwuit.puppyirl.gay/#where-is-the-differences-page" + [output.html.search] limit-results = 15 diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 4d6f4eef..a079499e 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -2,26 +2,14 @@ Description=conduwuit Matrix homeserver Wants=network-online.target After=network-online.target +Alias=matrix-conduwuit.service Documentation=https://conduwuit.puppyirl.gay/ [Service] DynamicUser=yes User=conduwuit Group=conduwuit -Type=notify-reload -ReloadSignal=SIGUSR1 - -TTYPath=/dev/tty25 -DeviceAllow=char-tty -StandardInput=tty-force -StandardOutput=tty -StandardError=journal+console -TTYReset=yes -# uncomment to allow buffer to be cleared every restart -TTYVTDisallocate=no - -TTYColumns=120 -TTYRows=40 +Type=notify Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8e07adc2..ad0f8135 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,7 +1,6 @@ # Summary - [Introduction](introduction.md) -- [Differences from upstream Conduit](differences.md) - 
[Configuration](configuration.md) - [Examples](configuration/examples.md) - [Deploying](deploying.md) diff --git a/docs/assets/conduwuit_logo.svg b/docs/assets/conduwuit_logo.svg new file mode 100644 index 00000000..9be5b453 --- /dev/null +++ b/docs/assets/conduwuit_logo.svg @@ -0,0 +1,36 @@ + + + + + + diff --git a/docs/assets/gay dog anarchists.png b/docs/assets/gay dog anarchists.png new file mode 100644 index 0000000000000000000000000000000000000000..871cf302a8f27d6ed02c983241c7c5ca6029ce99 GIT binary patch literal 11533 zcmd^l`9IX(`}fP1$`YwavWDy=`&QWrW#7i2ELoBoM$9m6mK0^*DTYe6gc=O8y~~!7 zEg>WF%962;{XXOK{@nNd{TseNJs;;>*LAMtoa=S2nRCTlF*js8%zGGuAU0zoJxd5W z&+`LHQqE{g$ zsAMIhX*sTTt?=_k=lJ&2!rtB+im94gKC-TBk0rx*^;sl%-pN zcQ9Dowdtoc#jYa_}hWPfyC&r%KM(}?T&jK9%m+P$?GL3a90I(p2*^u#~RjDwq+Touy; z;fx$mt`-BXf1@X&&klkNw|XLS_vEl_(5-?k>Siv);EEu&rvk7EZ=-?!?o&Cs1C2hc zlcqSj@}nM@L-LTV8y}LM6?c4yM!K+ENCu>@A7JWHSLnPylE%SXtwUtwfx1G^(%@;R zDh$8(W%@C#>aYQn9a3 z(ik6o+jmshXWx6eRhpQ5tr|^>eC6GJDE_z)`5XI?skYCOcy}LSKkmbW4H{e-JxWve z$}gR!&brE)rcTmv-i72e~Kn{>zy8r(L9OfVWY7nZ4K3n`%)Gu zPiS*o9yGqiGyAdDytp3_Lti198-_k2`*1^aAH2nB!0aor4?a@+a6@Jvy!ZW%_fepY z;=L7VaKuMxAG}p)5LUIn!&=|tXj%&f_I-P?L-S_NUYBO1!2l;MYM&hUV^N+%BW+(Z zqnS)~L+pd6KMg{v+i0}6=fJv+(IG52F2?GH%j<{8tYk5vsTHt4_|L&8%MbJp>{PUJOM zgT9fjWw3Nbi|*%DvaiWzDYSTyq9lVwLLtwn-^HsEJSz`xF5&CO^I3GwK^5EH|C9A*TO1F&;W4 zLi8~nH^!N9B3BFkiZ(_@mt~O9T&{{Ob_jUuP&p^LxO(L@(-0%Y2DzC-?}wvi=x&zM zqbx98B3LAyx;Sw%h!2s^g_iSq79LMn(jiOQ$YEOi3X2K?qJ{hw->CE-t!E6#rNGf=bKaJJR~8JT9{D z4OoNyD-)%vZbgfE21hk98V|ztb0JT{TkPex$x@=&-*mQSn5lxlT8)om$_iFSneAmL z$;re$NJ^AQA{~dyO~_Jm2nI;%4AJpfDLt;A6X|a>NZ0r!y3Dpfiq6%h#ZH4%1(gev zi$!4{$exYTpMuhY-ae%Im=S~_+l!bgaRe76B}jBkDm{#wjww^d&(h}wl8bHNFOYWz z)yIyY4DpH)9aBo#ah@?{)%aP4ysPBmAowtycLLQ1NicwT&kzR$5u#8RCvx^#DHpCS zx~$5`?(hYxmch^hU#2`EvacDbpZ+T=rAihKrSpDBb<9MyLgz1HN-PMT5U(I{;4DH3 z3MBhxlY$sH#E1iu2sH?t#ClQ{0|!`K8leR#kbOrny97!4n0 zNP9&6gCw*;o`S?`L4*g?A5#Wme)56`*;k5`eBeql^?MwugwFa5vHA?cAL@@T^EVzm 
zbwQNu>q|P$eB}}Kdk(6L&KgLcMFc_OT*#56QVkqeOj(JE9e<;7%V03x_sCC1vTp~e zpLy{**>{Ar%DkvY_MIgiKe%{?9E)P3*Str)grfty{vowg7bg-UM>Mq)ZoJyUt%LXF z{CSWZ8-Xw81i2pj0;NUIXN0+GVt1zTVhi^*ID=6YM|HO&NYe3(64Me&O>u_Na)o#) zuAdjlu~<^op+$(&8V>(q1gTDWhQiX%2@yl=376=^V&wP}ORwS%kz?EO#m8KusALXU zor#AN$tQ+*4KYz#Uy!N}%L@`!oe7R~Ak$S12rhKt97sNKLVWei#}*mc z$>6ADV^R={Uk;UggOtp2eqpXPHubkoeJF*jKNW$*P^^yg;Wk;9oNTHBIo8Vhbx@v&mt25&JHl!VktKhW3z=2u(#vpK zwKn{YcgW5y^wyGb?ir%0%@mn=U~X#uIlMYL8(Y&Jo&ImQoNGTTbDZc;h|_EF2B^0&E=o!3eO4$Qsb zLJGf4sKiF`yYN>!PtT7{YPjrBkt`lv zZs<{3F`iVH#w2cW>CrU5_lDW^?L!@(dlzSm=50d4G&aiW!=#95R`UT&m({PA%>i#? z1!Aklowe(}%x89({AC;~n1!jKOARVoTH6u30fgq>s>kC8hh|$JR7RoKz4#Y)ml||L zC=(9t%Z(8x0)=sL>VcQTlve|Yd(UnLuDm4;((x~TU7TH5+ibe`ZMY*gDkb#PL#@x& z16jM)l=*Mcly+2n{xu~c>tbGJeelm`7zx9)mrO;sfA1=*wTs_tU_Ylcl;YZnw;cY&pJ-vQyEZ*DIW_KbxolSc zX-L7ShNTyEbu@Og_DoZnaXsPDA+Nq8N6G>U|2NqH7gbwe6-V2?@eX}ZutG^HsS5o zdyjQvKy3*z-KdLIX+PvC#N6NI*yK0ZWnO2UTk;)2_Ld<(Xgkt4t$p}zmMi~*t0SGF zod)yLCtj&xseKD#H@2Gi&u!l6(s4aKo+k$f8wPlmxmC~j)ji(4nKGFZ9RK>ZSbKQB z+Mo&x=GX8H`iZgLAIH4t;T+1>nNervg*Yz1oV>YDU6*PKw!fZhx-4utcvQc0;EBbV z+zN-d!N<~lse`Y!uLi0#Z=A9+SZ_ZgY$IdU>i6a4i*vj3rkq7jZ3ShO2OW3Q?(T&V zCVP|dU()77G(r~@mh-%`T&u?X`cr%RUVY>iWKR-l&>@|UFO{}y_iLS@8=6ZD&1YQZ z@iS55AUH238a8-knOf+xoH#ck?>JZ8sQbFtBo18}@pH3IMlt*av+H+XbTUJ^zBtF<vslBt7b8Uqt58`lCnD-`-(l`YPUC zl_3n3I7|F=-n>`2J@fv9MnqvP|ESS1#ZgJC>%FZv^FMdA<}ROq%acuvbH23E(H6*O zee|`by3I`qmku8z?#-|jVe9UdS9}aIM4;3c(5xyRid1V?Qt!HnY6TU1NoPOKP~K>?#BLH641Q97+$$YH&oeMhLJl^Yu70#cI1zoYzv>T4I@hi@mN6C z;cNqY)+S4qh3}S{VUPJMY*?APge-O6<-sNu&s7znQ*2F^+`UQeZ0;!ADryi#Xmn%d z5}+FQ=D4>vaof3_Y_B{=8rLf;NYmYxq7mP}gpCv~uAB=x-Wp~jJ;|e$&MH9Ck>-^1 zER6Ki?|VxA9kdAd>Fcu9GPOQCU#C?1;lz=qSAtHxsC;kprqzhnXb}yMfrk?!<-d4a z!dmMo98V5^5;c=^9QLp6d7aN<)xrIS##U5#x>fZ&tz|?_jWcG@f>c;X;sR9 z-OzSRbq4Qh4dyq$cHhh=yZx9R6gtr!);jo}HNsi-dCp;ysF{H5_ZvygpF6w$dW83X z6PWb07ajBY4AUr)@s6KN#T^~d-00qJ>_?S4rCd^dJbQsuhU~HB;0eWpvA)Bbmo_I7 zQd*H;*A}NRX6xGO&q}=sS2XEO<3l6!n@V&TPnB-@{=8l?A<5psp@ty5gg-6XBv1@x 
znr~FD3>3YeI1-z=(wY?Hep0x|)b2cj$DFN|S*!JyJXXQM>*&Lzc&QBjW6z)GzUI}Y4zD@MMBU>I(`~$P^(;JQ z$V@tJcI5H=J2+MXeP5}3Mb=x@WAGC0c+anWS6DLm>wJnW*i#QLT5S*is-IxOhY6fE z;y$mX8c=LO{NQfKb**<|t~9$dOB(TYC9h)soyz1`mkpOn;oD<$ZJ$X@W=A6ZCBlDg zRNj91Sb&jd#~8?Z&(%e_+vw;Vl;2F0^$wMvOQfKKHBZ%lOZfXQ2hZB)m8u_Hu3Z0P zTwCvM$k3t7#FLC?$zW$qEoqNms`nBZdU!y2CfPzf!dOLTKo0R0l~=_O%Q%!Iw$QnF z|6e!Q&vUz$l@9f`WS?}oLRjy;hq~BzZ<@aIDj%0)+7GNoT^{OhJ_G!?kfDo-=P9r4 zm$H9@!_3GNMY&}9$HeaZX%1ID&kuy@`2Vt&)UT)7vWF7OF zd+`P4@_#lvRIpL?pYkhwEln^j)R9=&f79LDwxV8WbaUn+p#W~W<@xH-;Ono269<-i z-~q!U;AkCj40|E!&s#cCe(*W%5FUwq`Z}g2_-nz$RpJRD%m0XlS#Sl+=jp2q+`8gG zD1@6<^Aze$k*p<>-S*~aZNjtC8=+U;%F5n`-rrDtK-C_l&gc?e!JlrOd^w3px9k+$ zO}U9#5kmB%Uf8z=CK7oaQ3`LLAEA9?NH!oRx<2d5i?N~=GA|ayp=|o1CfqE8?^pPK zu=~m?oLtb6q0K6AU+q~AqWRIA0YOV+!dqB3(Bzq#UfW~3w8WX$XApxZck07tKH+Q4 zYSYDKaJ8WvDacIxW|z=AqyiMSr#ZSdqfaP@TRPmL2u4V@rxH0hhffz};7^6z&1!mK z=@Q3^j1W6H3!x|x#PG_?G~GILB%6%dsa=8sl%IpT(HBdWj~j%GUx>B}@ws>yYr&+# zQ?1wHQo+>e)__w1^!YDQ{^0wg*6y+C<84UkYZLR7G#OMLRh9alH0Zdbt9EbZ5YKUZ zY-aee4pmL2$y#t3)r&@3CzamZyjr*T`H~({!cNcyYmC+;B*+G+pTo#4_m4Vk-uAv2 z82PQ=E9%x2D?%M?q~`ETl^xUp(l3fiQP{r3rNQbTKIhbpRYSsfZ#wE;U*he9(^odT zY9*FCB#722rMC!{(t3637D#pnl_~nT;v=-$fFx>e_vKNv7-x-t7P1<5)rrssciio* zc&qp0U10c*5B>A+Bz<#a_m^uAvd&f^CbvWXvP!%$wNuN!>kusC3)lE$A?*bE>f_k) z9a&>_5+f-LYSlNy8+J`<)WPq`JE4l|>N9`DuooGhtVImdwP}(bnWGI7qtQqG!erYv zJm{D!jqKESJ|?@41P4nr*c%WA;GSWPL)9K~ZSf7#7p}{~?nSzj%+#ZPy4}HrN2a(d zjZ-zWcy&Idx@J!$8>vTUe&P<3CIZ1&tpCAY?dg|LW!q# zAs73~1&3D{$$KWq25$zh8DRXo7bsG>CHmJ-z>Y<}eO^Mu%g7UoBPWBM%zC6r$>u$U zDFLca!&yq(Yj%22Mfs-irJ`$N%+&~u_1!X5alRp}UWDY@m@<>hthlqDJ!guA&lNdw zZFo?tgeJEFB!pM-#aS-%(}UR)gN09|8ZWWGG38dv{*H(Uxk0T`m}FeOecZ=%P!$V) zPsFZ>R(uG&k2R{%2u+j?+fDePC@$@ZP1(p(ae=jf>k7~$Q zgz1Y6C;i_p`Boa}c!2WOhCqbBQhTGezn3pL>qXHf>foLbIxz9Ba%r$L71f?E1uqrq z)GWnD)SvB`SVwi_i^G2kHRPtab*E0r`X2x}AJ~`Ko79`U{k`tC$%Yl-ckgrX8OZEq zm)~kQE6!PO|0tAHW#(mIp_$dt6e3eE#M573nn}#6mksUr*bMc)s8-^qqPRbobY$*t z)_6z#ZLeBTMkbW{5tgc*nc1lU5LRWW*f{ 
zODnl>{gr*R_eo~jM0lOd0oS;ndGgMAo7$u6GkSzKuwexwB?x@YE(h{OaXVS+gDHc% zKR$J=Onpsr$K3=)AozHzxUii{QY_#(#cvWoo6$E@dF`pY2lpQZy5S!t0%v6=NaY!jkcg0rT$7t|;J&)|f2XODmNKSL~(Rh3_%#Rr| zsXcsfs8zbMS@o45q7U^l?II?V$eVGV4D7OBhzu8e=HvFbnY?Dqn27w^Npq#W zj0G%85YdZrqaK{or(N8Eg4q4?LBH>TJDVYxNrUp=fMVjo>_w+{EKhbAd2B#&!q3W` z>d@R2-YlY6pOl{XpBbT?BDu&&HVePNW6a{Fpbp}x#;3xuFWl&#yxA|5#2FnP{uBP+ zQT+62{ht3jqDvv3a1?8NB?-!`3%EcYIs+T{ej!8ipn}GiROY%9uHTYmz18J4qbJXS ztJ_c2)`C`lfGJRaJo}G{baZWQ-8B28GHB&c>%=F_;HT`9V`)AAt#vY3%I!>}nt9)s z*R`?8yp{rv=Y51U1XKb_Gltx?>lEmp(5gvxR))EtP4|UErPqf7P-_2*JrI0u zq>t`k2n^#!xlDL=WhBKS`x081xAMI$@DGkp5NJaKV@DTPHI@v}G z%BF{FoBAm_^Td2!zQeX$dS)y1CetNA~+gNMg6(slV{U#=f~ zBALKsrV`n~F*<{1AV_bo-*o1oyV%&D_D;eJbjziK;x=hWxp%B`Bq z0&v4&?7HVi(Vn8tb@}A9%ZfOo|L>ZFU=9s&jFOA{b z)w&;+9#xwr^Rg(;S*tH`;*~h&&)o{!pX)2!(tkG={|0h9wOwT;bqQJ;zR7|+7C%~r zG|b&9Y5cviUd+_0-ZHfKAw(BlUV2X(s)eDlS7MsYourbd8VqRaAt`oaQ?d? za{E8itrVit!q)#TS{o=v>l`yAiMyyd&e2t#Px47A^VqcR6l#l;mMs0m+micOXlSWD zFiFl^QMNDxZ<>Dd__bW_7qMr<|2RFMTB$KQZT@Z@d3m0-@s-zvWg)VLjkT#X=*x{M z4!;R~<<`UO4DynW)d{eD#jf0WMyUg?ND{I%GD)|*R$rVm)(RtYv7k_tC7pO&=e>w- zIdJ(<`&VRzam#J`-lKXxj^)o_=fEs$_k+QcgJ z>P$(Bs+Lucu{ReCo&5F?`6W5Ja<7r+cj_TMpUjUBhttSL?5&SZYxP>LR*$(K zAi8!ulLNyY?imGD+RA5Il`_^5?El;e_|mhJvFoDrM)RuA7~`qmsfq=!0@~<3W4lvl zpE2Y!yT(ac+RKq~%fj%kFBQE9j=d4no) z`Omp{@Q33hoS)9+mxQzx)b^}9RbaXl^5IY%Bz1)g!9V0NmFcU+gL4brH>ymE3b8&g?q;4wnvs?hpi=@czDZRdwEOZsf~cL-@`|g`TroWMiDP;-G!S6otG%`JuIv&iw)vs$XEXvMY3jY1OWqd5cWG|IeX;Q*yA{XrF zFp9s~BX)^o+89?}DKIxSTVyyiGFr84SBOe?Xbq=lnCw4pXO|iz@UZ^-2DB0M#A4D4 zAFel)xIp?f(~-bCZCizJYzXFhI~_M1(`B$UtJ?6yVr?fdRW{D0n{wM7o~Y3fZb-AM zkE6$rGS(7aDILtTpBHP0TgCWDwAXiBi*!@ro;+i7a(z*qf4_OsA4&T@82X1rv$QCDxudLT^PSFv-L=iGjgCJCzzI@QB0GB?6jTZg%`JK8!m)xA)Qh(t=~ zyO-Us*xO#;%go9r)MCHukx`g>^lqE+n$DrH3>~cU}vqoIEi?e3APveT4R>NK}xw5KZ-G5k_4@4uL6OnFGhcS81iPSy z=k@OtHx~z+?!+{NKX+*<(BMCPSCzPnZWw=M`*6pz&BwDLVtc+(nx z4JT(^vJY}0SvFQE9l41LYrh4M+B|nTQ^C)8avpGGyW^15Xp3D-kwq>sd}Cs=$RZ)u zwqT1Np{hzGZcwR|iZE=o%?T>)f5*l^O3>f+X6{2t59Gq0;vx#d1d7oZ40d^9hRrYn 
zbDm0nfHWGP9Oe$DI*`l(Y+Kuzt2Y~s2zwPiMJv5$aT=9=Kyp+k<|wi;6t8ji zBALa$5_Lq>07EyMvhea@w4Cca37d3~!t@qNNIVLJ`3u4%Vu9@fq|u~Dz;$dZfny@zjONPMWwW+7Y~I<7AB19l>K>BZ$0Aa*Uj- z6lo{r4RGZ&9A~OeW*JIA_279aOn#<>SxX>ZEjEgN4a7$h7wW*HHcrN5-Njsti1Rl= zBh5i1OH9_0<{)D8CTLf48r^i=HRm$mPg(e+`5eg0jz--*N@04o&;nky={a1%?n4Xs zS_Q+SNN60trC`X3gmmy4=L|WJ(4NWqb#^f#6iGs)GsMW@ED3UVI4Df=DGQx^7cg{l zChMNySsN-Ws8x&y?0ux2bY{SYC7sS-j+VpikhC)DTeur_Nr((BfXyUqhB2Atrvhx= z#YkaV{D3O+jH1%3>Jgk2g^3V*;=U2*|HVr!nB@HTL1dsFA=LtAbjpC%`_?8)rZp z<25$KXk&cX#sPr5q*2VB7VZnD;c%h?nI$g;WjKC}!gR%gP!s^R`<29Kzltt~?pyN0 z>?t84p#yD- zq3e_y{jC9FHJ!4sf&igjHeKIj2iipoYO!b_evY(5R0QG@aNPKD3R8I&s^>4zZf{AL zMFZ{9Qlsk{KzmBc!ZreE?=W58We3_HqSldGU^kyH>`93eAxqMsavw5_pm;W&6xg=} zl0>;97)oU`fDRUjL^1{)EG&X@*-W%ucYJ340CccG{zku>zJRrC3%J~DKp-?o>7av! zB~KK#1|2L+UsF)Bpo4|p+f4l==wP9HFP_Z?QXXNeuuQvP`8xPEfqd6< z0NWIOm_ zdNLr}sb2#fEKq2QdHOI-8h^)A43N=qLHZ*=MymUQY=^1^vz7aSG>EyG2QW#B6k`RX zhvHDvpo0afK9jB952XE#f18T}vM=eg6_6gvL9KyghpNwJYl99JXe6mSmkX>?Vj}+4 z7z8aCuQ - -See the `[global.well_known]` config section, or configure your web server -appropriately to send the delegation responses. - ## Adding a conduwuit user While conduwuit can run as any user it is better to use dedicated users for diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md index 018eb4b3..65fd4adf 100644 --- a/docs/development/hot_reload.md +++ b/docs/development/hot_reload.md @@ -1,5 +1,8 @@ # Hot Reloading ("Live" Development) +Note that hot reloading has not been refactored in quite a while and is not +guaranteed to work at this time. + ### Summary When developing in debug-builds with the nightly toolchain, conduwuit is modular diff --git a/docs/development/testing.md b/docs/development/testing.md index 2d421767..a577698a 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -5,12 +5,11 @@ Have a look at [Complement's repository][complement] for an explanation of what it is. 
-To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv -installed and set up, you can: +To test against Complement, with Nix (or [Lix](https://lix.systems) and +[direnv installed and set up][direnv] (run `direnv allow` after setting up the hook)), you can: -* Run `./bin/complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl -./path/to/results.jsonl` to build a Complement image, run the tests, and output -the logs and results to the specified paths. This will also output the OCI image +* Run `./bin/complement "$COMPLEMENT_SRC"` to build a Complement image, run +the tests, and output the logs and results to the specified paths. This will also output the OCI image at `result` * Run `nix build .#complement` from the root of the repository to just build a Complement OCI image outputted to `result` (it's a `.tar.gz` file) @@ -18,5 +17,15 @@ Complement OCI image outputted to `result` (it's a `.tar.gz` file) output from the commit/revision you want to test (e.g. from main) [here][ci-workflows] +If you want to use your own prebuilt OCI image (such as from our CI) without needing +Nix installed, put the image at `complement_oci_image.tar.gz` in the root of the repo +and run the script. + +If you're on macOS and need to build an image, run `nix build .#linux-complement`. + +We have a Complement fork as some tests have needed to be fixed. This can be found +at: + [ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo [complement]: https://github.com/matrix-org/complement +[direnv]: https://direnv.net/docs/hook.html diff --git a/docs/development/testing.md b/docs/development/testing.md index 2d421767..a577698a 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -5,12 +5,11 @@ Have a look at [Complement's repository][complement] for an explanation of what it is. 
I recommend that you give conduwuit a try and see the -differences for yourself. If you have any concerns, feel free to join the -conduwuit Matrix room and ask any pre-usage questions.** - -### list of features, bug fixes, etc that conduwuit does that Conduit does not - -Outgoing typing indicators, outgoing read receipts, **and** outgoing presence! - -## Performance - -- Concurrency support for individual homeserver key fetching for faster remote -room joins and room joins that will error less frequently -- Send `Cache-Control` response header with `immutable` and 1 year cache length -for all media requests (download and thumbnail) to instruct clients to cache -media, and reduce server load from media requests that could be otherwise cached -- Add feature flags and config options to enable/build with zstd, brotli, and/or -gzip HTTP body compression (response and request) -- Eliminate all usage of the thread-blocking `getaddrinfo(3)` call upon DNS -queries, significantly improving federation latency/ping and cache DNS results -(NXDOMAINs, successful queries, etc) using hickory-dns / hickory-resolver -- Enable HTTP/2 support on all requests -- Vastly improve RocksDB default settings to use new features that help with -performance significantly, uses settings tailored to SSDs, various ways to tweak -RocksDB, and a conduwuit setting to tell RocksDB to use settings that are -tailored to HDDs or slow spinning rust storage or buggy filesystems. 
-- Implement database flush and cleanup conduwuit operations when using RocksDB -- Implement RocksDB write buffer corking and coalescing in database write-heavy -areas -- Perform connection pooling and keepalives where necessary to significantly -improve federation performance and latency -- Various config options to tweak connection pooling, request timeouts, -connection timeouts, DNS timeouts and settings, etc with good defaults which -also help huge with performance via reusing connections and retrying where -needed -- Properly get and use the amount of parallelism / tokio workers -- Implement building conduwuit with jemalloc (which extends to the RocksDB -jemalloc feature for maximum gains) or hardened_malloc light variant, and -io_uring support, and produce CI builds with jemalloc and io_uring by default -for performance (Nix doesn't seem to build -[hardened_malloc-rs](https://github.com/girlbossceo/hardened_malloc-rs) -properly) -- Add support for caching DNS results with hickory-dns / hickory-resolver in -conduwuit (not a replacement for a proper resolver cache, but still far better -than nothing), also properly falls back on TCP for UDP errors or if a SRV -response is too large -- Add config option for using DNS over TCP, and config option for controlling -A/AAAA record lookup strategy (e.g. 
don't query AAAA records if you don't have -IPv6 connectivity) -- Overall significant database, Client-Server, and federation performance and -latency improvements (check out the ping room leaderboards if you don't believe -me :>) -- Add config options for RocksDB compression and bottommost compression, -including choosing the algorithm and compression level -- Use [loole](https://github.com/mahdi-shojaee/loole) MPSC channels instead of -tokio MPSC channels for huge performance boosts in sending channels (mainly -relevant for federation) and presence channels -- Use `tracing`/`log`'s `release_max_level_info` feature to improve performance, -build speeds, binary size, and CPU usage in release builds by avoid compiling -debug/trace log level macros that users will generally never use (can be -disabled with a build-time feature flag) -- Remove some unnecessary checks on EDU handling for incoming transactions, -effectively speeding them up -- Simplify, dedupe, etc huge chunks of the codebase, including some that were -unnecessary overhead, binary bloats, or preventing compiler/linker optimisations -- Implement zero-copy RocksDB database accessors, substantially improving -performance caused by unnecessary memory allocations - -## General Fixes/Features - -- Add legacy Element client hack fixing password changes and deactivations on -legacy Element Android/iOS due to usage of an unspecced `user` field for UIAA -- Raise and improve all the various request timeouts making some things like -room joins and client bugs error less or none at all than they should, and make -them all user configurable -- Add missing `reason` field to user ban events (`/ban`) -- Safer and cleaner shutdowns across incoming/outgoing requests (graceful -shutdown) and the database -- Stop sending `make_join` requests on room joins if 15 servers respond with -`M_UNSUPPORTED_ROOM_VERSION` or `M_INVALID_ROOM_VERSION` -- Stop sending `make_join` requests if 50 servers cannot provide `make_join` for -us 
-- Respect *most* client parameters for `/media/` requests (`allow_redirect` -still needs work) -- Return joined member count of rooms for push rules/conditions instead of a -hardcoded value of 10 -- Make `CONDUIT_CONFIG` optional, relevant for container users that configure -only by environment variables and no longer need to set `CONDUIT_CONFIG` to an -empty string. -- Allow HEAD and PATCH (MSC4138) HTTP requests in CORS for clients (despite not -being explicity mentioned in Matrix spec, HTTP spec says all HEAD requests need -to behave the same as GET requests, Synapse supports HEAD requests) -- Fix using conduwuit with flake-compat on NixOS -- Resolve and remove some "features" from upstream that result in concurrency -hazards, exponential backoff issues, or arbitrary performance limiters -- Find more servers for outbound federation `/hierarchy` requests instead of -just the room ID server name -- Support for suggesting servers to join through at -`/_matrix/client/v3/directory/room/{roomAlias}` -- Support for suggesting servers to join through us at -`/_matrix/federation/v1/query/directory` -- Misc edge-case search fixes (e.g. potentially missing some events) -- Misc `/sync` fixes (e.g. returning unnecessary data or incorrect/invalid -responses) -- Add `replaces_state` and `prev_sender` in `unsigned` for state event changes -which primarily makes Element's "See history" button on a state event functional -- Fix Conduit not allowing incoming federation requests for various world -readable rooms -- Fix Conduit not respecting the client-requested file name on media requests -- Prevent sending junk / non-membership events to `/send_join` and `/send_leave` -endpoints -- Only allow the requested membership type on `/send_join` and `/send_leave` -endpoints (e.g. 
don't allow leave memberships on join endpoints) -- Prevent state key impersonation on `/send_join` and `/send_leave` endpoints -- Validate `X-Matrix` origin and request body `"origin"` field on incoming -transactions -- Add `GET /_matrix/client/v1/register/m.login.registration_token/validity` -endpoint -- Explicitly define support for sliding sync at `/_matrix/client/versions` -(`org.matrix.msc3575`) -- Fix seeing empty status messages on user presences - -## Moderation - -- (Also see [Admin Room](#admin-room) for all the admin commands pertaining to -moderation, there's a lot!) -- Add support for room banning/blocking by ID using admin command -- Add support for serving `support` well-known from `[global.well_known]` -(MSC1929) (`/.well-known/matrix/support`) -- Config option to forbid publishing rooms to the room directory -(`lockdown_public_room_directory`) except for admins -- Admin commands to delete room aliases and unpublish rooms from our room -directory -- For all -[`/report`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3roomsroomidreporteventid) -requests: check if the reported event ID belongs to the reported room ID, raise -report reasoning character limit to 750, fix broken formatting, make a small -delayed random response per spec suggestion on privacy, and check if the sender -user is in the reported room. 
-- Support blocking servers from downloading remote media from, returning a 404 -- Don't allow `m.call.invite` events to be sent in public rooms (prevents -calling the entire room) -- On new public room creations, only allow moderators to send `m.call.invite`, -`org.matrix.msc3401.call`, and `org.matrix.msc3401.call.member` events to -prevent unprivileged users from calling the entire room -- Add support for a "global ACLs" feature (`forbidden_remote_server_names`) that -blocks inbound remote room invites, room joins by room ID on server name, room -joins by room alias on server name, incoming federated joins, and incoming -federated room directory requests. This is very helpful for blocking servers -that are purely toxic/bad and serve no value in allowing our users to suffer -from things like room invite spam or such. Please note that this is not a -substitute for room ACLs. -- Add support for a config option to forbid our local users from sending -federated room directory requests for -(`forbidden_remote_room_directory_server_names`). Similar to above, useful for -blocking servers that help prevent our users from wandering into bad areas of -Matrix via room directories of those malicious servers. 
-- Add config option for auto remediating/deactivating local non-admin users who -attempt to join bad/forbidden rooms (`auto_deactivate_banned_room_attempts`) -- Deactivating users will remove their profile picture, blurhash, display name, -and leave all rooms by default just like Synapse and for additional privacy -- Reject some EDUs from ACL'd users such as read receipts and typing indicators - -## Privacy/Security - -- Add config option for device name federation with a privacy-friendly default -(disabled) -- Add config option for requiring authentication to the `/publicRooms` endpoint -(room directory) with a default enabled for privacy -- Add config option for federating `/publicRooms` endpoint (room directory) to -other servers with a default disabled for privacy -- Uses proper `argon2` crate by RustCrypto instead of questionable `rust-argon2` -crate -- Generate passwords with 25 characters instead of 15 -- Config option `ip_range_denylist` to support refusing to send requests -(typically federation) to specific IP ranges, typically RFC 1918, non-routable, -testnet, etc addresses like Synapse for security (note: this is not a guaranteed -protection, and you should be using a firewall with zones if you want guaranteed -protection as doing this on the application level is prone to bypasses). -- Config option to block non-admin users from sending room invites or receiving -remote room invites. Admin users are still allowed. 
-- Config option to disable incoming and/or outgoing remote read receipts -- Config option to disable incoming and/or outgoing remote typing indicators -- Config option to disable incoming, outgoing, and/or local presence and for -timing out remote users -- Sanitise file names for the `Content-Disposition` header for all media -requests (thumbnails, downloads, uploads) -- Media repository on handling `Content-Disposition` and `Content-Type` is fully -spec compliant and secured -- Send secure default HTTP headers such as a strong restrictive CSP (see -MSC4149), deny iframes, disable `X-XSS-Protection`, disable interest cohort in -`Permission-Policy`, etc to mitigate any potential attack surface such as from -untrusted media - -## Administration/Logging - -- Commandline argument to specify the path to a config file instead of relying -on `CONDUIT_CONFIG` -- Revamped admin room infrastructure and commands -- Substantially clean up, improve, and fix logging (less noisy dead server -logging, registration attempts, more useful troubleshooting logging, proper -error propagation, etc) -- Configurable RocksDB logging (`LOG` files) with proper defaults (rotate, max -size, verbosity, etc) to stop LOG files from accumulating so much -- Explicit startup error if your configuration allows open registration without -a token or such like Synapse with a way to bypass it if needed -- Replace the lightning bolt emoji option with support for setting any arbitrary -text (e.g. 
another emoji) to suffix to all new user registrations, with a -conduwuit default of "🏳️‍⚧️" -- Implement config option to auto join rooms upon registration -- Warn on unknown config options specified -- Add `/_conduwuit/server_version` route to return the version of conduwuit -without relying on the federation API `/_matrix/federation/v1/version` -- Add `/_conduwuit/local_user_count` route to return the amount of registered -active local users on your homeserver *if federation is enabled* -- Add configurable RocksDB recovery modes to aid in recovering corrupted RocksDB -databases -- Support config options via `CONDUWUIT_` prefix and accessing non-global struct -config options with the `__` split (e.g. `CONDUWUIT_WELL_KNOWN__SERVER`) -- Add support for listening on multiple TCP ports and multiple addresses -- **Opt-in** Sentry.io telemetry and metrics, mainly used for crash reporting -- Log the client IP on various requests such as registrations, banned room join -attempts, logins, deactivations, federation transactions, etc -- Fix Conduit dropping some remote server federation response errors - -## Maintenance/Stability - -- GitLab CI ported to GitHub Actions -- Add support for the Matrix spec compliance test suite -[Complement](https://github.com/matrix-org/complement/) via the Nix flake and -various other fixes for it -- Implement running and diff'ing Complement results in CI and error if any -mismatch occurs to prevent large cases of conduwuit regressions -- Repo is (officially) mirrored to GitHub, GitLab, git.gay, git.girlcock.ceo, -sourcehut, and Codeberg (see README.md for their links) -- Docker container images published to GitLab Container Registry, GitHub -Container Registry, and Dockerhub -- Extensively revamp the example config to be extremely helpful and useful to -both new users and power users -- Fixed every single clippy (default lints) and rustc warnings, including some -that were performance related or potential safety issues / unsoundness -- Add 
a **lot** of other clippy and rustc lints and a rustfmt.toml file -- Repo uses [Renovate](https://docs.renovatebot.com/) and keeps ALL -dependencies as up to date as possible -- Purge unmaintained/irrelevant/broken database backends (heed, sled, persy) and -other unnecessary code or overhead -- webp support for images -- Add cargo audit support to CI -- Add documentation lints via lychee and markdownlint-cli to CI -- CI tests for all sorts of feature matrixes (jemalloc, non-defaullt, all -features, etc) -- Add static and dynamic linking smoke tests in CI to prevent any potential -linking regressions for Complement, static binaries, Nix devshells, etc -- Add timestamp by commit date when building OCI images for keeping image build -reproducibility and still have a meaningful "last modified date" for OCI image -- Add timestamp by commit date via `SOURCE_DATE_EPOCH` for Debian packages -- Startup check if conduwuit running in a container and is listening on -127.0.0.1 (generally containers are using NAT networking and 0.0.0.0 is the -intended listening address) -- Add a panic catcher layer to return panic messages in HTTP responses if a -panic occurs -- Add full compatibility support for SHA256 media file names instead of base64 -file names to overcome filesystem file name length limitations (OS error file -name too long) while still retaining upstream database compatibility -- Remove SQLite support due to being very poor performance, difficult to -maintain against RocksDB, and is a blocker to significantly improved database -code - -## Admin Room - -- Add support for a console CLI interface that can issue admin commands and -output them in your terminal -- Add support for an admin-user-only commandline admin room interface that can -be issued in any room with the `\\!admin` or `\!admin` prefix and returns the -response as yourself in the same room -- Add admin commands for uptime, server startup, server shutdown, and server -restart -- Fix admin room handler to not 
panic/crash if the admin room command response -fails (e.g. too large message) -- Add command to dynamically change conduwuit's tracing log level filter on the -fly -- Add admin command to fetch a server's `/.well-known/matrix/support` file -- Add debug admin command to force update user device lists (could potentially -resolve some E2EE flukes) -- Implement **RocksDB online backups**, listing RocksDB backups, and listing -database file counts all via admin commands -- Add various database visibility commands such as being able to query the -getters and iterators used in conduwuit, a very helpful online debugging utility -- Forbid the admin room from being made public or world readable history -- Add `!admin` as a way to call the admin bot -- Extend clear cache admin command to support clearing more caches such as DNS -and TLS name overrides -- Admin debug command to send a federation request/ping to a server's -`/_matrix/federation/v1/version` endpoint and measures the latency it took -- Add admin command to bulk delete media via a codeblock list of MXC URLs. -- Add admin command to delete both the thumbnail and media MXC URLs from an -event ID (e.g. from an abuse report) -- Add admin command to list all the rooms a local user is joined in -- Add admin command to list joined members in a room -- Add admin command to view the room topic of a room -- Add admin command to delete all remote media in the past X minutes as a form -of deleting media that you don't want on your server that a remote user posted -in a room, a `--force` flag to ignore errors, and support for reading `last -modified time` instead of `creation time` for filesystems that don't support -file created metadata -- Add admin command to return a room's full/complete state -- Admin debug command to fetch a PDU from a remote server and inserts it into -our database/timeline as backfill -- Add admin command to delete media via a specific MXC. 
This deletes the MXC -from our database, and the file locally. -- Add admin commands for banning (blocking) room IDs from our local users -joining (admins are always allowed) and evicts all our local users from that -room, in addition to bulk room banning support, and blocks room invites (remote -and local) to the banned room, as a moderation feature -- Add admin commands to output jemalloc memory stats and memory usage -- Add admin command to get rooms a *remote* user shares with us -- Add debug admin commands to get the earliest and latest PDU in a room -- Add debug admin command to echo a message -- Add admin command to insert rooms tags for a user, most useful for inserting -the `m.server_notice` tag on your admin room to make it "persistent" in the -"System Alerts" section of Element -- Add experimental admin debug command for Dendrite's `AdminDownloadState` -(`/admin/downloadState/{serverName}/{roomID}`) admin API endpoint to download -and use a remote server's room state in the room -- Disable URL previews by default in the admin room due to various command -outputs having "URLs" in them that clients may needlessly render/request -- Extend memory usage admin server command to support showing memory allocator -stats such as jemalloc's -- Add admin debug command to see memory allocator's full extended debug -statistics such as jemalloc's - -## Misc - -- Add guest support for accessing TURN servers via `turn_allow_guests` like -Synapse -- Support for creating rooms with custom room IDs like Maunium Synapse -(`room_id` request body field to `/createRoom`) -- Query parameter `?format=event|content` for returning either the room state -event's content (default) for the full room state event on -`/_matrix/client/v3/rooms/{roomId}/state/{eventType}[/{stateKey}]` requests (see -) -- Send a User-Agent on all of our requests -- Send `avatar_url` on invite room membership events/changes -- Support sending [`well_known` response to client login 
-responses](https://spec.matrix.org/v1.10/client-server-api/#post_matrixclientv3login) -if using config option `[well_known.client]` -- Implement `include_state` search criteria support for `/search` requests -(response now can include room states) -- Declare various missing Matrix versions and features at -`/_matrix/client/versions` -- Implement legacy Matrix `/v1/` media endpoints that some clients and servers -may still call -- Config option to change Conduit's behaviour of homeserver key fetching -(`query_trusted_key_servers_first`). This option sets whether conduwuit will -query trusted notary key servers first before the individual homeserver(s), or -vice versa which may help in joining certain rooms. -- Implement unstable MSC2666 support for querying mutual rooms with a user -- Implement unstable MSC3266 room summary API support -- Implement unstable MSC4125 support for specifying servers to join via on -federated invites -- Make conduwuit build and be functional under Nix + macOS -- Log out all sessions after unsetting the emergency password -- Assume well-knowns are broken if they exceed past 12288 characters. 
-- Add support for listening on both HTTP and HTTPS if using direct TLS with -conduwuit for usecases such as Complement -- Add config option for disabling RocksDB Direct IO if needed -- Add various documentation on maintaining conduwuit, using RocksDB online -backups, some troubleshooting, using admin commands, moderation documentation, -etc -- (Developers): Add support for [hot reloadable/"live" modular -development](development/hot_reload.md) -- (Developers): Add support for tokio-console -- (Developers): Add support for tracing flame graphs -- No cryptocurrency donations allowed, conduwuit is fully maintained by -independent queer maintainers, and with a strong priority on inclusitivity and -comfort for protected groups 🏳️‍⚧️ -- [Add a community Code of Conduct for all conduwuit community spaces, primarily -the Matrix space](https://conduwuit.puppyirl.gay/conduwuit_coc.html) diff --git a/docs/introduction.md b/docs/introduction.md index 9db76681..9d3a294a 100644 --- a/docs/introduction.md +++ b/docs/introduction.md @@ -4,10 +4,6 @@ {{#include ../README.md:body}} -#### What's different about your fork than upstream Conduit? - -See the [differences](differences.md) page - #### How can I deploy my own? - [Deployment options](deploying.md) From 1e23c95ec6e059c5d9b2b0083868596f1d38f5aa Mon Sep 17 00:00:00 2001 From: Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> Date: Mon, 10 Mar 2025 21:27:53 +0000 Subject: [PATCH 0742/1248] docs: refactor reverse proxy setup sections (#701) --- docs/deploying/generic.md | 59 ++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 88ba01d5..a07da560 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -145,25 +145,32 @@ sudo chmod 700 /var/lib/conduwuit/ ## Setting up the Reverse Proxy -Refer to the documentation or various guides online of your chosen reverse proxy -software. 
There are many examples of basic Apache/Nginx reverse proxy setups -out there. +We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc transparently with proper defaults. +For other software, please refer to their respective documentation or online guides. -A [Caddy](https://caddyserver.com/) example will be provided as this -is the recommended reverse proxy for new users and is very trivial to use -(handles TLS, reverse proxy headers, etc transparently with proper defaults). +### Caddy -Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization -header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. +After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile` +and enter this (substitute for your server name). -If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent this (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). +```caddyfile +your.server.name, your.server.name:8448 { + # TCP reverse_proxy + reverse_proxy 127.0.0.1:6167 + # UNIX socket + #reverse_proxy unix//run/conduwuit/conduwuit.sock +} +``` -If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: -- `proxy_pass http://127.0.0.1:6167$request_uri;` -- `proxy_pass http://127.0.0.1:6167;` +That's it! Just start and enable the service and you're set. -Nginx users need to increase `client_max_body_size` (default is 1M) to match -`max_request_size` defined in conduwuit.toml. +```bash +sudo systemctl enable --now caddy +``` + +### Other Reverse Proxies + +As we would prefer our users to use Caddy, we will not provide configuration files for other proxys. 
You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs @@ -186,25 +193,19 @@ Examples of delegation: - - -### Caddy +For Apache and Nginx there are many examples available online. -Create `/etc/caddy/conf.d/conduwuit_caddyfile` and enter this (substitute for -your server name). +Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization +header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. -```caddyfile -your.server.name, your.server.name:8448 { - # TCP reverse_proxy - reverse_proxy 127.0.0.1:6167 - # UNIX socket - #reverse_proxy unix//run/conduwuit/conduwuit.sock -} -``` +If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). -That's it! Just start and enable the service and you're set. +If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: +- `proxy_pass http://127.0.0.1:6167$request_uri;` +- `proxy_pass http://127.0.0.1:6167;` -```bash -sudo systemctl enable --now caddy -``` +Nginx users need to increase `client_max_body_size` (default is 1M) to match +`max_request_size` defined in conduwuit.toml. 
## You're done From 1366a3092f5be044fbe39225dd606ef3445899d5 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Mon, 10 Mar 2025 17:28:19 -0400 Subject: [PATCH 0743/1248] Check the `room_types` filter when searching for local public rooms (#698) --- src/api/client/directory.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 88f0e668..7ce32e4c 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -15,7 +15,7 @@ use ruma::{ }, federation, }, - directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, + directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter}, events::{ StateEventType, room::{ @@ -289,6 +289,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( .map(ToOwned::to_owned) .then(|room_id| public_rooms_chunk(services, room_id)) .filter_map(|chunk| async move { + if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { + return None; + } if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { From c4b05e77f3dd66636e26b64f8f4852703816c399 Mon Sep 17 00:00:00 2001 From: Odd Eivind Ebbesen Date: Mon, 10 Mar 2025 22:28:29 +0100 Subject: [PATCH 0744/1248] Fix up wording in the doc comments for admin media deletion (#694) --- src/admin/media/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index d212aab4..405c26d5 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -27,18 +27,18 @@ pub(super) enum MediaCommand { DeleteList, /// - Deletes all remote (and optionally local) media created before or - /// after \[duration] time using filesystem metadata first created at - /// date, or fallback to last 
modified date. This will always ignore - /// errors by default. + /// after [duration] time using filesystem metadata first created at date, + /// or fallback to last modified date. This will always ignore errors by + /// default. DeletePastRemoteMedia { /// - The relative time (e.g. 30s, 5m, 7d) within which to search duration: String, - /// - Only delete media created more recently than \[duration] ago + /// - Only delete media created before [duration] ago #[arg(long, short)] before: bool, - /// - Only delete media created after \[duration] ago + /// - Only delete media created after [duration] ago #[arg(long, short)] after: bool, From 3104586884b0027a1404bfe1986d569ff9e492d4 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 18:05:36 -0400 Subject: [PATCH 0745/1248] bump tracing-subscriber, allowlist cargo-doc lint in admin room Signed-off-by: June Clementine Strawberry --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 10 +++++----- src/admin/media/mod.rs | 1 + 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 65e8eca1..22d93237 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -440,7 +440,7 @@ dependencies = [ "bitflags 2.9.0", "cexpr", "clang-sys", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -2382,7 +2382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -4833,7 +4833,7 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = 
"git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "pin-project-lite", @@ -4844,7 +4844,7 @@ dependencies = [ [[package]] name = "tracing-attributes" version = "0.1.28" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "proc-macro2", "quote", @@ -4854,7 +4854,7 @@ dependencies = [ [[package]] name = "tracing-core" version = "0.1.33" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "once_cell", "valuable", @@ -4874,7 +4874,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "once_cell", @@ -4901,8 +4901,8 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +version = "0.3.19" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "matchers", "nu-ansi-term", diff --git a/Cargo.toml b/Cargo.toml index d611c08e..1528349c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -210,7 +210,7 @@ 
default-features = false version = "0.1.41" default-features = false [workspace.dependencies.tracing-subscriber] -version = "=0.3.18" +version = "0.3.19" default-features = false features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] [workspace.dependencies.tracing-core] @@ -541,16 +541,16 @@ version = "1.0.2" # https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-core] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-log] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" # adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index 405c26d5..641834b2 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -1,3 +1,4 @@ +#![allow(rustdoc::broken_intra_doc_links)] mod commands; use clap::Subcommand; From 7f95eef9abf86298a25fd0bd410835084742eaae Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:01:20 -0400 Subject: [PATCH 0746/1248] bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock 
index 22d93237..c93716f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 1528349c..c09cdaea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +rev = "69133fd53ca063552788c8dfbaf5e01c98dec3e7" features = [ "compat", "rand", From ae818d5b25977a6c4543bca16b78af6f2fa0cca7 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:08:41 -0400 Subject: [PATCH 0747/1248] remove most of cargo test from engage as crane does that but with more caching Signed-off-by: June Clementine Strawberry --- engage.toml | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/engage.toml b/engage.toml index 0a857b5a..210bafd5 100644 --- a/engage.toml +++ b/engage.toml @@ -161,24 +161,6 @@ name = "markdownlint" group = "lints" script = "markdownlint docs *.md || true" # TODO: fix the ton of markdown lints so we can drop `|| true` -[[task]] -name = "cargo/all" -group = "tests" -script = """ -env DIRENV_DEVSHELL=all-features \ - direnv exec . 
\ - cargo test \ - --workspace \ - --locked \ - --profile test \ - --all-targets \ - --no-fail-fast \ - --all-features \ - --color=always \ - -- \ - --color=always -""" - [[task]] name = "cargo/default" group = "tests" @@ -196,24 +178,6 @@ env DIRENV_DEVSHELL=default \ --color=always """ -[[task]] -name = "cargo/no-features" -group = "tests" -script = """ -env DIRENV_DEVSHELL=no-features \ - direnv exec . \ - cargo test \ - --workspace \ - --locked \ - --profile test \ - --all-targets \ - --no-fail-fast \ - --no-default-features \ - --color=always \ - -- \ - --color=always -""" - # Checks if the generated example config differs from the checked in repo's # example config. [[task]] From e920c44cb488d398bc57fe4ce7fdffb3ded5038a Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:15:11 -0400 Subject: [PATCH 0748/1248] ignore humantime dep as tracing console-subscriber uses it (somewhere) Signed-off-by: June Clementine Strawberry --- .cargo/audit.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cargo/audit.toml b/.cargo/audit.toml index bf44fbd6..37148cfb 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -1,5 +1,5 @@ [advisories] -ignore = ["RUSTSEC-2024-0436"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] +ignore = ["RUSTSEC-2024-0436", "RUSTSEC-2025-0014"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] 
informational_warnings = [] # warn for categories of informational advisories severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical") From 0877f294393954bbe49279456f012e1fbb604f78 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 22:21:53 -0400 Subject: [PATCH 0749/1248] respect membership filters on /members Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++------- Cargo.toml | 2 +- src/api/client/membership.rs | 56 ++++++++++++++++++++++++++++++++++-- 3 files changed, 66 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c93716f9..ab155fd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = 
"ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index c09cdaea..2bf30d61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "69133fd53ca063552788c8dfbaf5e01c98dec3e7" +rev = "24d018a0015bb85489ae84564701a49a643bcc57" features = [ "compat", "rand", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 3f77e69e..11395e83 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -25,8 
+25,9 @@ use ruma::{ error::ErrorKind, knock::knock_room, membership::{ - ThirdPartySigned, ban_user, forget_room, get_member_events, invite_user, - join_room_by_id, join_room_by_id_or_alias, + ThirdPartySigned, ban_user, forget_room, + get_member_events::{self, v3::MembershipEventFilter}, + invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members::{self, v3::RoomMember}, joined_rooms, kick_user, leave_room, unban_user, }, @@ -768,6 +769,54 @@ pub(crate) async fn joined_rooms_route( }) } +fn membership_filter( + pdu: PduEvent, + for_membership: Option<&MembershipEventFilter>, + not_membership: Option<&MembershipEventFilter>, +) -> Option { + let membership_state_filter = match for_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(MembershipEventFilter::Leave) => MembershipState::Leave, + | Some(_) | None => MembershipState::Join, + }; + + let not_membership_state_filter = match not_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Join) => MembershipState::Join, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(_) | None => MembershipState::Leave, + }; + + let evt_membership = pdu.get_content::().ok()?.membership; + + if for_membership.is_some() && not_membership.is_some() { + if membership_state_filter != evt_membership + || not_membership_state_filter == evt_membership + { + None + } else { + Some(pdu) + } + } else if for_membership.is_some() && not_membership.is_none() { + if membership_state_filter != evt_membership { + None + } else { + Some(pdu) + } + } else if not_membership.is_some() && for_membership.is_none() { + if not_membership_state_filter == evt_membership { + None + } else { + Some(pdu) + } + } else { + Some(pdu) + } +} + 
/// # `POST /_matrix/client/r0/rooms/{roomId}/members` /// /// Lists all joined users in a room (TODO: at a specific point in time, with a @@ -779,6 +828,8 @@ pub(crate) async fn get_member_events_route( body: Ruma, ) -> Result { let sender_user = body.sender_user(); + let membership = body.membership.as_ref(); + let not_membership = body.not_membership.as_ref(); if !services .rooms @@ -797,6 +848,7 @@ pub(crate) async fn get_member_events_route( .ready_filter_map(Result::ok) .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) .map(at!(1)) + .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) .map(PduEvent::into_member_event) .collect() .await, From 1d1ccec532bf3eaebf499d3ff4c9f7a24369c389 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 23:05:56 -0400 Subject: [PATCH 0750/1248] fix some nightly clippy lints Signed-off-by: June Clementine Strawberry --- Cargo.toml | 3 +++ clippy.toml | 3 ++- src/admin/processor.rs | 8 +++++--- src/api/client/account.rs | 4 ++-- src/api/client/state.rs | 2 +- src/core/utils/string.rs | 1 + 6 files changed, 14 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2bf30d61..fd477850 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -841,6 +841,9 @@ unused_crate_dependencies = "allow" unsafe_code = "allow" variant_size_differences = "allow" +# we check nightly clippy lints +unknown_lints = "allow" + ####################################### # # Clippy lints diff --git a/clippy.toml b/clippy.toml index 42427101..863759aa 100644 --- a/clippy.toml +++ b/clippy.toml @@ -2,9 +2,10 @@ array-size-threshold = 4096 cognitive-complexity-threshold = 94 # TODO reduce me ALARA excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5 future-size-threshold = 7745 # TODO reduce me ALARA -stack-size-threshold = 196608 # reduce me ALARA +stack-size-threshold = 196608 # TODO reduce me ALARA too-many-lines-threshold = 780 # TODO reduce me to <= 100 type-complexity-threshold 
= 250 # reduce me to ~200 +large-error-threshold = 256 # TODO reduce me ALARA disallowed-macros = [ { path = "log::error", reason = "use conduwuit_core::error" }, diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 77a60959..53a15098 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -91,6 +91,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce } } +#[allow(clippy::result_large_err)] fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { let link = "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺"; @@ -100,7 +101,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { Err(reply(content, command.reply_id.as_deref())) } -// Parse and process a message from the admin room +/// Parse and process a message from the admin room async fn process( context: &Command<'_>, command: AdminCommand, @@ -164,7 +165,8 @@ fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { (capture, logs) } -// Parse chat messages from the admin room into an AdminCommand object +/// Parse chat messages from the admin room into an AdminCommand object +#[allow(clippy::result_large_err)] fn parse<'a>( services: &Arc, input: &'a CommandInput, @@ -232,7 +234,7 @@ fn complete_command(mut cmd: clap::Command, line: &str) -> String { ret.join(" ") } -// Parse chat messages from the admin room into an AdminCommand object +/// Parse chat messages from the admin room into an AdminCommand object fn parse_line(command_line: &str) -> Vec { let mut argv = command_line .split_whitespace() diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 2b8209d4..32438098 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -109,7 +109,7 @@ pub(crate) async fn get_register_available_route( if !info.is_user_match(&user_id) { return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } - }; + } if 
services.appservice.is_exclusive_user_id(&user_id).await { return Err!(Request(Exclusive("Username is reserved by an appservice."))); @@ -159,7 +159,7 @@ pub(crate) async fn register_route( | (None, _) => { info!(%is_guest, "Rejecting registration attempt as registration is disabled"); }, - }; + } return Err!(Request(Forbidden("Registration has been disabled."))); } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index db79735f..9563c26d 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -254,7 +254,7 @@ async fn allowed_to_send_state_event( "Room server ACL event is invalid: {e}" )))); }, - }; + } }, | StateEventType::RoomEncryption => // Forbid m.room.encryption if encryption is disabled diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index 9340d009..d8fa3f95 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -60,6 +60,7 @@ pub fn camel_to_snake_string(s: &str) -> String { } #[inline] +#[allow(clippy::unbuffered_bytes)] // these are allocated string utilities, not file I/O utils pub fn camel_to_snake_case(output: &mut O, input: I) -> Result<()> where I: std::io::Read, From 5dea52f0f87dc640274e0f3ecb38b96ac9293f44 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 23:45:53 -0400 Subject: [PATCH 0751/1248] stop doing complement cert gen and just use self-signed cert Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- flake.lock | 6 +++--- nix/pkgs/complement/certificate.crt | 21 +++++++++++++++++++ nix/pkgs/complement/default.nix | 19 +---------------- nix/pkgs/complement/signing_request.csr | 28 ++++++++++++------------- nix/pkgs/complement/v3.ext | 6 ++++++ 6 files changed, 46 insertions(+), 36 deletions(-) create mode 100644 nix/pkgs/complement/certificate.crt diff --git a/bin/complement b/bin/complement index 92539f97..3aa5a6f5 100755 --- a/bin/complement +++ b/bin/complement @@ -68,7 +68,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ 
COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/flake.lock b/flake.lock index 03fc205c..63cc2787 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1741378155, - "narHash": "sha256-rJSfqf3q4oWxcAwENtAowLZeCi8lktwKVH9XQvvZR64=", + "lastModified": 1741757487, + "narHash": "sha256-Fkx/krwI3h6wJ6Mj199KlXUNJNEwl7h1pR4/d2ncmKw=", "owner": "girlbossceo", "repo": "complement", - "rev": "1502a00d8551d0f6e8954a23e43868877c3e57d9", + "rev": "40982a261cfc36650f74967f99fb1a049b13e065", "type": "github" }, "original": { diff --git a/nix/pkgs/complement/certificate.crt b/nix/pkgs/complement/certificate.crt new file mode 100644 index 00000000..5dd4fdea --- /dev/null +++ b/nix/pkgs/complement/certificate.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL +BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz +IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy +NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m +ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt +/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88 +awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp +L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK +K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl +8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV +HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy +ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw 
+DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ +irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+ +HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e +VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3 +y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d +jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4= +-----END CERTIFICATE----- diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index bbd1bd74..9b010e14 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -3,10 +3,8 @@ , buildEnv , coreutils , dockerTools -, gawk , lib , main -, openssl , stdenv , tini , writeShellScriptBin @@ -42,21 +40,6 @@ let start = writeShellScriptBin "start" '' set -euxo pipefail - cp ${./v3.ext} /complement/v3.ext - echo "DNS.1 = $SERVER_NAME" >> /complement/v3.ext - echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \ - >> /complement/v3.ext - ${lib.getExe openssl} x509 \ - -req \ - -extfile /complement/v3.ext \ - -in ${./signing_request.csr} \ - -CA /complement/ca/ca.crt \ - -CAkey /complement/ca/ca.key \ - -CAcreateserial \ - -out /complement/certificate.crt \ - -days 1 \ - -sha256 - ${lib.getExe' coreutils "env"} \ CONDUWUIT_SERVER_NAME="$SERVER_NAME" \ ${lib.getExe main'} @@ -93,7 +76,7 @@ dockerTools.buildImage { Env = [ "CONDUWUIT_TLS__KEY=${./private_key.key}" - "CONDUWUIT_TLS__CERTS=/complement/certificate.crt" + "CONDUWUIT_TLS__CERTS=${./certificate.crt}" "CONDUWUIT_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/nix/pkgs/complement/signing_request.csr b/nix/pkgs/complement/signing_request.csr index 707e73b4..e2aa658e 100644 --- a/nix/pkgs/complement/signing_request.csr +++ b/nix/pkgs/complement/signing_request.csr @@ -1,16 +1,16 @@ -----BEGIN CERTIFICATE REQUEST----- -MIICkTCCAXkCAQAwTDELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRYwFAYDVQQK -DA13b29mZXJzLCBpbmMuMRgwFgYDVQQDDA9jb21wbGVtZW50LW9ubHkwggEiMA0G 
-CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS/odmZivxajebiyT7SMuhXqnMm+hF -+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnTLvGEvNNx0px5M54H -+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a09CphCFswO4PpxUU -ORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5uccebGMmCoO660hROST -BaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUgaQs/2tdT4kBzBH6kZ -OiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO/Ncsro/fAgMBAAGg -ADANBgkqhkiG9w0BAQsFAAOCAQEAjW+aD4E0phtRT5b2RyedY1uiSe7LQECsQnIO -wUSyGGG1GXYlJscyxxyzE9W9+QIALrxZkmc/+e02u+bFb1zQXW/uB/7u7FgXzrj6 -2YSDiWYXiYKvgGWEfCi3lpcTJK9x6WWkR+iREaoKRjcl0ynhhGuR7YwP38TNyu+z -FN6B1Lo398fvJkaTCiiHngWiwztXZ2d0MxkicuwZ1LJhIQA72OTl3QoRb5uiqbze -T9QJfU6W3v8cB8c8PuKMv5gl1QsGNtlfyQB56/X0cMxWl25vWXd2ankLkAGRTDJ8 -9YZHxP1ki4/yh75AknFq02nCOsmxYrAazCYgP2TzIPhQwBurKQ== +MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK +DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH +uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR +xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb +o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B +hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe +vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB +CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79 +ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3 +r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb +XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK +MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76 +U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/ -----END CERTIFICATE REQUEST----- diff --git a/nix/pkgs/complement/v3.ext b/nix/pkgs/complement/v3.ext index 6083d960..0deaa48a 100644 --- a/nix/pkgs/complement/v3.ext +++ b/nix/pkgs/complement/v3.ext @@ -4,3 +4,9 @@ keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment subjectAltName = @alt_names 
[alt_names] +DNS.1 = *.docker.internal +DNS.2 = hs1 +DNS.3 = hs2 +DNS.4 = hs3 +DNS.5 = hs4 +IP.1 = 127.0.0.1 From 258b399de93e74b00695ab42697dc31f5a49aa81 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 13 Mar 2025 10:52:13 -0400 Subject: [PATCH 0752/1248] bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- flake.lock | 6 +++--- nix/pkgs/complement/config.toml | 6 +++--- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab155fd0..c28f4eab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", 
"ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index fd477850..db55b9b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "24d018a0015bb85489ae84564701a49a643bcc57" +rev = "d197318a2507d38ffe6ee524d0d52728ca72538a" features = [ "compat", "rand", diff --git a/flake.lock b/flake.lock index 63cc2787..1f87b9b6 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1741757487, - "narHash": "sha256-Fkx/krwI3h6wJ6Mj199KlXUNJNEwl7h1pR4/d2ncmKw=", + "lastModified": 1741891349, + "narHash": 
"sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=", "owner": "girlbossceo", "repo": "complement", - "rev": "40982a261cfc36650f74967f99fb1a049b13e065", + "rev": "e587b3df569cba411aeac7c20b6366d03c143745", "type": "github" }, "original": { diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 759f8d78..7f4ecef7 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -6,7 +6,7 @@ allow_public_room_directory_over_federation = true allow_public_room_directory_without_auth = true allow_registration = true database_path = "/database" -log = "trace,h2=warn,hyper=warn" +log = "trace,h2=debug,hyper=debug" port = [8008, 8448] trusted_servers = [] only_query_trusted_key_servers = false @@ -19,11 +19,11 @@ url_preview_domain_explicit_denylist = ["*"] media_compat_file_link = false media_startup_check = true prune_missing_media = true -log_colors = false +log_colors = true admin_room_notices = false allow_check_for_updates = false intentionally_unknown_config_option_for_testing = true -rocksdb_log_level = "debug" +rocksdb_log_level = "info" rocksdb_max_log_files = 1 rocksdb_recovery_mode = 0 rocksdb_paranoid_file_checks = true From 6c29792b3d9dfe1e65c5d3545296d431e058e375 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 22:21:42 -0400 Subject: [PATCH 0753/1248] respect include_leave syncv3 filter Signed-off-by: June Clementine Strawberry --- src/api/client/sync/v3.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index fb59837b..70c4c6a7 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -219,6 +219,7 @@ pub(crate) async fn build_sync_events( sender_user, next_batch, full_state, + filter.room.include_leave, &filter, ) .map_ok(move |left_room| (room_id, left_room)) @@ -412,6 +413,7 @@ async fn handle_left_room( sender_user: &UserId, next_batch: u64, full_state: bool, + include_leave: bool, filter: 
&FilterDefinition, ) -> Result> { let left_count = services @@ -540,6 +542,10 @@ async fn handle_left_room( continue; }; + if !include_leave && pdu.sender == sender_user { + continue; + } + left_state_events.push(pdu.to_sync_state_event()); } } From ee3c585555a80c037bdaa861beeecbf6e19a7f04 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 14 Mar 2025 15:57:18 -0400 Subject: [PATCH 0754/1248] skip a few flakey complement tests Signed-off-by: June Clementine Strawberry --- bin/complement | 4 +- .../complement/test_results.jsonl | 82 ++----------------- 2 files changed, 8 insertions(+), 78 deletions(-) diff --git a/bin/complement b/bin/complement index 3aa5a6f5..c437503e 100755 --- a/bin/complement +++ b/bin/complement @@ -18,7 +18,7 @@ RESULTS_FILE="${3:-complement_test_results.jsonl}" COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time -#SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' +SKIPPED_COMPLEMENT_TESTS='TestPartialStateJoin.*|TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.*|TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias|TestUnbanViaInvite.*|TestRoomState/Parallel/GET_/publicRooms_lists.*"|TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then @@ -68,7 +68,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" -skip="$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 5fb850f1..6b5f670e 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -174,10 +174,10 @@ {"Action":"pass","Test":"TestFilter"} {"Action":"fail","Test":"TestFilterMessagesByRelType"} {"Action":"pass","Test":"TestGappedSyncLeaveSection"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/not_membership"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/join"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/leave"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/not_membership"} {"Action":"fail","Test":"TestGetMissingEventsGapFilling"} {"Action":"pass","Test":"TestGetRoomMembers"} {"Action":"fail","Test":"TestGetRoomMembersAtPoint"} @@ -360,72 +360,6 @@ {"Action":"pass","Test":"TestOutboundFederationProfile"} {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} -{"Action":"fail","Test":"TestPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanFastJoinDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanLazyLoadingSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveDeviceListUpdateDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsDuringPartialStateJoin"} 
-{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingGrandparentsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingParentsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithMissingParentsDuringPartialStateJoin"} -{"Action":"skip","Test":"TestPartialStateJoin/CanReceivePresenceDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveReceiptDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveSigningKeyUpdateDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveToDeviceDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveTypingDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanSendEventsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/Can_change_display_name_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_for_user_incorrectly_believed_to_be_in_room"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_failing_to_complete_partial_state_join"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_leaving_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_new_member_leaves_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracked_for_new_members_in_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_pre-existing_members_in_partial_state_room"} 
-{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_join_another_shared_room_before_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_after_partial_state_join_completes"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_before_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_when_pre-existing_members_in_partial_state_room_join_another_shared_room"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerIncrementalSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerInitialSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerLongPollingSyncWokenWhenResyncCompletes"} -{"Action":"fail","Test":"TestPartialStateJoin/GappySyncAfterPartialStateSynced"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_gappy_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_incremental_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_initial_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_ban"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_kick"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/does_not_wait_for_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/is_seen_after_the_resync"} 
-{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_another_user_can_join_without_resync_completing"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_rejoin_succeeds_without_resync_completing"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/works_after_a_second_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/MembersRequestBlocksDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_no_longer_reach_departed_servers_after_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_all_servers_in_partial_state_rooms"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_newly_joined_servers_in_partial_state_rooms"} -{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinContinuesAfterRestart"} -{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinSyncsUsingOtherHomeservers"} -{"Action":"skip","Test":"TestPartialStateJoin/Purge_during_resync"} 
-{"Action":"fail","Test":"TestPartialStateJoin/Rejected_events_remain_rejected_after_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_join_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_knock_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_join_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_knock_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Resync_completes_even_when_events_arrive_before_their_prev_events"} -{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_deleted_during_a_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_queried_during_a_resync"} -{"Action":"skip","Test":"TestPartialStateJoin/Room_stats_are_correctly_updated_once_state_re-sync_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/State_accepted_incorrectly"} -{"Action":"fail","Test":"TestPartialStateJoin/State_rejected_incorrectly"} -{"Action":"fail","Test":"TestPartialStateJoin/User_directory_is_correctly_updated_once_state_re-sync_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/joined_members_blocks_during_partial_state_join"} {"Action":"fail","Test":"TestPollsLocalPushRules"} {"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} {"Action":"pass","Test":"TestPowerLevels"} @@ -561,16 +495,13 @@ {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} -{"Action":"fail","Test":"TestRoomDeleteAlias"} -{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel"} +{"Action":"pass","Test":"TestRoomDeleteAlias"} 
+{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_alias_with_no_ops"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_canonical_alias_with_no_ops"} -{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Deleting_a_non-existent_alias_should_return_a_404"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_in_the_default_room_configuration"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.room.aliases_is_restricted"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_can't_delete_other's_aliases"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other's_aliases"} {"Action":"fail","Test":"TestRoomForget"} {"Action":"fail","Test":"TestRoomForget/Parallel"} {"Action":"pass","Test":"TestRoomForget/Parallel/Can't_forget_room_you're_still_in"} @@ -687,7 +618,6 @@ {"Action":"pass","Test":"TestTyping"} {"Action":"pass","Test":"TestTyping/Typing_can_be_explicitly_stopped"} {"Action":"pass","Test":"TestTyping/Typing_notification_sent_to_local_room_members"} -{"Action":"fail","Test":"TestUnbanViaInvite"} {"Action":"fail","Test":"TestUnknownEndpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} {"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} From 4518f554081532400bfae64b931cd135dbceb755 Mon Sep 17 00:00:00 2001 From: cy Date: Wed, 12 Mar 2025 20:46:14 -0400 Subject: [PATCH 0755/1248] guard against using someone else's access token in UIAA --- src/service/uiaa/mod.rs | 10 ++++++++-- tests/test_results/complement/test_results.jsonl | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 39dd2b41..7803c736 100644 --- 
a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{ - Error, Result, err, error, implement, utils, + Err, Error, Result, err, error, implement, utils, utils::{hash, string::EMPTY}, }; use database::{Deserialized, Json, Map}; @@ -150,12 +150,18 @@ pub async fn try_auth( )); }; - let user_id = UserId::parse_with_server_name( + let user_id_from_username = UserId::parse_with_server_name( username.clone(), self.services.globals.server_name(), ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; + // Check if the access token being used matches the credentials used for UIAA + if user_id.localpart() != user_id_from_username.localpart() { + return Err!(Request(Forbidden("User ID and access token mismatch."))); + } + let user_id = user_id_from_username; + // Check if password is correct if let Ok(hash) = self.services.users.password_hash(&user_id).await { let hash_matches = hash::verify_password(password, &hash).is_ok(); diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 6b5f670e..01d2ca4a 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -127,7 +127,7 @@ {"Action":"fail","Test":"TestDeviceListsUpdateOverFederationOnRoomJoin"} {"Action":"fail","Test":"TestDeviceManagement"} {"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}"} -{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} +{"Action":"pass","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} {"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}"} {"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}_gives_a_404_for_unknown_devices"} {"Action":"pass","Test":"TestDeviceManagement/GET_/devices"} From 658c19d55eb5fdf30f27e189c414208e2eae6e24 
Mon Sep 17 00:00:00 2001 From: cy Date: Fri, 14 Mar 2025 23:01:28 -0400 Subject: [PATCH 0756/1248] check if we already have a more preferable key backup before adding --- src/api/client/backup.rs | 81 ++++++++++++++++--- .../complement/test_results.jsonl | 8 +- 2 files changed, 76 insertions(+), 13 deletions(-) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 714e3f86..63c47e01 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,3 +1,5 @@ +use std::cmp::Ordering; + use axum::extract::State; use conduwuit::{Err, err}; use ruma::{ @@ -232,16 +234,77 @@ pub(crate) async fn add_backup_keys_for_session_route( ))); } - services + // Check if we already have a better key + let mut ok_to_replace = true; + if let Some(old_key) = &services .key_backups - .add_key( - body.sender_user(), - &body.version, - &body.room_id, - &body.session_id, - &body.session_data, - ) - .await?; + .get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id) + .await + .ok() + { + let old_is_verified = old_key + .get_field::("is_verified")? + .unwrap_or_default(); + + let new_is_verified = body + .session_data + .get_field::("is_verified")? + .ok_or_else(|| err!(Request(BadJson("`is_verified` field should exist"))))?; + + // Prefer key that `is_verified` + if old_is_verified != new_is_verified { + if old_is_verified { + ok_to_replace = false; + } + } else { + // If both have same `is_verified`, prefer the one with lower + // `first_message_index` + let old_first_message_index = old_key + .get_field::("first_message_index")? + .unwrap_or(UInt::MAX); + + let new_first_message_index = body + .session_data + .get_field::("first_message_index")? 
+ .ok_or_else(|| { + err!(Request(BadJson("`first_message_index` field should exist"))) + })?; + + ok_to_replace = match new_first_message_index.cmp(&old_first_message_index) { + | Ordering::Less => true, + | Ordering::Greater => false, + | Ordering::Equal => { + // If both have same `first_message_index`, prefer the one with lower + // `forwarded_count` + let old_forwarded_count = old_key + .get_field::("forwarded_count")? + .unwrap_or(UInt::MAX); + + let new_forwarded_count = body + .session_data + .get_field::("forwarded_count")? + .ok_or_else(|| { + err!(Request(BadJson("`forwarded_count` field should exist"))) + })?; + + new_forwarded_count < old_forwarded_count + }, + }; + }; + } + + if ok_to_replace { + services + .key_backups + .add_key( + body.sender_user(), + &body.version, + &body.room_id, + &body.session_id, + &body.session_data, + ) + .await?; + } Ok(add_backup_keys_for_session::v3::Response { count: services diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 01d2ca4a..97170a5c 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -134,10 +134,10 @@ {"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_gives_a_404_for_unknown_devices"} {"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_updates_device_fields"} {"Action":"pass","Test":"TestDisplayNameUpdate"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} 
+{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} {"Action":"pass","Test":"TestEvent"} {"Action":"pass","Test":"TestEvent/Parallel"} {"Action":"pass","Test":"TestEvent/Parallel/Large_Event"} From 7bf92c8a3710eeff229bd86bc81a89daa94b66d5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 17 Mar 2025 22:50:29 -0400 Subject: [PATCH 0757/1248] replace unnecessary check when updating device keys Signed-off-by: June Clementine Strawberry --- src/api/client/backup.rs | 2 +- src/api/client/keys.rs | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 63c47e01..83955fea 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -290,7 +290,7 @@ pub(crate) async fn add_backup_keys_for_session_route( new_forwarded_count < old_forwarded_count }, }; - }; + } } if ok_to_replace { diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 9cd50e85..f50d7afa 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -80,14 +80,26 @@ pub(crate) async fn upload_keys_route( ))); } - // TODO: merge this and the existing event? 
- // This check is needed to assure that signatures are kept - if services + if let Ok(existing_keys) = services .users .get_device_keys(sender_user, sender_device) .await - .is_err() { + if existing_keys.json().get() == device_keys.json().get() { + debug!( + ?sender_user, + ?sender_device, + ?device_keys, + "Ignoring user uploaded keys as they are an exact copy already in the \ + database" + ); + } else { + services + .users + .add_device_keys(sender_user, sender_device, device_keys) + .await; + } + } else { services .users .add_device_keys(sender_user, sender_device, device_keys) From 33c5afe050491988ee8224af25b9b06e892f4b50 Mon Sep 17 00:00:00 2001 From: cy Date: Wed, 19 Mar 2025 20:55:14 -0400 Subject: [PATCH 0758/1248] delete pushers created with different access token on password change --- src/api/client/account.rs | 23 ++++++++++++- src/api/client/push.rs | 2 +- src/database/maps.rs | 4 +++ src/service/pusher/mod.rs | 34 +++++++++++++------ .../complement/test_results.jsonl | 4 +-- 5 files changed, 53 insertions(+), 14 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 32438098..5dd622d7 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -4,7 +4,8 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, - utils::ReadyExt, warn, + utils::{ReadyExt, stream::BroadbandExt}, + warn, }; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; @@ -627,6 +628,26 @@ pub(crate) async fn change_password_route( .ready_filter(|id| *id != sender_device) .for_each(|id| services.users.remove_device(sender_user, id)) .await; + + // Remove all pushers except the ones associated with this session + services + .pusher + .get_pushkeys(sender_user) + .map(ToOwned::to_owned) + .broad_filter_map(|pushkey| async move { + services + .pusher + .get_pusher_device(&pushkey) + .await + .ok() + 
.filter(|pusher_device| pusher_device != sender_device) + .is_some() + .then_some(pushkey) + }) + .for_each(|pushkey| async move { + services.pusher.delete_pusher(sender_user, &pushkey).await; + }) + .await; } info!("User {sender_user} changed their password."); diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 384b9dbc..cc1d3be2 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -503,7 +503,7 @@ pub(crate) async fn set_pushers_route( services .pusher - .set_pusher(sender_user, &body.action) + .set_pusher(sender_user, body.sender_device(), &body.action) .await?; Ok(set_pusher::v3::Response::new()) diff --git a/src/database/maps.rs b/src/database/maps.rs index 138bb038..1da9acc0 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -219,6 +219,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "senderkey_pusher", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "pushkey_deviceid", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "server_signingkeys", ..descriptor::RANDOM diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 2b269b3d..27490fb8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -10,7 +10,7 @@ use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ - RoomId, UInt, UserId, + DeviceId, OwnedDeviceId, RoomId, UInt, UserId, api::{ IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, client::push::{Pusher, PusherKind, set_pusher}, @@ -48,6 +48,7 @@ struct Services { struct Data { senderkey_pusher: Arc, + pushkey_deviceid: Arc, } impl crate::Service for Service { @@ -55,6 +56,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { db: Data { senderkey_pusher: args.db["senderkey_pusher"].clone(), + pushkey_deviceid: args.db["pushkey_deviceid"].clone(), }, services: Services { globals: args.depend::("globals"), @@ -75,6 +77,7 @@ impl Service { pub async fn set_pusher( &self, 
sender: &UserId, + sender_device: &DeviceId, pusher: &set_pusher::v3::PusherAction, ) -> Result { match pusher { @@ -123,24 +126,35 @@ impl Service { } } - let key = (sender, data.pusher.ids.pushkey.as_str()); + let pushkey = data.pusher.ids.pushkey.as_str(); + let key = (sender, pushkey); self.db.senderkey_pusher.put(key, Json(pusher)); + self.db.pushkey_deviceid.insert(pushkey, sender_device); }, | set_pusher::v3::PusherAction::Delete(ids) => { - let key = (sender, ids.pushkey.as_str()); - self.db.senderkey_pusher.del(key); - - self.services - .sending - .cleanup_events(None, Some(sender), Some(ids.pushkey.as_str())) - .await - .ok(); + self.delete_pusher(sender, ids.pushkey.as_str()).await; }, } Ok(()) } + pub async fn delete_pusher(&self, sender: &UserId, pushkey: &str) { + let key = (sender, pushkey); + self.db.senderkey_pusher.del(key); + self.db.pushkey_deviceid.remove(pushkey); + + self.services + .sending + .cleanup_events(None, Some(sender), Some(pushkey)) + .await + .ok(); + } + + pub async fn get_pusher_device(&self, pushkey: &str) -> Result { + self.db.pushkey_deviceid.get(pushkey).await.deserialized() + } + pub async fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result { let senderkey = (sender, pushkey); self.db diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 97170a5c..ac2733f8 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -69,8 +69,8 @@ {"Action":"pass","Test":"TestChangePassword/After_changing_password,_can_log_in_with_new_password"} {"Action":"pass","Test":"TestChangePassword/After_changing_password,_different_sessions_can_optionally_be_kept"} {"Action":"pass","Test":"TestChangePassword/After_changing_password,_existing_session_still_works"} -{"Action":"fail","Test":"TestChangePasswordPushers"} 
-{"Action":"fail","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} +{"Action":"pass","Test":"TestChangePasswordPushers"} +{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} {"Action":"fail","Test":"TestClientSpacesSummary"} {"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} From 07ec9d6d852a8ebb623c96b580af36e0d0d11697 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 26 Mar 2025 01:32:45 +0000 Subject: [PATCH 0759/1248] re-sort pushkey_deviceid (33c5afe050) Signed-off-by: Jason Volk --- src/database/maps.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/database/maps.rs b/src/database/maps.rs index 1da9acc0..311c629f 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -121,14 +121,18 @@ pub(super) static MAPS: &[Descriptor] = &[ index_size: 512, ..descriptor::SEQUENTIAL }, - Descriptor { - name: "presenceid_presence", - ..descriptor::SEQUENTIAL_SMALL - }, Descriptor { name: "publicroomids", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "pushkey_deviceid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "presenceid_presence", + ..descriptor::SEQUENTIAL_SMALL + }, Descriptor { name: "readreceiptid_readreceipt", ..descriptor::RANDOM @@ -219,10 +223,6 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "senderkey_pusher", ..descriptor::RANDOM_SMALL }, - Descriptor { - name: "pushkey_deviceid", - ..descriptor::RANDOM_SMALL - }, Descriptor { name: "server_signingkeys", ..descriptor::RANDOM From aa4d2e236330693c61d5cb116b4c438b15431aec Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 9 Mar 2025 03:14:00 +0000 Subject: [PATCH 0760/1248] fix unused import without feature jemalloc_conf fix span passed by value 
Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 3 +-- src/router/request.rs | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 6870c1c0..51caf3a3 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -8,7 +8,6 @@ use std::{ }; use arrayvec::ArrayVec; -use const_str::concat_bytes; use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; @@ -20,7 +19,7 @@ use crate::{ #[cfg(feature = "jemalloc_conf")] #[unsafe(no_mangle)] -pub static malloc_conf: &[u8] = concat_bytes!( +pub static malloc_conf: &[u8] = const_str::concat_bytes!( "lg_extent_max_active_fit:4", ",oversize_threshold:16777216", ",tcache_max:2097152", diff --git a/src/router/request.rs b/src/router/request.rs index 00769b3f..dba90324 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -37,7 +37,7 @@ pub(crate) async fn handle( let parent = Span::current(); let task = services.server.runtime().spawn(async move { tokio::select! { - response = execute(&services_, req, next, parent) => response, + response = execute(&services_, req, next, &parent) => response, response = services_.server.until_shutdown() .then(|()| { let timeout = services_.server.config.client_shutdown_timeout; @@ -79,7 +79,7 @@ async fn execute( services: &Arc, req: http::Request, next: axum::middleware::Next, - parent: Span, + parent: &Span, ) -> Response { #[cfg(debug_assertions)] conduwuit::defer! 
{{ From 7294368015025ae4d7677c28837d3ac0a79539e6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 12 Mar 2025 23:10:38 +0000 Subject: [PATCH 0761/1248] parallelize IO for PublicRoomsChunk vector Signed-off-by: Jason Volk --- src/api/client/directory.rs | 118 +++++++++++++++++++----------------- 1 file changed, 64 insertions(+), 54 deletions(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 7ce32e4c..80b314b9 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,7 +1,17 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, Error, Result, info, warn}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{ + Err, Error, Result, info, + utils::{ + TryFutureExtExt, + stream::{ReadyExt, WidebandExt}, + }, + warn, +}; +use futures::{ + FutureExt, StreamExt, TryFutureExt, + future::{join, join4, join5}, +}; use ruma::{ OwnedRoomId, RoomId, ServerName, UInt, UserId, api::{ @@ -287,8 +297,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( .directory .public_rooms() .map(ToOwned::to_owned) - .then(|room_id| public_rooms_chunk(services, room_id)) - .filter_map(|chunk| async move { + .wide_then(|room_id| public_rooms_chunk(services, room_id)) + .ready_filter_map(|chunk| { if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { return None; } @@ -394,60 +404,60 @@ async fn user_can_publish_room( } async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk { + let name = services.rooms.state_accessor.get_name(&room_id).ok(); + + let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok(); + + let canonical_alias = services + .rooms + .state_accessor + .get_canonical_alias(&room_id) + .ok(); + + let avatar_url = services.rooms.state_accessor.get_avatar(&room_id); + + let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok(); + + let world_readable = 
services.rooms.state_accessor.is_world_readable(&room_id); + + let join_rule = services + .rooms + .state_accessor + .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") + .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { + | JoinRule::Public => PublicRoomJoinRule::Public, + | JoinRule::Knock => "knock".into(), + | JoinRule::KnockRestricted(_) => "knock_restricted".into(), + | _ => "invite".into(), + }); + + let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id); + + let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id); + + let ( + (avatar_url, canonical_alias, guest_can_join, join_rule, name), + (num_joined_members, room_type, topic, world_readable), + ) = join( + join5(avatar_url, canonical_alias, guest_can_join, join_rule, name), + join4(num_joined_members, room_type, topic, world_readable), + ) + .boxed() + .await; + PublicRoomsChunk { - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .await - .ok(), - name: services.rooms.state_accessor.get_name(&room_id).await.ok(), - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id) - .await + avatar_url: avatar_url.into_option().unwrap_or_default().url, + canonical_alias, + guest_can_join, + join_rule: join_rule.unwrap_or_default(), + name, + num_joined_members: num_joined_members .unwrap_or(0) .try_into() .expect("joined count overflows ruma UInt"), - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - .ok(), - world_readable: services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id) - .await - .into_option() - .unwrap_or_default() - .url, - join_rule: services - .rooms - .state_accessor - .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") - .map_ok(|c: 
RoomJoinRulesEventContent| match c.join_rule { - | JoinRule::Public => PublicRoomJoinRule::Public, - | JoinRule::Knock => "knock".into(), - | JoinRule::KnockRestricted(_) => "knock_restricted".into(), - | _ => "invite".into(), - }) - .await - .unwrap_or_default(), - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id) - .await - .ok(), room_id, + room_type, + topic, + world_readable, } } From a57336ec1388ab26a692cf26768474bc3069df75 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 14 Mar 2025 06:54:08 +0000 Subject: [PATCH 0762/1248] assume canonical order in db serialization test Signed-off-by: Jason Volk --- src/database/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 140bc56d..1446a1fc 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -152,8 +152,8 @@ fn ser_json_macro() { let content = serde_json::to_value(content).expect("failed to serialize content"); let sender: &UserId = "@foo:example.com".try_into().unwrap(); let serialized = serialize_to_vec(Json(json!({ - "sender": sender, "content": content, + "sender": sender, }))) .expect("failed to serialize value"); From 17003ba773228055de107f9d8baf1b2848d86c1f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 01:23:54 +0000 Subject: [PATCH 0763/1248] add FIFO compaction for persistent-cache descriptor; comments/cleanup Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 14 ++++++++--- src/database/engine/descriptor.rs | 39 ++++++++++++++++++++++++------- src/database/engine/open.rs | 6 ++--- src/database/maps.rs | 4 ++-- 4 files changed, 46 insertions(+), 17 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 5ddb9473..7ceec722 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -1,8 +1,8 @@ use conduwuit::{Config, Result, err, utils::math::Expected}; use rocksdb::{ BlockBasedIndexType, 
BlockBasedOptions, BlockBasedPinningTier, Cache, - DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, - UniversalCompactOptions, UniversalCompactionStopStyle, + DBCompressionType as CompressionType, DataBlockIndexType, FifoCompactOptions, + LruCacheOptions, Options, UniversalCompactOptions, UniversalCompactionStopStyle, }; use super::descriptor::{CacheDisp, Descriptor}; @@ -16,7 +16,7 @@ pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; pub(crate) fn cf_options(ctx: &Context, opts: Options, desc: &Descriptor) -> Result { let cache = get_cache(ctx, desc); let config = &ctx.server.config; - descriptor_cf_options(opts, desc.clone(), config, cache.as_ref()) + descriptor_cf_options(opts, *desc, config, cache.as_ref()) } fn descriptor_cf_options( @@ -46,6 +46,7 @@ fn descriptor_cf_options( opts.set_compaction_style(desc.compaction); opts.set_compaction_pri(desc.compaction_pri); opts.set_universal_compaction_options(&uc_options(&desc)); + opts.set_fifo_compaction_options(&fifo_options(&desc)); let compression_shape: Vec<_> = desc .compression_shape @@ -142,6 +143,13 @@ fn set_compression(desc: &mut Descriptor, config: &Config) { } } +fn fifo_options(desc: &Descriptor) -> FifoCompactOptions { + let mut opts = FifoCompactOptions::default(); + opts.set_max_table_files_size(desc.limit_size); + + opts +} + fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { let mut opts = UniversalCompactOptions::default(); opts.set_stop_style(UniversalCompactionStopStyle::Total); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 816555d2..2274da9c 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -6,14 +6,8 @@ use rocksdb::{ use super::cf_opts::SENTINEL_COMPRESSION_LEVEL; +/// Column Descriptor #[derive(Debug, Clone, Copy)] -pub(crate) enum CacheDisp { - Unique, - Shared, - SharedWith(&'static str), -} - -#[derive(Debug, Clone)] pub(crate) struct Descriptor { 
pub(crate) name: &'static str, pub(crate) dropped: bool, @@ -30,6 +24,7 @@ pub(crate) struct Descriptor { pub(crate) file_shape: i32, pub(crate) level0_width: i32, pub(crate) merge_width: (i32, i32), + pub(crate) limit_size: u64, pub(crate) ttl: u64, pub(crate) compaction: CompactionStyle, pub(crate) compaction_pri: CompactionPri, @@ -46,7 +41,16 @@ pub(crate) struct Descriptor { pub(crate) auto_readahead_max: usize, } -pub(crate) static BASE: Descriptor = Descriptor { +/// Cache Disposition +#[derive(Debug, Clone, Copy)] +pub(crate) enum CacheDisp { + Unique, + Shared, + SharedWith(&'static str), +} + +/// Base descriptor supplying common defaults to all derived descriptors. +static BASE: Descriptor = Descriptor { name: EMPTY, dropped: false, cache_disp: CacheDisp::Shared, @@ -62,6 +66,7 @@ pub(crate) static BASE: Descriptor = Descriptor { file_shape: 2, level0_width: 2, merge_width: (2, 16), + limit_size: 0, ttl: 60 * 60 * 24 * 21, compaction: CompactionStyle::Level, compaction_pri: CompactionPri::MinOverlappingRatio, @@ -78,6 +83,10 @@ pub(crate) static BASE: Descriptor = Descriptor { auto_readahead_max: 1024 * 1024 * 2, }; +/// Tombstone descriptor for columns which have been or will be deleted. +pub(crate) static DROPPED: Descriptor = Descriptor { dropped: true, ..BASE }; + +/// Descriptor for large datasets with random updates across the keyspace. pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, write_size: 1024 * 1024 * 32, @@ -88,6 +97,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { ..BASE }; +/// Descriptor for large datasets with updates to the end of the keyspace. pub(crate) static SEQUENTIAL: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestLargestSeqFirst, write_size: 1024 * 1024 * 64, @@ -101,6 +111,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { ..BASE }; +/// Descriptor for small datasets with random updates across the keyspace. 
pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, write_size: 1024 * 1024 * 16, @@ -117,6 +128,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { ..RANDOM }; +/// Descriptor for small datasets with updates to the end of the keyspace. pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, write_size: 1024 * 1024 * 16, @@ -132,3 +144,14 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compressed_index: false, ..SEQUENTIAL }; + +/// Descriptor for small persistent caches with random updates. Oldest entries +/// are deleted after limit_size reached. +pub(crate) static RANDOM_SMALL_CACHE: Descriptor = Descriptor { + compaction: CompactionStyle::Fifo, + cache_disp: CacheDisp::Unique, + limit_size: 1024 * 1024 * 64, + ttl: 60 * 60 * 24 * 14, + file_shape: 2, + ..RANDOM_SMALL +}; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 24010c3a..84e59a6a 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -101,13 +101,11 @@ fn configure_cfds( debug!("Creating new column {name:?} not previously found in existing database."); }); - let missing_descriptors = missing - .clone() - .map(|_| Descriptor { dropped: true, ..descriptor::BASE }); + let missing_descriptors = missing.clone().map(|_| descriptor::DROPPED); let cfopts: Vec<_> = desc .iter() - .cloned() + .copied() .chain(missing_descriptors) .map(|ref desc| cf_options(ctx, db_opts.clone(), desc)) .collect::>()?; diff --git a/src/database/maps.rs b/src/database/maps.rs index 311c629f..19f9ced4 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -233,7 +233,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "servername_destination", - ..descriptor::RANDOM_SMALL + ..descriptor::RANDOM_SMALL_CACHE }, Descriptor { name: "servername_educount", @@ -241,7 +241,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: 
"servername_override", - ..descriptor::RANDOM_SMALL + ..descriptor::RANDOM_SMALL_CACHE }, Descriptor { name: "servernameevent_data", From d8ea8b378cf2ee9ff7644fdb6c5a33d05923a51d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 02:35:10 +0000 Subject: [PATCH 0764/1248] add Map::clear() to db interface Signed-off-by: Jason Volk --- src/database/map.rs | 1 + src/database/map/clear.rs | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 src/database/map/clear.rs diff --git a/src/database/map.rs b/src/database/map.rs index c5a908ba..ed38e1fc 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,3 +1,4 @@ +mod clear; pub mod compact; mod contains; mod count; diff --git a/src/database/map/clear.rs b/src/database/map/clear.rs new file mode 100644 index 00000000..321ec79c --- /dev/null +++ b/src/database/map/clear.rs @@ -0,0 +1,30 @@ +use std::sync::Arc; + +use conduwuit::{ + Result, implement, + utils::stream::{ReadyExt, TryIgnore}, +}; +use futures::{Stream, TryStreamExt}; + +use crate::keyval::Key; + +/// Delete all data stored in this map. !!! USE WITH CAUTION !!! +/// +/// See for_clear() with additional details. +#[implement(super::Map)] +#[tracing::instrument(level = "trace")] +pub async fn clear(self: &Arc) { + self.for_clear().ignore_err().ready_for_each(|_| ()).await; +} + +/// Delete all data stored in this map. !!! USE WITH CAUTION !!! +/// +/// Provides stream of keys undergoing deletion along with any errors. +/// +/// Note this operation applies to a snapshot of the data when invoked. +/// Additional data written during or after this call may be missed. 
+#[implement(super::Map)] +#[tracing::instrument(level = "trace")] +pub fn for_clear(self: &Arc) -> impl Stream>> + Send { + self.raw_keys().inspect_ok(|key| self.remove(key)) +} From 9ce95a703038e8603da62f15516f205ca70ad962 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 04:07:53 +0000 Subject: [PATCH 0765/1248] make service memory_usage()/clear_cache() async trait Signed-off-by: Jason Volk --- src/service/globals/mod.rs | 6 ++- src/service/rooms/event_handler/mod.rs | 4 +- src/service/rooms/spaces/mod.rs | 14 +++++- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/state_accessor/mod.rs | 6 ++- src/service/rooms/state_compressor/mod.rs | 6 ++- src/service/rooms/timeline/mod.rs | 4 +- src/service/service.rs | 4 +- src/service/services.rs | 57 ++++++++++------------- 9 files changed, 61 insertions(+), 44 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 74f83228..1dd7db8e 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,6 +7,7 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use conduwuit::{Result, Server, error, utils::bytes::pretty}; use data::Data; use regex::RegexSet; @@ -27,6 +28,7 @@ pub struct Service { type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let db = Data::new(&args); @@ -73,7 +75,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (ber_count, ber_bytes) = self.bad_event_ratelimiter.read()?.iter().fold( (0_usize, 0_usize), |(mut count, mut bytes), (event_id, _)| { @@ -89,7 +91,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { + async fn clear_cache(&self) { self.bad_event_ratelimiter .write() .expect("locked for writing") diff --git 
a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e9e79ce4..4944f3ec 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -17,6 +17,7 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use conduwuit::{ Err, PduEvent, Result, RoomVersion, Server, utils::{MutexMap, TryFutureExtExt}, @@ -54,6 +55,7 @@ struct Services { type RoomMutexMap = MutexMap; type HandleTimeMap = HashMap; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -79,7 +81,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex_federation = self.mutex_federation.len(); writeln!(out, "federation_mutex: {mutex_federation}")?; diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1da38234..55897f9c 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -2,8 +2,9 @@ mod pagination_token; #[cfg(test)] mod tests; -use std::sync::Arc; +use std::{fmt::Write, sync::Arc}; +use async_trait::async_trait; use conduwuit::{ Err, Error, Result, implement, utils::{ @@ -70,6 +71,7 @@ pub enum Identifier<'a> { type Cache = LruCache>; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -90,6 +92,16 @@ impl crate::Service for Service { })) } + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { + let roomid_spacehierarchy_cache = self.roomid_spacehierarchy_cache.lock().await.len(); + + writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; + + Ok(()) + } + + async fn clear_cache(&self) { self.roomid_spacehierarchy_cache.lock().await.clear(); } + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git 
a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 8683a3be..56955497 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; +use async_trait::async_trait; use conduwuit::{ PduEvent, Result, err, result::FlatOk, @@ -56,6 +57,7 @@ struct Data { type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -79,7 +81,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex = self.mutex.len(); writeln!(out, "state_mutex: {mutex}")?; diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 7004e35a..652fdbd7 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -8,6 +8,7 @@ use std::{ sync::{Arc, Mutex as StdMutex, Mutex}, }; +use async_trait::async_trait; use conduwuit::{ Result, err, utils, utils::math::{Expected, usize_from_f64}, @@ -57,6 +58,7 @@ struct Data { shorteventid_shortstatehash: Arc, } +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -86,7 +88,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { use utils::bytes::pretty; let (svc_count, svc_bytes) = self.server_visibility_cache.lock()?.iter().fold( @@ -119,7 +121,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { + async fn clear_cache(&self) { self.server_visibility_cache.lock().expect("locked").clear(); self.user_visibility_cache.lock().expect("locked").clear(); } diff --git 
a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 305d3187..56a91d0e 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -5,6 +5,7 @@ use std::{ sync::{Arc, Mutex}, }; +use async_trait::async_trait; use conduwuit::{ Result, arrayvec::ArrayVec, @@ -65,6 +66,7 @@ type ParentStatesVec = Vec; pub type CompressedState = BTreeSet; pub type CompressedStateEvent = [u8; 2 * size_of::()]; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -82,7 +84,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (cache_len, ents) = { let cache = self.stateinfo_cache.lock().expect("locked"); let ents = cache.iter().map(at!(1)).flat_map(|vec| vec.iter()).fold( @@ -108,7 +110,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } + async fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 826a1dae..dc359d22 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -9,6 +9,7 @@ use std::{ sync::Arc, }; +use async_trait::async_trait; use conduwuit::{ Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, @@ -109,6 +110,7 @@ struct Services { type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -142,7 +144,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, 
out: &mut dyn Write) -> Result<()> { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex_insert = self.mutex_insert.len(); writeln!(out, "insert_mutex: {mutex_insert}")?; diff --git a/src/service/service.rs b/src/service/service.rs index 2907a562..574efd8f 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -31,10 +31,10 @@ pub(crate) trait Service: Any + Send + Sync { fn interrupt(&self) {} /// Clear any caches or similar runtime state. - fn clear_cache(&self) {} + async fn clear_cache(&self) {} /// Memory usage report in a markdown string. - fn memory_usage(&self, _out: &mut dyn Write) -> Result<()> { Ok(()) } + async fn memory_usage(&self, _out: &mut (dyn Write + Send)) -> Result { Ok(()) } /// Return the name of the service. /// i.e. `crate::service::make_name(std::module_path!())` diff --git a/src/service/services.rs b/src/service/services.rs index 269a1f87..dc390054 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -1,12 +1,12 @@ use std::{ any::Any, collections::BTreeMap, - fmt::Write, sync::{Arc, RwLock}, }; -use conduwuit::{Result, Server, debug, debug_info, info, trace}; +use conduwuit::{Result, Server, debug, debug_info, info, trace, utils::stream::IterStream}; use database::Database; +use futures::{Stream, StreamExt, TryStreamExt}; use tokio::sync::Mutex; use crate::{ @@ -171,40 +171,21 @@ impl Services { } pub async fn clear_cache(&self) { - for (service, ..) in self.service.read().expect("locked for reading").values() { - if let Some(service) = service.upgrade() { - service.clear_cache(); - } - } - - //TODO - self.rooms - .spaces - .roomid_spacehierarchy_cache - .lock() - .await - .clear(); + self.services() + .for_each(|service| async move { + service.clear_cache().await; + }) + .await; } pub async fn memory_usage(&self) -> Result { - let mut out = String::new(); - for (service, ..) 
in self.service.read().expect("locked for reading").values() { - if let Some(service) = service.upgrade() { - service.memory_usage(&mut out)?; - } - } - - //TODO - let roomid_spacehierarchy_cache = self - .rooms - .spaces - .roomid_spacehierarchy_cache - .lock() + self.services() + .map(Ok) + .try_fold(String::new(), |mut out, service| async move { + service.memory_usage(&mut out).await?; + Ok(out) + }) .await - .len(); - writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; - - Ok(out) } fn interrupt(&self) { @@ -217,6 +198,18 @@ impl Services { } } + /// Iterate from snapshot of the services map + fn services(&self) -> impl Stream> + Send { + self.service + .read() + .expect("locked for reading") + .values() + .filter_map(|val| val.0.upgrade()) + .collect::>() + .into_iter() + .stream() + } + #[inline] pub fn try_get(&self, name: &str) -> Result> where From 8010505853c1c0a78254b0fd31e83d90baff7af3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 04:08:57 +0000 Subject: [PATCH 0766/1248] implement clear_cache() for resolver service Signed-off-by: Jason Volk --- src/service/resolver/cache.rs | 17 ++++++++++++++++- src/service/resolver/dns.rs | 4 ++++ src/service/resolver/mod.rs | 7 +++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 6b05c00c..cfea7187 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -7,7 +7,7 @@ use conduwuit::{ utils::{math::Expected, rand, stream::TryIgnore}, }; use database::{Cbor, Deserialized, Map}; -use futures::{Stream, StreamExt}; +use futures::{Stream, StreamExt, future::join}; use ruma::ServerName; use serde::{Deserialize, Serialize}; @@ -45,6 +45,21 @@ impl Cache { } } +#[implement(Cache)] +pub async fn clear(&self) { join(self.clear_destinations(), self.clear_overrides()).await; } + +#[implement(Cache)] +pub async fn clear_destinations(&self) { self.destinations.clear().await; } + 
+#[implement(Cache)] +pub async fn clear_overrides(&self) { self.overrides.clear().await; } + +#[implement(Cache)] +pub fn del_destination(&self, name: &ServerName) { self.destinations.remove(name); } + +#[implement(Cache)] +pub fn del_override(&self, name: &ServerName) { self.overrides.remove(name); } + #[implement(Cache)] pub fn set_destination(&self, name: &ServerName, dest: &CachedDest) { self.destinations.raw_put(name, Cbor(dest)); diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index 98ad7e60..e4245a5b 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -78,6 +78,10 @@ impl Resolver { server: server.clone(), })) } + + /// Clear the in-memory hickory-dns caches + #[inline] + pub fn clear_cache(&self) { self.resolver.clear_cache(); } } impl Resolve for Resolver { diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 2ec9c0ef..246d6bc1 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,6 +6,7 @@ mod tests; use std::sync::Arc; +use async_trait::async_trait; use conduwuit::{Result, Server, arrayvec::ArrayString, utils::MutexMap}; use self::{cache::Cache, dns::Resolver}; @@ -26,6 +27,7 @@ struct Services { type Resolving = MutexMap; type NameBuf = ArrayString<256>; +#[async_trait] impl crate::Service for Service { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] fn build(args: crate::Args<'_>) -> Result> { @@ -41,5 +43,10 @@ impl crate::Service for Service { })) } + async fn clear_cache(&self) { + self.resolver.clear_cache(); + self.cache.clear().await; + } + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } From 23e3f6526fd0318525a4cd1fe065dcf7f1d56935 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 19 Mar 2025 03:49:12 +0000 Subject: [PATCH 0767/1248] split well_known resolver into unit Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 55 +++--------------------------- 
src/service/resolver/mod.rs | 2 ++ src/service/resolver/tests.rs | 2 -- src/service/resolver/well_known.rs | 49 ++++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 53 deletions(-) create mode 100644 src/service/resolver/well_known.rs diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index b037cf77..1ad76f66 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -3,7 +3,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use conduwuit::{Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace}; +use conduwuit::{Err, Result, debug, debug_info, err, error, trace}; use futures::{FutureExt, TryFutureExt}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; @@ -72,6 +72,9 @@ impl super::Service { if let Some(pos) = dest.as_str().find(':') { self.actual_dest_2(dest, cache, pos).await? } else { + self.conditional_query_and_cache(dest.as_str(), 8448, true) + .await?; + self.services.server.check_running()?; match self.request_well_known(dest.as_str()).await? 
{ | Some(delegated) => self.actual_dest_3(&mut host, cache, delegated).await?, @@ -243,56 +246,6 @@ impl super::Service { Ok(add_port_to_hostname(dest.as_str())) } - #[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] - async fn request_well_known(&self, dest: &str) -> Result> { - self.conditional_query_and_cache(dest, 8448, true).await?; - - self.services.server.check_running()?; - trace!("Requesting well known for {dest}"); - let response = self - .services - .client - .well_known - .get(format!("https://{dest}/.well-known/matrix/server")) - .send() - .await; - - trace!("response: {response:?}"); - if let Err(e) = &response { - debug!("error: {e:?}"); - return Ok(None); - } - - let response = response?; - if !response.status().is_success() { - debug!("response not 2XX"); - return Ok(None); - } - - let text = response.text().await?; - trace!("response text: {text:?}"); - if text.len() >= 12288 { - debug_warn!("response contains junk"); - return Ok(None); - } - - let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); - - let m_server = body - .get("m.server") - .unwrap_or(&serde_json::Value::Null) - .as_str() - .unwrap_or_default(); - - if ruma::identifiers_validation::server_name::validate(m_server).is_err() { - debug_error!("response content missing or invalid"); - return Ok(None); - } - - debug_info!("{dest:?} found at {m_server:?}"); - Ok(Some(m_server.to_owned())) - } - #[inline] async fn conditional_query_and_cache( &self, diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 246d6bc1..c513cec9 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -2,7 +2,9 @@ pub mod actual; pub mod cache; mod dns; pub mod fed; +#[cfg(test)] mod tests; +mod well_known; use std::sync::Arc; diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs index 6e9d0e71..068e08bd 100644 --- a/src/service/resolver/tests.rs +++ b/src/service/resolver/tests.rs @@ -1,5 +1,3 
@@ -#![cfg(test)] - use super::fed::{FedDest, add_port_to_hostname, get_ip_with_port}; #[test] diff --git a/src/service/resolver/well_known.rs b/src/service/resolver/well_known.rs new file mode 100644 index 00000000..68a8e620 --- /dev/null +++ b/src/service/resolver/well_known.rs @@ -0,0 +1,49 @@ +use conduwuit::{Result, debug, debug_error, debug_info, debug_warn, implement, trace}; + +#[implement(super::Service)] +#[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] +pub(super) async fn request_well_known(&self, dest: &str) -> Result> { + trace!("Requesting well known for {dest}"); + let response = self + .services + .client + .well_known + .get(format!("https://{dest}/.well-known/matrix/server")) + .send() + .await; + + trace!("response: {response:?}"); + if let Err(e) = &response { + debug!("error: {e:?}"); + return Ok(None); + } + + let response = response?; + if !response.status().is_success() { + debug!("response not 2XX"); + return Ok(None); + } + + let text = response.text().await?; + trace!("response text: {text:?}"); + if text.len() >= 12288 { + debug_warn!("response contains junk"); + return Ok(None); + } + + let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); + + let m_server = body + .get("m.server") + .unwrap_or(&serde_json::Value::Null) + .as_str() + .unwrap_or_default(); + + if ruma::identifiers_validation::server_name::validate(m_server).is_err() { + debug_error!("response content missing or invalid"); + return Ok(None); + } + + debug_info!("{dest:?} found at {m_server:?}"); + Ok(Some(m_server.to_owned())) +} From d1b82ea2253179836cf7400f70960d583b25af50 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 21 Mar 2025 08:10:44 +0000 Subject: [PATCH 0768/1248] use #[ignore] for todo'ed tests Signed-off-by: Jason Volk --- src/database/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 1446a1fc..c1a9f47c 100644 --- 
a/src/database/tests.rs +++ b/src/database/tests.rs @@ -325,8 +325,8 @@ fn ser_array() { assert_eq!(&s, &v, "vec serialization does not match"); } -#[cfg(todo)] #[test] +#[ignore] fn de_array() { let a: u64 = 123_456; let b: u64 = 987_654; @@ -357,8 +357,8 @@ fn de_array() { assert_eq!(vec[1], b, "deserialized vec [1] does not match"); } -#[cfg(todo)] #[test] +#[ignore] fn de_complex() { type Key<'a> = (&'a UserId, ArrayVec, &'a RoomId); From 9d0ce3965ea655943304b41ca679507b850130d3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 22 Mar 2025 07:09:11 +0000 Subject: [PATCH 0769/1248] fix lints Signed-off-by: Jason Volk --- src/api/client/context.rs | 2 +- src/core/error/response.rs | 2 +- src/service/media/preview.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index cb95dfef..b109711e 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -105,7 +105,7 @@ pub(crate) async fn get_context_route( .collect(); let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) = - join3(base_event, events_before, events_after).await; + join3(base_event, events_before, events_after).boxed().await; let lazy_loading_context = lazy_loading::Context { user_id: sender_user, diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 00ade5ae..ae6fce62 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -86,7 +86,7 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode { pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> String { if let ErrorBody::Standard { message, .. 
} = &error.body { - return message.to_string(); + return message.clone(); } format!("{error}") diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index ba5be7d4..91660a58 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -256,7 +256,7 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { if allowlist_url_contains .iter() - .any(|url_s| url.to_string().contains(&url_s.to_string())) + .any(|url_s| url.to_string().contains(url_s)) { debug!("URL {} is allowed by url_preview_url_contains_allowlist (check 4/4)", &host); return true; From 07ba00f74e2dfea314d0e5236f0415b2de6d543c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 26 Mar 2025 04:40:38 +0000 Subject: [PATCH 0770/1248] abstract raw query command iterations Signed-off-by: Jason Volk --- src/admin/query/raw.rs | 141 ++++++++++------------------------------- 1 file changed, 35 insertions(+), 106 deletions(-) diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 23f11cc8..c503eee5 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -1,15 +1,16 @@ -use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; +use std::{borrow::Cow, collections::BTreeMap, ops::Deref, sync::Arc}; use clap::Subcommand; use conduwuit::{ Err, Result, apply, at, is_zero, utils::{ - IterStream, - stream::{ReadyExt, TryIgnore, TryParallelExt}, + stream::{IterStream, ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, }, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use conduwuit_database::Map; +use conduwuit_service::Services; +use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; use ruma::events::room::message::RoomMessageEventContent; use tokio::time::Instant; @@ -172,22 +173,18 @@ pub(super) async fn compact( ) -> Result { use conduwuit_database::compact::Options; - let default_all_maps = map - .is_none() - .then(|| { - self.services - .db - .keys() - .map(Deref::deref) - .map(ToOwned::to_owned) - }) - .into_iter() - .flatten(); + let 
default_all_maps: Option<_> = map.is_none().then(|| { + self.services + .db + .keys() + .map(Deref::deref) + .map(ToOwned::to_owned) + }); let maps: Vec<_> = map .unwrap_or_default() .into_iter() - .chain(default_all_maps) + .chain(default_all_maps.into_iter().flatten()) .map(|map| self.services.db.get(&map)) .filter_map(Result::ok) .cloned() @@ -237,25 +234,8 @@ pub(super) async fn raw_count( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let count = maps - .iter() - .stream() + let count = with_maps_or(map.as_deref(), self.services) .then(|map| map.raw_count_prefix(&prefix)) .ready_fold(0_usize, usize::saturating_add) .await; @@ -300,25 +280,8 @@ pub(super) async fn raw_keys_sizes( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_keys_prefix(&prefix)) .flatten() .ignore_err() @@ -345,25 +308,8 @@ pub(super) async fn raw_keys_total( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - 
.filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_keys_prefix(&prefix)) .flatten() .ignore_err() @@ -387,25 +333,8 @@ pub(super) async fn raw_vals_sizes( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_stream_prefix(&prefix)) .flatten() .ignore_err() @@ -433,25 +362,8 @@ pub(super) async fn raw_vals_total( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_stream_prefix(&prefix)) .flatten() .ignore_err() @@ -573,3 +485,20 @@ pub(super) async fn raw_maps(&self) -> Result { Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) } + +fn with_maps_or<'a>( + map: Option<&'a str>, + services: &'a Services, +) -> impl Stream> + Send + 'a { + let default_all_maps = map + .is_none() + .then(|| services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + map.into_iter() + .chain(default_all_maps) + .map(|map| services.db.get(map)) + .filter_map(Result::ok) + .stream() +} From 
dfe058a244ad7592114c86d504fb6fed744ad524 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 2025 01:08:42 +0000 Subject: [PATCH 0771/1248] default config item to 'none' when zstd_compression not featured Signed-off-by: Jason Volk --- src/core/config/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 6b669ad3..52df19ac 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2158,7 +2158,12 @@ fn default_rocksdb_max_log_file_size() -> usize { fn default_rocksdb_parallelism_threads() -> usize { 0 } -fn default_rocksdb_compression_algo() -> String { "zstd".to_owned() } +fn default_rocksdb_compression_algo() -> String { + cfg!(feature = "zstd_compression") + .then_some("zstd") + .unwrap_or("none") + .to_owned() +} /// Default RocksDB compression level is 32767, which is internally read by /// RocksDB as the default magic number and translated to the library's default From c99f5770a01ebae978461605c0f6eb954f7bad1b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 2025 04:07:24 +0000 Subject: [PATCH 0772/1248] mark get_summary_and_children_federation Send Signed-off-by: Jason Volk --- src/service/rooms/spaces/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 55897f9c..af597445 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -238,7 +238,7 @@ async fn get_summary_and_children_federation( fn get_stripped_space_child_events<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + 'a { +) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) From 7f448d88a430cc2869fe9ab366fd29b3fddb0f13 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 2025 03:34:33 +0000 Subject: [PATCH 0773/1248] use qualified crate names from within workspace Signed-off-by: Jason Volk --- src/main/clap.rs | 9 +++++++-- 
src/main/logging.rs | 2 +- src/main/main.rs | 4 +--- src/main/mods.rs | 8 ++++---- src/main/restart.rs | 2 +- src/main/runtime.rs | 11 ++++++----- src/main/sentry.rs | 4 ++-- src/main/server.rs | 10 +++++----- src/main/signal.rs | 2 +- 9 files changed, 28 insertions(+), 24 deletions(-) diff --git a/src/main/clap.rs b/src/main/clap.rs index c7f33bfe..35a7ea41 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -3,7 +3,7 @@ use std::path::PathBuf; use clap::{ArgAction, Parser}; -use conduwuit::{ +use conduwuit_core::{ Err, Result, config::{Figment, FigmentValue}, err, toml, @@ -12,7 +12,12 @@ use conduwuit::{ /// Commandline arguments #[derive(Parser, Debug)] -#[clap(version = conduwuit::version(), about, long_about = None, name = "conduwuit")] +#[clap( + about, + long_about = None, + name = "conduwuit", + version = conduwuit_core::version(), +)] pub(crate) struct Args { #[arg(short, long)] /// Path to the config TOML file (optional) diff --git a/src/main/logging.rs b/src/main/logging.rs index 7ce86d56..eeeda127 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{ +use conduwuit_core::{ Result, config::Config, debug_warn, err, diff --git a/src/main/main.rs b/src/main/main.rs index 2bfc3c06..fbc63b17 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -7,11 +7,9 @@ mod sentry; mod server; mod signal; -extern crate conduwuit_core as conduwuit; - use std::sync::{Arc, atomic::Ordering}; -use conduwuit::{Error, Result, debug_info, error, rustc_flags_capture}; +use conduwuit_core::{Error, Result, debug_info, error, rustc_flags_capture}; use server::Server; rustc_flags_capture! 
{} diff --git a/src/main/mods.rs b/src/main/mods.rs index 6dc79b2f..d585a381 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -9,13 +9,13 @@ use std::{ sync::{Arc, atomic::Ordering}, }; -use conduwuit::{Error, Result, debug, error, mods}; +use conduwuit_core::{Error, Result, debug, error, mods}; use conduwuit_service::Services; use crate::Server; type StartFuncResult = Pin>> + Send>>; -type StartFuncProto = fn(&Arc) -> StartFuncResult; +type StartFuncProto = fn(&Arc) -> StartFuncResult; type RunFuncResult = Pin> + Send>>; type RunFuncProto = fn(&Arc) -> RunFuncResult; @@ -34,8 +34,8 @@ const MODULE_NAMES: &[&str] = &[ ]; #[cfg(panic_trap)] -conduwuit::mod_init! {{ - conduwuit::debug::set_panic_trap(); +conduwuit_core::mod_init! {{ + conduwuit_core::debug::set_panic_trap(); }} pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, bool), Error> { diff --git a/src/main/restart.rs b/src/main/restart.rs index e6f45b82..b9d1dc94 100644 --- a/src/main/restart.rs +++ b/src/main/restart.rs @@ -2,7 +2,7 @@ use std::{env, os::unix::process::CommandExt, process::Command}; -use conduwuit::{debug, info, utils}; +use conduwuit_core::{debug, info, utils}; #[cold] pub(super) fn restart() -> ! 
{ diff --git a/src/main/runtime.rs b/src/main/runtime.rs index b3174e9c..b1657289 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -9,8 +9,8 @@ use std::{ }; #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -use conduwuit::result::LogDebugErr; -use conduwuit::{ +use conduwuit_core::result::LogDebugErr; +use conduwuit_core::{ Result, is_true, utils::sys::compute::{nth_core_available, set_affinity}, }; @@ -122,7 +122,7 @@ fn set_worker_affinity() { #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] fn set_worker_mallctl(id: usize) { - use conduwuit::alloc::je::{ + use conduwuit_core::alloc::je::{ is_affine_arena, this_thread::{set_arena, set_muzzy_decay}, }; @@ -135,7 +135,8 @@ fn set_worker_mallctl(id: usize) { .get() .expect("GC_MUZZY initialized by runtime::new()"); - let muzzy_auto_disable = conduwuit::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + let muzzy_auto_disable = + conduwuit_core::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { set_muzzy_decay(-1).log_debug_err().ok(); } @@ -188,7 +189,7 @@ fn thread_park() { fn gc_on_park() { #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] - conduwuit::alloc::je::this_thread::decay() + conduwuit_core::alloc::je::this_thread::decay() .log_debug_err() .ok(); } diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 1ea1f3ae..68f12eb7 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, OnceLock}, }; -use conduwuit::{config::Config, debug, trace}; +use conduwuit_core::{config::Config, debug, trace}; use sentry::{ Breadcrumb, ClientOptions, Level, types::{ @@ -43,7 +43,7 @@ fn options(config: &Config) -> ClientOptions { traces_sample_rate: config.sentry_traces_sample_rate, debug: cfg!(debug_assertions), release: sentry::release_name!(), - user_agent: conduwuit::version::user_agent().into(), + user_agent: 
conduwuit_core::version::user_agent().into(), attach_stacktrace: config.sentry_attach_stacktrace, before_send: Some(Arc::new(before_send)), before_breadcrumb: Some(Arc::new(before_breadcrumb)), diff --git a/src/main/server.rs b/src/main/server.rs index 44ca69b0..8f697ca4 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,6 +1,6 @@ use std::{path::PathBuf, sync::Arc}; -use conduwuit::{ +use conduwuit_core::{ Error, Result, config::Config, info, @@ -14,7 +14,7 @@ use crate::{clap::Args, logging::TracingFlameGuard}; /// Server runtime state; complete pub(crate) struct Server { /// Server runtime state; public portion - pub(crate) server: Arc, + pub(crate) server: Arc, pub(crate) services: Mutex>>, @@ -25,7 +25,7 @@ pub(crate) struct Server { #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] // Module instances; TODO: move to mods::loaded mgmt vector - pub(crate) mods: tokio::sync::RwLock>, + pub(crate) mods: tokio::sync::RwLock>, } impl Server { @@ -66,11 +66,11 @@ impl Server { database_path = ?config.database_path, log_levels = %config.log, "{}", - conduwuit::version(), + conduwuit_core::version(), ); Ok(Arc::new(Self { - server: Arc::new(conduwuit::Server::new(config, runtime.cloned(), Log { + server: Arc::new(conduwuit_core::Server::new(config, runtime.cloned(), Log { reload: tracing_reload_handle, capture, })), diff --git a/src/main/signal.rs b/src/main/signal.rs index 343b95c9..a5d07774 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{debug_error, trace, warn}; +use conduwuit_core::{debug_error, trace, warn}; use tokio::signal; use super::server::Server; From b2bf35cfab8aac82e4cde1c7c5a7b6e713bba5db Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 06:42:30 +0000 Subject: [PATCH 0774/1248] fix benches from state-res Signed-off-by: Jason Volk --- src/core/state_res/benches.rs | 672 ++++++++++++++++++++++++++ src/core/state_res/mod.rs | 3 + 
src/core/state_res/state_res_bench.rs | 648 ------------------------- 3 files changed, 675 insertions(+), 648 deletions(-) create mode 100644 src/core/state_res/benches.rs delete mode 100644 src/core/state_res/state_res_bench.rs diff --git a/src/core/state_res/benches.rs b/src/core/state_res/benches.rs new file mode 100644 index 00000000..7a1ae5bf --- /dev/null +++ b/src/core/state_res/benches.rs @@ -0,0 +1,672 @@ +#[cfg(conduwuit_bench)] +extern crate test; + +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::{ + Arc, + atomic::{AtomicU64, Ordering::SeqCst}, + }, +}; + +use futures::{future, future::ready}; +use maplit::{btreemap, hashmap, hashset}; +use ruma::{ + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, Signatures, UserId, + events::{ + StateEventType, TimelineEventType, + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + }, + int, room_id, uint, user_id, +}; +use serde_json::{ + json, + value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, +}; + +use self::event::PduEvent; +use crate::state_res::{self as state_res, Error, Event, Result, StateMap}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn lexico_topo_sort(c: &mut test::Bencher) { + let graph = hashmap! 
{ + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + + c.iter(|| { + let _ = state_res::lexicographical_topological_sort(&graph, &|_| { + future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }); + }); +} + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn resolution_shallow_auth_chain(c: &mut test::Bencher) { + let parallel_fetches = 32; + let mut store = TestStore(hashmap! {}); + + // build up the DAG + let (state_at_bob, state_at_charlie, _) = store.set_up(); + + c.iter(|| async { + let ev_map = store.0.clone(); + let state_sets = [&state_at_bob, &state_at_charlie]; + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let auth_chain_sets: Vec> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + parallel_fetches, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + }); +} + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn resolve_deeper_event_set(c: &mut test::Bencher) { + let parallel_fetches = 32; + let mut inner = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + ( + 
(ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + c.iter(|| async { + let state_sets = [&state_set_a, &state_set_b]; + let auth_chain_sets: Vec> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + parallel_fetches, + ) + .await + { + | Ok(state) => state, + | Err(_) => panic!("resolution failed during benchmarking"), + }; + }); +} + +//*///////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION DETAILS AHEAD +// +/////////////////////////////////////////////////////////////////////*/ +struct TestStore(HashMap>); + +#[allow(unused)] +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + self.0 + .get(event_id) + .map(Arc::clone) + .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) + } + + /// Returns the events that correspond to the `event_ids` sorted in the same + /// order. 
+ fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + let mut events = vec![]; + for id in event_ids { + events.push(self.get_event(room_id, id)?); + } + Ok(events) + } + + /// Returns a Vec of the related auth events to the given `event`. + fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } + + /// Returns a vector representing the difference in auth chains of the given + /// `events`. + fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + let mut auth_chain_sets = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_event_ids(room_id, ids)? 
+ .into_iter() + .collect::>(); + auth_chain_sets.push(chain); + } + + if let Some(first) = auth_chain_sets.first().cloned() { + let common = auth_chain_sets + .iter() + .skip(1) + .fold(first, |a, b| a.intersection(b).cloned().collect::>()); + + Ok(auth_chain_sets + .into_iter() + .flatten() + .filter(|id| !common.contains(id.borrow())) + .collect()) + } else { + Ok(vec![]) + } + } +} + +impl TestStore { + #[allow(clippy::type_complexity)] + fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0 + .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0 + .insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + 
member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + format!("${}:foo", id).try_into().unwrap() +} + +fn alice() -> &'static UserId { user_id!("@alice:foo") } + +fn bob() -> &'static UserId { user_id!("@bob:foo") } + +fn charlie() -> &'static UserId { user_id!("@charlie:foo") } + +fn ella() -> &'static UserId { user_id!("@ella:foo") } + +fn room_id() -> &'static RoomId { room_id!("!test:foo") } + +fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + // We don't care if the addition happens in order just that it is atomic + // (each event has its own value) + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if 
id.contains('$') { + id.to_owned() + } else { + format!("${}:foo", id) + }; + let auth_events = auth_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + let prev_events = prev_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: btreemap! {}, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new(String::new()), + signatures: Signatures::new(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + 
to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +/// Convenience trait for adding event type plus state key to state maps. 
+trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for &TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +mod event { + use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + events::{TimelineEventType, pdu::Pdu}, + }; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + use super::Event; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.room_id, + | Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.sender, + | Pdu::RoomV3Pdu(ev) => &ev.sender, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.kind, + | Pdu::RoomV3Pdu(ev) => &ev.kind, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.content, + | Pdu::RoomV3Pdu(ev) => &ev.content, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + | 
Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 6bff0cf8..2020d65c 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -9,6 +9,9 @@ mod state_event; #[cfg(test)] mod test_utils; +#[cfg(test)] +mod benches; + use std::{ borrow::Borrow, cmp::{Ordering, Reverse}, diff --git a/src/core/state_res/state_res_bench.rs b/src/core/state_res/state_res_bench.rs deleted file mode 100644 index a2bd2c23..00000000 --- a/src/core/state_res/state_res_bench.rs +++ /dev/null @@ -1,648 +0,0 @@ -// Because of criterion `cargo bench` works, -// but if you use `cargo bench -- --save-baseline ` -// or pass any other args to it, it fails with the error -// `cargo bench unknown option --save-baseline`. 
-// To pass args to criterion, use this form -// `cargo bench --bench -- --save-baseline `. - -#![allow(clippy::exhaustive_structs)] - -use std::{ - borrow::Borrow, - collections::{HashMap, HashSet}, - sync::{ - atomic::{AtomicU64, Ordering::SeqCst}, - Arc, - }, -}; - -use criterion::{criterion_group, criterion_main, Criterion}; -use event::PduEvent; -use futures::{future, future::ready}; -use ruma::{int, uint}; -use maplit::{btreemap, hashmap, hashset}; -use ruma::{ - room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, - Signatures, UserId, -}; -use ruma::events::{ - pdu::{EventHash, Pdu, RoomV3Pdu}, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - }, - StateEventType, TimelineEventType, -}; -use conduwuit::state_res::{self as state_res, Error, Event, Result, StateMap}; -use serde_json::{ - json, - value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, -}; - -static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); - -fn lexico_topo_sort(c: &mut Criterion) { - c.bench_function("lexicographical topological sort", |b| { - let graph = hashmap! { - event_id("l") => hashset![event_id("o")], - event_id("m") => hashset![event_id("n"), event_id("o")], - event_id("n") => hashset![event_id("o")], - event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges - event_id("p") => hashset![event_id("o")], - }; - b.iter(|| { - let _ = state_res::lexicographical_topological_sort(&graph, &|_| { - future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) - }); - }); - }); -} - -fn resolution_shallow_auth_chain(c: &mut Criterion) { - c.bench_function("resolve state of 5 events one fork", |b| { - let mut store = TestStore(hashmap! 
{}); - - // build up the DAG - let (state_at_bob, state_at_charlie, _) = store.set_up(); - - b.iter(|| async { - let ev_map = store.0.clone(); - let state_sets = [&state_at_bob, &state_at_charlie]; - let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); - let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); - let auth_chain_sets = state_sets - .iter() - .map(|map| { - store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() - }) - .collect(); - - let _ = match state_res::resolve( - &RoomVersionId::V6, - state_sets.into_iter(), - &auth_chain_sets, - &fetch, - &exists, - ) - .await - { - Ok(state) => state, - Err(e) => panic!("{e}"), - }; - }); - }); -} - -fn resolve_deeper_event_set(c: &mut Criterion) { - c.bench_function("resolve state of 10 events 3 conflicting", |b| { - let mut inner = INITIAL_EVENTS(); - let ban = BAN_STATE_SET(); - - inner.extend(ban); - let store = TestStore(inner.clone()); - - let state_set_a = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("MB")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| { - (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) - }) - .collect::>(); - - let state_set_b = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("IME")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| { - (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) - }) - .collect::>(); - - b.iter(|| async { - let state_sets = [&state_set_a, &state_set_b]; - let auth_chain_sets = state_sets - .iter() - .map(|map| { - 
store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() - }) - .collect(); - - let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); - let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); - let _ = match state_res::resolve( - &RoomVersionId::V6, - state_sets.into_iter(), - &auth_chain_sets, - &fetch, - &exists, - ) - .await - { - Ok(state) => state, - Err(_) => panic!("resolution failed during benchmarking"), - }; - }); - }); -} - -criterion_group!( - benches, - lexico_topo_sort, - resolution_shallow_auth_chain, - resolve_deeper_event_set -); - -criterion_main!(benches); - -//*///////////////////////////////////////////////////////////////////// -// -// IMPLEMENTATION DETAILS AHEAD -// -/////////////////////////////////////////////////////////////////////*/ -struct TestStore(HashMap>); - -#[allow(unused)] -impl TestStore { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { - self.0 - .get(event_id) - .map(Arc::clone) - .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) - } - - /// Returns the events that correspond to the `event_ids` sorted in the same order. - fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { - let mut events = vec![]; - for id in event_ids { - events.push(self.get_event(room_id, id)?); - } - Ok(events) - } - - /// Returns a Vec of the related auth events to the given `event`. - fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { - let mut result = HashSet::new(); - let mut stack = event_ids; - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains(&ev_id) { - continue; - } - - result.insert(ev_id.clone()); - - let event = self.get_event(room_id, ev_id.borrow())?; - - stack.extend(event.auth_events().map(ToOwned::to_owned)); - } - - Ok(result) - } - - /// Returns a vector representing the difference in auth chains of the given `events`. 
- fn auth_chain_diff(&self, room_id: &RoomId, event_ids: Vec>) -> Result> { - let mut auth_chain_sets = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self.auth_event_ids(room_id, ids)?.into_iter().collect::>(); - auth_chain_sets.push(chain); - } - - if let Some(first) = auth_chain_sets.first().cloned() { - let common = auth_chain_sets - .iter() - .skip(1) - .fold(first, |a, b| a.intersection(b).cloned().collect::>()); - - Ok(auth_chain_sets - .into_iter() - .flatten() - .filter(|id| !common.contains(id.borrow())) - .collect()) - } else { - Ok(vec![]) - } - } -} - -impl TestStore { - #[allow(clippy::type_complexity)] - fn set_up( - &mut self, - ) -> (StateMap, StateMap, StateMap) { - let create_event = to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ); - let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); - - let alice_mem = to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().to_string().as_str()), - member_content_join(), - &[cre.clone()], - &[cre.clone()], - ); - self.0.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); - - let join_rules = to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &[cre.clone(), alice_mem.event_id().to_owned()], - &[alice_mem.event_id().to_owned()], - ); - self.0.insert(join_rules.event_id().to_owned(), join_rules.clone()); - - // Bob and Charlie join at the same time, so there is a fork - // this will be represented in the state_sets when we resolve - let bob_mem = to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - 
member_content_join(), - &[cre.clone(), join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0.insert(bob_mem.event_id().to_owned(), bob_mem.clone()); - - let charlie_mem = to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().to_string().as_str()), - member_content_join(), - &[cre, join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0.insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); - - let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - (state_at_bob, state_at_charlie, expected) - } -} - -fn event_id(id: &str) -> OwnedEventId { - if id.contains('$') { - return id.try_into().unwrap(); - } - format!("${}:foo", id).try_into().unwrap() -} - -fn alice() -> &'static UserId { - user_id!("@alice:foo") -} - -fn bob() -> &'static UserId { - user_id!("@bob:foo") -} - -fn charlie() -> &'static UserId { - user_id!("@charlie:foo") -} - -fn ella() -> &'static UserId { - user_id!("@ella:foo") -} - -fn room_id() -> &'static RoomId { - room_id!("!test:foo") -} - -fn member_content_ban() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() -} - -fn member_content_join() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() -} - -fn to_pdu_event( - id: &str, - sender: &UserId, - ev_type: TimelineEventType, - state_key: Option<&str>, - 
content: Box, - auth_events: &[S], - prev_events: &[S], -) -> Arc -where - S: AsRef, -{ - // We don't care if the addition happens in order just that it is atomic - // (each event has its own value) - let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); - let id = if id.contains('$') { id.to_owned() } else { format!("${}:foo", id) }; - let auth_events = auth_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); - let prev_events = prev_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); - - let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { - event_id: id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: btreemap! {}, - auth_events, - prev_events, - depth: uint!(0), - hashes: EventHash::new(String::new()), - signatures: Signatures::new(), - }), - }) -} - -// all graphs start with these input events -#[allow(non_snake_case)] -fn INITIAL_EVENTS() -> HashMap> { - vec![ - to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ), - to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().as_str()), - member_content_join(), - &["CREATE"], - &["CREATE"], - ), - to_pdu_event( - "IPOWER", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), - &["CREATE", "IMA"], - &["IMA"], - ), - to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["IPOWER"], - ), - to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - member_content_join(), - 
&["CREATE", "IJR", "IPOWER"], - &["IJR"], - ), - to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().to_string().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IMB"], - ), - to_pdu_event::<&EventId>( - "START", - charlie(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - to_pdu_event::<&EventId>( - "END", - charlie(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -// all graphs start with these input events -#[allow(non_snake_case)] -fn BAN_STATE_SET() -> HashMap> { - vec![ - to_pdu_event( - "PA", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], // auth_events - &["START"], // prev_events - ), - to_pdu_event( - "PB", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["END"], - ), - to_pdu_event( - "MB", - alice(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_ban(), - &["CREATE", "IMA", "PB"], - &["PA"], - ), - to_pdu_event( - "IME", - ella(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_join(), - &["CREATE", "IJR", "PA"], - &["MB"], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -/// Convenience trait for adding event type plus state key to state maps. 
-trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); -} - -impl EventTypeExt for &TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { - (self.to_string().into(), state_key.into()) - } -} - -mod event { - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; - use ruma_events::{pdu::Pdu, TimelineEventType}; - use ruma_state_res::Event; - use serde::{Deserialize, Serialize}; - use serde_json::value::RawValue as RawJsonValue; - - impl Event for PduEvent { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { - &self.event_id - } - - fn room_id(&self) -> &RoomId { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.room_id, - Pdu::RoomV3Pdu(ev) => &ev.room_id, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn sender(&self) -> &UserId { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.sender, - Pdu::RoomV3Pdu(ev) => &ev.sender, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn event_type(&self) -> &TimelineEventType { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.kind, - Pdu::RoomV3Pdu(ev) => &ev.kind, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn content(&self) -> &RawJsonValue { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.content, - Pdu::RoomV3Pdu(ev) => &ev.content, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, - Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn state_key(&self) -> Option<&str> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), - Pdu::RoomV3Pdu(ev) 
=> ev.state_key.as_deref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn prev_events(&self) -> Box + Send + '_> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn auth_events(&self) -> Box + Send + '_> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn redacts(&self) -> Option<&Self::Id> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - } - - #[derive(Clone, Debug, Deserialize, Serialize)] - pub(crate) struct PduEvent { - pub(crate) event_id: OwnedEventId, - #[serde(flatten)] - pub(crate) rest: Pdu, - } -} From 6365f1a887a02564237fd6176ee7e3d72480ffbf Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 28 Mar 2025 14:14:48 -0400 Subject: [PATCH 0775/1248] remove sccache from ci for now Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3fd834e0..5043f23b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,16 +21,6 @@ concurrency: cancel-in-progress: true env: - # sccache only on main repo - SCCACHE_GHA_ENABLED: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login 
!= 'renovate[bot]') && 'true' || 'false' }}" - RUSTC_WRAPPER: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" - SCCACHE_BUCKET: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" - SCCACHE_S3_USE_SSL: ${{ vars.SCCACHE_S3_USE_SSL }} - SCCACHE_REGION: ${{ vars.SCCACHE_REGION }} - SCCACHE_ENDPOINT: ${{ vars.SCCACHE_ENDPOINT }} - SCCACHE_CACHE_MULTIARCH: ${{ vars.SCCACHE_CACHE_MULTIARCH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} # Required to make some things output color TERM: ansi # Publishing to my nix binary cache @@ -123,13 +113,6 @@ jobs: bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features' bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic' - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting @@ -247,13 +230,6 @@ jobs: direnv allow nix develop .#all-features --command true --impure - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: 
mozilla-actions/sccache-action@main - # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting From 75b6daa67f31d29035113d217accc80505119e63 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Fri, 28 Mar 2025 12:22:23 -0400 Subject: [PATCH 0776/1248] Fix off-by-one error when fetching room hierarchy --- src/api/client/space.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index a667f852..a55c927d 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -159,7 +159,7 @@ where break; } - if parents.len() >= max_depth { + if parents.len() > max_depth { continue; } From 3e57b7d35d5bd6cfed5900b377f7c68970213518 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Fri, 28 Mar 2025 12:58:18 -0400 Subject: [PATCH 0777/1248] Update expected test results --- tests/test_results/complement/test_results.jsonl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index ac2733f8..c0e28750 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -73,7 +73,7 @@ {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} {"Action":"fail","Test":"TestClientSpacesSummary"} -{"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} +{"Action":"pass","Test":"TestClientSpacesSummary/max_depth"} {"Action":"fail","Test":"TestClientSpacesSummary/pagination"} {"Action":"fail","Test":"TestClientSpacesSummary/query_whole_graph"} 
{"Action":"fail","Test":"TestClientSpacesSummary/redact_link"} From 0e2009dbf5c3dfe1cfd1fd37078c74e871ffa5c6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 22:47:00 +0000 Subject: [PATCH 0778/1248] fix client hierarchy loop condition Signed-off-by: Jason Volk --- src/api/client/space.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index a55c927d..567ac62f 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -155,10 +155,6 @@ where break; } - if children.is_empty() { - break; - } - if parents.len() > max_depth { continue; } From d0132706cd9b5bd0c6df5507cb42bcbade86f28b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 23:34:42 +0000 Subject: [PATCH 0779/1248] add --read-only and --maintenance program option Signed-off-by: Jason Volk --- Cargo.toml | 1 + src/main/clap.rs | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index db55b9b8..8b49c3b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -892,6 +892,7 @@ needless_continue = { level = "allow", priority = 1 } no_effect_underscore_binding = { level = "allow", priority = 1 } similar_names = { level = "allow", priority = 1 } single_match_else = { level = "allow", priority = 1 } +struct_excessive_bools = { level = "allow", priority = 1 } struct_field_names = { level = "allow", priority = 1 } unnecessary_wraps = { level = "allow", priority = 1 } unused_async = { level = "allow", priority = 1 } diff --git a/src/main/clap.rs b/src/main/clap.rs index 35a7ea41..707a1c76 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -27,6 +27,14 @@ pub(crate) struct Args { #[arg(long, short('O'))] pub(crate) option: Vec, + /// Run in a stricter read-only --maintenance mode. + #[arg(long)] + pub(crate) read_only: bool, + + /// Run in maintenance mode while refusing connections. 
+ #[arg(long)] + pub(crate) maintenance: bool, + #[cfg(feature = "console")] /// Activate admin command console automatically after startup. #[arg(long, num_args(0))] @@ -121,6 +129,15 @@ pub(super) fn parse() -> Args { Args::parse() } /// Synthesize any command line options with configuration file options. pub(crate) fn update(mut config: Figment, args: &Args) -> Result { + if args.read_only { + config = config.join(("rocksdb_read_only", true)); + } + + if args.maintenance || args.read_only { + config = config.join(("startup_netburst", false)); + config = config.join(("listening", false)); + } + #[cfg(feature = "console")] // Indicate the admin console should be spawned automatically if the // configuration file hasn't already. From b03c493bf994449c8c5dd5b1122ab9c87a289df5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 20:33:38 +0000 Subject: [PATCH 0780/1248] add stub for database benches Signed-off-by: Jason Volk --- src/database/benches.rs | 17 +++++++++++++++++ src/database/mod.rs | 2 ++ 2 files changed, 19 insertions(+) create mode 100644 src/database/benches.rs diff --git a/src/database/benches.rs b/src/database/benches.rs new file mode 100644 index 00000000..56d1411c --- /dev/null +++ b/src/database/benches.rs @@ -0,0 +1,17 @@ +#[cfg(conduwuit_bench)] +extern crate test; + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn ser_str(b: &mut test::Bencher) { + use conduwuit::ruma::{RoomId, UserId}; + + use crate::ser::serialize_to_vec; + + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + b.iter(|| { + let key = (user_id, room_id); + let _s = serialize_to_vec(key).expect("failed to serialize user_id"); + }); +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 0481d1bd..1262a79a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -5,6 +5,8 @@ conduwuit::mod_ctor! {} conduwuit::mod_dtor! {} conduwuit::rustc_flags_capture! 
{} +#[cfg(test)] +mod benches; mod cork; mod de; mod deserialized; From a93cb34dd6e10038d6504af209c78e4967467bcb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 02:48:08 +0000 Subject: [PATCH 0781/1248] disambiguate UInt/u64 type related in client/api/directory; use err macros. Signed-off-by: Jason Volk --- src/api/client/directory.rs | 86 +++++++++++++------------------------ 1 file changed, 30 insertions(+), 56 deletions(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 80b314b9..f2f668c8 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,12 +1,13 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Error, Result, info, + Err, Result, err, info, utils::{ TryFutureExtExt, + math::Expected, + result::FlatOk, stream::{ReadyExt, WidebandExt}, }, - warn, }; use futures::{ FutureExt, StreamExt, TryFutureExt, @@ -20,7 +21,6 @@ use ruma::{ get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility, }, - error::ErrorKind, room, }, federation, @@ -71,11 +71,7 @@ pub(crate) async fn get_public_rooms_filtered_route( ) .await .map_err(|e| { - warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest( - ErrorKind::Unknown, - "Failed to return the requested server's public room list.", - ) + err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) })?; Ok(response) @@ -113,11 +109,7 @@ pub(crate) async fn get_public_rooms_route( ) .await .map_err(|e| { - warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest( - ErrorKind::Unknown, - "Failed to return the requested server's public room list.", - ) + err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) })?; Ok(get_public_rooms::v3::Response { @@ -137,7 +129,7 @@ pub(crate) async fn set_room_visibility_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let 
sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist @@ -181,10 +173,9 @@ pub(crate) async fn set_room_visibility_route( .await; } - return Err(Error::BadRequest( - ErrorKind::forbidden(), + return Err!(Request(Forbidden( "Publishing rooms to the room directory is not allowed", - )); + ))); } services.rooms.directory.set_public(&body.room_id); @@ -202,10 +193,7 @@ pub(crate) async fn set_room_visibility_route( }, | room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id), | _ => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room visibility type is not supported.", - )); + return Err!(Request(InvalidParam("Room visibility type is not supported.",))); }, } @@ -221,7 +209,7 @@ pub(crate) async fn get_room_visibility_route( ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist - return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + return Err!(Request(NotFound("Room not found"))); } Ok(get_room_visibility::v3::Response { @@ -269,8 +257,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( } // Use limit or else 10, with maximum 100 - let limit = limit.map_or(10, u64::from); - let mut num_since: u64 = 0; + let limit: usize = limit.map_or(10_u64, u64::from).try_into()?; + let mut num_since: usize = 0; if let Some(s) = &since { let mut characters = s.chars(); @@ -278,14 +266,14 @@ pub(crate) async fn get_public_rooms_filtered_helper( | Some('n') => false, | Some('p') => true, | _ => { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")); + return Err!(Request(InvalidParam("Invalid `since` token"))); }, }; num_since = characters .collect::() .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?; + .map_err(|_| 
err!(Request(InvalidParam("Invalid `since` token."))))?; if backwards { num_since = num_since.saturating_sub(limit); @@ -302,6 +290,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { return None; } + if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { @@ -333,40 +322,24 @@ pub(crate) async fn get_public_rooms_filtered_helper( all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - let total_room_count_estimate = UInt::try_from(all_rooms.len()).unwrap_or_else(|_| uint!(0)); + let total_room_count_estimate = UInt::try_from(all_rooms.len()) + .unwrap_or_else(|_| uint!(0)) + .into(); - let chunk: Vec<_> = all_rooms - .into_iter() - .skip( - num_since - .try_into() - .expect("num_since should not be this high"), - ) - .take(limit.try_into().expect("limit should not be this high")) - .collect(); + let chunk: Vec<_> = all_rooms.into_iter().skip(num_since).take(limit).collect(); - let prev_batch = if num_since == 0 { - None - } else { - Some(format!("p{num_since}")) - }; + let prev_batch = num_since.ne(&0).then_some(format!("p{num_since}")); - let next_batch = if chunk.len() < limit.try_into().unwrap() { - None - } else { - Some(format!( - "n{}", - num_since - .checked_add(limit) - .expect("num_since and limit should not be that large") - )) - }; + let next_batch = chunk + .len() + .ge(&limit) + .then_some(format!("n{}", num_since.expected_add(limit))); Ok(get_public_rooms_filtered::v3::Response { chunk, prev_batch, next_batch, - total_room_count_estimate: Some(total_room_count_estimate), + total_room_count_estimate, }) } @@ -384,7 +357,7 @@ async fn user_can_publish_room( .await { | Ok(event) => serde_json::from_str(event.content.get()) - .map_err(|_| Error::bad_database("Invalid event content for 
m.room.power_levels")) + .map_err(|_| err!(Database("Invalid event content for m.room.power_levels"))) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content) .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) @@ -452,9 +425,10 @@ async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> Public join_rule: join_rule.unwrap_or_default(), name, num_joined_members: num_joined_members - .unwrap_or(0) - .try_into() - .expect("joined count overflows ruma UInt"), + .map(TryInto::try_into) + .map(Result::ok) + .flat_ok() + .unwrap_or_else(|| uint!(0)), room_id, room_type, topic, From 095734a8e7835abf793911ff24ddf0f55c89012f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 03:01:58 +0000 Subject: [PATCH 0782/1248] bump tokio to 1.44.1 Signed-off-by: Jason Volk --- Cargo.lock | 559 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 2 +- 2 files changed, 291 insertions(+), 270 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c28f4eab..8c0e797b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,9 +55,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "arbitrary" @@ -79,7 +79,7 @@ checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -105,9 +105,9 @@ dependencies = [ [[package]] name = "as_variant" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38fa22307249f86fb7fad906fcae77f2564caeb56d7209103c551cd1cf4798f" +checksum = 
"9dbc3a507a82b17ba0d98f6ce8fd6954ea0c8152e98009d36a40d8dcc8ce078a" [[package]] name = "assign" @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" +checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" dependencies = [ "brotli", "flate2", @@ -161,18 +161,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "async-trait" -version = "0.1.86" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -221,27 +221,25 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.5" +version = "1.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4e8200b9a4a5801a769d50eeabc05670fec7e959a8cb7a63a93e4e519942ae" +checksum = "dabb68eb3a7aa08b46fddfd59a3d55c978243557a90ab804769f7e20e67d2b01" dependencies = [ "aws-lc-sys", - "paste", "zeroize", ] [[package]] name = "aws-lc-sys" -version = "0.26.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" +checksum = "77926887776171ced7d662120a75998e444d3750c951abfe07f90da130514b1f" dependencies = [ "bindgen 0.69.5", "cc", "cmake", "dunce", "fs_extra", - "paste", ] [[package]] @@ -334,16 +332,15 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab" dependencies = [ "arc-swap", "bytes", - "futures-util", + "fs-err", "http", "http-body", - "http-body-util", "hyper", "hyper-util", "pin-project-lite", @@ -352,7 +349,6 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower 0.4.13", "tower-service", ] @@ -404,9 +400,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" [[package]] name = "bindgen" @@ -427,7 +423,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.98", + "syn 2.0.100", "which", ] @@ -446,7 +442,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -535,9 +531,9 @@ checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" -version = "1.21.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" [[package]] name = "byteorder" @@ -553,9 +549,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bytesize" @@ -585,9 +581,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.16" 
+version = "1.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" dependencies = [ "jobserver", "libc", @@ -656,9 +652,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.31" +version = "4.5.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" +checksum = "e958897981290da2a852763fe9cdb89cd36977a5d729023127095fa94d95e2ff" dependencies = [ "clap_builder", "clap_derive", @@ -666,9 +662,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.31" +version = "4.5.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" +checksum = "83b0f35019843db2160b5bb19ae09b4e6411ac33fc6a712003c33e03090e2489" dependencies = [ "anstyle", "clap_lex", @@ -676,14 +672,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.28" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -787,7 +783,7 @@ dependencies = [ "ipaddress", "itertools 0.13.0", "log", - "rand", + "rand 0.8.5", "reqwest", "ruma", "serde", @@ -830,7 +826,7 @@ dependencies = [ "maplit", "nix", "num-traits", - "rand", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -842,7 +838,7 @@ dependencies = [ "serde_yaml", "smallstr", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -880,7 +876,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 
2.0.100", ] [[package]] @@ -937,7 +933,7 @@ dependencies = [ "log", "loole", "lru-cache", - "rand", + "rand 0.8.5", "regex", "reqwest", "ruma", @@ -1194,7 +1190,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1221,7 +1217,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1264,9 +1260,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" dependencies = [ "powerfmt", ] @@ -1290,7 +1286,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1317,7 +1313,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2", "subtle", @@ -1326,9 +1322,9 @@ dependencies = [ [[package]] name = "either" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" dependencies = [ "serde", ] @@ -1342,7 +1338,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1373,9 +1369,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ "event-listener", "pin-project-lite", @@ -1472,6 +1468,16 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "fs-err" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +dependencies = [ + "autocfg", + "tokio", +] + [[package]] name = "fs_extra" version = "1.3.0" @@ -1543,7 +1549,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1601,14 +1607,16 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", ] [[package]] @@ -1645,7 +1653,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.1", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ -1654,9 +1662,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" dependencies = [ "cfg-if", "crunchy", @@ -1751,7 +1759,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tinyvec", "tokio", @@ -1772,7 +1780,7 @@ dependencies = [ "lru-cache", "once_cell", "parking_lot", - "rand", + 
"rand 0.8.5", "resolv-conf", "smallvec", "thiserror 1.0.69", @@ -1798,17 +1806,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - [[package]] name = "hostname" version = "0.4.0" @@ -1831,14 +1828,14 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -1866,12 +1863,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "pin-project-lite", @@ -1879,9 +1876,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -1891,9 +1888,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = 
"9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] name = "hyper" @@ -2009,9 +2006,9 @@ dependencies = [ [[package]] name = "icu_locid_transform_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" [[package]] name = "icu_normalizer" @@ -2033,9 +2030,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_properties" @@ -2054,9 +2051,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_provider" @@ -2083,7 +2080,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2109,9 +2106,9 @@ dependencies = [ [[package]] name = "image" -version = "0.25.5" +version = "0.25.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6f44aed642f18953a158afeb30206f4d50da59fbc66ecb53c66488de73563b" +checksum = "db35664ce6b9810857a38a906215e75a9c879f0696556a39f59c62829710251a" dependencies = [ "bytemuck", "byteorder-lite", @@ -2137,7 +2134,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" dependencies = [ "byteorder-lite", - 
"quick-error 2.0.1", + "quick-error", ] [[package]] @@ -2158,9 +2155,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2187,7 +2184,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2251,9 +2248,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" @@ -2338,7 +2335,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2361,9 +2358,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.170" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libfuzzer-sys" @@ -2387,9 +2384,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", @@ -2426,9 +2423,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.27" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "loole" @@ -2506,12 +2503,6 @@ dependencies = [ "xml5ever", ] -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matchers" version = "0.1.0" @@ -2566,7 +2557,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2713,7 +2704,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2777,9 +2768,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "openssl-probe" @@ -2795,7 +2786,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.7.1", + "indexmap 2.8.0", "js-sys", "once_cell", "pin-project-lite", @@ -2844,7 +2835,7 @@ dependencies = [ "opentelemetry", "ordered-float 4.6.0", "percent-encoding", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tokio", "tokio-stream", @@ -2921,7 +2912,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2951,7 +2942,7 @@ dependencies = [ "proc-macro2", 
"proc-macro2-diagnostics", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2986,7 +2977,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", - "rand", + "rand 0.8.5", ] [[package]] @@ -3000,22 +2991,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3042,9 +3033,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "png" @@ -3067,9 +3058,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ "zerocopy", ] @@ -3082,28 +3073,28 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = 
"prettyplease" -version = "0.2.29" +version = "0.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" +checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" dependencies = [ "proc-macro2", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "proc-macro-crate" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -3116,7 +3107,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "version_check", "yansi", ] @@ -3137,7 +3128,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3160,7 +3151,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3199,12 +3190,6 @@ dependencies = [ "bytemuck", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quick-error" version = "2.0.1" @@ -3213,37 +3198,39 @@ checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] 
name = "quinn" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" dependencies = [ "bytes", + "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", "rustls", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", + "web-time 1.1.0", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" dependencies = [ "bytes", - "getrandom 0.2.15", - "rand", + "getrandom 0.3.2", + "rand 0.9.0", "ring", "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time 1.1.0", @@ -3251,9 +3238,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" dependencies = [ "cfg_aliases", "libc", @@ -3265,13 +3252,19 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "rand" 
version = "0.8.5" @@ -3279,8 +3272,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy", ] [[package]] @@ -3290,7 +3294,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -3302,6 +3316,15 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + [[package]] name = "rav1e" version = "0.7.1" @@ -3328,8 +3351,8 @@ dependencies = [ "once_cell", "paste", "profiling", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "simd_helpers", "system-deps", "thiserror 1.0.69", @@ -3346,7 +3369,7 @@ dependencies = [ "avif-serialize", "imgref", "loop9", - "quick-error 2.0.1", + "quick-error", "rav1e", "rayon", "rgb", @@ -3374,9 +3397,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" +checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ "bitflags 2.9.0", ] @@ -3476,12 +3499,11 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4" dependencies = [ - "hostname 0.3.1", - "quick-error 1.2.3", + "hostname", ] [[package]] @@ -3492,9 +3514,9 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" [[package]] name = "ring" -version = "0.17.12" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9b823fa29b721a59671b41d6b06e66b29e0628e207e8b1c3ceeda701ec928d" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -3554,7 +3576,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "url", "web-time 1.1.0", ] @@ -3570,11 +3592,11 @@ dependencies = [ "form_urlencoded", "getrandom 0.2.15", "http", - "indexmap 2.7.1", + "indexmap 2.8.0", "js_int", "konst", "percent-encoding", - "rand", + "rand 0.8.5", "regex", "ruma-identifiers-validation", "ruma-macros", @@ -3582,7 +3604,7 @@ dependencies = [ "serde_html_form", "serde_json", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "time", "tracing", "url", @@ -3597,7 +3619,7 @@ version = "0.28.1" source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", - "indexmap 2.7.1", + "indexmap 2.8.0", "js_int", "js_option", "percent-encoding", @@ -3609,7 +3631,7 @@ dependencies = [ "serde", "serde_json", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", 
"tracing", "url", "web-time 1.1.0", @@ -3629,12 +3651,12 @@ dependencies = [ "js_int", "memchr", "mime", - "rand", + "rand 0.8.5", "ruma-common", "ruma-events", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", ] @@ -3644,7 +3666,7 @@ version = "0.9.5" source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -3668,7 +3690,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.98", + "syn 2.0.100", "toml", ] @@ -3692,12 +3714,12 @@ dependencies = [ "base64 0.22.1", "ed25519-dalek", "pkcs8", - "rand", + "rand 0.8.5", "ruma-common", "serde_json", "sha2", "subslice", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -3768,9 +3790,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" dependencies = [ "aws-lc-rs", "log", @@ -3814,9 +3836,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" dependencies = [ "aws-lc-rs", "ring", @@ -3826,9 +3848,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "rustyline-async" @@ -3840,16 +3862,16 @@ 
dependencies = [ "futures-util", "pin-project", "thingbuf", - "thiserror 2.0.11", + "thiserror 2.0.12", "unicode-segmentation", "unicode-width 0.2.0", ] [[package]] name = "ryu" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "sanitize-filename" @@ -3909,9 +3931,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" @@ -3953,7 +3975,7 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" dependencies = [ - "hostname 0.4.0", + "hostname", "libc", "os_info", "rustc_version", @@ -3968,7 +3990,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" dependencies = [ "once_cell", - "rand", + "rand 0.8.5", "sentry-types", "serde", "serde_json", @@ -4039,7 +4061,7 @@ checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" dependencies = [ "debugid", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "thiserror 1.0.69", @@ -4050,22 +4072,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.218" 
+version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4075,7 +4097,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -4083,9 +4105,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.139" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", @@ -4095,9 +4117,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" dependencies = [ "itoa", "serde", @@ -4140,7 +4162,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -4220,7 +4242,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -4306,9 +4328,9 @@ checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006" [[package]] name = 
"string_cache" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938d512196766101d333398efde81bc1f37b00cb42c2f8350e5df639f040bbbe" +checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" dependencies = [ "new_debug_unreachable", "parking_lot", @@ -4357,9 +4379,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.98" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -4383,7 +4405,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4453,11 +4475,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -4468,18 +4490,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4555,9 +4577,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.41" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -4570,15 +4592,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", @@ -4596,9 +4618,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -4611,9 +4633,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" dependencies = [ "backtrace", "bytes", @@ -4635,7 +4657,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4685,9 +4707,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" 
+version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", @@ -4723,7 +4745,7 @@ version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "serde", "serde_spanned", "toml_datetime", @@ -4771,7 +4793,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -4835,7 +4857,6 @@ name = "tracing" version = "0.1.41" source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -4848,7 +4869,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4969,9 +4990,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-segmentation" @@ -5056,11 +5077,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.15.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = 
"458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", "serde", ] @@ -5116,9 +5137,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", ] @@ -5145,7 +5166,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "wasm-bindgen-shared", ] @@ -5180,7 +5201,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5265,9 +5286,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" [[package]] name = "wildmatch" @@ -5496,9 +5517,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" dependencies = [ "memchr", ] @@ -5515,9 +5536,9 @@ dependencies = [ [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = 
"6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags 2.9.0", ] @@ -5571,29 +5592,28 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" dependencies = [ - "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -5613,7 +5633,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] @@ -5642,7 +5662,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -5656,19 +5676,20 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ + "bindgen 0.71.1", "cc", "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 8b49c3b8..ab7a935c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,7 @@ default-features = false features = ["std", "async-await"] [workspace.dependencies.tokio] -version = "1.42.0" +version = "1.44.1" default-features = false features = [ "fs", From 5bf5afaec83d4e68cbfd5220cd760a7940e7dda5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:54:55 +0000 Subject: [PATCH 0783/1248] instrument tokio before/after poll hooks Signed-off-by: Jason Volk --- src/main/runtime.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/main/runtime.rs b/src/main/runtime.rs index b1657289..920476db 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -61,6 +61,8 @@ pub(super) fn new(args: &Args) -> Result { #[cfg(tokio_unstable)] builder .on_task_spawn(task_spawn) + .on_before_task_poll(task_enter) + .on_after_task_poll(task_leave) .on_task_terminate(task_terminate); #[cfg(tokio_unstable)] @@ -215,3 +217,25 @@ fn task_spawn(meta: &tokio::runtime::TaskMeta<'_>) {} ), )] fn task_terminate(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "enter", + level = "trace", + skip_all, + fields( + id = %meta.id() + ), +)] +fn task_enter(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "leave", + level = "trace", + skip_all, + fields( + id = %meta.id() + ), +)] +fn task_leave(meta: &tokio::runtime::TaskMeta<'_>) {} From dc6e9e74d9e9fb0bbdddb35c6b00d16544860095 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:56:00 +0000 Subject: [PATCH 0784/1248] add spans for for jemalloc mallctl points Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 12 ++++++++++++ 1 file changed, 12 
insertions(+) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 51caf3a3..2424e99c 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -335,6 +335,12 @@ where Ok(res) } +#[tracing::instrument( + name = "get", + level = "trace", + skip_all, + fields(?key) +)] fn get(key: &Key) -> Result where T: Copy + Debug, @@ -346,6 +352,12 @@ where unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) } +#[tracing::instrument( + name = "xchg", + level = "trace", + skip_all, + fields(?key, ?val) +)] fn xchg(key: &Key, val: T) -> Result where T: Copy + Debug, From bee4c6255a815a9c7bc577d7afa66f69e26ea735 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 21:19:47 +0000 Subject: [PATCH 0785/1248] reorg PduEvent strip tools and callsites Signed-off-by: Jason Volk --- src/api/client/context.rs | 6 +- src/api/client/message.rs | 2 +- src/api/client/room/event.rs | 2 +- src/api/client/room/initial_sync.rs | 2 +- src/api/client/search.rs | 2 +- src/api/client/sync/v3.rs | 8 +- src/api/client/sync/v4.rs | 4 +- src/api/client/sync/v5.rs | 4 +- src/api/client/threads.rs | 2 +- src/core/pdu/strip.rs | 182 ++++++++++++++++++++-------- src/service/rooms/spaces/mod.rs | 5 +- src/service/rooms/state/mod.rs | 2 +- src/service/sending/sender.rs | 2 +- 13 files changed, 152 insertions(+), 71 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index b109711e..1dda7b53 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -182,7 +182,7 @@ pub(crate) async fn get_context_route( .await; Ok(get_context::v3::Response { - event: base_event.map(at!(1)).as_ref().map(PduEvent::to_room_event), + event: base_event.map(at!(1)).map(PduEvent::into_room_event), start: events_before .last() @@ -201,13 +201,13 @@ events_before: events_before .into_iter() .map(at!(1)) -.map(|pdu| pdu.to_room_event()) +.map(PduEvent::into_room_event) .collect(), events_after: events_after 
.into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), state, diff --git a/src/api/client/message.rs b/src/api/client/message.rs index c755cc47..03c7335a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -157,7 +157,7 @@ pub(crate) async fn get_message_events_route( let chunk = events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(); Ok(get_message_events::v3::Response { diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index 84b591cd..2b115b5c 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -40,5 +40,5 @@ pub(crate) async fn get_room_event_route( event.add_age().ok(); - Ok(get_room_event::v3::Response { event: event.to_room_event() }) + Ok(get_room_event::v3::Response { event: event.into_room_event() }) } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index e4c76ae0..ca63610b 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -55,7 +55,7 @@ pub(crate) async fn room_initial_sync_route( chunk: events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), }; diff --git a/src/api/client/search.rs b/src/api/client/search.rs index f3366843..d66df881 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -143,7 +143,7 @@ async fn category_room_events( .map(at!(2)) .flatten() .stream() - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .map(|result| SearchResult { rank: None, result: Some(result), diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 70c4c6a7..a8248f95 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -461,7 +461,7 @@ async fn handle_left_room( events: Vec::new(), }, state: RoomState { - events: vec![event.to_sync_state_event()], + events: 
vec![event.into_sync_state_event()], }, })); } @@ -546,7 +546,7 @@ async fn handle_left_room( continue; } - left_state_events.push(pdu.to_sync_state_event()); + left_state_events.push(pdu.into_sync_state_event()); } } @@ -865,8 +865,8 @@ async fn load_joined_room( }, state: RoomState { events: state_events - .iter() - .map(PduEvent::to_sync_state_event) + .into_iter() + .map(PduEvent::into_sync_state_event) .collect(), }, ephemeral: Ephemeral { events: edus }, diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 5fdcbab8..7e902973 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduCount, Result, debug, error, extract_variant, + Error, PduCount, PduEvent, Result, debug, error, extract_variant, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, @@ -634,7 +634,7 @@ pub(crate) async fn sync_events_v4_route( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(|s| s.to_sync_state_event()) + .map(PduEvent::into_sync_state_event) .ok() }) .collect() diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index b4c1b815..48b41b21 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, Result, TypeStateKey, debug, error, extract_variant, trace, + Error, PduEvent, Result, TypeStateKey, debug, error, extract_variant, trace, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma}, @@ -507,7 +507,7 @@ async fn process_rooms( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(|s| s.to_sync_state_event()) + .map(PduEvent::into_sync_state_event) .ok() }) .collect() diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index d25e52c0..00bfe553 100644 --- a/src/api/client/threads.rs +++ 
b/src/api/client/threads.rs @@ -53,7 +53,7 @@ pub(crate) async fn get_threads_route( chunk: threads .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), }) } diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 4e7c5b83..3683caaa 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -10,35 +10,18 @@ use serde_json::{json, value::Value as JsonValue}; use crate::implement; -#[must_use] -#[implement(super::Pdu)] -pub fn to_sync_room_event(&self) -> Raw { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") -} - /// This only works for events that are also AnyRoomEvents. #[must_use] #[implement(super::Pdu)] -pub fn to_any_event(&self) -> Raw { +pub fn into_any_event(self) -> Raw { + serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works") +} + +/// This only works for events that are also AnyRoomEvents. 
+#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_any_event_value(self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -59,12 +42,24 @@ pub fn to_any_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_room_event(self) -> Raw { self.to_room_event() } + +#[implement(super::Pdu)] +#[must_use] pub fn to_room_event(&self) -> Raw { + serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_room_event_value(&self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -85,12 +80,25 @@ pub fn to_room_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_message_like_event(self) -> Raw { self.to_message_like_event() } + +#[implement(super::Pdu)] +#[must_use] pub fn to_message_like_event(&self) -> Raw { + serde_json::from_value(self.to_message_like_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_message_like_event_value(&self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -111,11 +119,55 @@ pub fn to_message_like_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_sync_room_event(self) -> Raw { self.to_sync_room_event() } + +#[implement(super::Pdu)] +#[must_use] +pub fn to_sync_room_event(&self) -> Raw { + 
serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_sync_room_event_value(&self) -> JsonValue { + let (redacts, content) = self.copy_redacts(); + let mut json = json!({ + "content": content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &redacts { + json["redacts"] = json!(redacts); + } + + json +} + +#[implement(super::Pdu)] +#[must_use] +pub fn into_state_event(self) -> Raw { + serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] pub fn into_state_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, @@ -134,15 +186,17 @@ pub fn into_state_event_value(self) -> JsonValue { json } -#[must_use] #[implement(super::Pdu)] -pub fn into_state_event(self) -> Raw { - serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") +#[must_use] +pub fn into_sync_state_event(self) -> Raw { + serde_json::from_value(self.into_sync_state_event_value()) + .expect("Raw::from_value always works") } -#[must_use] #[implement(super::Pdu)] -pub fn to_sync_state_event(&self) -> Raw { +#[must_use] +#[inline] +pub fn into_sync_state_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -156,39 +210,65 @@ pub fn to_sync_state_event(&self) -> Raw { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_stripped_state_event(self) -> Raw { + self.to_stripped_state_event() +} + 
+#[implement(super::Pdu)] +#[must_use] pub fn to_stripped_state_event(&self) -> Raw { - let json = json!({ + serde_json::from_value(self.to_stripped_state_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_stripped_state_event_value(&self) -> JsonValue { + json!({ "content": self.content, "type": self.kind, "sender": self.sender, "state_key": self.state_key, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") + }) } -#[must_use] #[implement(super::Pdu)] -pub fn to_stripped_spacechild_state_event(&self) -> Raw { - let json = json!({ +#[must_use] +pub fn into_stripped_spacechild_state_event(self) -> Raw { + serde_json::from_value(self.into_stripped_spacechild_state_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue { + json!({ "content": self.content, "type": self.kind, "sender": self.sender, "state_key": self.state_key, "origin_server_ts": self.origin_server_ts, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") + }) } -#[must_use] #[implement(super::Pdu)] +#[must_use] pub fn into_member_event(self) -> Raw> { + serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_member_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -204,5 +284,5 @@ pub fn into_member_event(self) -> Raw> { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index af597445..a10fe7fc 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -6,7 +6,7 @@ use std::{fmt::Write, sync::Arc}; use async_trait::async_trait; use 
conduwuit::{ - Err, Error, Result, implement, + Err, Error, PduEvent, Result, implement, utils::{ IterStream, future::BoolExt, @@ -267,11 +267,12 @@ fn get_stripped_space_child_events<'a>( } if RoomId::parse(&state_key).is_ok() { - return Some(pdu.to_stripped_spacechild_state_event()); + return Some(pdu); } None }) + .map(PduEvent::into_stripped_spacechild_state_event) } /// Gets the summary of a space using either local or remote (federation) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 56955497..803ba9d7 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -341,7 +341,7 @@ impl Service { .await .into_iter() .filter_map(Result::ok) - .map(|e| e.to_stripped_state_event()) + .map(PduEvent::into_stripped_state_event) .chain(once(event.to_stripped_state_event())) .collect() } diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 616f0846..fab02f6b 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -697,7 +697,7 @@ impl Service { match event { | SendingEvent::Pdu(pdu_id) => { if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { - pdu_jsons.push(pdu.to_room_event()); + pdu_jsons.push(pdu.into_room_event()); } }, | SendingEvent::Edu(edu) => From db99d3a001841db61bb79544912099b7346456b4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:58:14 +0000 Subject: [PATCH 0786/1248] remove recently-made-unnecessary unsafe block Signed-off-by: Jason Volk --- src/core/utils/sys/storage.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index b11df7bb..452b04b2 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -123,10 +123,7 @@ pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { let stat = fs::metadata(path)?; let dev_id = stat.dev().try_into()?; - - // SAFETY: These functions may not need to be 
marked as unsafe. - // see: https://github.com/rust-lang/libc/issues/3759 - let (major, minor) = unsafe { (libc::major(dev_id), libc::minor(dev_id)) }; + let (major, minor) = (libc::major(dev_id), libc::minor(dev_id)); Ok((major.try_into()?, minor.try_into()?)) } From d60920c72890b7ebf70d47bfc37f4477fa9716aa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 22:59:29 +0000 Subject: [PATCH 0787/1248] workaround some large type name length issues Signed-off-by: Jason Volk --- src/api/mod.rs | 1 + src/core/error/err.rs | 1 + src/core/mod.rs | 2 ++ src/core/state_res/mod.rs | 4 ---- src/core/utils/mod.rs | 4 ++++ src/database/mod.rs | 2 ++ src/main/main.rs | 2 ++ src/router/mod.rs | 2 ++ src/service/mod.rs | 1 + src/service/rooms/event_handler/mod.rs | 15 +++--------- .../rooms/event_handler/resolve_state.rs | 6 +++-- src/service/rooms/spaces/mod.rs | 24 ++++++++++--------- .../rooms/state_accessor/room_state.rs | 4 ++-- src/service/rooms/state_accessor/state.rs | 5 +++- 14 files changed, 41 insertions(+), 32 deletions(-) diff --git a/src/api/mod.rs b/src/api/mod.rs index 8df17a59..090cf897 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,3 +1,4 @@ +#![type_length_limit = "16384"] //TODO: reduce me #![allow(clippy::toplevel_ref_arg)] pub mod client; diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 0962c4ee..9c24d3b4 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -136,6 +136,7 @@ macro_rules! err_log { } #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
err_lev { (debug_warn) => { if $crate::debug::logging() { diff --git a/src/core/mod.rs b/src/core/mod.rs index cd56774a..80ebbdcb 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "12288"] + pub mod alloc; pub mod config; pub mod debug; diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 2020d65c..1db92e59 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -149,7 +149,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; debug!(count = sorted_control_levels.len(), "power events"); @@ -164,7 +163,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; debug!(count = resolved_control.len(), "resolved power events"); @@ -192,7 +190,6 @@ where let sorted_left_events = mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) - .boxed() .await?; trace!(list = ?sorted_left_events, "events left, sorted"); @@ -204,7 +201,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; // Add unconflicted state to the resolved state diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 53460c59..7593990c 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -173,6 +173,7 @@ macro_rules! is_equal { /// Functor for |x| *x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! deref_at { ($idx:tt) => { |t| *t.$idx @@ -181,6 +182,7 @@ macro_rules! deref_at { /// Functor for |ref x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! ref_at { ($idx:tt) => { |ref t| &t.$idx @@ -189,6 +191,7 @@ macro_rules! ref_at { /// Functor for |&x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! val_at { ($idx:tt) => { |&t| t.$idx @@ -197,6 +200,7 @@ macro_rules! val_at { /// Functor for |x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
at { ($idx:tt) => { |t| t.$idx diff --git a/src/database/mod.rs b/src/database/mod.rs index 1262a79a..ffcefee9 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "3072"] + extern crate conduwuit_core as conduwuit; extern crate rust_rocksdb as rocksdb; diff --git a/src/main/main.rs b/src/main/main.rs index fbc63b17..52f40384 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "49152"] //TODO: reduce me + pub(crate) mod clap; mod logging; mod mods; diff --git a/src/router/mod.rs b/src/router/mod.rs index f64dcb67..7038c5df 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "32768"] //TODO: reduce me + mod layers; mod request; mod router; diff --git a/src/service/mod.rs b/src/service/mod.rs index 0bde0255..8f4a84b0 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,3 +1,4 @@ +#![type_length_limit = "2048"] #![allow(refining_impl_trait)] mod manager; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 4944f3ec..45675da8 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -18,11 +18,7 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{ - Err, PduEvent, Result, RoomVersion, Server, - utils::{MutexMap, TryFutureExtExt}, -}; -use futures::TryFutureExt; +use conduwuit::{Err, PduEvent, Result, RoomVersion, Server, utils::MutexMap}; use ruma::{ OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, events::room::create::RoomCreateEventContent, @@ -103,13 +99,8 @@ impl Service { self.services.timeline.pdu_exists(&event_id).await } - async fn event_fetch(&self, event_id: OwnedEventId) -> Option> { - self.services - .timeline - .get_pdu(&event_id) - .map_ok(Arc::new) - .ok() - .await + async fn event_fetch(&self, event_id: OwnedEventId) -> Option { + self.services.timeline.get_pdu(&event_id).await.ok() } } diff --git 
a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 9033c3a8..b3a7a71b 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -110,12 +110,14 @@ pub async fn state_resolution<'a, StateSets>( where StateSets: Iterator> + Clone + Send, { + let event_fetch = |event_id| self.event_fetch(event_id); + let event_exists = |event_id| self.event_exists(event_id); state_res::resolve( room_version, state_sets, auth_chain_sets, - &|event_id| self.event_fetch(event_id), - &|event_id| self.event_exists(event_id), + &event_fetch, + &event_exists, automatic_width(), ) .map_err(|e| err!(error!("State resolution failed: {e:?}"))) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index a10fe7fc..da52e095 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -9,7 +9,7 @@ use conduwuit::{ Err, Error, PduEvent, Result, implement, utils::{ IterStream, - future::BoolExt, + future::{BoolExt, TryExtExt}, math::usize_from_f64, stream::{BroadbandExt, ReadyExt}, }, @@ -36,7 +36,7 @@ use ruma::{ use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{Dep, conduwuit::utils::TryFutureExtExt, rooms, sending}; +use crate::{Dep, rooms, sending}; pub struct Service { services: Services, @@ -141,7 +141,8 @@ pub async fn get_summary_and_children_local( } let children_pdus: Vec<_> = self - .get_stripped_space_child_events(current_room) + .get_space_child_events(current_room) + .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await; @@ -235,10 +236,10 @@ async fn get_summary_and_children_federation( /// Simply returns the stripped m.space.child events of a room #[implement(Service)] -fn get_stripped_space_child_events<'a>( +fn get_space_child_events<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { +) -> impl Stream + Send + 'a { self.services 
.state .get_room_shortstatehash(room_id) @@ -246,6 +247,7 @@ fn get_stripped_space_child_events<'a>( self.services .state_accessor .state_keys_with_ids(current_shortstatehash, &StateEventType::SpaceChild) + .boxed() }) .map(Result::into_iter) .map(IterStream::stream) @@ -256,8 +258,8 @@ fn get_stripped_space_child_events<'a>( .timeline .get_pdu(&event_id) .map_ok(move |pdu| (state_key, pdu)) - .await .ok() + .await }) .ready_filter_map(move |(state_key, pdu)| { if let Ok(content) = pdu.get_content::() { @@ -266,13 +268,12 @@ fn get_stripped_space_child_events<'a>( } } - if RoomId::parse(&state_key).is_ok() { - return Some(pdu); + if RoomId::parse(&state_key).is_err() { + return None; } - None + Some(pdu) }) - .map(PduEvent::into_stripped_spacechild_state_event) } /// Gets the summary of a space using either local or remote (federation) @@ -501,7 +502,8 @@ async fn cache_insert( allowed_room_ids, room_id: room_id.clone(), children_state: self - .get_stripped_space_child_events(&room_id) + .get_space_child_events(&room_id) + .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await, }; diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index ff26b33a..642cd5d2 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -31,7 +31,7 @@ pub fn room_state_full<'a>( self.services .state .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok).boxed()) .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) .try_flatten_stream() } @@ -46,7 +46,7 @@ pub fn room_state_full_pdus<'a>( self.services .state .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok).boxed()) .map_err(move |e| 
err!(Database("Missing state for {room_id:?}: {e:?}"))) .try_flatten_stream() } diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 02a6194e..8f2dd76f 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -235,6 +235,7 @@ pub fn state_keys_with_shortids<'a>( .ignore_err() .unzip() .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .boxed() .shared(); let shortstatekeys = short_ids @@ -390,8 +391,10 @@ pub fn state_full_shortids( .map(parse_compressed_state_event) .collect() }) - .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .map_ok(Vec::into_iter) + .map_ok(IterStream::try_stream) .try_flatten_stream() + .boxed() } #[implement(super::Service)] From d3b65af6163baed6e6f55922235ccc9e9f5a4e98 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 02:28:01 +0000 Subject: [PATCH 0788/1248] remove several services.globals config wrappers Signed-off-by: Jason Volk --- src/api/client/account.rs | 10 ++++----- src/api/client/membership.rs | 4 ++-- src/api/client/presence.rs | 42 ++++++++++------------------------- src/api/client/profile.rs | 4 ++-- src/api/client/read_marker.rs | 4 ++-- src/api/client/room/create.rs | 2 +- src/api/client/send.rs | 3 +-- src/api/client/sync/v3.rs | 6 ++--- src/api/client/typing.rs | 2 +- src/api/client/unstable.rs | 8 +++---- src/api/server/invite.rs | 3 +-- src/service/globals/mod.rs | 30 ------------------------- 12 files changed, 34 insertions(+), 84 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 5dd622d7..efa8b142 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -146,7 +146,7 @@ pub(crate) async fn register_route( let is_guest = body.kind == RegistrationKind::Guest; let emergency_mode_enabled = services.config.emergency_password.is_some(); - if !services.globals.allow_registration() && body.appservice_info.is_none() { + if !services.config.allow_registration 
&& body.appservice_info.is_none() { match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { | (Some(username), Some(device_display_name)) => { info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); @@ -166,8 +166,8 @@ pub(crate) async fn register_route( } if is_guest - && (!services.globals.allow_guest_registration() - || (services.globals.allow_registration() + && (!services.config.allow_guest_registration + || (services.config.allow_registration && services.globals.registration_token.is_some())) { info!( @@ -441,7 +441,7 @@ pub(crate) async fn register_route( } // log in conduit admin channel if a guest registered - if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() { + if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations { debug_info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { @@ -490,7 +490,7 @@ pub(crate) async fn register_route( if body.appservice_info.is_none() && !services.server.config.auto_join_rooms.is_empty() - && (services.globals.allow_guests_auto_join_rooms() || !is_guest) + && (services.config.allow_guests_auto_join_rooms || !is_guest) { for room in &services.server.config.auto_join_rooms { let Ok(room_id) = services.rooms.alias.resolve(room).await else { diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 11395e83..315a363c 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -491,7 +491,7 @@ pub(crate) async fn invite_user_route( ) -> Result { let sender_user = body.sender_user(); - if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { info!( "User {sender_user} is not an admin and attempted to send an invite to room {}", &body.room_id 
@@ -1628,7 +1628,7 @@ pub(crate) async fn invite_helper( reason: Option, is_direct: bool, ) -> Result { - if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { info!( "User {sender_user} is not an admin and attempted to send an invite to room \ {room_id}" diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index 9b41a721..548e5cce 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -1,12 +1,10 @@ use std::time::Duration; use axum::extract::State; -use ruma::api::client::{ - error::ErrorKind, - presence::{get_presence, set_presence}, -}; +use conduwuit::{Err, Result}; +use ruma::api::client::presence::{get_presence, set_presence}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// @@ -15,24 +13,17 @@ pub(crate) async fn set_presence_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_local_presence() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Presence is disabled on this server", - )); + if !services.config.allow_local_presence { + return Err!(Request(Forbidden("Presence is disabled on this server"))); } - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if sender_user != &body.user_id && body.appservice_info.is_none() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to set presence of other users", - )); + if body.sender_user() != body.user_id && body.appservice_info.is_none() { + return Err!(Request(InvalidParam("Not allowed to set presence of other users"))); } services .presence - .set_presence(sender_user, &body.presence, None, None, body.status_msg.clone()) + .set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone()) .await?; Ok(set_presence::v3::Response {}) @@ -47,21 +38,15 @@ pub(crate) 
async fn get_presence_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_local_presence() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Presence is disabled on this server", - )); + if !services.config.allow_local_presence { + return Err!(Request(Forbidden("Presence is disabled on this server",))); } - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut presence_event = None; - let has_shared_rooms = services .rooms .state_cache - .user_sees_user(sender_user, &body.user_id) + .user_sees_user(body.sender_user(), &body.user_id) .await; if has_shared_rooms { @@ -99,9 +84,6 @@ pub(crate) async fn get_presence_route( presence: presence.content.presence, }) }, - | _ => Err(Error::BadRequest( - ErrorKind::NotFound, - "Presence state for this user was not found", - )), + | _ => Err!(Request(NotFound("Presence state for this user was not found"))), } } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 12e5ebcc..5abe5b23 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -52,7 +52,7 @@ pub(crate) async fn set_displayname_route( update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms) .await; - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -147,7 +147,7 @@ pub(crate) async fn set_avatar_url_route( ) .await; - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 187616b4..b334e356 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -50,7 +50,7 @@ pub(crate) async fn set_read_marker_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, 
&ruma::presence::PresenceState::Online) @@ -126,7 +126,7 @@ pub(crate) async fn create_receipt_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &ruma::presence::PresenceState::Online) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index bb06e966..bdc5d5a5 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -372,7 +372,7 @@ pub(crate) async fn create_room_route( // Silently skip encryption events if they are not allowed if pdu_builder.event_type == TimelineEventType::RoomEncryption - && !services.globals.allow_encryption() + && !services.config.allow_encryption { continue; } diff --git a/src/api/client/send.rs b/src/api/client/send.rs index b01d1ed6..1af74f57 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -25,8 +25,7 @@ pub(crate) async fn send_message_event_route( let appservice_info = body.appservice_info.as_ref(); // Forbid m.room.encrypted if encryption is disabled - if MessageLikeEventType::RoomEncrypted == body.event_type - && !services.globals.allow_encryption() + if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption { return Err!(Request(Forbidden("Encryption has been disabled"))); } diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a8248f95..530c1278 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -118,7 +118,7 @@ pub(crate) async fn sync_events_route( let (sender_user, sender_device) = body.sender(); // Presence update - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &body.body.set_presence) @@ -279,8 +279,8 @@ pub(crate) async fn build_sync_events( }); let presence_updates: OptionFuture<_> = services - .globals - .allow_local_presence() + .config + .allow_local_presence .then(|| 
process_presence_updates(services, since, sender_user)) .into(); diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index ccfa7340..b02cc473 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -64,7 +64,7 @@ pub(crate) async fn create_typing_event_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(&body.user_id, &ruma::presence::PresenceState::Online) diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 08da5a37..45ad103e 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -205,7 +205,7 @@ pub(crate) async fn delete_timezone_key_route( services.users.set_timezone(&body.user_id, None); - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -233,7 +233,7 @@ pub(crate) async fn set_timezone_key_route( services.users.set_timezone(&body.user_id, body.tz.clone()); - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -326,7 +326,7 @@ pub(crate) async fn set_profile_key_route( ); } - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -385,7 +385,7 @@ pub(crate) async fn delete_profile_key_route( .set_profile_key(&body.user_id, &body.key_name, None); } - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 463cb9ab..f4cc6eb2 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -103,8 +103,7 @@ pub(crate) async fn create_invite_route( return Err!(Request(Forbidden("This room is banned on this homeserver."))); } - if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await 
- { + if services.config.block_non_admin_invites && !services.users.is_admin(&invited_user).await { return Err!(Request(Forbidden("This server does not allow room invites."))); } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 1dd7db8e..a7a9be9d 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -111,20 +111,6 @@ impl Service { #[inline] pub fn server_name(&self) -> &ServerName { self.server.name.as_ref() } - pub fn allow_registration(&self) -> bool { self.server.config.allow_registration } - - pub fn allow_guest_registration(&self) -> bool { self.server.config.allow_guest_registration } - - pub fn allow_guests_auto_join_rooms(&self) -> bool { - self.server.config.allow_guests_auto_join_rooms - } - - pub fn log_guest_registrations(&self) -> bool { self.server.config.log_guest_registrations } - - pub fn allow_encryption(&self) -> bool { self.server.config.allow_encryption } - - pub fn allow_federation(&self) -> bool { self.server.config.allow_federation } - pub fn allow_public_room_directory_over_federation(&self) -> bool { self.server .config @@ -183,22 +169,6 @@ impl Service { pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames } - pub fn allow_local_presence(&self) -> bool { self.server.config.allow_local_presence } - - pub fn allow_incoming_presence(&self) -> bool { self.server.config.allow_incoming_presence } - - pub fn allow_outgoing_presence(&self) -> bool { self.server.config.allow_outgoing_presence } - - pub fn allow_incoming_read_receipts(&self) -> bool { - self.server.config.allow_incoming_read_receipts - } - - pub fn allow_outgoing_read_receipts(&self) -> bool { - self.server.config.allow_outgoing_read_receipts - } - - pub fn block_non_admin_invites(&self) -> bool { self.server.config.block_non_admin_invites } - /// checks if `user_id` is local to us via server_name comparison #[inline] pub fn user_is_local(&self, user_id: &UserId) -> bool { From 
3f0f89cddb28041ddeec94d8c80410a04153235b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 04:25:48 +0000 Subject: [PATCH 0789/1248] use async_trait without axum re-export Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/api/Cargo.toml | 1 + src/api/router/args.rs | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 8c0e797b..aa639b30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -766,6 +766,7 @@ dependencies = [ name = "conduwuit_api" version = "0.5.0" dependencies = [ + "async-trait", "axum", "axum-client-ip", "axum-extra", diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 385e786f..7890561c 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -35,6 +35,7 @@ brotli_compression = [ ] [dependencies] +async-trait.workspace = true axum-client-ip.workspace = true axum-extra.workspace = true axum.workspace = true diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 65a68fa4..26713dcc 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -1,6 +1,7 @@ use std::{mem, ops::Deref}; -use axum::{async_trait, body::Body, extract::FromRequest}; +use async_trait::async_trait; +use axum::{body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY}; use ruma::{ From 5768ca844295d892cfdcc9c80c8a57ef71c0e30c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 04:23:47 +0000 Subject: [PATCH 0790/1248] upgrade dependency ByteSize Signed-off-by: Jason Volk --- Cargo.lock | 103 ++++++++++++++++++---------------------- Cargo.toml | 2 +- src/core/utils/bytes.rs | 6 +-- 3 files changed, 49 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa639b30..ab9af9e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,7 +79,7 @@ checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + 
"syn", ] [[package]] @@ -161,7 +161,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -172,7 +172,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -423,7 +423,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.100", + "syn", "which", ] @@ -442,7 +442,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.100", + "syn", ] [[package]] @@ -555,9 +555,9 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bytesize" -version = "1.3.2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2c12f985c78475a6b8d629afd0c360260ef34cfef52efccdcfd31972f81c2e" +checksum = "a3c8f83209414aacf0eeae3cf730b18d6981697fba62f200fcfb92b9f082acba" [[package]] name = "bzip2-sys" @@ -679,7 +679,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -877,7 +877,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "crokey" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520e83558f4c008ac06fa6a86e5c1d4357be6f994cce7434463ebcdaadf47bb1" +checksum = "c5ff945e42bb93d29b10ba509970066a269903a932f0ea07d99d8621f97e90d7" dependencies = [ "crokey-proc_macros", "crossterm", @@ -1075,15 +1075,15 @@ dependencies = [ [[package]] name = "crokey-proc_macros" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370956e708a1ce65fe4ac5bb7185791e0ece7485087f17736d54a23a0895049f" +checksum = "665f2180fd82d0ba2bf3deb45fafabb18f23451024ff71ee47f6bfdfb4bbe09e" dependencies = [ 
"crossterm", "proc-macro2", "quote", "strict", - "syn 1.0.109", + "syn", ] [[package]] @@ -1191,7 +1191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1218,7 +1218,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1287,7 +1287,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1339,7 +1339,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1550,7 +1550,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1829,7 +1829,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2081,7 +2081,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2185,7 +2185,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2336,7 +2336,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.100", + "syn", ] [[package]] @@ -2558,7 +2558,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2705,7 +2705,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2943,7 +2943,7 @@ dependencies = [ "proc-macro2", 
"proc-macro2-diagnostics", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3007,7 +3007,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3079,7 +3079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn", ] [[package]] @@ -3108,7 +3108,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "version_check", "yansi", ] @@ -3129,7 +3129,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3152,7 +3152,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3691,7 +3691,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.100", + "syn", "toml", ] @@ -4088,7 +4088,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4297,9 +4297,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4367,17 +4367,6 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" -[[package]] -name = "syn" -version = "1.0.109" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - [[package]] name = "syn" version = "2.0.100" @@ -4406,7 +4395,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4491,7 +4480,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4502,7 +4491,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4658,7 +4647,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4870,7 +4859,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -5167,7 +5156,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.100", + "syn", "wasm-bindgen-shared", ] @@ -5202,7 +5191,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5593,7 +5582,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "synstructure", ] @@ -5614,7 +5603,7 @@ checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -5634,7 +5623,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.100", + "syn", "synstructure", ] @@ -5663,7 +5652,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ab7a935c..e6751acf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -510,7 +510,7 @@ version = "1.0.37" version = "1.0.89" [workspace.dependencies.bytesize] -version = "1.3.2" +version = "2.0.1" [workspace.dependencies.core_affinity] version = "0.8.1" diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index 04101be4..507b9b9a 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -17,15 +17,13 @@ pub fn from_str(str: &str) -> Result { Ok(bytes) } -/// Output a human-readable size string w/ si-unit suffix +/// Output a human-readable size string w/ iec-unit suffix #[inline] #[must_use] pub fn pretty(bytes: usize) -> String { - const SI_UNITS: bool = true; - let bytes: u64 = bytes.try_into().expect("failed to convert usize to u64"); - bytesize::to_string(bytes, SI_UNITS) + ByteSize::b(bytes).display().iec().to_string() } #[inline] From bee1f896243f9fafc588b98f43412637f6a5dd90 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 05:03:15 +0000 Subject: [PATCH 0791/1248] bump dependencies Signed-off-by: Jason Volk --- Cargo.lock | 136 +++++++++++++++++++++++++++++------------- Cargo.toml | 48 +++++++-------- src/core/Cargo.toml | 1 + src/core/error/mod.rs | 2 + 4 files changed, 121 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab9af9e8..fb19dfdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -731,8 +731,8 @@ dependencies = [ "opentelemetry-jaeger", "opentelemetry_sdk", "sentry", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.35.0", + "sentry-tracing 0.35.0", "tokio", "tokio-metrics", "tracing", @@ -782,7 +782,7 @@ dependencies = [ "http-body-util", "hyper", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "log", "rand 
0.8.5", "reqwest", @@ -802,6 +802,7 @@ dependencies = [ "argon2", "arrayvec", "axum", + "axum-extra", "bytes", "bytesize", "cargo_toml", @@ -820,7 +821,7 @@ dependencies = [ "http", "http-body-util", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "libc", "libloading", "log", @@ -874,7 +875,7 @@ dependencies = [ name = "conduwuit_macros" version = "0.5.0" dependencies = [ - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn", @@ -904,8 +905,8 @@ dependencies = [ "rustls", "sd-notify", "sentry", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.35.0", + "sentry-tracing 0.35.0", "serde_json", "tokio", "tower 0.5.2", @@ -930,7 +931,7 @@ dependencies = [ "http", "image", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "log", "loole", "lru-cache", @@ -997,9 +998,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-str" -version = "0.5.7" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3618cccc083bb987a415d85c02ca6c9994ea5b44731ec28b9ecf09658655fba9" +checksum = "9e991226a70654b49d34de5ed064885f0bef0348a8e70018b8ff1ac80aa984a2" [[package]] name = "const_panic" @@ -1948,9 +1949,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1961,7 +1962,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -2543,18 +2543,18 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minicbor" -version = "0.25.1" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c0452a60c1863c1f50b5f77cd295e8d2786849f35883f0b9e18e7e6e1b5691b0" +checksum = "1936e27fffe7d8557c060eb82cb71668608cd1a5fb56b63e66d22ae8d7564321" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.15.3" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" +checksum = "a9882ef5c56df184b8ffc107fc6c61e33ee3a654b021961d790a78571bb9d67a" dependencies = [ "proc-macro2", "quote", @@ -2563,9 +2563,9 @@ dependencies = [ [[package]] name = "minicbor-serde" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "becf18ac384ecf6f53b2db3b1549eebff664c67ecf259ae99be5912193291686" +checksum = "54e45e8beeefea1b8b6f52fa188a5b6ea3746c2885606af8d4d8bf31cee633fb" dependencies = [ "minicbor", "serde", @@ -3938,21 +3938,21 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016958f51b96861dead7c1e02290f138411d05e94fad175c8636a835dee6e51e" +checksum = "3a7332159e544e34db06b251b1eda5e546bd90285c3f58d9c8ff8450b484e0da" dependencies = [ "httpdate", "reqwest", "rustls", "sentry-backtrace", "sentry-contexts", - "sentry-core", + "sentry-core 0.36.0", "sentry-debug-images", "sentry-log", "sentry-panic", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.36.0", + "sentry-tracing 0.36.0", "tokio", "ureq", "webpki-roots", @@ -3960,27 +3960,27 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57712c24e99252ef175b4b06c485294f10ad6bc5b5e1567ff3803ee7a0b7d3f" +checksum = "565ec31ad37bab8e6d9f289f34913ed8768347b133706192f10606dabd5c6bc4" dependencies = [ "backtrace", "once_cell", "regex", - 
"sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-contexts" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" +checksum = "e860275f25f27e8c0c7726ce116c7d5c928c5bba2ee73306e52b20a752298ea6" dependencies = [ "hostname", "libc", "os_info", "rustc_version", - "sentry-core", + "sentry-core 0.36.0", "uname", ] @@ -3992,40 +3992,53 @@ checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" dependencies = [ "once_cell", "rand 0.8.5", - "sentry-types", + "sentry-types 0.35.0", + "serde", + "serde_json", +] + +[[package]] +name = "sentry-core" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "653942e6141f16651273159f4b8b1eaeedf37a7554c00cd798953e64b8a9bf72" +dependencies = [ + "once_cell", + "rand 0.8.5", + "sentry-types 0.36.0", "serde", "serde_json", ] [[package]] name = "sentry-debug-images" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8982a69133d3f5e4efdbfa0776937fca43c3a2e275a8fe184f50b1b0aa92e07c" +checksum = "2a60bc2154e6df59beed0ac13d58f8dfaf5ad20a88548a53e29e4d92e8e835c2" dependencies = [ "findshlibs", "once_cell", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-log" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efcbfbb74628eaef033c1154d4bb082437c7592ce2282c7c5ccb455c4c97a06d" +checksum = "1c96d796cba1b3a0793e7f53edc420c61f9419fba8fb34ad5519f5c7d01af6b2" dependencies = [ "log", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-panic" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de296dae6f01e931b65071ee5fe28d66a27909857f744018f107ed15fd1f6b25" +checksum = 
"105e3a956c8aa9dab1e4087b1657b03271bfc49d838c6ae9bfc7c58c802fd0ef" dependencies = [ "sentry-backtrace", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] @@ -4033,10 +4046,21 @@ name = "sentry-tower" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcdaf9b1939589476bd57751d12a9653bbfe356610fc476d03d7683189183ab7" +dependencies = [ + "sentry-core 0.35.0", + "tower-layer", + "tower-service", +] + +[[package]] +name = "sentry-tower" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "082f781dfc504d984e16d99f8dbf94d6ee4762dd0fc28de25713d0f900a8164d" dependencies = [ "http", "pin-project", - "sentry-core", + "sentry-core 0.36.0", "tower-layer", "tower-service", "url", @@ -4047,9 +4071,20 @@ name = "sentry-tracing" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" +dependencies = [ + "sentry-core 0.35.0", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sentry-tracing" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e75c831b4d8b34a5aec1f65f67c5d46a26c7c5d3c7abd8b5ef430796900cf8" dependencies = [ "sentry-backtrace", - "sentry-core", + "sentry-core 0.36.0", "tracing-core", "tracing-subscriber", ] @@ -4071,6 +4106,23 @@ dependencies = [ "uuid", ] +[[package]] +name = "sentry-types" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d4203359e60724aa05cf2385aaf5d4f147e837185d7dd2b9ccf1ee77f4420c8" +dependencies = [ + "debugid", + "hex", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror 1.0.69", + "time", + "url", + "uuid", +] + [[package]] name = "serde" version = "1.0.219" diff --git a/Cargo.toml b/Cargo.toml index e6751acf..ba706656 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,11 +27,11 @@ version = "0.5.0" name = "conduwuit" 
[workspace.dependencies.arrayvec] -version = "0.7.4" +version = "0.7.6" features = ["serde"] [workspace.dependencies.smallvec] -version = "1.13.2" +version = "1.14.0" features = [ "const_generics", "const_new", @@ -45,7 +45,7 @@ version = "0.3" features = ["ffi", "std", "union"] [workspace.dependencies.const-str] -version = "0.5.7" +version = "0.6.2" [workspace.dependencies.ctor] version = "0.2.9" @@ -81,13 +81,13 @@ version = "0.8.5" # Used for the http request / response body type for Ruma endpoints used with reqwest [workspace.dependencies.bytes] -version = "1.9.0" +version = "1.10.1" [workspace.dependencies.http-body-util] -version = "0.1.2" +version = "0.1.3" [workspace.dependencies.http] -version = "1.2.0" +version = "1.3.1" [workspace.dependencies.regex] version = "1.11.1" @@ -111,7 +111,7 @@ default-features = false features = ["typed-header", "tracing"] [workspace.dependencies.axum-server] -version = "0.7.1" +version = "0.7.2" default-features = false # to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest @@ -122,7 +122,7 @@ version = "0.7" version = "0.6.1" [workspace.dependencies.tower] -version = "0.5.1" +version = "0.5.2" default-features = false features = ["util"] @@ -156,12 +156,12 @@ features = [ ] [workspace.dependencies.serde] -version = "1.0.216" +version = "1.0.219" default-features = false features = ["rc"] [workspace.dependencies.serde_json] -version = "1.0.133" +version = "1.0.140" default-features = false features = ["raw_value"] @@ -237,7 +237,7 @@ features = [ ] [workspace.dependencies.futures] -version = "0.3.30" +version = "0.3.31" default-features = false features = ["std", "async-await"] @@ -275,7 +275,7 @@ features = ["alloc", "std"] default-features = false [workspace.dependencies.hyper] -version = "1.5.1" +version = "1.6.0" default-features = false features = [ "server", @@ -285,7 +285,7 @@ features = [ [workspace.dependencies.hyper-util] # hyper-util >=0.1.9 seems to have DNS issues 
-version = "=0.1.8" +version = "0.1.10" default-features = false features = [ "server-auto", @@ -295,7 +295,7 @@ features = [ # to support multiple variations of setting a config option [workspace.dependencies.either] -version = "1.13.0" +version = "1.15.0" default-features = false features = ["serde"] @@ -311,7 +311,7 @@ default-features = false # Used for conduwuit::Error type [workspace.dependencies.thiserror] -version = "2.0.7" +version = "2.0.12" default-features = false # Used when hashing the state @@ -321,7 +321,7 @@ default-features = false # Used to make working with iterators easier, was already a transitive depdendency [workspace.dependencies.itertools] -version = "0.13.0" +version = "0.14.0" # to parse user-friendly time durations in admin commands #TODO: overlaps chrono? @@ -337,7 +337,7 @@ version = "0.4.0" version = "2.3.1" [workspace.dependencies.async-trait] -version = "0.1.83" +version = "0.1.88" [workspace.dependencies.lru-cache] version = "0.1.2" @@ -423,7 +423,7 @@ features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting [workspace.dependencies.sentry] -version = "0.35.0" +version = "0.36.0" default-features = false features = [ "backtrace", @@ -499,18 +499,18 @@ default-features = false version = "0.1" [workspace.dependencies.syn] -version = "2.0.90" +version = "2.0" default-features = false features = ["full", "extra-traits"] [workspace.dependencies.quote] -version = "1.0.37" +version = "1.0" [workspace.dependencies.proc-macro2] -version = "1.0.89" +version = "1.0" [workspace.dependencies.bytesize] -version = "2.0.1" +version = "2.0" [workspace.dependencies.core_affinity] version = "0.8.1" @@ -522,11 +522,11 @@ version = "0.2" version = "0.2" [workspace.dependencies.minicbor] -version = "0.25.1" +version = "0.26.3" features = ["std"] [workspace.dependencies.minicbor-serde] -version = "0.3.2" +version = "0.4.1" features = ["std"] [workspace.dependencies.maplit] diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 
b40dd3ad..4848e742 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -59,6 +59,7 @@ conduwuit_mods = [ argon2.workspace = true arrayvec.workspace = true axum.workspace = true +axum-extra.workspace = true bytes.workspace = true bytesize.workspace = true cargo_toml.workspace = true diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 02ab6fa3..e46edf09 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -81,6 +81,8 @@ pub enum Error { #[error("Tracing reload error: {0}")] TracingReload(#[from] tracing_subscriber::reload::Error), #[error(transparent)] + TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection), + #[error(transparent)] Yaml(#[from] serde_yaml::Error), // ruma/conduwuit From 0f81c1e1ccdcb0c5c6d5a27e82f16eb37b1e61c8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Apr 2025 02:14:51 +0000 Subject: [PATCH 0792/1248] revert hyper-util upgrade due to continued DNS issues Signed-off-by: Jason Volk --- Cargo.lock | 5 +++-- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb19dfdb..77d03506 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1949,9 +1949,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -1962,6 +1962,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", + "tower 0.4.13", "tower-service", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index ba706656..62bbaf16 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -285,7 +285,7 @@ features = [ [workspace.dependencies.hyper-util] # hyper-util >=0.1.9 seems to have DNS issues -version = "0.1.10" +version = "=0.1.8" default-features = false features = [ "server-auto", From 
1b71b99c514f69bdd2fbcdb7996dcc00860d2057 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 10:49:38 -0400 Subject: [PATCH 0793/1248] fix weird issue with acl c2s check Signed-off-by: June Clementine Strawberry --- src/api/client/state.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 9563c26d..23583356 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -229,6 +229,9 @@ async fn allowed_to_send_state_event( if acl_content.deny.contains(&String::from("*")) && !acl_content.is_allowed(services.globals.server_name()) + && !acl_content + .allow + .contains(&services.globals.server_name().to_string()) { return Err!(Request(BadJson(debug_warn!( ?room_id, @@ -240,6 +243,9 @@ async fn allowed_to_send_state_event( if !acl_content.allow.contains(&String::from("*")) && !acl_content.is_allowed(services.globals.server_name()) + && !acl_content + .allow + .contains(&services.globals.server_name().to_string()) { return Err!(Request(BadJson(debug_warn!( ?room_id, From ea246d91d975a89a947c35260a4d50684fd2913b Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:38:47 -0400 Subject: [PATCH 0794/1248] remove pointless and buggy *_visibility in-memory caches Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 8 --- src/core/config/mod.rs | 12 ---- src/service/rooms/state_accessor/mod.rs | 68 ++----------------- .../rooms/state_accessor/server_can.rs | 22 +----- src/service/rooms/state_accessor/user_can.rs | 22 +----- 5 files changed, 10 insertions(+), 122 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 15e6dd37..75ecddab 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -195,14 +195,6 @@ # #servernameevent_data_cache_capacity = varies by system -# This item is undocumented. Please contribute documentation for it. 
-# -#server_visibility_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#user_visibility_cache_capacity = varies by system - # This item is undocumented. Please contribute documentation for it. # #stateinfo_cache_capacity = varies by system diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 52df19ac..7be140a5 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -252,14 +252,6 @@ pub struct Config { #[serde(default = "default_servernameevent_data_cache_capacity")] pub servernameevent_data_cache_capacity: u32, - /// default: varies by system - #[serde(default = "default_server_visibility_cache_capacity")] - pub server_visibility_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_user_visibility_cache_capacity")] - pub user_visibility_cache_capacity: u32, - /// default: varies by system #[serde(default = "default_stateinfo_cache_capacity")] pub stateinfo_cache_capacity: u32, @@ -2035,10 +2027,6 @@ fn default_servernameevent_data_cache_capacity() -> u32 { parallelism_scaled_u32(100_000).saturating_add(500_000) } -fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(500) } - -fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } - fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) } fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 652fdbd7..b57465ce 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -3,21 +3,13 @@ mod server_can; mod state; mod user_can; -use std::{ - fmt::Write, - sync::{Arc, Mutex as StdMutex, Mutex}, -}; +use std::sync::Arc; use async_trait::async_trait; -use conduwuit::{ - Result, err, utils, - utils::math::{Expected, usize_from_f64}, -}; +use 
conduwuit::{Result, err}; use database::Map; -use lru_cache::LruCache; use ruma::{ - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, UserId, + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, RoomId, UserId, events::{ StateEventType, room::{ @@ -37,11 +29,9 @@ use ruma::{ space::SpaceRoomJoinRule, }; -use crate::{Dep, rooms, rooms::short::ShortStateHash}; +use crate::{Dep, rooms}; pub struct Service { - pub server_visibility_cache: Mutex>, - pub user_visibility_cache: Mutex>, services: Services, db: Data, } @@ -61,19 +51,7 @@ struct Data { #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { - let config = &args.server.config; - let server_visibility_cache_capacity = - f64::from(config.server_visibility_cache_capacity) * config.cache_capacity_modifier; - let user_visibility_cache_capacity = - f64::from(config.user_visibility_cache_capacity) * config.cache_capacity_modifier; - Ok(Arc::new(Self { - server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( - server_visibility_cache_capacity, - )?)), - user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( - user_visibility_cache_capacity, - )?)), services: Services { state_cache: args.depend::("rooms::state_cache"), timeline: args.depend::("rooms::timeline"), @@ -88,44 +66,6 @@ impl crate::Service for Service { })) } - async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - use utils::bytes::pretty; - - let (svc_count, svc_bytes) = self.server_visibility_cache.lock()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, _)| { - ( - count.expected_add(1), - bytes - .expected_add(key.0.capacity()) - .expected_add(size_of_val(&key.1)), - ) - }, - ); - - let (uvc_count, uvc_bytes) = self.user_visibility_cache.lock()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, _)| { - ( - count.expected_add(1), - bytes - .expected_add(key.0.capacity()) - 
.expected_add(size_of_val(&key.1)), - ) - }, - ); - - writeln!(out, "server_visibility_cache: {svc_count} ({})", pretty(svc_bytes))?; - writeln!(out, "user_visibility_cache: {uvc_count} ({})", pretty(uvc_bytes))?; - - Ok(()) - } - - async fn clear_cache(&self) { - self.server_visibility_cache.lock().expect("locked").clear(); - self.user_visibility_cache.lock().expect("locked").clear(); - } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 2e8f3325..7d1b197f 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{error, implement, utils::stream::ReadyExt}; +use conduwuit::{debug_info, implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ EventId, RoomId, ServerName, @@ -22,15 +22,6 @@ pub async fn server_can_see_event( return true; }; - if let Some(visibility) = self - .server_visibility_cache - .lock() - .expect("locked") - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return *visibility; - } - let history_visibility = self .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") .await @@ -44,7 +35,7 @@ pub async fn server_can_see_event( .room_members(room_id) .ready_filter(|member| member.server_name() == origin); - let visibility = match history_visibility { + match history_visibility { | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny @@ -62,12 +53,5 @@ pub async fn server_can_see_event( error!("Unknown history visibility {history_visibility}"); false }, - }; - - self.server_visibility_cache - .lock() - .expect("locked") - .insert((origin.to_owned(), shortstatehash), visibility); - - visibility + } } diff --git 
a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index c30e1da8..32a766a8 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{Err, Error, Result, error, implement, pdu::PduBuilder}; +use conduwuit::{Err, Error, Result, debug_info, implement, pdu::PduBuilder}; use ruma::{ EventId, RoomId, UserId, events::{ @@ -98,15 +98,6 @@ pub async fn user_can_see_event( return true; }; - if let Some(visibility) = self - .user_visibility_cache - .lock() - .expect("locked") - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return *visibility; - } - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; let history_visibility = self @@ -116,7 +107,7 @@ pub async fn user_can_see_event( c.history_visibility }); - let visibility = match history_visibility { + match history_visibility { | HistoryVisibility::WorldReadable => true, | HistoryVisibility::Shared => currently_member, | HistoryVisibility::Invited => { @@ -131,14 +122,7 @@ pub async fn user_can_see_event( error!("Unknown history visibility {history_visibility}"); false }, - }; - - self.user_visibility_cache - .lock() - .expect("locked") - .insert((user_id.to_owned(), shortstatehash), visibility); - - visibility + } } /// Whether a user is allowed to see an event, based on From 74012c5289831c16976fc283a4233bfb6b49ce8b Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:44:44 -0400 Subject: [PATCH 0795/1248] significantly improve get_missing_events fed code Signed-off-by: June Clementine Strawberry --- src/api/server/backfill.rs | 12 ++- src/api/server/get_missing_events.rs | 111 ++++++++++++++------------- 2 files changed, 65 insertions(+), 58 deletions(-) diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 5c875807..3cfbcedc 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -6,11 
+6,17 @@ use conduwuit::{ utils::{IterStream, ReadyExt, stream::TryTools}, }; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill, uint}; +use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill}; use super::AccessCheck; use crate::Ruma; +/// arbitrary number but synapse's is 100 and we can handle lots of these +/// anyways +const LIMIT_MAX: usize = 150; +/// no spec defined number but we can handle a lot of these +const LIMIT_DEFAULT: usize = 50; + /// # `GET /_matrix/federation/v1/backfill/` /// /// Retrieves events from before the sender joined the room, if the room's @@ -30,9 +36,9 @@ pub(crate) async fn get_backfill_route( let limit = body .limit - .min(uint!(100)) .try_into() - .expect("UInt could not be converted to usize"); + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); let from = body .v diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index 3d0bbb07..d72918fa 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,13 +1,19 @@ use axum::extract::State; -use conduwuit::{Error, Result}; -use ruma::{ - CanonicalJsonValue, EventId, RoomId, - api::{client::error::ErrorKind, federation::event::get_missing_events}, +use conduwuit::{ + Result, debug, debug_info, debug_warn, + utils::{self}, + warn, }; +use ruma::api::federation::event::get_missing_events; use super::AccessCheck; use crate::Ruma; +/// arbitrary number but synapse's is 20 and we can handle lots of these anyways +const LIMIT_MAX: usize = 50; +/// spec says default is 10 +const LIMIT_DEFAULT: usize = 10; + /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. 
@@ -24,7 +30,11 @@ pub(crate) async fn get_missing_events_route( .check() .await?; - let limit = body.limit.try_into()?; + let limit = body + .limit + .try_into() + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); let mut queued_events = body.latest_events.clone(); // the vec will never have more entries the limit @@ -32,60 +42,51 @@ pub(crate) async fn get_missing_events_route( let mut i: usize = 0; while i < queued_events.len() && events.len() < limit { - if let Ok(pdu) = services + let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else { + debug_info!(?body.origin, "Event {} does not exist locally, skipping", &queued_events[i]); + i = i.saturating_add(1); + continue; + }; + + if pdu.room_id != body.room_id { + warn!(?body.origin, + "Got an event for the wrong room in database. Found {:?} in {:?}, server requested events in {:?}. Skipping.", + pdu.event_id, pdu.room_id, body.room_id + ); + i = i.saturating_add(1); + continue; + } + + if body.earliest_events.contains(&queued_events[i]) { + i = i.saturating_add(1); + continue; + } + + if !services .rooms - .timeline - .get_pdu_json(&queued_events[i]) + .state_accessor + .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) .await { - let room_id_str = pdu - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database."))?; - - let event_room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; - - if event_room_id != body.room_id { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event from wrong room.")); - } - - if body.earliest_events.contains(&queued_events[i]) { - i = i.saturating_add(1); - continue; - } - - if !services - .rooms - .state_accessor - .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) - .await - { - i = i.saturating_add(1); - continue; - } - - let prev_events = pdu - .get("prev_events") - 
.and_then(CanonicalJsonValue::as_array) - .unwrap_or_default(); - - queued_events.extend( - prev_events - .iter() - .map(<&EventId>::try_from) - .filter_map(Result::ok) - .map(ToOwned::to_owned), - ); - - events.push( - services - .sending - .convert_to_outgoing_federation_event(pdu) - .await, - ); + debug!(?body.origin, "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id); + i = i.saturating_add(1); + continue; } - i = i.saturating_add(1); + + let Ok(pdu_json) = utils::to_canonical_object(&pdu) else { + debug_warn!(?body.origin, "Failed to convert PDU in database to canonical JSON: {pdu:?}"); + i = i.saturating_add(1); + continue; + }; + + queued_events.extend(pdu.prev_events.iter().map(ToOwned::to_owned)); + + events.push( + services + .sending + .convert_to_outgoing_federation_event(pdu_json) + .await, + ); } Ok(get_missing_events::v1::Response { events }) From 1036f8dfa8fabb9642b9638b54381e00016eef9c Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:46:01 -0400 Subject: [PATCH 0796/1248] default shared history vis on unknown visibilities, drop needless error log Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_accessor/server_can.rs | 4 ++-- src/service/rooms/state_accessor/user_can.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 7d1b197f..c946fbfd 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -50,8 +50,8 @@ pub async fn server_can_see_event( .await }, | _ => { - error!("Unknown history visibility {history_visibility}"); - false + debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); + true }, } } diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 32a766a8..aa54407b 100644 --- 
a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -119,8 +119,8 @@ pub async fn user_can_see_event( self.user_was_joined(shortstatehash, user_id).await }, | _ => { - error!("Unknown history visibility {history_visibility}"); - false + debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); + currently_member }, } } From 0e0b8cc4032732378966f07b38b97af89788e399 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:51:17 -0400 Subject: [PATCH 0797/1248] fixup+update msc3266, add fed support, parallelise IO Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +- Cargo.toml | 2 +- src/api/client/room/mod.rs | 9 +- src/api/client/room/summary.rs | 308 ++++++++++++++++++++++++ src/api/client/room/upgrade.rs | 2 +- src/api/client/unstable.rs | 138 +---------- src/service/rooms/spaces/mod.rs | 54 +++-- src/service/rooms/state_accessor/mod.rs | 28 ++- 8 files changed, 389 insertions(+), 174 deletions(-) create mode 100644 src/api/client/room/summary.rs diff --git a/Cargo.lock b/Cargo.lock index 77d03506..a53258bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3531,7 +3531,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "assign", "js_int", @@ -3551,7 +3551,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", 
"ruma-common", @@ -3563,7 +3563,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "assign", @@ -3586,7 +3586,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "base64 0.22.1", @@ -3618,7 +3618,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3643,7 +3643,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "bytes", "headers", @@ -3665,7 +3665,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3699,7 +3699,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "ruma-common", @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 62bbaf16..940ece86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version 
= "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "d197318a2507d38ffe6ee524d0d52728ca72538a" +rev = "ea1278657125e9414caada074e8c172bc252fb1c" features = [ "compat", "rand", diff --git a/src/api/client/room/mod.rs b/src/api/client/room/mod.rs index 16fcadab..86d68f7e 100644 --- a/src/api/client/room/mod.rs +++ b/src/api/client/room/mod.rs @@ -2,9 +2,14 @@ mod aliases; mod create; mod event; mod initial_sync; +mod summary; mod upgrade; pub(crate) use self::{ - aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route, - initial_sync::room_initial_sync_route, upgrade::upgrade_room_route, + aliases::get_room_aliases_route, + create::create_room_route, + event::get_room_event_route, + initial_sync::room_initial_sync_route, + summary::{get_room_summary, get_room_summary_legacy}, + upgrade::upgrade_room_route, }; diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs new file mode 100644 index 00000000..34820e83 --- /dev/null +++ b/src/api/client/room/summary.rs @@ -0,0 +1,308 @@ +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduwuit::{ + Err, Result, debug_warn, + utils::{IterStream, future::TryExtExt}, +}; +use futures::{FutureExt, StreamExt, future::join3, stream::FuturesUnordered}; +use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, UserId, + api::{ + client::room::get_summary, + federation::space::{SpaceHierarchyParentSummary, get_hierarchy}, + }, + events::room::member::MembershipState, + space::SpaceRoomJoinRule::{self, *}, +}; +use service::Services; + +use crate::{Ruma, RumaResponse}; + +/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` +/// +/// Returns a short description of the state of a room. +/// +/// This is the "wrong" endpoint that some implementations/clients may use +/// according to the MSC. Request and response bodies are the same as +/// `get_room_summary`. 
+/// +/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) +pub(crate) async fn get_room_summary_legacy( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result> { + get_room_summary(State(services), InsecureClientIp(client), body) + .boxed() + .await + .map(RumaResponse) +} + +/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` +/// +/// Returns a short description of the state of a room. +/// +/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) +#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] +pub(crate) async fn get_room_summary( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let (room_id, servers) = services + .rooms + .alias + .resolve_with_servers(&body.room_id_or_alias, Some(body.via.clone())) + .await?; + + if services.rooms.metadata.is_banned(&room_id).await { + return Err!(Request(Forbidden("This room is banned on this homeserver."))); + } + + room_summary_response(&services, &room_id, &servers, body.sender_user.as_deref()) + .boxed() + .await +} + +async fn room_summary_response( + services: &Services, + room_id: &RoomId, + servers: &[OwnedServerName], + sender_user: Option<&UserId>, +) -> Result { + if services.rooms.metadata.exists(room_id).await { + return local_room_summary_response(services, room_id, sender_user) + .boxed() + .await; + } + + let room = + remote_room_summary_hierarchy_response(services, room_id, servers, sender_user).await?; + + Ok(get_summary::msc3266::Response { + room_id: room_id.to_owned(), + canonical_alias: room.canonical_alias, + avatar_url: room.avatar_url, + guest_can_join: room.guest_can_join, + name: room.name, + num_joined_members: room.num_joined_members, + topic: room.topic, + world_readable: room.world_readable, + join_rule: room.join_rule, + room_type: room.room_type, + 
room_version: room.room_version, + membership: if sender_user.is_none() { + None + } else { + Some(MembershipState::Leave) + }, + encryption: room.encryption, + allowed_room_ids: room.allowed_room_ids, + }) +} + +async fn local_room_summary_response( + services: &Services, + room_id: &RoomId, + sender_user: Option<&UserId>, +) -> Result { + let join_rule = services.rooms.state_accessor.get_space_join_rule(room_id); + let world_readable = services.rooms.state_accessor.is_world_readable(room_id); + let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); + + let ((join_rule, allowed_room_ids), world_readable, guest_can_join) = + join3(join_rule, world_readable, guest_can_join).await; + + user_can_see_summary( + services, + room_id, + &join_rule, + guest_can_join, + world_readable, + &allowed_room_ids, + sender_user, + ) + .await?; + + let canonical_alias = services + .rooms + .state_accessor + .get_canonical_alias(room_id) + .ok(); + let name = services.rooms.state_accessor.get_name(room_id).ok(); + let topic = services.rooms.state_accessor.get_room_topic(room_id).ok(); + let room_type = services.rooms.state_accessor.get_room_type(room_id).ok(); + let avatar_url = services + .rooms + .state_accessor + .get_avatar(room_id) + .map(|res| res.into_option().unwrap_or_default().url); + let room_version = services.rooms.state.get_room_version(room_id).ok(); + let encryption = services + .rooms + .state_accessor + .get_room_encryption(room_id) + .ok(); + let num_joined_members = services + .rooms + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let ( + canonical_alias, + name, + num_joined_members, + topic, + avatar_url, + room_type, + room_version, + encryption, + ) = futures::join!( + canonical_alias, + name, + num_joined_members, + topic, + avatar_url, + room_type, + room_version, + encryption, + ); + + Ok(get_summary::msc3266::Response { + room_id: room_id.to_owned(), + canonical_alias, + avatar_url, + guest_can_join, + name, + 
num_joined_members: num_joined_members.try_into().unwrap_or_default(), + topic, + world_readable, + join_rule, + room_type, + room_version, + membership: if let Some(sender_user) = sender_user { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + .map_or(Some(MembershipState::Leave), |content| Some(content.membership)) + } else { + None + }, + encryption, + allowed_room_ids, + }) +} + +/// used by MSC3266 to fetch a room's info if we do not know about it +async fn remote_room_summary_hierarchy_response( + services: &Services, + room_id: &RoomId, + servers: &[OwnedServerName], + sender_user: Option<&UserId>, +) -> Result { + if !services.config.allow_federation { + return Err!(Request(Forbidden("Federation is disabled."))); + } + + if services.rooms.metadata.is_disabled(room_id).await { + return Err!(Request(Forbidden( + "Federaton of room {room_id} is currently disabled on this server." + ))); + } + + let request = get_hierarchy::v1::Request::new(room_id.to_owned()); + + let mut requests: FuturesUnordered<_> = servers + .iter() + .map(|server| { + services + .sending + .send_federation_request(server, request.clone()) + }) + .collect(); + + while let Some(Ok(response)) = requests.next().await { + let room = response.room.clone(); + if room.room_id != room_id { + debug_warn!( + "Room ID {} returned does not belong to the requested room ID {}", + room.room_id, + room_id + ); + continue; + } + + return user_can_see_summary( + services, + room_id, + &room.join_rule, + room.guest_can_join, + room.world_readable, + &room.allowed_room_ids, + sender_user, + ) + .await + .map(|()| room); + } + + Err!(Request(NotFound( + "Room is unknown to this server and was unable to fetch over federation with the \ + provided servers available" + ))) +} + +async fn user_can_see_summary( + services: &Services, + room_id: &RoomId, + join_rule: &SpaceRoomJoinRule, + guest_can_join: bool, + world_readable: bool, + allowed_room_ids: &[OwnedRoomId], + 
sender_user: Option<&UserId>, +) -> Result { + match sender_user { + | Some(sender_user) => { + let user_can_see_state_events = services + .rooms + .state_accessor + .user_can_see_state_events(sender_user, room_id); + let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); + let user_in_allowed_restricted_room = allowed_room_ids + .iter() + .stream() + .any(|room| services.rooms.state_cache.is_joined(sender_user, room)); + + let (user_can_see_state_events, is_guest, user_in_allowed_restricted_room) = + join3(user_can_see_state_events, is_guest, user_in_allowed_restricted_room) + .boxed() + .await; + + if user_can_see_state_events + || (is_guest && guest_can_join) + || matches!(&join_rule, &Public | &Knock | &KnockRestricted) + || user_in_allowed_restricted_room + { + return Ok(()); + } + + Err!(Request(Forbidden( + "Room is not world readable, not publicly accessible/joinable, restricted room \ + conditions not met, and guest access is forbidden. Not allowed to see details \ + of this room." 
+ ))) + }, + | None => { + if matches!(join_rule, Public | Knock | KnockRestricted) || world_readable { + return Ok(()); + } + + Err!(Request(Forbidden( + "Room is not world readable or publicly accessible/joinable, authentication is \ + required" + ))) + }, + } +} diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 4ac341a9..3cfb3c28 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -103,7 +103,7 @@ pub(crate) async fn upgrade_room_route( // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( body.room_id.clone(), - (*tombstone_event_id).to_owned(), + Some(tombstone_event_id), )); // Send a m.room.create event containing a predecessor field and the applicable diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 45ad103e..e21eaf21 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::Err; +use conduwuit::{Err, Error, Result}; use futures::StreamExt; use ruma::{ OwnedRoomId, @@ -14,16 +14,14 @@ use ruma::{ delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key, set_profile_key, set_timezone_key, }, - room::get_summary, }, federation, }, - events::room::member::MembershipState, presence::PresenceState, }; use super::{update_avatar_url, update_displayname}; -use crate::{Error, Result, Ruma, RumaResponse}; +use crate::Ruma; /// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms` /// @@ -38,13 +36,10 @@ pub(crate) async fn get_mutual_rooms_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); - if sender_user == &body.user_id { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "You cannot 
request rooms in common with yourself.", - )); + if sender_user == body.user_id { + return Err!(Request(Unknown("You cannot request rooms in common with yourself."))); } if !services.users.exists(&body.user_id).await { @@ -65,129 +60,6 @@ pub(crate) async fn get_mutual_rooms_route( }) } -/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` -/// -/// Returns a short description of the state of a room. -/// -/// This is the "wrong" endpoint that some implementations/clients may use -/// according to the MSC. Request and response bodies are the same as -/// `get_room_summary`. -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -pub(crate) async fn get_room_summary_legacy( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_room_summary(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` -/// -/// Returns a short description of the state of a room. 
-/// -/// TODO: support fetching remote room info if we don't know the room -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] -pub(crate) async fn get_room_summary( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref(); - - let room_id = services.rooms.alias.resolve(&body.room_id_or_alias).await?; - - if !services.rooms.metadata.exists(&room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); - } - - if sender_user.is_none() - && !services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Room is not world readable, authentication is required", - )); - } - - Ok(get_summary::msc3266::Response { - room_id: room_id.clone(), - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .await - .ok(), - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id) - .await - .into_option() - .unwrap_or_default() - .url, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, - name: services.rooms.state_accessor.get_name(&room_id).await.ok(), - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id) - .await - .unwrap_or(0) - .try_into()?, - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - .ok(), - world_readable: services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await, - join_rule: services - .rooms - .state_accessor - .get_join_rule(&room_id) - .await - .unwrap_or_default() - .0, - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id) - .await - .ok(), - room_version: services.rooms.state.get_room_version(&room_id).await.ok(), - membership: if let 
Some(sender_user) = sender_user { - services - .rooms - .state_accessor - .get_member(&room_id, sender_user) - .await - .map_or_else(|_| MembershipState::Leave, |content| content.membership) - .into() - } else { - None - }, - encryption: services - .rooms - .state_accessor - .get_room_encryption(&room_id) - .await - .ok(), - }) -} - /// # `DELETE /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz` /// /// Deletes the `tz` (timezone) of a user, as per MSC4133 and MSC4175. diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index da52e095..f51a5e3a 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -27,7 +27,6 @@ use ruma::{ }, events::{ StateEventType, - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, }, serde::Raw, @@ -306,25 +305,18 @@ async fn get_room_summary( children_state: Vec>, identifier: &Identifier<'_>, ) -> Result { - let join_rule = self + let (join_rule, allowed_room_ids) = self .services .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); + .get_space_join_rule(room_id) + .await; - let allowed_room_ids = self - .services - .state_accessor - .allowed_room_ids(join_rule.clone()); - - let join_rule = join_rule.clone().into(); let is_accessible_child = self .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) .await; if !is_accessible_child { - return Err!(Request(Forbidden("User is not allowed to see the room",))); + return Err!(Request(Forbidden("User is not allowed to see the room"))); } let name = self.services.state_accessor.get_name(room_id).ok(); @@ -355,6 +347,14 @@ async fn get_room_summary( .get_avatar(room_id) .map(|res| res.into_option().unwrap_or_default().url); + let room_version = self.services.state.get_room_version(room_id).ok(); + + let 
encryption = self + .services + .state_accessor + .get_room_encryption(room_id) + .ok(); + let ( canonical_alias, name, @@ -364,6 +364,8 @@ async fn get_room_summary( guest_can_join, avatar_url, room_type, + room_version, + encryption, ) = futures::join!( canonical_alias, name, @@ -372,7 +374,9 @@ async fn get_room_summary( world_readable, guest_can_join, avatar_url, - room_type + room_type, + room_version, + encryption, ); Ok(SpaceHierarchyParentSummary { @@ -387,9 +391,9 @@ async fn get_room_summary( allowed_room_ids, join_rule, room_id: room_id.to_owned(), - num_joined_members: num_joined_members - .try_into() - .expect("user count should not be that big"), + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + encryption, + room_version, }) } @@ -487,6 +491,8 @@ async fn cache_insert( join_rule, room_type, allowed_room_ids, + encryption, + room_version, } = child; let summary = SpaceHierarchyParentSummary { @@ -506,6 +512,8 @@ async fn cache_insert( .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await, + encryption, + room_version, }; cache.insert(current_room.to_owned(), Some(CachedSpaceHierarchySummary { summary })); @@ -527,7 +535,9 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, - .. + allowed_room_ids, + encryption, + room_version, } = value.summary; Self { @@ -542,6 +552,9 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, + encryption, + room_version, + allowed_room_ids, } } } @@ -562,7 +575,9 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR join_rule, room_type, children_state, - .. 
+ allowed_room_ids, + encryption, + room_version, } = summary; SpaceHierarchyRoomsChunk { @@ -577,5 +592,8 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR join_rule, room_type, children_state, + encryption, + room_version, + allowed_room_ids, } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index b57465ce..7fff5935 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -129,22 +129,34 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Returns the join rule (`SpaceRoomJoinRule`) for a given room - pub async fn get_join_rule( + /// Returns the space join rule (`SpaceRoomJoinRule`) for a given room and + /// any allowed room IDs if available. Will default to Invite and empty vec + /// if doesnt exist or invalid, + pub async fn get_space_join_rule( &self, room_id: &RoomId, - ) -> Result<(SpaceRoomJoinRule, Vec)> { + ) -> (SpaceRoomJoinRule, Vec) { self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map(|c: RoomJoinRulesEventContent| { - (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) - }) - .or_else(|_| Ok((SpaceRoomJoinRule::Invite, vec![]))) + .map_or_else( + |_| (SpaceRoomJoinRule::Invite, vec![]), + |c: RoomJoinRulesEventContent| { + (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) + }, + ) + } + + /// Returns the join rules for a given room (`JoinRule` type). 
Will default + /// to Invite if doesnt exist or invalid + pub async fn get_join_rules(&self, room_id: &RoomId) -> JoinRule { + self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") + .await + .map_or_else(|_| JoinRule::Invite, |c: RoomJoinRulesEventContent| (c.join_rule)) } /// Returns an empty vec if not a restricted room pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { - let mut room_ids = Vec::with_capacity(1); + let mut room_ids = Vec::with_capacity(1); // restricted rooms generally only have 1 allowed room ID if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { for rule in r.allow { if let AllowRule::RoomMembership(RoomMembership { room_id: membership }) = rule { From 24be5794774b7585b6ec1e3dbaa901967d241972 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 12:20:10 -0400 Subject: [PATCH 0798/1248] add appservice MSC4190 support Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++--- Cargo.toml | 2 +- src/api/client/account.rs | 12 ++-- src/api/client/appservice.rs | 8 ++- src/api/client/device.rs | 112 +++++++++++++++++++++++------- src/service/sending/appservice.rs | 18 +++-- src/service/users/mod.rs | 1 - 7 files changed, 125 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a53258bc..2bcfcee4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3531,7 +3531,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "assign", "js_int", @@ -3551,7 +3551,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3563,7 +3563,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "assign", @@ -3586,7 +3586,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "base64 0.22.1", @@ -3618,7 +3618,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3643,7 +3643,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "bytes", "headers", @@ -3665,7 
+3665,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3699,7 +3699,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 940ece86..0abaa2f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "ea1278657125e9414caada074e8c172bc252fb1c" +rev = "0701341a2fd5a6ea74beada18d5974cc401a4fc1" features = [ "compat", "rand", diff --git a/src/api/client/account.rs b/src/api/client/account.rs index efa8b142..e5894d47 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -318,14 +318,14 @@ pub(crate) async fn register_route( // Success! }, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services.uiaa.create( &UserId::parse_with_server_name("", services.globals.server_name()) .unwrap(), "".into(), &uiaainfo, - &json, + json, ); return Err(Error::Uiaa(uiaainfo)); }, @@ -373,8 +373,12 @@ pub(crate) async fn register_route( ) .await?; - // Inhibit login does not work for guests - if !is_guest && body.inhibit_login { + if (!is_guest && body.inhibit_login) + || body + .appservice_info + .as_ref() + .is_some_and(|appservice| appservice.registration.device_management) + { return Ok(register::v3::Response { access_token: None, user_id, diff --git a/src/api/client/appservice.rs b/src/api/client/appservice.rs index 84955309..eb6b3312 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -22,7 +22,13 @@ pub(crate) async fn appservice_ping( ))); } - if appservice_info.registration.url.is_none() { + if appservice_info.registration.url.is_none() + || appservice_info + .registration + .url + .as_ref() + .is_some_and(|url| url.is_empty() || url == "null") + { return Err!(Request(UrlNotSet( "Appservice does not have a URL 
set, there is nothing to ping." ))); diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 6a845aed..7603c866 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,9 +1,9 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, err}; +use conduwuit::{Err, debug, err}; use futures::StreamExt; use ruma::{ - MilliSecondsSinceUnixEpoch, + MilliSecondsSinceUnixEpoch, OwnedDeviceId, api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, @@ -12,7 +12,7 @@ use ruma::{ }; use super::SESSION_ID_LENGTH; -use crate::{Error, Result, Ruma, utils}; +use crate::{Error, Result, Ruma, client::DEVICE_ID_LENGTH, utils}; /// # `GET /_matrix/client/r0/devices` /// @@ -59,26 +59,58 @@ pub(crate) async fn update_device_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); + let appservice = body.appservice_info.as_ref(); - let mut device = services + match services .users .get_device_metadata(sender_user, &body.device_id) .await - .map_err(|_| err!(Request(NotFound("Device not found."))))?; + { + | Ok(mut device) => { + device.display_name.clone_from(&body.display_name); + device.last_seen_ip.clone_from(&Some(client.to_string())); + device + .last_seen_ts + .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); - device.display_name.clone_from(&body.display_name); - device.last_seen_ip.clone_from(&Some(client.to_string())); - device - .last_seen_ts - .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); + services + .users + .update_device_metadata(sender_user, &body.device_id, &device) + .await?; - services - .users - .update_device_metadata(sender_user, &body.device_id, &device) - .await?; + Ok(update_device::v3::Response {}) + }, + | Err(_) => { + let Some(appservice) = appservice else { + return 
Err!(Request(NotFound("Device not found."))); + }; + if !appservice.registration.device_management { + return Err!(Request(NotFound("Device not found."))); + } - Ok(update_device::v3::Response {}) + debug!( + "Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \ + and device ID does not exist", + appservice.registration.id + ); + + let device_id = OwnedDeviceId::from(utils::random_string(DEVICE_ID_LENGTH)); + + services + .users + .create_device( + sender_user, + &device_id, + &appservice.registration.as_token, + None, + Some(client.to_string()), + ) + .await?; + + return Ok(update_device::v3::Response {}); + }, + } } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -95,8 +127,21 @@ pub(crate) async fn delete_device_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); + let appservice = body.appservice_info.as_ref(); + + if appservice.is_some_and(|appservice| appservice.registration.device_management) { + debug!( + "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ + enabled" + ); + services + .users + .remove_device(sender_user, &body.device_id) + .await; + + return Ok(delete_device::v3::Response {}); + } // UIAA let mut uiaainfo = UiaaInfo { @@ -120,11 +165,11 @@ pub(crate) async fn delete_device_route( // Success! 
}, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err!(Uiaa(uiaainfo)); }, @@ -142,11 +187,12 @@ pub(crate) async fn delete_device_route( Ok(delete_device::v3::Response {}) } -/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// # `POST /_matrix/client/v3/delete_devices` /// -/// Deletes the given device. +/// Deletes the given list of devices. /// -/// - Requires UIAA to verify user password +/// - Requires UIAA to verify user password unless from an appservice with +/// MSC4190 enabled. /// /// For each device: /// - Invalidates access token @@ -158,8 +204,20 @@ pub(crate) async fn delete_devices_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); + let appservice = body.appservice_info.as_ref(); + + if appservice.is_some_and(|appservice| appservice.registration.device_management) { + debug!( + "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ + enabled" + ); + for device_id in &body.devices { + services.users.remove_device(sender_user, device_id).await; + } + + return Ok(delete_devices::v3::Response {}); + } // UIAA let mut uiaainfo = UiaaInfo { @@ -183,11 +241,11 @@ pub(crate) async fn delete_devices_route( // Success! 
}, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 7fa0be9a..c7fae11f 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -25,6 +25,10 @@ where return Ok(None); }; + if dest == *"null" || dest.is_empty() { + return Ok(None); + } + trace!("Appservice URL \"{dest}\", Appservice ID: {}", registration.id); let hs_token = registration.hs_token.as_str(); @@ -34,7 +38,11 @@ where SendAccessToken::IfRequired(hs_token), &VERSIONS, ) - .map_err(|e| err!(BadServerResponse(warn!("Failed to find destination {dest}: {e}"))))? + .map_err(|e| { + err!(BadServerResponse( + warn!(appservice = %registration.id, "Failed to find destination {dest}: {e:?}") + )) + })? 
.map(BytesMut::freeze); let mut parts = http_request.uri().clone().into_parts(); @@ -51,7 +59,7 @@ where let reqwest_request = reqwest::Request::try_from(http_request)?; let mut response = client.execute(reqwest_request).await.map_err(|e| { - warn!("Could not send request to appservice \"{}\" at {dest}: {e}", registration.id); + warn!("Could not send request to appservice \"{}\" at {dest}: {e:?}", registration.id); e })?; @@ -71,7 +79,7 @@ where if !status.is_success() { debug_error!("Appservice response bytes: {:?}", utils::string_from_bytes(&body)); - return Err!(BadServerResponse(error!( + return Err!(BadServerResponse(warn!( "Appservice \"{}\" returned unsuccessful HTTP response {status} at {dest}", registration.id ))); @@ -84,8 +92,8 @@ where ); response.map(Some).map_err(|e| { - err!(BadServerResponse(error!( - "Appservice \"{}\" returned invalid response bytes {dest}: {e}", + err!(BadServerResponse(warn!( + "Appservice \"{}\" returned invalid/malformed response bytes {dest}: {e}", registration.id ))) }) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 5265e64b..87a8b93b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -350,7 +350,6 @@ impl Service { token: &str, ) -> Result<()> { let key = (user_id, device_id); - // should not be None, but we shouldn't assert either lol... 
if self.db.userdeviceid_metadata.qry(&key).await.is_err() { return Err!(Database(error!( ?user_id, From f14756fb767abda97dc966ad842c958d970d77b9 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 12:20:53 -0400 Subject: [PATCH 0799/1248] leave room locally if room is banned, rescind knocks on deactivation too Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 87 +++++++++++++++++++++------- src/api/client/sync/v3.rs | 12 ++-- src/api/client/sync/v4.rs | 5 +- src/api/client/sync/v5.rs | 5 +- src/service/rooms/state_cache/mod.rs | 6 +- 5 files changed, 87 insertions(+), 28 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 315a363c..ef40e972 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -475,9 +475,9 @@ pub(crate) async fn leave_room_route( State(services): State, body: Ruma, ) -> Result { - leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()).await?; - - Ok(leave_room::v3::Response::new()) + leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) + .await + .map(|()| leave_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` @@ -1763,8 +1763,8 @@ pub(crate) async fn invite_helper( Ok(()) } -// Make a user leave all their joined rooms, forgets all rooms, and ignores -// errors +// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms, +// and ignores errors pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { let rooms_joined = services .rooms @@ -1778,7 +1778,17 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { .rooms_invited(user_id) .map(|(r, _)| r); - let all_rooms: Vec<_> = rooms_joined.chain(rooms_invited).collect().await; + let rooms_knocked = services + .rooms + .state_cache + .rooms_knocked(user_id) + .map(|(r, _)| r); + + let all_rooms: Vec<_> = rooms_joined + .chain(rooms_invited) + 
.chain(rooms_knocked) + .collect() + .await; for room_id in all_rooms { // ignore errors @@ -1795,7 +1805,40 @@ pub async fn leave_room( user_id: &UserId, room_id: &RoomId, reason: Option, -) -> Result<()> { +) -> Result { + let default_member_content = RoomMemberEventContent { + membership: MembershipState::Leave, + reason: reason.clone(), + join_authorized_via_users_server: None, + is_direct: None, + avatar_url: None, + displayname: None, + third_party_invite: None, + blurhash: None, + }; + + if services.rooms.metadata.is_banned(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + { + // the room is banned/disabled, the room must be rejected locally since we + // cant/dont want to federate with this server + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + None, + None, + true, + ) + .await?; + + return Ok(()); + } + // Ask a remote server if we don't have this room and are not knocking on it if !services .rooms @@ -1828,7 +1871,7 @@ pub async fn leave_room( .update_membership( room_id, user_id, - RoomMemberEventContent::new(MembershipState::Leave), + default_member_content, user_id, last_state, None, @@ -1848,26 +1891,23 @@ pub async fn leave_room( ) .await else { - // Fix for broken rooms - warn!( + debug_warn!( "Trying to leave a room you are not a member of, marking room as left locally." 
); - services + return services .rooms .state_cache .update_membership( room_id, user_id, - RoomMemberEventContent::new(MembershipState::Leave), + default_member_content, user_id, None, None, true, ) - .await?; - - return Ok(()); + .await; }; services @@ -1897,7 +1937,7 @@ async fn remote_leave_room( room_id: &RoomId, ) -> Result<()> { let mut make_leave_response_and_server = - Err!(BadServerResponse("No server available to assist in leaving.")); + Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); let mut servers: HashSet = services .rooms @@ -1977,20 +2017,25 @@ async fn remote_leave_room( let (make_leave_response, remote_server) = make_leave_response_and_server?; let Some(room_version_id) = make_leave_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + return Err!(BadServerResponse(warn!( + "No room version was returned by {remote_server} for {room_id}, room version is \ + likely not supported by conduwuit" + ))); }; if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); + return Err!(BadServerResponse(warn!( + "Remote room version {room_version_id} for {room_id} is not supported by conduwuit", + ))); } let mut leave_event_stub = serde_json::from_str::( make_leave_response.event.get(), ) .map_err(|e| { - err!(BadServerResponse("Invalid make_leave event json received from server: {e:?}")) + err!(BadServerResponse(warn!( + "Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}" + ))) })?; // TODO: Is origin needed? 
diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 530c1278..83ffa55a 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -15,6 +15,7 @@ use conduwuit::{ math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, + warn, }; use conduwuit_service::{ Services, @@ -428,9 +429,12 @@ async fn handle_left_room( return Ok(None); } - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { // This is just a rejected invite, not a room we know - // Insert a leave event anyways + // Insert a leave event anyways for the client let event = PduEvent { event_id: EventId::new(services.globals.server_name()), sender: sender_user.to_owned(), @@ -489,7 +493,7 @@ async fn handle_left_room( .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) .await else { - error!("Left room but no left state event"); + warn!("Left {room_id} but no left state event"); return Ok(None); }; @@ -499,7 +503,7 @@ async fn handle_left_room( .pdu_shortstatehash(&left_event_id) .await else { - error!(event_id = %left_event_id, "Leave event has no state"); + warn!(event_id = %left_event_id, "Leave event has no state in {room_id}"); return Ok(None); }; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 7e902973..f7edb8c0 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -438,7 +438,10 @@ pub(crate) async fn sync_events_v4_route( let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { continue; } let todo_room = diff --git a/src/api/client/sync/v5.rs 
b/src/api/client/sync/v5.rs index 48b41b21..c4e71d88 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -214,7 +214,10 @@ async fn fetch_subscriptions( ) { let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { continue; } let todo_room = diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 23ba0520..d3dbc143 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -40,6 +40,7 @@ struct Services { account_data: Dep, config: Dep, globals: Dep, + metadata: Dep, state_accessor: Dep, users: Dep, } @@ -73,6 +74,7 @@ impl crate::Service for Service { account_data: args.depend::("account_data"), config: args.depend::("config"), globals: args.depend::("globals"), + metadata: args.depend::("rooms::metadata"), state_accessor: args .depend::("rooms::state_accessor"), users: args.depend::("users"), @@ -271,7 +273,9 @@ impl Service { self.mark_as_left(user_id, room_id); if self.services.globals.user_is_local(user_id) - && self.services.config.forget_forced_upon_leave + && (self.services.config.forget_forced_upon_leave + || self.services.metadata.is_banned(room_id).await + || self.services.metadata.is_disabled(room_id).await) { self.forget(room_id, user_id); } From 5d1404e9dfff9bc0e5bed4bab6d75c9c94b38183 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 02:52:42 +0000 Subject: [PATCH 0800/1248] fix well-known using the hooked resolver Signed-off-by: Jason Volk --- src/service/client/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index d51e5721..1aeeb492 100644 --- a/src/service/client/mod.rs +++ 
b/src/service/client/mod.rs @@ -56,7 +56,7 @@ impl crate::Service for Service { .build()?, well_known: base(config)? - .dns_resolver(resolver.resolver.hooked.clone()) + .dns_resolver(resolver.resolver.clone()) .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) .read_timeout(Duration::from_secs(config.well_known_timeout)) .timeout(Duration::from_secs(config.well_known_timeout)) From 58adb6fead27c863849c63184f145be209e40e1b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 04:05:42 +0000 Subject: [PATCH 0801/1248] upgrade hickory and hyper-util dependencies Signed-off-by: Jason Volk --- Cargo.lock | 195 +++++++++++++++++++++++++++++++-- Cargo.toml | 10 +- src/service/resolver/actual.rs | 39 ++++--- src/service/resolver/dns.rs | 24 ++-- 4 files changed, 229 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2bcfcee4..545f0f0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,6 +142,17 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -927,7 +938,7 @@ dependencies = [ "const-str", "either", "futures", - "hickory-resolver", + "hickory-resolver 0.25.1", "http", "image", "ipaddress", @@ -1061,6 +1072,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crokey" version = "1.1.1" @@ -1584,6 +1601,19 @@ dependencies = [ "slab", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -1769,6 +1799,34 @@ dependencies = [ "url", ] +[[package]] +name = "hickory-proto" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "critical-section", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.0", + "ring", + "serde", + "thiserror 2.0.12", + "tinyvec", + "tokio", + "tracing", + "url", +] + [[package]] name = "hickory-resolver" version = "0.24.4" @@ -1777,7 +1835,7 @@ checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" dependencies = [ "cfg-if", "futures-util", - "hickory-proto", + "hickory-proto 0.24.4", "ipconfig", "lru-cache", "once_cell", @@ -1790,6 +1848,28 @@ dependencies = [ "tracing", ] +[[package]] +name = "hickory-resolver" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.25.1", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.0", + "resolv-conf", + "serde", + "smallvec", + "thiserror 2.0.12", + "tokio", + "tracing", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1816,7 +1896,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows", + "windows 0.52.0", ] [[package]] @@ -1949,9 +2029,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" dependencies = [ "bytes", "futures-channel", @@ -1959,10 +2039,10 @@ dependencies = [ "http", "http-body", "hyper", + "libc", "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -2439,6 +2519,19 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "loop9" version = "0.1.5" @@ -2609,6 +2702,25 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -2773,6 +2885,10 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "openssl-probe" @@ -3052,6 +3168,12 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + [[package]] name = "powerfmt" version = "0.2.0" @@ -3463,7 +3585,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - 
"hickory-resolver", + "hickory-resolver 0.24.4", "http", "http-body", "http-body-util", @@ -3893,6 +4015,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -4464,6 +4592,12 @@ dependencies = [ "version-compare", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "target-lexicon" version = "0.12.16" @@ -5367,7 +5501,17 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core", + "windows-core 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", "windows-targets 0.52.6", ] @@ -5380,6 +5524,41 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "windows-registry" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 0abaa2f9..6c5c291f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -284,8 +284,7 @@ features = [ ] [workspace.dependencies.hyper-util] -# hyper-util >=0.1.9 seems to have DNS issues -version = "=0.1.8" +version = "0.1.11" default-features = false features = [ "server-auto", @@ -306,8 +305,13 @@ default-features = false features = ["env", "toml"] [workspace.dependencies.hickory-resolver] -version = "0.24.2" +version = "0.25.1" default-features = false +features = [ + "serde", + "system-config", + "tokio", +] # Used for conduwuit::Error type [workspace.dependencies.thiserror] diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 1ad76f66..0151c4d7 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -5,7 +5,7 @@ use std::{ use conduwuit::{Err, Result, debug, debug_info, err, error, trace}; use futures::{FutureExt, TryFutureExt}; -use hickory_resolver::error::ResolveError; +use hickory_resolver::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; @@ -334,25 +334,28 @@ impl super::Service { } fn handle_resolve_error(e: &ResolveError, host: &'_ str) -> Result<()> { - use hickory_resolver::error::ResolveErrorKind; + use hickory_resolver::{ResolveErrorKind::Proto, proto::ProtoErrorKind}; - match *e.kind() { - | ResolveErrorKind::NoRecordsFound { .. } => { - // Raise to debug_warn if we can find out the result wasn't from cache - debug!(%host, "No DNS records found: {e}"); - Ok(()) - }, - | ResolveErrorKind::Timeout => { - Err!(warn!(%host, "DNS {e}")) - }, - | ResolveErrorKind::NoConnections => { - error!( - "Your DNS server is overloaded and has ran out of connections. 
It is \ - strongly recommended you remediate this issue to ensure proper federation \ - connectivity." - ); + match e.kind() { + | Proto(e) => match e.kind() { + | ProtoErrorKind::NoRecordsFound { .. } => { + // Raise to debug_warn if we can find out the result wasn't from cache + debug!(%host, "No DNS records found: {e}"); + Ok(()) + }, + | ProtoErrorKind::Timeout => { + Err!(warn!(%host, "DNS {e}")) + }, + | ProtoErrorKind::NoConnections => { + error!( + "Your DNS server is overloaded and has ran out of connections. It is \ + strongly recommended you remediate this issue to ensure proper \ + federation connectivity." + ); - Err!(error!(%host, "DNS error: {e}")) + Err!(error!(%host, "DNS error: {e}")) + }, + | _ => Err!(error!(%host, "DNS error: {e}")), }, | _ => Err!(error!(%host, "DNS error: {e}")), } diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index e4245a5b..3a0b2551 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -2,19 +2,19 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; use conduwuit::{Result, Server, err}; use futures::FutureExt; -use hickory_resolver::{TokioAsyncResolver, lookup_ip::LookupIp}; +use hickory_resolver::{TokioResolver, lookup_ip::LookupIp}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use super::cache::{Cache, CachedOverride}; pub struct Resolver { - pub(crate) resolver: Arc, + pub(crate) resolver: Arc, pub(crate) hooked: Arc, server: Arc, } pub(crate) struct Hooked { - resolver: Arc, + resolver: Arc, cache: Arc, server: Arc, } @@ -42,7 +42,7 @@ impl Resolver { let mut ns = sys_conf.clone(); if config.query_over_tcp_only { - ns.protocol = hickory_resolver::config::Protocol::Tcp; + ns.protocol = hickory_resolver::proto::xfer::Protocol::Tcp; } ns.trust_negative_responses = !config.query_all_nameservers; @@ -51,6 +51,7 @@ impl Resolver { } opts.cache_size = config.dns_cache_entries as usize; + opts.preserve_intermediates = true; opts.negative_min_ttl = 
Some(Duration::from_secs(config.dns_min_ttl_nxdomain)); opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 30)); opts.positive_min_ttl = Some(Duration::from_secs(config.dns_min_ttl)); @@ -60,8 +61,7 @@ impl Resolver { opts.try_tcp_on_error = config.dns_tcp_fallback; opts.num_concurrent_reqs = 1; opts.edns0 = true; - opts.shuffle_dns_servers = true; - opts.rotate = true; + opts.case_randomization = true; opts.ip_strategy = match config.ip_lookup_strategy { | 1 => hickory_resolver::config::LookupIpStrategy::Ipv4Only, | 2 => hickory_resolver::config::LookupIpStrategy::Ipv6Only, @@ -69,9 +69,13 @@ impl Resolver { | 4 => hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4, | _ => hickory_resolver::config::LookupIpStrategy::Ipv4thenIpv6, }; - opts.authentic_data = false; - let resolver = Arc::new(TokioAsyncResolver::tokio(conf, opts)); + let rt_prov = hickory_resolver::proto::runtime::TokioRuntimeProvider::new(); + let conn_prov = hickory_resolver::name_server::TokioConnectionProvider::new(rt_prov); + let mut builder = TokioResolver::builder_with_config(conf, conn_prov); + *builder.options_mut() = opts; + let resolver = Arc::new(builder.build()); + Ok(Arc::new(Self { resolver: resolver.clone(), hooked: Arc::new(Hooked { resolver, cache, server: server.clone() }), @@ -105,7 +109,7 @@ impl Resolve for Hooked { async fn hooked_resolve( cache: Arc, server: Arc, - resolver: Arc, + resolver: Arc, name: Name, ) -> Result> { match cache.get_override(name.as_str()).await { @@ -129,7 +133,7 @@ async fn hooked_resolve( async fn resolve_to_reqwest( server: Arc, - resolver: Arc, + resolver: Arc, name: Name, ) -> ResolvingResult { use std::{io, io::ErrorKind::Interrupted}; From 0b56204f89d37470346c1940e70354deebfd1a3a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 04:34:11 +0000 Subject: [PATCH 0802/1248] bump additional dependencies Signed-off-by: Jason Volk --- Cargo.lock | 264 ++++++++++++++++++++++++++++++----------------------- Cargo.toml | 20 
++-- 2 files changed, 161 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 545f0f0d..da33af05 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.6" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dabb68eb3a7aa08b46fddfd59a3d55c978243557a90ab804769f7e20e67d2b01" +checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" dependencies = [ "aws-lc-sys", "zeroize", @@ -242,9 +242,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77926887776171ced7d662120a75998e444d3750c951abfe07f90da130514b1f" +checksum = "b9f7720b74ed28ca77f90769a71fd8c637a0137f6fae4ae947e1050229cff57f" dependencies = [ "bindgen 0.69.5", "cc", @@ -663,9 +663,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.34" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e958897981290da2a852763fe9cdb89cd36977a5d729023127095fa94d95e2ff" +checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" dependencies = [ "clap_builder", "clap_derive", @@ -673,9 +673,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.34" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b0f35019843db2160b5bb19ae09b4e6411ac33fc6a712003c33e03090e2489" +checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" dependencies = [ "anstyle", "clap_lex", @@ -742,8 +742,8 @@ dependencies = [ "opentelemetry-jaeger", "opentelemetry_sdk", "sentry", - "sentry-tower 0.35.0", - "sentry-tracing 0.35.0", + "sentry-tower", + "sentry-tracing", "tokio", "tokio-metrics", "tracing", @@ -916,8 +916,8 @@ dependencies = [ "rustls", "sd-notify", "sentry", - "sentry-tower 0.35.0", - "sentry-tracing 0.35.0", + 
"sentry-tower", + "sentry-tracing", "serde_json", "tokio", "tower 0.5.2", @@ -1454,9 +1454,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", "miniz_oxide", @@ -2016,9 +2016,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", "hyper-util", @@ -2336,10 +2336,11 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.2", "libc", ] @@ -3574,9 +3575,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" dependencies = [ "async-compression", "base64 0.22.1", @@ -3612,6 +3613,7 @@ dependencies = [ "tokio-rustls", "tokio-socks", "tokio-util", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -4067,21 +4069,21 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" -version = 
"0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a7332159e544e34db06b251b1eda5e546bd90285c3f58d9c8ff8450b484e0da" +checksum = "255914a8e53822abd946e2ce8baa41d4cded6b8e938913b7f7b9da5b7ab44335" dependencies = [ "httpdate", "reqwest", "rustls", "sentry-backtrace", "sentry-contexts", - "sentry-core 0.36.0", + "sentry-core", "sentry-debug-images", "sentry-log", "sentry-panic", - "sentry-tower 0.36.0", - "sentry-tracing 0.36.0", + "sentry-tower", + "sentry-tracing", "tokio", "ureq", "webpki-roots", @@ -4089,107 +4091,83 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "565ec31ad37bab8e6d9f289f34913ed8768347b133706192f10606dabd5c6bc4" +checksum = "00293cd332a859961f24fd69258f7e92af736feaeb91020cff84dac4188a4302" dependencies = [ "backtrace", "once_cell", "regex", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-contexts" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e860275f25f27e8c0c7726ce116c7d5c928c5bba2ee73306e52b20a752298ea6" +checksum = "961990f9caa76476c481de130ada05614cd7f5aa70fb57c2142f0e09ad3fb2aa" dependencies = [ "hostname", "libc", "os_info", "rustc_version", - "sentry-core 0.36.0", + "sentry-core", "uname", ] [[package]] name = "sentry-core" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" +checksum = "1a6409d845707d82415c800290a5d63be5e3df3c2e417b0997c60531dfbd35ef" dependencies = [ "once_cell", "rand 0.8.5", - "sentry-types 0.35.0", - "serde", - "serde_json", -] - -[[package]] -name = "sentry-core" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"653942e6141f16651273159f4b8b1eaeedf37a7554c00cd798953e64b8a9bf72" -dependencies = [ - "once_cell", - "rand 0.8.5", - "sentry-types 0.36.0", + "sentry-types", "serde", "serde_json", ] [[package]] name = "sentry-debug-images" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60bc2154e6df59beed0ac13d58f8dfaf5ad20a88548a53e29e4d92e8e835c2" +checksum = "71ab5df4f3b64760508edfe0ba4290feab5acbbda7566a79d72673065888e5cc" dependencies = [ "findshlibs", "once_cell", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-log" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c96d796cba1b3a0793e7f53edc420c61f9419fba8fb34ad5519f5c7d01af6b2" +checksum = "693841da8dfb693af29105edfbea1d91348a13d23dd0a5d03761eedb9e450c46" dependencies = [ "log", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-panic" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "105e3a956c8aa9dab1e4087b1657b03271bfc49d838c6ae9bfc7c58c802fd0ef" +checksum = "609b1a12340495ce17baeec9e08ff8ed423c337c1a84dffae36a178c783623f3" dependencies = [ "sentry-backtrace", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-tower" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdaf9b1939589476bd57751d12a9653bbfe356610fc476d03d7683189183ab7" -dependencies = [ - "sentry-core 0.35.0", - "tower-layer", - "tower-service", -] - -[[package]] -name = "sentry-tower" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "082f781dfc504d984e16d99f8dbf94d6ee4762dd0fc28de25713d0f900a8164d" +checksum = "4b98005537e38ee3bc10e7d36e7febe9b8e573d03f2ddd85fcdf05d21f9abd6d" dependencies = [ "http", "pin-project", - "sentry-core 0.36.0", + "sentry-core", "tower-layer", 
"tower-service", "url", @@ -4197,49 +4175,21 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" -dependencies = [ - "sentry-core 0.35.0", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sentry-tracing" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e75c831b4d8b34a5aec1f65f67c5d46a26c7c5d3c7abd8b5ef430796900cf8" +checksum = "49f4e86402d5c50239dc7d8fd3f6d5e048221d5fcb4e026d8d50ab57fe4644cb" dependencies = [ "sentry-backtrace", - "sentry-core 0.36.0", + "sentry-core", "tracing-core", "tracing-subscriber", ] [[package]] name = "sentry-types" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" -dependencies = [ - "debugid", - "hex", - "rand 0.8.5", - "serde", - "serde_json", - "thiserror 1.0.69", - "time", - "url", - "uuid", -] - -[[package]] -name = "sentry-types" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d4203359e60724aa05cf2385aaf5d4f147e837185d7dd2b9ccf1ee77f4420c8" +checksum = "3d3f117b8755dbede8260952de2aeb029e20f432e72634e8969af34324591631" dependencies = [ "debugid", "hex", @@ -5532,8 +5482,8 @@ checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ "windows-implement", "windows-interface", - "windows-result", - "windows-strings", + "windows-result 0.2.0", + "windows-strings 0.1.0", "windows-targets 0.52.6", ] @@ -5560,14 +5510,20 @@ dependencies = [ ] [[package]] -name = "windows-registry" -version = "0.2.0" +name = "windows-link" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ - "windows-result", - "windows-strings", - "windows-targets 0.52.6", + "windows-result 0.3.2", + "windows-strings 0.3.1", + "windows-targets 0.53.0", ] [[package]] @@ -5579,16 +5535,34 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-strings" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result", + "windows-result 0.2.0", "windows-targets 0.52.6", ] +[[package]] +name = "windows-strings" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -5640,13 +5614,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 
0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -5659,6 +5649,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -5671,6 +5667,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -5683,12 +5685,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -5701,6 +5715,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -5713,6 +5733,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -5725,6 +5751,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -5737,6 +5769,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.7.4" diff --git a/Cargo.toml b/Cargo.toml index 6c5c291f..3ffa9e44 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -141,12 +141,12 @@ features = [ ] [workspace.dependencies.rustls] -version = "0.23.19" +version = "0.23.25" default-features = false features = ["aws_lc_rs"] [workspace.dependencies.reqwest] -version = "0.12.9" +version = "0.12.15" default-features = false features = [ "rustls-tls-native-roots", @@ -204,7 +204,7 @@ features = [ # logging [workspace.dependencies.log] -version = "0.4.22" +version = "0.4.27" default-features = false [workspace.dependencies.tracing] version = "0.1.41" @@ -224,7 +224,7 @@ default-features = false # used for conduwuit's CLI and admin room command parsing [workspace.dependencies.clap] -version = "4.5.23" +version = "4.5.35" default-features = false features = [ "derive", @@ -320,7 +320,7 @@ default-features = false # Used when hashing the state [workspace.dependencies.ring] -version = "0.17.8" +version = "0.17.14" default-features = false # Used to make working with iterators easier, was already a transitive depdendency @@ -427,7 +427,7 @@ features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting [workspace.dependencies.sentry] -version = "0.36.0" +version = "0.37.0" default-features = false features = [ "backtrace", @@ -443,9 +443,9 @@ features = [ ] [workspace.dependencies.sentry-tracing] -version = "0.35.0" +version = "0.37.0" [workspace.dependencies.sentry-tower] -version = "0.35.0" +version = "0.37.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] @@ -479,7 +479,7 @@ default-features = false features = ["resource"] [workspace.dependencies.sd-notify] -version = "0.4.3" +version = "0.4.5" default-features = false [workspace.dependencies.hardened_malloc-rs] @@ -496,7 +496,7 @@ version = "0.4.3" default-features = false [workspace.dependencies.termimad] -version = "0.31.1" +version = "0.31.2" default-features = false [workspace.dependencies.checked_ops] From f9529937ce9a8dacf186fb4f60ef0c3315bb02a0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 
19:36:24 +0000 Subject: [PATCH 0803/1248] patch hyper-util due to conflicts with federation resolver hooks Signed-off-by: Jason Volk --- Cargo.lock | 3 +-- Cargo.toml | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da33af05..8918a631 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2030,8 +2030,7 @@ dependencies = [ [[package]] name = "hyper-util" version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +source = "git+https://github.com/girlbossceo/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" dependencies = [ "bytes", "futures-channel", diff --git a/Cargo.toml b/Cargo.toml index 3ffa9e44..bf7ec2bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -570,10 +570,16 @@ rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d" git = "https://github.com/girlbossceo/async-channel" rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280" +# adds affinity masks for selecting more than one core at a time [patch.crates-io.core_affinity] git = "https://github.com/girlbossceo/core_affinity_rs" rev = "9c8e51510c35077df888ee72a36b4b05637147da" +# reverts hyperium#148 conflicting with our delicate federation resolver hooks +[patch.crates-io.hyper-util] +git = "https://github.com/girlbossceo/hyper-util" +rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" + # # Our crates # From 45fd3875c8932e56d1ab092004065b0800861201 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 00:59:23 +0000 Subject: [PATCH 0804/1248] move runtime shutdown out of main; gather final stats Signed-off-by: Jason Volk --- src/main/main.rs | 7 +++---- src/main/runtime.rs | 43 ++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 7 deletions(-) diff --git a/src/main/main.rs b/src/main/main.rs index 52f40384..1a9d3fe4 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -16,15 +16,14 @@ use 
server::Server; rustc_flags_capture! {} -fn main() -> Result<(), Error> { +fn main() -> Result { let args = clap::parse(); let runtime = runtime::new(&args)?; let server = Server::new(&args, Some(runtime.handle()))?; + runtime.spawn(signal::signal(server.clone())); runtime.block_on(async_main(&server))?; - - // explicit drop here to trace thread and tls dtors - drop(runtime); + runtime::shutdown(&server, runtime); #[cfg(unix)] if server.server.restarting.load(Ordering::Acquire) { diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 920476db..1c58ea81 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,7 +1,7 @@ use std::{ iter::once, sync::{ - OnceLock, + Arc, OnceLock, atomic::{AtomicUsize, Ordering}, }, thread, @@ -11,17 +11,18 @@ use std::{ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use conduwuit_core::result::LogDebugErr; use conduwuit_core::{ - Result, is_true, + Result, debug, is_true, utils::sys::compute::{nth_core_available, set_affinity}, }; use tokio::runtime::Builder; -use crate::clap::Args; +use crate::{clap::Args, server::Server}; const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +const SHUTDOWN_TIMEOUT: Duration = Duration::from_millis(10000); #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] const DISABLE_MUZZY_THRESHOLD: usize = 4; @@ -83,6 +84,42 @@ fn enable_histogram(builder: &mut Builder, args: &Args) { .metrics_poll_time_histogram_configuration(linear); } +#[cfg(tokio_unstable)] +#[tracing::instrument(name = "stop", level = "info", skip_all)] +pub(super) fn shutdown(server: &Arc, runtime: tokio::runtime::Runtime) { + use conduwuit_core::event; + use tracing::Level; + + // The final metrics output is promoted to INFO when tokio_unstable is active in + // a release/bench mode and DEBUG is likely optimized out + const LEVEL: Level = if cfg!(debug_assertions) { + Level::DEBUG + } else { + Level::INFO + 
}; + + debug!( + timeout = ?SHUTDOWN_TIMEOUT, + "Waiting for runtime..." + ); + + runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); + let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default(); + + event!(LEVEL, ?runtime_metrics, "Final runtime metrics"); +} + +#[cfg(not(tokio_unstable))] +#[tracing::instrument(name = "stop", level = "info", skip_all)] +pub(super) fn shutdown(_server: &Arc, runtime: tokio::runtime::Runtime) { + debug!( + timeout = ?SHUTDOWN_TIMEOUT, + "Waiting for runtime..." + ); + + runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); +} + #[tracing::instrument( name = "fork", level = "debug", From 29d55b80366e17737094d3ad9a8031fe20c6286e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 04:12:24 +0000 Subject: [PATCH 0805/1248] move systemd stopping notification point Signed-off-by: Jason Volk --- src/core/server.rs | 19 ++++++++++--------- src/router/run.rs | 4 ++++ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index b67759d6..4b673f32 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -69,10 +69,6 @@ impl Server { return Err!("Reloading not enabled"); } - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) - .expect("failed to notify systemd of reloading state"); - if self.reloading.swap(true, Ordering::AcqRel) { return Err!("Reloading already in progress"); } @@ -98,10 +94,6 @@ impl Server { } pub fn shutdown(&self) -> Result { - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) - .expect("failed to notify systemd of stopping state"); - if self.stopping.swap(true, Ordering::AcqRel) { return Err!("Shutdown already in progress"); } @@ -144,7 +136,16 @@ impl Server { } #[inline] - pub fn running(&self) -> bool { !self.stopping.load(Ordering::Acquire) } + pub fn running(&self) -> bool { !self.is_stopping() } + + #[inline] + pub 
fn is_stopping(&self) -> bool { self.stopping.load(Ordering::Relaxed) } + + #[inline] + pub fn is_reloading(&self) -> bool { self.reloading.load(Ordering::Relaxed) } + + #[inline] + pub fn is_restarting(&self) -> bool { self.restarting.load(Ordering::Relaxed) } #[inline] pub fn is_ours(&self, name: &str) -> bool { name == self.config.server_name } diff --git a/src/router/run.rs b/src/router/run.rs index 31789626..ff54594f 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -77,6 +77,10 @@ pub(crate) async fn start(server: Arc) -> Result> { pub(crate) async fn stop(services: Arc) -> Result<()> { debug!("Shutting down..."); + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) + .expect("failed to notify systemd of stopping state"); + // Wait for all completions before dropping or we'll lose them to the module // unload and explode. services.stop().await; From 94b107b42b722aff9518f64ad603ce01665b25f3 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 16:08:02 -0400 Subject: [PATCH 0806/1248] add some debug logging and misc cleanup to keys/signatures/upload Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++----- Cargo.toml | 2 +- src/api/client/keys.rs | 95 ++++++++++++++++++++++++++-------------- src/service/users/mod.rs | 18 +++++--- 4 files changed, 86 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8918a631..0753f81d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "assign", "js_int", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = 
"ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3686,7 +3686,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "assign", @@ -3709,7 +3709,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "base64 0.22.1", @@ -3741,7 +3741,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3766,7 +3766,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" 
dependencies = [ "bytes", "headers", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3797,7 +3797,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3807,7 +3807,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3822,7 +3822,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3834,7 +3834,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index bf7ec2bb..a44fc0f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "0701341a2fd5a6ea74beada18d5974cc401a4fc1" +rev = "edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" features = [ "compat", "rand", diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index f50d7afa..f6224343 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -9,7 +9,8 @@ use ruma::{ client::{ error::ErrorKind, keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + claim_keys, get_key_changes, get_keys, upload_keys, + upload_signatures::{self, v3::Failure}, upload_signing_keys, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, @@ -308,53 +309,81 @@ async fn check_for_new_keys( /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. +/// +/// TODO: clean this timo-code up more. tried to improve it a bit to stop +/// exploding the entire request on bad sigs, but needs way more work. 
pub(crate) async fn upload_signatures_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + use upload_signatures::v3::FailureErrorCode::*; + + if body.signed_keys.is_empty() { + debug!("Empty signed_keys sent in key signature upload"); + return Ok(upload_signatures::v3::Response::new()); + } + + let sender_user = body.sender_user(); + let mut failures: BTreeMap> = BTreeMap::new(); + let mut failure_reasons: BTreeMap = BTreeMap::new(); + let failure = Failure { + errcode: InvalidSignature, + error: String::new(), + }; for (user_id, keys) in &body.signed_keys { for (key_id, key) in keys { - let key = serde_json::to_value(key) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + let Ok(key) = serde_json::to_value(key) + .inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}")) + else { + let mut failure = failure.clone(); + failure.error = String::from("Invalid \"key\" JSON"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; - for signature in key - .get("signatures") - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Missing signatures field."))? - .get(sender_user.to_string()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid user in signatures field.", - ))? - .as_object() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature."))? - .clone() - { - // Signature validation? - let signature = ( - signature.0, - signature - .1 - .as_str() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid signature value.", - ))? 
- .to_owned(), - ); + let Some(signatures) = key.get("signatures") else { + let mut failure = failure.clone(); + failure.error = String::from("Missing \"signatures\" field"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; - services + let Some(sender_user_val) = signatures.get(sender_user.to_string()) else { + let mut failure = failure.clone(); + failure.error = String::from("Invalid user in signatures field"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; + + let Some(sender_user_object) = sender_user_val.as_object() else { + let mut failure = failure.clone(); + failure.error = String::from("signatures field is not a JSON object"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; + + for (signature, val) in sender_user_object.clone() { + let signature = (signature, val.to_string()); + + if let Err(e) = services .users .sign_key(user_id, key_id, signature, sender_user) - .await?; + .await + .inspect_err(|e| debug_warn!("{e}")) + { + let mut failure = failure.clone(); + failure.error = format!("Error signing key: {e}"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + } } } + + failures.insert(user_id.to_owned(), failure_reasons.clone()); } - Ok(upload_signatures::v3::Response { - failures: BTreeMap::new(), // TODO: integrate - }) + Ok(upload_signatures::v3::Response { failures }) } /// # `POST /_matrix/client/r0/keys/changes` diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 87a8b93b..1eb289fc 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -593,7 +593,7 @@ impl Service { key_id: &str, signature: (String, String), sender_id: &UserId, - ) -> Result<()> { + ) -> Result { let key = (target_id, key_id); let mut cross_signing_key: serde_json::Value = self @@ -601,21 +601,27 @@ impl Service { .keyid_key .qry(&key) .await - .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key."))))? 
+ .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key"))))? .deserialized() - .map_err(|e| err!(Database("key in keyid_key is invalid. {e:?}")))?; + .map_err(|e| err!(Database(debug_warn!("key in keyid_key is invalid: {e:?}"))))?; let signatures = cross_signing_key .get_mut("signatures") - .ok_or_else(|| err!(Database("key in keyid_key has no signatures field.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("key in keyid_key has no signatures field"))) + })? .as_object_mut() - .ok_or_else(|| err!(Database("key in keyid_key has invalid signatures field.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("key in keyid_key has invalid signatures field."))) + })? .entry(sender_id.to_string()) .or_insert_with(|| serde_json::Map::new().into()); signatures .as_object_mut() - .ok_or_else(|| err!(Database("signatures in keyid_key for a user is invalid.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("signatures in keyid_key for a user is invalid."))) + })? .insert(signature.0, signature.1.into()); let key = (target_id, key_id); From b7109131e29804ac6b4e30aaaa40f213d092a63a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 22:06:51 +0000 Subject: [PATCH 0807/1248] further simplify get_missing_events; various log calls Signed-off-by: Jason Volk --- src/api/server/get_missing_events.rs | 47 +++++++++---------- .../rooms/state_accessor/server_can.rs | 8 +--- src/service/rooms/state_accessor/user_can.rs | 14 ++---- 3 files changed, 29 insertions(+), 40 deletions(-) diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index d72918fa..04dc30ed 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,9 +1,5 @@ use axum::extract::State; -use conduwuit::{ - Result, debug, debug_info, debug_warn, - utils::{self}, - warn, -}; +use conduwuit::{Result, debug, debug_error, utils::to_canonical_object}; use ruma::api::federation::event::get_missing_events; use 
super::AccessCheck; @@ -43,19 +39,13 @@ pub(crate) async fn get_missing_events_route( let mut i: usize = 0; while i < queued_events.len() && events.len() < limit { let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else { - debug_info!(?body.origin, "Event {} does not exist locally, skipping", &queued_events[i]); - i = i.saturating_add(1); - continue; - }; - - if pdu.room_id != body.room_id { - warn!(?body.origin, - "Got an event for the wrong room in database. Found {:?} in {:?}, server requested events in {:?}. Skipping.", - pdu.event_id, pdu.room_id, body.room_id + debug!( + ?body.origin, + "Event {} does not exist locally, skipping", &queued_events[i] ); i = i.saturating_add(1); continue; - } + }; if body.earliest_events.contains(&queued_events[i]) { i = i.saturating_add(1); @@ -68,25 +58,32 @@ pub(crate) async fn get_missing_events_route( .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) .await { - debug!(?body.origin, "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id); + debug!( + ?body.origin, + "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id + ); i = i.saturating_add(1); continue; } - let Ok(pdu_json) = utils::to_canonical_object(&pdu) else { - debug_warn!(?body.origin, "Failed to convert PDU in database to canonical JSON: {pdu:?}"); + let Ok(event) = to_canonical_object(&pdu) else { + debug_error!( + ?body.origin, + "Failed to convert PDU in database to canonical JSON: {pdu:?}" + ); i = i.saturating_add(1); continue; }; - queued_events.extend(pdu.prev_events.iter().map(ToOwned::to_owned)); + let prev_events = pdu.prev_events.iter().map(ToOwned::to_owned); - events.push( - services - .sending - .convert_to_outgoing_federation_event(pdu_json) - .await, - ); + let event = services + .sending + .convert_to_outgoing_federation_event(event) + .await; + + queued_events.extend(prev_events); + events.push(event); } Ok(get_missing_events::v1::Response { events }) diff --git 
a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index c946fbfd..2befec22 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{debug_info, implement, utils::stream::ReadyExt}; +use conduwuit::{implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ EventId, RoomId, ServerName, @@ -36,7 +36,6 @@ pub async fn server_can_see_event( .ready_filter(|member| member.server_name() == origin); match history_visibility { - | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny current_server_members @@ -49,9 +48,6 @@ pub async fn server_can_see_event( .any(|member| self.user_was_joined(shortstatehash, member)) .await }, - | _ => { - debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); - true - }, + | HistoryVisibility::WorldReadable | HistoryVisibility::Shared | _ => true, } } diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index aa54407b..67e0b52b 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{Err, Error, Result, debug_info, implement, pdu::PduBuilder}; +use conduwuit::{Err, Result, implement, pdu::PduBuilder}; use ruma::{ EventId, RoomId, UserId, events::{ @@ -76,8 +76,8 @@ pub async fn user_can_redact( || redacting_event .as_ref() .is_ok_and(|redacting_event| redacting_event.sender == sender)), - | _ => Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", + | _ => Err!(Database( + "No m.room.power_levels or m.room.create events in database for room" )), } }, @@ -108,8 +108,6 @@ pub async fn user_can_see_event( }); match history_visibility { - | 
HistoryVisibility::WorldReadable => true, - | HistoryVisibility::Shared => currently_member, | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny self.user_was_invited(shortstatehash, user_id).await @@ -118,10 +116,8 @@ pub async fn user_can_see_event( // Allow if any member on requested server was joined, else deny self.user_was_joined(shortstatehash, user_id).await }, - | _ => { - debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); - currently_member - }, + | HistoryVisibility::WorldReadable => true, + | HistoryVisibility::Shared | _ => currently_member, } } From 6a073b4fa4c728b15f94de88ac37d136c97982bf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 06:28:34 +0000 Subject: [PATCH 0808/1248] remove additional unnecessary Arc Signed-off-by: Jason Volk --- .../fetch_and_handle_outliers.rs | 6 ++--- src/service/rooms/event_handler/fetch_prev.rs | 7 ++---- .../rooms/event_handler/handle_outlier_pdu.rs | 22 ++++++------------- .../rooms/event_handler/handle_prev_pdu.rs | 6 +---- .../rooms/event_handler/state_at_incoming.rs | 5 ++--- .../event_handler/upgrade_outlier_pdu.rs | 2 +- 6 files changed, 15 insertions(+), 33 deletions(-) diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 80e91eff..b0a7d827 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeMap, HashSet, VecDeque, hash_map}, - sync::Arc, time::Instant, }; @@ -8,7 +7,6 @@ use conduwuit::{ PduEvent, debug, debug_error, debug_warn, implement, pdu, trace, utils::continue_exponential_backoff_secs, warn, }; -use futures::TryFutureExt; use ruma::{ CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, }; @@ -31,7 +29,7 @@ pub(super) async fn 
fetch_and_handle_outliers<'a>( events: &'a [OwnedEventId], create_event: &'a PduEvent, room_id: &'a RoomId, -) -> Vec<(Arc, Option>)> { +) -> Vec<(PduEvent, Option>)> { let back_off = |id| match self .services .globals @@ -53,7 +51,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(local_pdu) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await { + if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { trace!("Found {id} in db"); events_with_auth_events.push((id, Some(local_pdu), vec![])); continue; diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index e817430b..0f92d6e6 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet, VecDeque}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use conduwuit::{ PduEvent, Result, debug_warn, err, implement, @@ -31,7 +28,7 @@ pub(super) async fn fetch_prev( initial_set: Vec, ) -> Result<( Vec, - HashMap, BTreeMap)>, + HashMap)>, )> { let mut graph: HashMap = HashMap::with_capacity(initial_set.len()); let mut eventid_info = HashMap::new(); diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 99e90a50..5339249d 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -1,12 +1,9 @@ -use std::{ - collections::{BTreeMap, HashMap, hash_map}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap, hash_map}; use conduwuit::{ Err, Error, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, }; -use futures::{TryFutureExt, future::ready}; +use futures::future::ready; use ruma::{ CanonicalJsonObject, 
CanonicalJsonValue, EventId, RoomId, ServerName, api::client::error::ErrorKind, events::StateEventType, @@ -24,7 +21,7 @@ pub(super) async fn handle_outlier_pdu<'a>( room_id: &'a RoomId, mut value: CanonicalJsonObject, auth_events_known: bool, -) -> Result<(Arc, BTreeMap)> { +) -> Result<(PduEvent, BTreeMap)> { // 1. Remove unsigned field value.remove("unsigned"); @@ -95,7 +92,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // Build map of auth events let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); for id in &incoming_pdu.auth_events { - let Ok(auth_event) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await else { + let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { warn!("Could not find auth event {id}"); continue; }; @@ -123,15 +120,10 @@ pub(super) async fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if !matches!( - auth_events - .get(&(StateEventType::RoomCreate, String::new().into())) - .map(AsRef::as_ref), + auth_events.get(&(StateEventType::RoomCreate, String::new().into())), Some(_) | None ) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Incoming event refers to wrong create event.", - )); + return Err!(Request(InvalidParam("Incoming event refers to wrong create event."))); } let state_fetch = |ty: &StateEventType, sk: &str| { @@ -161,5 +153,5 @@ pub(super) async fn handle_outlier_pdu<'a>( trace!("Added pdu as outlier."); - Ok((Arc::new(incoming_pdu), val)) + Ok((incoming_pdu, val)) } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index cf69a515..85e0a6b9 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeMap, HashMap}, - sync::Arc, time::Instant, }; @@ -24,10 +23,7 @@ pub(super) async fn handle_prev_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: 
&'a RoomId, - eventid_info: &mut HashMap< - OwnedEventId, - (Arc, BTreeMap), - >, + eventid_info: &mut HashMap)>, create_event: &PduEvent, first_ts_in_room: UInt, prev_id: &EventId, diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8326f9da..0402ff14 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -2,7 +2,6 @@ use std::{ borrow::Borrow, collections::{HashMap, HashSet}, iter::Iterator, - sync::Arc, }; use conduwuit::{ @@ -20,7 +19,7 @@ use crate::rooms::short::ShortStateHash; #[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_degree_one( &self, - incoming_pdu: &Arc, + incoming_pdu: &PduEvent, ) -> Result>> { let prev_event = &incoming_pdu.prev_events[0]; let Ok(prev_event_sstatehash) = self @@ -67,7 +66,7 @@ pub(super) async fn state_at_incoming_degree_one( #[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_resolved( &self, - incoming_pdu: &Arc, + incoming_pdu: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result>> { diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index c1a1c3eb..086dc6bd 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -18,7 +18,7 @@ use crate::rooms::{ #[implement(super::Service)] pub(super) async fn upgrade_outlier_to_timeline_pdu( &self, - incoming_pdu: Arc, + incoming_pdu: PduEvent, val: BTreeMap, create_event: &PduEvent, origin: &ServerName, From d036394ec79cf94aee484e6bea41421396dcd749 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 09:53:42 +0000 Subject: [PATCH 0809/1248] refactor incoming prev events loop; mitigate large future Signed-off-by: Jason Volk --- 
.../event_handler/handle_incoming_pdu.rs | 102 ++++++++++-------- .../rooms/event_handler/handle_prev_pdu.rs | 65 +++++------ 2 files changed, 88 insertions(+), 79 deletions(-) diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index b437bf2e..77cae41d 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -3,9 +3,12 @@ use std::{ time::Instant, }; -use conduwuit::{Err, Result, debug, debug::INFO_SPAN_LEVEL, err, implement, warn}; +use conduwuit::{ + Err, Result, debug, debug::INFO_SPAN_LEVEL, defer, err, implement, utils::stream::IterStream, + warn, +}; use futures::{ - FutureExt, + FutureExt, TryFutureExt, TryStreamExt, future::{OptionFuture, try_join5}, }; use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UserId, events::StateEventType}; @@ -86,7 +89,7 @@ pub async fn handle_incoming_pdu<'a>( .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, ""); - let (meta_exists, is_disabled, (), (), create_event) = try_join5( + let (meta_exists, is_disabled, (), (), ref create_event) = try_join5( meta_exists, is_disabled, origin_acl_check, @@ -104,7 +107,7 @@ pub async fn handle_incoming_pdu<'a>( } let (incoming_pdu, val) = self - .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) + .handle_outlier_pdu(origin, create_event, event_id, room_id, value, false) .await?; // 8. 
if not timeline event: stop @@ -129,66 +132,71 @@ pub async fn handle_incoming_pdu<'a>( let (sorted_prev_events, mut eventid_info) = self .fetch_prev( origin, - &create_event, + create_event, room_id, first_ts_in_room, incoming_pdu.prev_events.clone(), ) .await?; - debug!(events = ?sorted_prev_events, "Got previous events"); - for prev_id in sorted_prev_events { - self.services.server.check_running()?; - if let Err(e) = self - .handle_prev_pdu( + debug!( + events = ?sorted_prev_events, + "Handling previous events" + ); + + sorted_prev_events + .iter() + .try_stream() + .map_ok(AsRef::as_ref) + .try_for_each(|prev_id| { + self.handle_prev_pdu( origin, event_id, room_id, - &mut eventid_info, - &create_event, + eventid_info.remove(prev_id), + create_event, first_ts_in_room, - &prev_id, + prev_id, ) - .await - { - use hash_map::Entry; - - let now = Instant::now(); - warn!("Prev event {prev_id} failed: {e}"); - - match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(prev_id) - { - | Entry::Vacant(e) => { - e.insert((now, 1)); - }, - | Entry::Occupied(mut e) => { - *e.get_mut() = (now, e.get().1.saturating_add(1)); - }, - } - } - } + .inspect_err(move |e| { + warn!("Prev {prev_id} failed: {e}"); + match self + .services + .globals + .bad_event_ratelimiter + .write() + .expect("locked") + .entry(prev_id.into()) + { + | hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + | hash_map::Entry::Occupied(mut e) => { + let tries = e.get().1.saturating_add(1); + *e.get_mut() = (Instant::now(), tries); + }, + } + }) + .map(|_| self.services.server.check_running()) + }) + .boxed() + .await?; // Done with prev events, now handling the incoming event let start_time = Instant::now(); self.federation_handletime .write() .expect("locked") - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + .insert(room_id.into(), (event_id.to_owned(), start_time)); - let r = self - 
.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id) - .await; + defer! {{ + self.federation_handletime + .write() + .expect("locked") + .remove(room_id); + }}; - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - r + self.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, create_event, origin, room_id) + .boxed() + .await } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 85e0a6b9..d612b2bf 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -1,13 +1,10 @@ -use std::{ - collections::{BTreeMap, HashMap}, - time::Instant, -}; +use std::{collections::BTreeMap, time::Instant}; use conduwuit::{ - Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, implement, + Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, defer, implement, utils::continue_exponential_backoff_secs, }; -use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; +use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UInt}; #[implement(super::Service)] #[allow(clippy::type_complexity)] @@ -23,10 +20,10 @@ pub(super) async fn handle_prev_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - eventid_info: &mut HashMap)>, - create_event: &PduEvent, + eventid_info: Option<(PduEvent, BTreeMap)>, + create_event: &'a PduEvent, first_ts_in_room: UInt, - prev_id: &EventId, + prev_id: &'a EventId, ) -> Result { // Check for disabled again because it might have changed if self.services.metadata.is_disabled(room_id).await { @@ -57,31 +54,35 @@ pub(super) async fn handle_prev_pdu<'a>( } } - if let Some((pdu, json)) = eventid_info.remove(prev_id) { - // Skip old events - if pdu.origin_server_ts < first_ts_in_room { - return Ok(()); - } + let Some((pdu, json)) = eventid_info else { + return Ok(()); + }; - let start_time = Instant::now(); - 
self.federation_handletime - .write() - .expect("locked") - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - - self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) - .await?; - - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - debug!( - elapsed = ?start_time.elapsed(), - "Handled prev_event", - ); + // Skip old events + if pdu.origin_server_ts < first_ts_in_room { + return Ok(()); } + let start_time = Instant::now(); + self.federation_handletime + .write() + .expect("locked") + .insert(room_id.into(), ((*prev_id).to_owned(), start_time)); + + defer! {{ + self.federation_handletime + .write() + .expect("locked") + .remove(room_id); + }}; + + self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) + .await?; + + debug!( + elapsed = ?start_time.elapsed(), + "Handled prev_event", + ); + Ok(()) } From 00f7745ec4ebcea5f892376c5de5db1299f71696 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 02:56:54 +0000 Subject: [PATCH 0810/1248] remove the db pool queue full warning Signed-off-by: Jason Volk --- src/database/pool.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/database/pool.rs b/src/database/pool.rs index 47e61c30..0fa742d1 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -12,7 +12,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - Error, Result, Server, debug, debug_warn, err, error, implement, + Error, Result, Server, debug, err, error, implement, result::DebugInspect, smallvec::SmallVec, trace, @@ -245,13 +245,6 @@ async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { self.queued_max.fetch_max(queue.len(), Ordering::Relaxed); } - if queue.is_full() { - debug_warn!( - capacity = ?queue.capacity(), - "pool queue is full" - ); - } - queue .send(cmd) .await From 4e5b87d0cd16f3d015f4b61285b369d027bb909d Mon Sep 17 00:00:00 2001 From: June 
Clementine Strawberry Date: Fri, 4 Apr 2025 11:34:31 -0400 Subject: [PATCH 0811/1248] add missing condition for signatures upload failures Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index f6224343..2fdfc0bc 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -380,7 +380,9 @@ pub(crate) async fn upload_signatures_route( } } - failures.insert(user_id.to_owned(), failure_reasons.clone()); + if !failure_reasons.is_empty() { + failures.insert(user_id.to_owned(), failure_reasons.clone()); + } } Ok(upload_signatures::v3::Response { failures }) From 532dfd004dbc020baa74a4d4413d9ad8139f851e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 03:30:13 +0000 Subject: [PATCH 0812/1248] move core::pdu and core::state_res into core::matrix:: Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 4 +- src/admin/user/commands.rs | 3 +- src/api/client/account.rs | 6 ++- src/api/client/account_data.rs | 5 +- src/api/client/alias.rs | 2 +- src/api/client/backup.rs | 4 +- src/api/client/context.rs | 6 ++- src/api/client/device.rs | 4 +- src/api/client/directory.rs | 2 +- src/api/client/filter.rs | 4 +- src/api/client/keys.rs | 6 +-- src/api/client/membership.rs | 27 ++++++----- src/api/client/message.rs | 22 +++++---- src/api/client/openid.rs | 4 +- src/api/client/profile.rs | 4 +- src/api/client/push.rs | 6 +-- src/api/client/read_marker.rs | 4 +- src/api/client/redact.rs | 3 +- src/api/client/relations.rs | 5 +- src/api/client/report.rs | 8 ++-- src/api/client/room/create.rs | 6 ++- src/api/client/room/upgrade.rs | 5 +- src/api/client/search.rs | 5 +- src/api/client/send.rs | 4 +- src/api/client/session.rs | 9 ++-- src/api/client/space.rs | 10 ++-- src/api/client/state.rs | 8 +++- src/api/client/sync/mod.rs | 5 +- src/api/client/sync/v3.rs | 9 ++-- src/api/client/sync/v5.rs | 9 +++- src/api/client/tag.rs | 3 
+- src/api/client/thirdparty.rs | 3 +- src/api/client/threads.rs | 7 ++- src/api/client/to_device.rs | 2 +- src/api/client/typing.rs | 4 +- src/api/client/unversioned.rs | 3 +- src/api/client/user_directory.rs | 4 +- src/api/client/voip.rs | 4 +- src/api/client/well_known.rs | 3 +- src/api/mod.rs | 2 - src/api/server/hierarchy.rs | 4 +- src/api/server/invite.rs | 5 +- src/api/server/make_join.rs | 10 ++-- src/api/server/make_knock.rs | 5 +- src/api/server/make_leave.rs | 4 +- src/api/server/openid.rs | 3 +- src/api/server/publicrooms.rs | 3 +- src/api/server/send.rs | 16 +++---- src/api/server/send_join.rs | 2 +- src/api/server/send_knock.rs | 6 ++- src/api/server/send_leave.rs | 8 ++-- src/api/server/version.rs | 3 +- src/api/server/well_known.rs | 3 +- .../state_event.rs => matrix/event.rs} | 0 src/core/matrix/mod.rs | 9 ++++ src/core/{pdu/mod.rs => matrix/pdu.rs} | 47 +++++++++++++++---- src/core/{ => matrix}/pdu/builder.rs | 0 src/core/{ => matrix}/pdu/content.rs | 0 src/core/{ => matrix}/pdu/count.rs | 0 src/core/{ => matrix}/pdu/event_id.rs | 0 src/core/{ => matrix}/pdu/filter.rs | 0 src/core/{ => matrix}/pdu/id.rs | 0 src/core/{ => matrix}/pdu/raw_id.rs | 0 src/core/{ => matrix}/pdu/redact.rs | 0 src/core/{ => matrix}/pdu/relation.rs | 0 src/core/{ => matrix}/pdu/state_key.rs | 0 src/core/{ => matrix}/pdu/strip.rs | 0 src/core/{ => matrix}/pdu/tests.rs | 0 src/core/{ => matrix}/pdu/unsigned.rs | 0 src/core/{ => matrix}/state_res/LICENSE | 0 src/core/{ => matrix}/state_res/benches.rs | 0 src/core/{ => matrix}/state_res/error.rs | 0 src/core/{ => matrix}/state_res/event_auth.rs | 0 src/core/{ => matrix}/state_res/mod.rs | 8 ++-- src/core/{ => matrix}/state_res/outcomes.txt | 0 .../{ => matrix}/state_res/power_levels.rs | 2 +- .../{ => matrix}/state_res/room_version.rs | 0 src/core/{ => matrix}/state_res/test_utils.rs | 5 +- src/core/mod.rs | 6 +-- src/core/pdu/event.rs | 35 -------------- src/service/admin/grant.rs | 4 +- src/service/mod.rs | 1 - 
.../rooms/event_handler/state_at_incoming.rs | 4 +- .../event_handler/upgrade_outlier_pdu.rs | 3 +- src/service/rooms/outlier/mod.rs | 6 +-- src/service/rooms/read_receipt/mod.rs | 6 ++- src/service/rooms/short/mod.rs | 4 +- .../rooms/state_accessor/room_state.rs | 5 +- src/service/rooms/state_accessor/state.rs | 6 ++- src/service/rooms/threads/mod.rs | 5 +- src/service/rooms/timeline/mod.rs | 9 ++-- 91 files changed, 266 insertions(+), 205 deletions(-) rename src/core/{state_res/state_event.rs => matrix/event.rs} (100%) create mode 100644 src/core/matrix/mod.rs rename src/core/{pdu/mod.rs => matrix/pdu.rs} (72%) rename src/core/{ => matrix}/pdu/builder.rs (100%) rename src/core/{ => matrix}/pdu/content.rs (100%) rename src/core/{ => matrix}/pdu/count.rs (100%) rename src/core/{ => matrix}/pdu/event_id.rs (100%) rename src/core/{ => matrix}/pdu/filter.rs (100%) rename src/core/{ => matrix}/pdu/id.rs (100%) rename src/core/{ => matrix}/pdu/raw_id.rs (100%) rename src/core/{ => matrix}/pdu/redact.rs (100%) rename src/core/{ => matrix}/pdu/relation.rs (100%) rename src/core/{ => matrix}/pdu/state_key.rs (100%) rename src/core/{ => matrix}/pdu/strip.rs (100%) rename src/core/{ => matrix}/pdu/tests.rs (100%) rename src/core/{ => matrix}/pdu/unsigned.rs (100%) rename src/core/{ => matrix}/state_res/LICENSE (100%) rename src/core/{ => matrix}/state_res/benches.rs (100%) rename src/core/{ => matrix}/state_res/error.rs (100%) rename src/core/{ => matrix}/state_res/event_auth.rs (100%) rename src/core/{ => matrix}/state_res/mod.rs (99%) rename src/core/{ => matrix}/state_res/outcomes.txt (100%) rename src/core/{ => matrix}/state_res/power_levels.rs (99%) rename src/core/{ => matrix}/state_res/room_version.rs (100%) rename src/core/{ => matrix}/state_res/test_utils.rs (99%) delete mode 100644 src/core/pdu/event.rs diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index c6f6a170..87ca03a0 100644 --- a/src/admin/debug/commands.rs +++ 
b/src/admin/debug/commands.rs @@ -6,7 +6,9 @@ use std::{ }; use conduwuit::{ - Error, PduEvent, PduId, RawPduId, Result, debug_error, err, info, trace, utils, + Error, Result, debug_error, err, info, + matrix::pdu::{PduEvent, PduId, RawPduId}, + trace, utils, utils::{ stream::{IterStream, ReadyExt}, string::EMPTY, diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 35067304..45e550be 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,7 +2,8 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - PduBuilder, Result, debug, debug_warn, error, info, is_equal_to, + Result, debug, debug_warn, error, info, is_equal_to, + matrix::pdu::PduBuilder, utils::{self, ReadyExt}, warn, }; diff --git a/src/api/client/account.rs b/src/api/client/account.rs index e5894d47..32f2530c 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,10 +3,13 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, + Err, Error, Result, debug_info, err, error, info, is_equal_to, + matrix::pdu::PduBuilder, + utils, utils::{ReadyExt, stream::BroadbandExt}, warn, }; +use conduwuit_service::Services; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ @@ -30,7 +33,6 @@ use ruma::{ }, push, }; -use service::Services; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; use crate::Ruma; diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 60c18b37..e44ce4e7 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -1,5 +1,6 @@ use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err}; +use conduwuit_service::Services; use ruma::{ RoomId, UserId, 
api::client::config::{ @@ -15,7 +16,7 @@ use ruma::{ use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -use crate::{Result, Ruma, service::Services}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 319e5141..9f1b05f8 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,12 +1,12 @@ use axum::extract::State; use conduwuit::{Err, Result, debug}; +use conduwuit_service::Services; use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ OwnedServerName, RoomAliasId, RoomId, api::client::alias::{create_alias, delete_alias, get_alias}, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 83955fea..2ad37cf3 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,7 +1,7 @@ use std::cmp::Ordering; use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err}; use ruma::{ UInt, api::client::backup::{ @@ -13,7 +13,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/room_keys/version` /// diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 1dda7b53..dbc2a22f 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,18 +1,20 @@ use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, debug_warn, err, ref_at, + Err, Result, at, debug_warn, err, + matrix::pdu::PduEvent, + ref_at, utils::{ IterStream, future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, }, }; +use conduwuit_service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use futures::{ FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::{OptionFuture, join, join3, try_join3}, }; use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType}; -use 
service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use crate::{ Ruma, diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 7603c866..5519a1a5 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,6 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err}; +use conduwuit::{Err, Error, Result, debug, err, utils}; use futures::StreamExt; use ruma::{ MilliSecondsSinceUnixEpoch, OwnedDeviceId, @@ -12,7 +12,7 @@ use ruma::{ }; use super::SESSION_ID_LENGTH; -use crate::{Error, Result, Ruma, client::DEVICE_ID_LENGTH, utils}; +use crate::{Ruma, client::DEVICE_ID_LENGTH}; /// # `GET /_matrix/client/r0/devices` /// diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index f2f668c8..9ca35537 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -9,6 +9,7 @@ use conduwuit::{ stream::{ReadyExt, WidebandExt}, }, }; +use conduwuit_service::Services; use futures::{ FutureExt, StreamExt, TryFutureExt, future::{join, join4, join5}, @@ -35,7 +36,6 @@ use ruma::{ }, uint, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs index 84086452..97044ffc 100644 --- a/src/api/client/filter.rs +++ b/src/api/client/filter.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::err; +use conduwuit::{Result, err}; use ruma::api::client::filter::{create_filter, get_filter}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 2fdfc0bc..6865c2a4 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -2,6 +2,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; +use conduwuit_service::{Services, users::parse_master_key}; use 
futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, @@ -23,10 +24,7 @@ use ruma::{ use serde_json::json; use super::SESSION_ID_LENGTH; -use crate::{ - Ruma, - service::{Services, users::parse_master_key}, -}; +use crate::Ruma; /// # `POST /_matrix/client/r0/keys/upload` /// diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index ef40e972..d0345c8e 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,13 +9,25 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, - pdu::{PduBuilder, gen_event_id_canonical_json}, + Err, Result, at, debug, debug_info, debug_warn, err, error, info, + matrix::{ + StateKey, + pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, + state_res, + }, result::{FlatOk, NotFound}, - state_res, trace, + trace, utils::{self, IterStream, ReadyExt, shuffle}, warn, }; +use conduwuit_service::{ + Services, + appservice::RegistrationInfo, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, +}; use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, @@ -44,15 +56,6 @@ use ruma::{ }, }, }; -use service::{ - Services, - appservice::RegistrationInfo, - pdu::gen_event_id, - rooms::{ - state::RoomMutexGuard, - state_compressor::{CompressedState, HashSetCompressStateEvent}, - }, -}; use crate::{Ruma, client::full_user_deactivate}; diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 03c7335a..3e784a4a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,12 +1,24 @@ use axum::extract::State; use conduwuit::{ - Err, Event, PduCount, PduEvent, Result, at, + Err, Result, at, + matrix::{ + Event, 
+ pdu::{PduCount, PduEvent}, + }, utils::{ IterStream, ReadyExt, result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, }, }; +use conduwuit_service::{ + Services, + rooms::{ + lazy_loading, + lazy_loading::{Options, Witness}, + timeline::PdusIterItem, + }, +}; use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; use ruma::{ RoomId, UserId, @@ -17,14 +29,6 @@ use ruma::{ events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, serde::Raw, }; -use service::{ - Services, - rooms::{ - lazy_loading, - lazy_loading::{Options, Witness}, - timeline::PdusIterItem, - }, -}; use crate::Ruma; diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 671d0c6d..8d2de68d 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -1,14 +1,14 @@ use std::time::Duration; use axum::extract::State; -use conduwuit::utils; +use conduwuit::{Error, Result, utils}; use ruma::{ api::client::{account, error::ErrorKind}, authentication::TokenType, }; use super::TOKEN_LENGTH; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/v3/user/{userId}/openid/request_token` /// diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 5abe5b23..3699b590 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -3,10 +3,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ Err, Error, Result, - pdu::PduBuilder, + matrix::pdu::PduBuilder, utils::{IterStream, stream::TryIgnore}, warn, }; +use conduwuit_service::Services; use futures::{StreamExt, TryStreamExt, future::join3}; use ruma::{ OwnedMxcUri, OwnedRoomId, UserId, @@ -22,7 +23,6 @@ use ruma::{ events::room::member::{MembershipState, RoomMemberEventContent}, presence::PresenceState, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/push.rs b/src/api/client/push.rs index cc1d3be2..81020ffa 100644 --- a/src/api/client/push.rs +++ 
b/src/api/client/push.rs @@ -1,5 +1,6 @@ use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Error, Result, err}; +use conduwuit_service::Services; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, api::client::{ @@ -19,9 +20,8 @@ use ruma::{ RemovePushRuleError, Ruleset, }, }; -use service::Services; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/pushrules/` /// diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index b334e356..fbfc8fea 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{Err, PduCount, err}; +use conduwuit::{Err, PduCount, Result, err}; use ruma::{ MilliSecondsSinceUnixEpoch, api::client::{read_marker::set_read_marker, receipt::create_receipt}, @@ -11,7 +11,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` /// diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index 7b512d06..8dbe47a6 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -1,9 +1,10 @@ use axum::extract::State; +use conduwuit::{Result, matrix::pdu::PduBuilder}; use ruma::{ api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, }; -use crate::{Result, Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 7ed40f14..b8c2dd4d 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,8 +1,10 @@ use axum::extract::State; use conduwuit::{ - PduCount, Result, at, + Result, at, + matrix::pdu::PduCount, utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; +use conduwuit_service::{Services, rooms::timeline::PdusIterItem}; use futures::StreamExt; use ruma::{ 
EventId, RoomId, UInt, UserId, @@ -15,7 +17,6 @@ use ruma::{ }, events::{TimelineEventType, relation::RelationType}, }; -use service::{Services, rooms::timeline::PdusIterItem}; use crate::Ruma; diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 7922caca..4ee8ebe5 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,7 +2,8 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, info, utils::ReadyExt}; +use conduwuit::{Err, Error, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt}; +use conduwuit_service::Services; use rand::Rng; use ruma::{ EventId, RoomId, UserId, @@ -15,10 +16,7 @@ use ruma::{ }; use tokio::time::sleep; -use crate::{ - Error, Result, Ruma, debug_info, - service::{Services, pdu::PduEvent}, -}; +use crate::Ruma; /// # `POST /_matrix/client/v3/rooms/{roomId}/report` /// diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index bdc5d5a5..4ce53f15 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,8 +2,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, Error, Result, StateKey, debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, + Err, Error, Result, debug_info, debug_warn, err, error, info, + matrix::{StateKey, pdu::PduBuilder}, + warn, }; +use conduwuit_service::{Services, appservice::RegistrationInfo}; use futures::FutureExt; use ruma::{ CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, @@ -29,7 +32,6 @@ use ruma::{ serde::{JsonObject, Raw}, }; use serde_json::{json, value::to_raw_value}; -use service::{Services, appservice::RegistrationInfo}; use crate::{Ruma, client::invite_helper}; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 3cfb3c28..9ec0b3bb 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,7 +1,10 @@ use 
std::cmp::max; use axum::extract::State; -use conduwuit::{Error, Result, StateKey, err, info, pdu::PduBuilder}; +use conduwuit::{ + Error, Result, err, info, + matrix::{StateKey, pdu::PduBuilder}, +}; use futures::StreamExt; use ruma::{ CanonicalJsonObject, RoomId, RoomVersionId, diff --git a/src/api/client/search.rs b/src/api/client/search.rs index d66df881..d4dcde57 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -2,10 +2,12 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, is_true, + Err, Result, at, is_true, + matrix::pdu::PduEvent, result::FlatOk, utils::{IterStream, stream::ReadyExt}, }; +use conduwuit_service::{Services, rooms::search::RoomQuery}; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture}; use ruma::{ OwnedRoomId, RoomId, UInt, UserId, @@ -17,7 +19,6 @@ use ruma::{ serde::Raw, }; use search_events::v3::{Request, Response}; -use service::{Services, rooms::search::RoomQuery}; use crate::Ruma; diff --git a/src/api/client/send.rs b/src/api/client/send.rs index 1af74f57..f753fa65 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err, matrix::pdu::PduBuilder, utils}; use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; use serde_json::from_str; -use crate::{Result, Ruma, service::pdu::PduBuilder, utils}; +use crate::Ruma; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` /// diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 3de625e4..2499a43d 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,7 +2,11 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err, info, utils::ReadyExt}; +use conduwuit::{ + Err, Error, Result, debug, 
err, info, utils, + utils::{ReadyExt, hash}, +}; +use conduwuit_service::uiaa::SESSION_ID_LENGTH; use futures::StreamExt; use ruma::{ UserId, @@ -22,10 +26,9 @@ use ruma::{ uiaa, }, }; -use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{Error, Result, Ruma, utils, utils::hash}; +use crate::Ruma; /// # `GET /_matrix/client/v3/login` /// diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 567ac62f..4eee9d76 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -8,16 +8,16 @@ use conduwuit::{ Err, Result, utils::{future::TryExtExt, stream::IterStream}, }; -use futures::{StreamExt, TryFutureExt, future::OptionFuture}; -use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, -}; -use service::{ +use conduwuit_service::{ Services, rooms::spaces::{ PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk, }, }; +use futures::{StreamExt, TryFutureExt, future::OptionFuture}; +use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, +}; use crate::Ruma; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 23583356..5c5c71f2 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,5 +1,10 @@ use axum::extract::State; -use conduwuit::{Err, PduEvent, Result, err, pdu::PduBuilder, utils::BoolExt}; +use conduwuit::{ + Err, Result, err, + matrix::pdu::{PduBuilder, PduEvent}, + utils::BoolExt, +}; +use conduwuit_service::Services; use futures::TryStreamExt; use ruma::{ OwnedEventId, RoomId, UserId, @@ -16,7 +21,6 @@ use ruma::{ }, serde::Raw, }; -use service::Services; use crate::{Ruma, RumaResponse}; diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 3eab76cc..14459acf 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -3,12 +3,14 @@ mod v4; mod v5; use conduwuit::{ - PduCount, + Error, PduCount, Result, + 
matrix::pdu::PduEvent, utils::{ IterStream, stream::{BroadbandExt, ReadyExt, TryIgnore}, }, }; +use conduwuit_service::Services; use futures::{StreamExt, pin_mut}; use ruma::{ RoomId, UserId, @@ -21,7 +23,6 @@ use ruma::{ pub(crate) use self::{ v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, }; -use crate::{Error, PduEvent, Result, service::Services}; pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 83ffa55a..12731ff6 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,9 +6,12 @@ use std::{ use axum::extract::State; use conduwuit::{ - PduCount, PduEvent, Result, at, err, error, extract_variant, is_equal_to, pair_of, - pdu::{Event, EventHash}, - ref_at, + Result, at, err, error, extract_variant, is_equal_to, + matrix::{ + Event, + pdu::{EventHash, PduCount, PduEvent}, + }, + pair_of, ref_at, result::FlatOk, utils::{ self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index c4e71d88..684752ec 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,13 +6,19 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduEvent, Result, TypeStateKey, debug, error, extract_variant, trace, + Error, Result, debug, error, extract_variant, + matrix::{ + TypeStateKey, + pdu::{PduCount, PduEvent}, + }, + trace, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma}, }, warn, }; +use conduwuit_service::rooms::read_receipt::pack_receipts; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, @@ -27,7 +33,6 @@ use ruma::{ serde::Raw, uint, }; -use service::{PduCount, rooms::read_receipt::pack_receipts}; use super::{filter_rooms, share_encrypted_room}; use crate::{ diff 
--git a/src/api/client/tag.rs b/src/api/client/tag.rs index 3b3b40d4..caafe10d 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; +use conduwuit::Result; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -9,7 +10,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` /// diff --git a/src/api/client/thirdparty.rs b/src/api/client/thirdparty.rs index 790b27d3..0713a882 100644 --- a/src/api/client/thirdparty.rs +++ b/src/api/client/thirdparty.rs @@ -1,8 +1,9 @@ use std::collections::BTreeMap; +use conduwuit::Result; use ruma::api::client::thirdparty::get_protocols; -use crate::{Result, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse}; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 00bfe553..5b838bef 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,9 +1,12 @@ use axum::extract::State; -use conduwuit::{PduCount, PduEvent, at}; +use conduwuit::{ + Result, at, + matrix::pdu::{PduCount, PduEvent}, +}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/rooms/{roomId}/threads` pub(crate) async fn get_threads_route( diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 1b942fba..8ad9dc99 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{Error, Result}; +use conduwuit_service::sending::EduBuf; use futures::StreamExt; use ruma::{ api::{ @@ -10,7 +11,6 @@ use ruma::{ }, to_device::DeviceIdOrAllDevices, }; -use service::sending::EduBuf; use crate::Ruma; diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index 
b02cc473..1d8d02fd 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::{Err, utils::math::Tried}; +use conduwuit::{Err, Result, utils, utils::math::Tried}; use ruma::api::client::typing::create_typing_event; -use crate::{Result, Ruma, utils}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 4e2b7d9d..232d5b28 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -1,10 +1,11 @@ use std::collections::BTreeMap; use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::Result; use futures::StreamExt; use ruma::api::client::discovery::get_supported_versions; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/versions` /// diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index c5d79a56..8f564eed 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::utils::TryFutureExtExt; +use conduwuit::{Result, utils::TryFutureExtExt}; use futures::{StreamExt, pin_mut}; use ruma::{ api::client::user_directory::search_users, @@ -9,7 +9,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/user_directory/search` /// diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index 37e67984..91991d24 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -2,12 +2,12 @@ use std::time::{Duration, SystemTime}; use axum::extract::State; use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{Err, utils}; +use conduwuit::{Err, Result, utils}; use hmac::{Hmac, Mac}; use ruma::{SecondsSinceUnixEpoch, UserId, api::client::voip::get_turn_server_info}; use sha1::Sha1; -use crate::{Result, Ruma}; +use crate::Ruma; const RANDOM_USER_ID_LENGTH: usize = 10; 
diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index abda61b0..eedab981 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -1,4 +1,5 @@ use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::{Error, Result}; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, @@ -7,7 +8,7 @@ use ruma::api::client::{ error::ErrorKind, }; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /.well-known/matrix/client` /// diff --git a/src/api/mod.rs b/src/api/mod.rs index 090cf897..9ca24e72 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -8,8 +8,6 @@ pub mod server; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::{Error, Result, debug_info, pdu::PduEvent, utils}; - pub(crate) use self::router::{Ruma, RumaResponse, State}; conduwuit::mod_ctor! {} diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index c759c8ea..42c348f9 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -3,9 +3,11 @@ use conduwuit::{ Err, Result, utils::stream::{BroadbandExt, IterStream}, }; +use conduwuit_service::rooms::spaces::{ + Identifier, SummaryAccessibility, get_parent_children_via, +}; use futures::{FutureExt, StreamExt}; use ruma::api::federation::space::get_hierarchy; -use service::rooms::spaces::{Identifier, SummaryAccessibility, get_parent_children_via}; use crate::Ruma; diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index f4cc6eb2..cda34fb5 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,14 +1,15 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{Err, Error, PduEvent, Result, err, utils, utils::hash::sha256, warn}; +use conduwuit::{ + Err, Error, PduEvent, Result, err, pdu::gen_event_id, utils, utils::hash::sha256, warn, +}; 
use ruma::{ CanonicalJsonValue, OwnedUserId, UserId, api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, }; -use service::pdu::gen_event_id; use crate::Ruma; diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index f18d1304..4664b904 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,5 +1,8 @@ use axum::extract::State; -use conduwuit::{Err, debug_info, utils::IterStream, warn}; +use conduwuit::{ + Err, Error, Result, debug_info, matrix::pdu::PduBuilder, utils::IterStream, warn, +}; +use conduwuit_service::Services; use futures::StreamExt; use ruma::{ CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, @@ -14,10 +17,7 @@ use ruma::{ }; use serde_json::value::to_raw_value; -use crate::{ - Error, Result, Ruma, - service::{Services, pdu::PduBuilder}, -}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 71536439..6d71ab2a 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,15 +1,14 @@ use RoomVersionId::*; use axum::extract::State; -use conduwuit::{Err, debug_warn}; +use conduwuit::{Err, Error, Result, debug_warn, matrix::pdu::PduBuilder, warn}; use ruma::{ RoomVersionId, api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, }; use serde_json::value::to_raw_value; -use tracing::warn; -use crate::{Error, Result, Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` /// diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 1ed02785..cb6bd2fa 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{Err, Result}; 
+use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; use ruma::{ api::federation::membership::prepare_leave_event, events::room::member::{MembershipState, RoomMemberEventContent}, @@ -7,7 +7,7 @@ use ruma::{ use serde_json::value::to_raw_value; use super::make_join::maybe_strip_event_id; -use crate::{Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/openid.rs b/src/api/server/openid.rs index 4833fbe1..a09cd7ad 100644 --- a/src/api/server/openid.rs +++ b/src/api/server/openid.rs @@ -1,7 +1,8 @@ use axum::extract::State; +use conduwuit::Result; use ruma::api::federation::openid::get_openid_userinfo; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/openid/userinfo` /// diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index ff74574a..cf66ea71 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -1,5 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; +use conduwuit::{Error, Result}; use ruma::{ api::{ client::error::ErrorKind, @@ -8,7 +9,7 @@ use ruma::{ directory::Filter, }; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/federation/v1/publicRooms` /// diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 1f467dac..9c5bfd2b 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -9,11 +9,15 @@ use conduwuit::{ result::LogErr, trace, utils::{ - IterStream, ReadyExt, + IterStream, ReadyExt, millis_since_unix_epoch, stream::{BroadbandExt, TryBroadbandExt, automatic_width}, }, warn, }; +use conduwuit_service::{ + Services, + sending::{EDU_LIMIT, PDU_LIMIT}, +}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; use ruma::{ @@ -33,16 +37,8 @@ use ruma::{ serde::Raw, to_device::DeviceIdOrAllDevices, }; -use service::{ - Services, - sending::{EDU_LIMIT, PDU_LIMIT}, -}; -use 
utils::millis_since_unix_epoch; -use crate::{ - Ruma, - utils::{self}, -}; +use crate::Ruma; type ResolvedMap = BTreeMap; type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index c1749835..2e2e89ee 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -9,6 +9,7 @@ use conduwuit::{ utils::stream::{IterStream, TryBroadbandExt}, warn, }; +use conduwuit_service::Services; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, @@ -20,7 +21,6 @@ use ruma::{ }, }; use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; -use service::Services; use crate::Ruma; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index f7bb0735..c5ab0306 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,5 +1,9 @@ use axum::extract::State; -use conduwuit::{Err, PduEvent, Result, err, pdu::gen_event_id_canonical_json, warn}; +use conduwuit::{ + Err, Result, err, + matrix::pdu::{PduEvent, gen_event_id_canonical_json}, + warn, +}; use futures::FutureExt; use ruma::{ OwnedServerName, OwnedUserId, diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index 71516553..d3dc994c 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,7 +1,8 @@ #![allow(deprecated)] use axum::extract::State; -use conduwuit::{Err, Result, err}; +use conduwuit::{Err, Result, err, matrix::pdu::gen_event_id_canonical_json}; +use conduwuit_service::Services; use futures::FutureExt; use ruma::{ OwnedRoomId, OwnedUserId, RoomId, ServerName, @@ -13,10 +14,7 @@ use ruma::{ }; use serde_json::value::RawValue as RawJsonValue; -use crate::{ - Ruma, - service::{Services, pdu::gen_event_id_canonical_json}, -}; +use crate::Ruma; /// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` /// diff --git 
a/src/api/server/version.rs b/src/api/server/version.rs index 036b61f7..b08ff77a 100644 --- a/src/api/server/version.rs +++ b/src/api/server/version.rs @@ -1,6 +1,7 @@ +use conduwuit::Result; use ruma::api::federation::discovery::get_server_version; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/version` /// diff --git a/src/api/server/well_known.rs b/src/api/server/well_known.rs index 48caa7d6..75c7cf5d 100644 --- a/src/api/server/well_known.rs +++ b/src/api/server/well_known.rs @@ -1,7 +1,8 @@ use axum::extract::State; +use conduwuit::{Error, Result}; use ruma::api::{client::error::ErrorKind, federation::discovery::discover_homeserver}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /.well-known/matrix/server` /// diff --git a/src/core/state_res/state_event.rs b/src/core/matrix/event.rs similarity index 100% rename from src/core/state_res/state_event.rs rename to src/core/matrix/event.rs diff --git a/src/core/matrix/mod.rs b/src/core/matrix/mod.rs new file mode 100644 index 00000000..8c978173 --- /dev/null +++ b/src/core/matrix/mod.rs @@ -0,0 +1,9 @@ +//! 
Core Matrix Library + +pub mod event; +pub mod pdu; +pub mod state_res; + +pub use event::Event; +pub use pdu::{PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; +pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; diff --git a/src/core/pdu/mod.rs b/src/core/matrix/pdu.rs similarity index 72% rename from src/core/pdu/mod.rs rename to src/core/matrix/pdu.rs index 9fb2a3da..7e1ecfa8 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/matrix/pdu.rs @@ -1,7 +1,6 @@ mod builder; mod content; mod count; -mod event; mod event_id; mod filter; mod id; @@ -17,8 +16,8 @@ mod unsigned; use std::cmp::Ordering; use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, UInt, events::TimelineEventType, + CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, UInt, UserId, events::TimelineEventType, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; @@ -27,12 +26,12 @@ pub use self::{ Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, builder::{Builder, Builder as PduBuilder}, count::Count, - event::Event, event_id::*, id::*, raw_id::*, state_key::{ShortStateKey, StateKey}, }; +use super::Event; use crate::Result; /// Persistent Data Unit (Event) @@ -79,6 +78,36 @@ impl Pdu { } } +impl Event for Pdu { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { &self.room_id } + + fn sender(&self) -> &UserId { &self.sender } + + fn event_type(&self) -> &TimelineEventType { &self.kind } + + fn content(&self) -> &RawJsonValue { &self.content } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) + } + + fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + 
self.prev_events.iter() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.auth_events.iter() + } + + fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } +} + /// Prevent derived equality which wouldn't limit itself to event_id impl Eq for Pdu {} @@ -87,12 +116,12 @@ impl PartialEq for Pdu { fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id } } -/// Ordering determined by the Pdu's ID, not the memory representations. -impl PartialOrd for Pdu { - fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } -} - /// Ordering determined by the Pdu's ID, not the memory representations. impl Ord for Pdu { fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) } } + +/// Ordering determined by the Pdu's ID, not the memory representations. +impl PartialOrd for Pdu { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } +} diff --git a/src/core/pdu/builder.rs b/src/core/matrix/pdu/builder.rs similarity index 100% rename from src/core/pdu/builder.rs rename to src/core/matrix/pdu/builder.rs diff --git a/src/core/pdu/content.rs b/src/core/matrix/pdu/content.rs similarity index 100% rename from src/core/pdu/content.rs rename to src/core/matrix/pdu/content.rs diff --git a/src/core/pdu/count.rs b/src/core/matrix/pdu/count.rs similarity index 100% rename from src/core/pdu/count.rs rename to src/core/matrix/pdu/count.rs diff --git a/src/core/pdu/event_id.rs b/src/core/matrix/pdu/event_id.rs similarity index 100% rename from src/core/pdu/event_id.rs rename to src/core/matrix/pdu/event_id.rs diff --git a/src/core/pdu/filter.rs b/src/core/matrix/pdu/filter.rs similarity index 100% rename from src/core/pdu/filter.rs rename to src/core/matrix/pdu/filter.rs diff --git a/src/core/pdu/id.rs b/src/core/matrix/pdu/id.rs similarity index 100% rename from src/core/pdu/id.rs rename to src/core/matrix/pdu/id.rs diff --git a/src/core/pdu/raw_id.rs b/src/core/matrix/pdu/raw_id.rs 
similarity index 100% rename from src/core/pdu/raw_id.rs rename to src/core/matrix/pdu/raw_id.rs diff --git a/src/core/pdu/redact.rs b/src/core/matrix/pdu/redact.rs similarity index 100% rename from src/core/pdu/redact.rs rename to src/core/matrix/pdu/redact.rs diff --git a/src/core/pdu/relation.rs b/src/core/matrix/pdu/relation.rs similarity index 100% rename from src/core/pdu/relation.rs rename to src/core/matrix/pdu/relation.rs diff --git a/src/core/pdu/state_key.rs b/src/core/matrix/pdu/state_key.rs similarity index 100% rename from src/core/pdu/state_key.rs rename to src/core/matrix/pdu/state_key.rs diff --git a/src/core/pdu/strip.rs b/src/core/matrix/pdu/strip.rs similarity index 100% rename from src/core/pdu/strip.rs rename to src/core/matrix/pdu/strip.rs diff --git a/src/core/pdu/tests.rs b/src/core/matrix/pdu/tests.rs similarity index 100% rename from src/core/pdu/tests.rs rename to src/core/matrix/pdu/tests.rs diff --git a/src/core/pdu/unsigned.rs b/src/core/matrix/pdu/unsigned.rs similarity index 100% rename from src/core/pdu/unsigned.rs rename to src/core/matrix/pdu/unsigned.rs diff --git a/src/core/state_res/LICENSE b/src/core/matrix/state_res/LICENSE similarity index 100% rename from src/core/state_res/LICENSE rename to src/core/matrix/state_res/LICENSE diff --git a/src/core/state_res/benches.rs b/src/core/matrix/state_res/benches.rs similarity index 100% rename from src/core/state_res/benches.rs rename to src/core/matrix/state_res/benches.rs diff --git a/src/core/state_res/error.rs b/src/core/matrix/state_res/error.rs similarity index 100% rename from src/core/state_res/error.rs rename to src/core/matrix/state_res/error.rs diff --git a/src/core/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs similarity index 100% rename from src/core/state_res/event_auth.rs rename to src/core/matrix/state_res/event_auth.rs diff --git a/src/core/state_res/mod.rs b/src/core/matrix/state_res/mod.rs similarity index 99% rename from 
src/core/state_res/mod.rs rename to src/core/matrix/state_res/mod.rs index 1db92e59..93c00d15 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -4,7 +4,6 @@ pub(crate) mod error; pub mod event_auth; mod power_levels; mod room_version; -mod state_event; #[cfg(test)] mod test_utils; @@ -36,9 +35,12 @@ use self::power_levels::PowerLevelsContentFields; pub use self::{ event_auth::{auth_check, auth_types_for_event}, room_version::RoomVersion, - state_event::Event, }; -use crate::{debug, pdu::StateKey, trace, warn}; +use crate::{ + debug, + matrix::{event::Event, pdu::StateKey}, + trace, warn, +}; /// A mapping of event type and state_key to some value `T`, usually an /// `EventId`. diff --git a/src/core/state_res/outcomes.txt b/src/core/matrix/state_res/outcomes.txt similarity index 100% rename from src/core/state_res/outcomes.txt rename to src/core/matrix/state_res/outcomes.txt diff --git a/src/core/state_res/power_levels.rs b/src/core/matrix/state_res/power_levels.rs similarity index 99% rename from src/core/state_res/power_levels.rs rename to src/core/matrix/state_res/power_levels.rs index 045b1666..19ba8fb9 100644 --- a/src/core/state_res/power_levels.rs +++ b/src/core/matrix/state_res/power_levels.rs @@ -11,9 +11,9 @@ use ruma::{ }; use serde::Deserialize; use serde_json::{Error, from_str as from_json_str}; -use tracing::error; use super::{Result, RoomVersion}; +use crate::error; #[derive(Deserialize)] struct IntRoomPowerLevelsEventContent { diff --git a/src/core/state_res/room_version.rs b/src/core/matrix/state_res/room_version.rs similarity index 100% rename from src/core/state_res/room_version.rs rename to src/core/matrix/state_res/room_version.rs diff --git a/src/core/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs similarity index 99% rename from src/core/state_res/test_utils.rs rename to src/core/matrix/state_res/test_utils.rs index d96ee927..f2ee4238 100644 --- a/src/core/state_res/test_utils.rs +++ 
b/src/core/matrix/state_res/test_utils.rs @@ -28,7 +28,10 @@ use serde_json::{ pub(crate) use self::event::PduEvent; use super::auth_types_for_event; -use crate::{Event, EventTypeExt, Result, StateMap, info}; +use crate::{ + Result, info, + matrix::{Event, EventTypeExt, StateMap}, +}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); diff --git a/src/core/mod.rs b/src/core/mod.rs index 80ebbdcb..b91cdf0b 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -6,11 +6,10 @@ pub mod debug; pub mod error; pub mod info; pub mod log; +pub mod matrix; pub mod metrics; pub mod mods; -pub mod pdu; pub mod server; -pub mod state_res; pub mod utils; pub use ::arrayvec; @@ -23,9 +22,8 @@ pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; +pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res}; pub use server::Server; -pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; pub use utils::{ctor, dtor, implement, result, result::Result}; pub use crate as conduwuit_core; diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs deleted file mode 100644 index 09ad1666..00000000 --- a/src/core/pdu/event.rs +++ /dev/null @@ -1,35 +0,0 @@ -use ruma::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, events::TimelineEventType}; -use serde_json::value::RawValue as RawJsonValue; - -use super::Pdu; -pub use crate::state_res::Event; - -impl Event for Pdu { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } - - fn room_id(&self) -> &RoomId { &self.room_id } - - fn sender(&self) -> &UserId { &self.sender } - - fn event_type(&self) -> &TimelineEventType { &self.kind } - - fn content(&self) -> &RawJsonValue { &self.content } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - MilliSecondsSinceUnixEpoch(self.origin_server_ts) 
- } - - fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.prev_events.iter() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.auth_events.iter() - } - - fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } -} diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 5173987a..6780b7ae 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use conduwuit::{Err, Result, debug_info, debug_warn, error, implement}; +use conduwuit::{Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder}; use ruma::{ RoomId, UserId, events::{ @@ -14,8 +14,6 @@ use ruma::{ }, }; -use crate::pdu::PduBuilder; - /// Invite the user to the conduwuit admin room. /// /// This is equivalent to granting server admin privileges. diff --git a/src/service/mod.rs b/src/service/mod.rs index 8f4a84b0..63a51213 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -31,7 +31,6 @@ pub mod users; extern crate conduwuit_core as conduwuit; extern crate conduwuit_database as database; -pub use conduwuit::{PduBuilder, PduCount, PduEvent, pdu}; pub(crate) use service::{Args, Dep, Service}; pub use crate::services::Services; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 0402ff14..eb38c2c3 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -5,7 +5,9 @@ use std::{ }; use conduwuit::{ - PduEvent, Result, StateMap, debug, err, implement, trace, + Result, debug, err, implement, + matrix::{PduEvent, StateMap}, + trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, }; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; diff --git 
a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 086dc6bd..97d3df97 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,7 +1,8 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - Err, EventTypeExt, PduEvent, Result, StateKey, debug, debug_info, err, implement, state_res, + Err, Result, debug, debug_info, err, implement, + matrix::{EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, warn, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index a1b0263a..12b56935 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,11 +1,9 @@ use std::sync::Arc; -use conduwuit::{Result, implement}; -use database::{Deserialized, Json, Map}; +use conduwuit::{Result, implement, matrix::pdu::PduEvent}; +use conduwuit_database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; -use crate::PduEvent; - pub struct Service { db: Data, } diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index d6239aee..69e859c4 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,7 +2,11 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{PduCount, PduId, RawPduId, Result, debug, err, warn}; +use conduwuit::{ + Result, debug, err, + matrix::pdu::{PduCount, PduId, RawPduId}, + warn, +}; use futures::{Stream, TryFutureExt, try_join}; use ruma::{ OwnedEventId, OwnedUserId, RoomId, UserId, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 3980617e..06ff6493 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; -pub use 
conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; -use conduwuit::{Result, StateKey, err, implement, utils, utils::IterStream}; +pub use conduwuit::matrix::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; +use conduwuit::{Result, err, implement, matrix::StateKey, utils, utils::IterStream}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; use ruma::{EventId, RoomId, events::StateEventType}; diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index 642cd5d2..89fa2a83 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,6 +1,9 @@ use std::borrow::Borrow; -use conduwuit::{PduEvent, Result, StateKey, err, implement}; +use conduwuit::{ + Result, err, implement, + matrix::{PduEvent, StateKey}, +}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 8f2dd76f..169e69e9 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -1,13 +1,15 @@ use std::{borrow::Borrow, ops::Deref, sync::Arc}; use conduwuit::{ - PduEvent, Result, StateKey, at, err, implement, pair_of, + Result, at, err, implement, + matrix::{PduEvent, StateKey}, + pair_of, utils::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, }, }; -use database::Deserialized; +use conduwuit_database::Deserialized; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; use ruma::{ EventId, OwnedEventId, UserId, diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index 7f9a7515..a680df55 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,13 +1,14 @@ use std::{collections::BTreeMap, sync::Arc}; use 
conduwuit::{ - PduCount, PduEvent, PduId, RawPduId, Result, err, + Result, err, + matrix::pdu::{PduCount, PduEvent, PduId, RawPduId}, utils::{ ReadyExt, stream::{TryIgnore, WidebandExt}, }, }; -use database::{Deserialized, Map}; +use conduwuit_database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{ CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index dc359d22..947e1c38 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -10,16 +10,19 @@ use std::{ }; use async_trait::async_trait; +pub use conduwuit::matrix::pdu::{PduId, RawPduId}; use conduwuit::{ Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, - pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, - state_res::{self, Event, RoomVersion}, + matrix::{ + Event, + pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, + state_res::{self, RoomVersion}, + }, utils::{ self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, }, validated, warn, }; -pub use conduwuit::{PduId, RawPduId}; use futures::{ Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, }; From bb8320a691eda03c202bc428e75a616b0021fe03 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 02:39:40 +0000 Subject: [PATCH 0813/1248] abstract and encapsulate the awkward OptionFuture into Stream pattern Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 45 +++----------------------- src/core/utils/future/mod.rs | 2 ++ src/core/utils/future/option_ext.rs | 3 ++ src/core/utils/future/option_stream.rs | 25 ++++++++++++++ 4 files changed, 35 insertions(+), 40 deletions(-) create mode 100644 src/core/utils/future/option_stream.rs diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 12731ff6..24930941 100644 --- a/src/api/client/sync/v3.rs +++ 
b/src/api/client/sync/v3.rs @@ -15,6 +15,7 @@ use conduwuit::{ result::FlatOk, utils::{ self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::OptionStream, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, @@ -1036,7 +1037,7 @@ async fn calculate_state_incremental<'a>( }) .into(); - let state_diff: OptionFuture<_> = (!full_state && state_changed) + let state_diff_ids: OptionFuture<_> = (!full_state && state_changed) .then(|| { StreamExt::into_future( services @@ -1061,45 +1062,9 @@ async fn calculate_state_incremental<'a>( }) .into(); - let lazy_state_ids = lazy_state_ids - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten(); - - let state_diff_ids = state_diff - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten(); - let state_events = current_state_ids - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten() - .chain(state_diff_ids) + .stream() + .chain(state_diff_ids.stream()) .broad_filter_map(|(shortstatekey, shorteventid)| async move { if witness.is_none() || encrypted_room { return Some(shorteventid); @@ -1107,7 +1072,7 @@ async fn calculate_state_incremental<'a>( lazy_filter(services, sender_user, shortstatekey, shorteventid).await }) - .chain(lazy_state_ids) + .chain(lazy_state_ids.stream()) .broad_filter_map(|shorteventid| { services .rooms diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index e1d96941..4edd0102 100644 --- a/src/core/utils/future/mod.rs +++ 
b/src/core/utils/future/mod.rs @@ -1,9 +1,11 @@ mod bool_ext; mod ext_ext; mod option_ext; +mod option_stream; mod try_ext_ext; pub use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; +pub use option_stream::OptionStream; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs index d553e5dc..920dd044 100644 --- a/src/core/utils/future/option_ext.rs +++ b/src/core/utils/future/option_ext.rs @@ -11,11 +11,14 @@ pub trait OptionExt { impl OptionExt for OptionFuture where Fut: Future + Send, + T: Send, { + #[inline] fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { self.map(|o| o.as_ref().is_none_or(f)) } + #[inline] fn is_some_and(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { self.map(|o| o.as_ref().is_some_and(f)) } diff --git a/src/core/utils/future/option_stream.rs b/src/core/utils/future/option_stream.rs new file mode 100644 index 00000000..81130c87 --- /dev/null +++ b/src/core/utils/future/option_stream.rs @@ -0,0 +1,25 @@ +use futures::{Future, FutureExt, Stream, StreamExt, future::OptionFuture}; + +use super::super::IterStream; + +pub trait OptionStream { + fn stream(self) -> impl Stream + Send; +} + +impl OptionStream for OptionFuture +where + Fut: Future + Send, + S: Stream + Send, + O: IntoIterator + Send, + ::IntoIter: Send, + T: Send, +{ + #[inline] + fn stream(self) -> impl Stream + Send { + self.map(|opt| opt.map(|(curr, next)| curr.into_iter().stream().chain(next))) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten() + } +} From 58b8c7516a755c0300be1fe0d36b819ebda36ffb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 09:02:12 +0000 Subject: [PATCH 0814/1248] extend extract_variant to multiple variants Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/utils/mod.rs 
b/src/core/utils/mod.rs index 7593990c..117fb739 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -49,10 +49,10 @@ pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, sou #[macro_export] macro_rules! extract_variant { - ($e:expr_2021, $variant:path) => { + ( $e:expr_2021, $( $variant:path )|* ) => { match $e { - | $variant(value) => Some(value), - | _ => None, + $( $variant(value) => Some(value), )* + _ => None, } }; } From a212bf7cfca7a6547681f46a438ecc278a905aab Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:00:40 -0400 Subject: [PATCH 0815/1248] update default room version to v11 Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 4 ++-- src/core/config/mod.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 75ecddab..46459547 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -527,9 +527,9 @@ # Default room version conduwuit will create rooms with. # -# Per spec, room version 10 is the default. +# Per spec, room version 11 is the default. # -#default_room_version = 10 +#default_room_version = 11 # This item is undocumented. Please contribute documentation for it. # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 7be140a5..bb509a0d 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -640,9 +640,9 @@ pub struct Config { /// Default room version conduwuit will create rooms with. /// - /// Per spec, room version 10 is the default. + /// Per spec, room version 11 is the default. 
/// - /// default: 10 + /// default: 11 #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, @@ -2170,7 +2170,7 @@ fn default_rocksdb_stats_level() -> u8 { 1 } // I know, it's a great name #[must_use] #[inline] -pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V10 } +pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 } fn default_ip_range_denylist() -> Vec { vec![ From c7246662f4b2c892667b253aff1560523d8e2cff Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:07:37 -0400 Subject: [PATCH 0816/1248] try partially reverting 94b107b42b722aff9518f64ad603ce01665b25f3 Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 43 ++++++++++-------------------------------- 1 file changed, 10 insertions(+), 33 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 6865c2a4..adbdd715 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -11,7 +11,7 @@ use ruma::{ error::ErrorKind, keys::{ claim_keys, get_key_changes, get_keys, upload_keys, - upload_signatures::{self, v3::Failure}, + upload_signatures::{self}, upload_signing_keys, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, @@ -308,82 +308,59 @@ async fn check_for_new_keys( /// /// Uploads end-to-end key signatures from the sender user. /// -/// TODO: clean this timo-code up more. tried to improve it a bit to stop -/// exploding the entire request on bad sigs, but needs way more work. +/// TODO: clean this timo-code up more and integrate failures. tried to improve +/// it a bit to stop exploding the entire request on bad sigs, but needs way +/// more work. 
pub(crate) async fn upload_signatures_route( State(services): State, body: Ruma, ) -> Result { - use upload_signatures::v3::FailureErrorCode::*; - if body.signed_keys.is_empty() { debug!("Empty signed_keys sent in key signature upload"); return Ok(upload_signatures::v3::Response::new()); } let sender_user = body.sender_user(); - let mut failures: BTreeMap> = BTreeMap::new(); - let mut failure_reasons: BTreeMap = BTreeMap::new(); - let failure = Failure { - errcode: InvalidSignature, - error: String::new(), - }; for (user_id, keys) in &body.signed_keys { for (key_id, key) in keys { let Ok(key) = serde_json::to_value(key) .inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}")) else { - let mut failure = failure.clone(); - failure.error = String::from("Invalid \"key\" JSON"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(signatures) = key.get("signatures") else { - let mut failure = failure.clone(); - failure.error = String::from("Missing \"signatures\" field"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(sender_user_val) = signatures.get(sender_user.to_string()) else { - let mut failure = failure.clone(); - failure.error = String::from("Invalid user in signatures field"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(sender_user_object) = sender_user_val.as_object() else { - let mut failure = failure.clone(); - failure.error = String::from("signatures field is not a JSON object"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; for (signature, val) in sender_user_object.clone() { - let signature = (signature, val.to_string()); + let Some(val) = val.as_str().map(ToOwned::to_owned) else { + continue; + }; + let signature = (signature, val); - if let Err(e) = services + if let Err(_e) = services .users .sign_key(user_id, key_id, signature, sender_user) .await .inspect_err(|e| debug_warn!("{e}")) { - let mut failure = failure.clone(); - failure.error 
= format!("Error signing key: {e}"); - failure_reasons.insert(key_id.to_owned(), failure); continue; } } } - - if !failure_reasons.is_empty() { - failures.insert(user_id.to_owned(), failure_reasons.clone()); - } } - Ok(upload_signatures::v3::Response { failures }) + Ok(upload_signatures::v3::Response { failures: BTreeMap::new() }) } /// # `POST /_matrix/client/r0/keys/changes` From e28ae8fb4d442cba0eb52728a129372289c85ccd Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:26:00 -0400 Subject: [PATCH 0817/1248] downgrade `deranged` crate Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0753f81d..86833adb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1279,9 +1279,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", ] From d6cc447add272f9eff0b2c77fb751dcf055d3208 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 21:26:53 +0000 Subject: [PATCH 0818/1248] simplify acl brick-check conditions Signed-off-by: Jason Volk --- src/api/client/state.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 5c5c71f2..2ddc8f14 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -211,7 +211,7 @@ async fn allowed_to_send_state_event( // irreversible mistakes match json.deserialize_as::() { | Ok(acl_content) => { - if acl_content.allow.is_empty() { + if acl_content.allow_is_empty() { return Err!(Request(BadJson(debug_warn!( ?room_id, "Sending an ACL event with an empty allow key will permanently \ @@ -220,9 +220,7 @@ async fn 
allowed_to_send_state_event( )))); } - if acl_content.deny.contains(&String::from("*")) - && acl_content.allow.contains(&String::from("*")) - { + if acl_content.deny_contains("*") && acl_content.allow_contains("*") { return Err!(Request(BadJson(debug_warn!( ?room_id, "Sending an ACL event with a deny and allow key value of \"*\" will \ @@ -231,11 +229,9 @@ async fn allowed_to_send_state_event( )))); } - if acl_content.deny.contains(&String::from("*")) + if acl_content.deny_contains("*") && !acl_content.is_allowed(services.globals.server_name()) - && !acl_content - .allow - .contains(&services.globals.server_name().to_string()) + && !acl_content.allow_contains(services.globals.server_name().as_str()) { return Err!(Request(BadJson(debug_warn!( ?room_id, @@ -245,11 +241,9 @@ async fn allowed_to_send_state_event( )))); } - if !acl_content.allow.contains(&String::from("*")) + if !acl_content.allow_contains("*") && !acl_content.is_allowed(services.globals.server_name()) - && !acl_content - .allow - .contains(&services.globals.server_name().to_string()) + && !acl_content.allow_contains(services.globals.server_name().as_str()) { return Err!(Request(BadJson(debug_warn!( ?room_id, From 500faa8d7fcefab2f5bee867bf268f87fc0643fa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 01:05:43 +0000 Subject: [PATCH 0819/1248] simplify space join rules related Signed-off-by: Jason Volk --- Cargo.lock | 22 ++--- Cargo.toml | 2 +- src/api/client/room/summary.rs | 70 +++++++++------ src/service/rooms/spaces/mod.rs | 110 ++++++++++++------------ src/service/rooms/state_accessor/mod.rs | 37 +------- 5 files changed, 113 insertions(+), 128 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86833adb..c2c5182f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" 
+source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "assign", "js_int", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3686,7 +3686,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "assign", @@ -3709,7 +3709,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "base64 0.22.1", @@ -3741,7 +3741,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3766,7 +3766,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "bytes", "headers", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3797,7 +3797,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3807,7 +3807,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3822,7 +3822,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", 
"ruma-common", @@ -3834,7 +3834,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index a44fc0f0..b1c5acb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4" features = [ "compat", "rand", diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs index 34820e83..2fa81bd2 100644 --- a/src/api/client/room/summary.rs +++ b/src/api/client/room/summary.rs @@ -4,9 +4,13 @@ use conduwuit::{ Err, Result, debug_warn, utils::{IterStream, future::TryExtExt}, }; -use futures::{FutureExt, StreamExt, future::join3, stream::FuturesUnordered}; +use futures::{ + FutureExt, StreamExt, + future::{OptionFuture, join3}, + stream::FuturesUnordered, +}; use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, UserId, + OwnedServerName, RoomId, UserId, api::{ client::room::get_summary, federation::space::{SpaceHierarchyParentSummary, get_hierarchy}, @@ -91,13 +95,9 @@ async fn room_summary_response( join_rule: room.join_rule, room_type: room.room_type, room_version: room.room_version, - membership: if sender_user.is_none() { - None - } else { - Some(MembershipState::Leave) - }, encryption: room.encryption, allowed_room_ids: room.allowed_room_ids, + membership: sender_user.is_some().then_some(MembershipState::Leave), }) } @@ -106,20 +106,22 @@ async fn local_room_summary_response( room_id: &RoomId, sender_user: Option<&UserId>, ) -> Result { - let join_rule 
= services.rooms.state_accessor.get_space_join_rule(room_id); + let join_rule = services.rooms.state_accessor.get_join_rules(room_id); + let world_readable = services.rooms.state_accessor.is_world_readable(room_id); + let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); - let ((join_rule, allowed_room_ids), world_readable, guest_can_join) = + let (join_rule, world_readable, guest_can_join) = join3(join_rule, world_readable, guest_can_join).await; user_can_see_summary( services, room_id, - &join_rule, + &join_rule.clone().into(), guest_can_join, world_readable, - &allowed_room_ids, + join_rule.allowed_rooms(), sender_user, ) .await?; @@ -129,26 +131,43 @@ async fn local_room_summary_response( .state_accessor .get_canonical_alias(room_id) .ok(); + let name = services.rooms.state_accessor.get_name(room_id).ok(); + let topic = services.rooms.state_accessor.get_room_topic(room_id).ok(); + let room_type = services.rooms.state_accessor.get_room_type(room_id).ok(); + let avatar_url = services .rooms .state_accessor .get_avatar(room_id) .map(|res| res.into_option().unwrap_or_default().url); + let room_version = services.rooms.state.get_room_version(room_id).ok(); + let encryption = services .rooms .state_accessor .get_room_encryption(room_id) .ok(); + let num_joined_members = services .rooms .state_cache .room_joined_count(room_id) .unwrap_or(0); + let membership: OptionFuture<_> = sender_user + .map(|sender_user| { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .map_ok_or(MembershipState::Leave, |content| content.membership) + }) + .into(); + let ( canonical_alias, name, @@ -158,6 +177,7 @@ async fn local_room_summary_response( room_type, room_version, encryption, + membership, ) = futures::join!( canonical_alias, name, @@ -167,6 +187,7 @@ async fn local_room_summary_response( room_type, room_version, encryption, + membership, ); Ok(get_summary::msc3266::Response { @@ -178,21 +199,12 @@ async fn 
local_room_summary_response( num_joined_members: num_joined_members.try_into().unwrap_or_default(), topic, world_readable, - join_rule, room_type, room_version, - membership: if let Some(sender_user) = sender_user { - services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - .map_or(Some(MembershipState::Leave), |content| Some(content.membership)) - } else { - None - }, encryption, - allowed_room_ids, + membership, + allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), + join_rule: join_rule.into(), }) } @@ -241,7 +253,7 @@ async fn remote_room_summary_hierarchy_response( &room.join_rule, room.guest_can_join, room.world_readable, - &room.allowed_room_ids, + room.allowed_room_ids.iter().map(AsRef::as_ref), sender_user, ) .await @@ -254,15 +266,18 @@ async fn remote_room_summary_hierarchy_response( ))) } -async fn user_can_see_summary( +async fn user_can_see_summary<'a, I>( services: &Services, room_id: &RoomId, join_rule: &SpaceRoomJoinRule, guest_can_join: bool, world_readable: bool, - allowed_room_ids: &[OwnedRoomId], + allowed_room_ids: I, sender_user: Option<&UserId>, -) -> Result { +) -> Result +where + I: Iterator + Send, +{ match sender_user { | Some(sender_user) => { let user_can_see_state_events = services @@ -271,7 +286,6 @@ async fn user_can_see_summary( .user_can_see_state_events(sender_user, room_id); let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); let user_in_allowed_restricted_room = allowed_room_ids - .iter() .stream() .any(|room| services.rooms.state_cache.is_joined(sender_user, room)); diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index f51a5e3a..ea9756ba 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -121,21 +121,22 @@ pub async fn get_summary_and_children_local( | None => (), // cache miss | Some(None) => return Ok(None), | Some(Some(cached)) => { - return Ok(Some( - if self - .is_accessible_child( - 
current_room, - &cached.summary.join_rule, - identifier, - &cached.summary.allowed_room_ids, - ) - .await - { - SummaryAccessibility::Accessible(cached.summary.clone()) - } else { - SummaryAccessibility::Inaccessible - }, - )); + let allowed_rooms = cached.summary.allowed_room_ids.iter().map(AsRef::as_ref); + + let is_accessible_child = self.is_accessible_child( + current_room, + &cached.summary.join_rule, + identifier, + allowed_rooms, + ); + + let accessibility = if is_accessible_child.await { + SummaryAccessibility::Accessible(cached.summary.clone()) + } else { + SummaryAccessibility::Inaccessible + }; + + return Ok(Some(accessibility)); }, } @@ -145,12 +146,11 @@ pub async fn get_summary_and_children_local( .collect() .await; - let summary = self + let Ok(summary) = self .get_room_summary(current_room, children_pdus, identifier) .boxed() - .await; - - let Ok(summary) = summary else { + .await + else { return Ok(None); }; @@ -217,20 +217,19 @@ async fn get_summary_and_children_federation( .await; let identifier = Identifier::UserId(user_id); + let allowed_room_ids = summary.allowed_room_ids.iter().map(AsRef::as_ref); + let is_accessible_child = self - .is_accessible_child( - current_room, - &summary.join_rule, - &identifier, - &summary.allowed_room_ids, - ) + .is_accessible_child(current_room, &summary.join_rule, &identifier, allowed_room_ids) .await; - if is_accessible_child { - return Ok(Some(SummaryAccessibility::Accessible(summary))); - } + let accessibility = if is_accessible_child { + SummaryAccessibility::Accessible(summary) + } else { + SummaryAccessibility::Inaccessible + }; - Ok(Some(SummaryAccessibility::Inaccessible)) + Ok(Some(accessibility)) } /// Simply returns the stripped m.space.child events of a room @@ -305,14 +304,15 @@ async fn get_room_summary( children_state: Vec>, identifier: &Identifier<'_>, ) -> Result { - let (join_rule, allowed_room_ids) = self - .services - .state_accessor - .get_space_join_rule(room_id) - .await; + let join_rule = 
self.services.state_accessor.get_join_rules(room_id).await; let is_accessible_child = self - .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) + .is_accessible_child( + room_id, + &join_rule.clone().into(), + identifier, + join_rule.allowed_rooms(), + ) .await; if !is_accessible_child { @@ -379,7 +379,7 @@ async fn get_room_summary( encryption, ); - Ok(SpaceHierarchyParentSummary { + let summary = SpaceHierarchyParentSummary { canonical_alias, name, topic, @@ -388,24 +388,29 @@ async fn get_room_summary( avatar_url, room_type, children_state, - allowed_room_ids, - join_rule, - room_id: room_id.to_owned(), - num_joined_members: num_joined_members.try_into().unwrap_or_default(), encryption, room_version, - }) + room_id: room_id.to_owned(), + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), + join_rule: join_rule.clone().into(), + }; + + Ok(summary) } /// With the given identifier, checks if a room is accessable #[implement(Service)] -async fn is_accessible_child( +async fn is_accessible_child<'a, I>( &self, current_room: &RoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>, - allowed_room_ids: &[OwnedRoomId], -) -> bool { + allowed_rooms: I, +) -> bool +where + I: Iterator + Send, +{ if let Identifier::ServerName(server_name) = identifier { // Checks if ACLs allow for the server to participate if self @@ -430,21 +435,18 @@ async fn is_accessible_child( } } - match join_rule { + match *join_rule { | SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true, | SpaceRoomJoinRule::Restricted => - allowed_room_ids - .iter() + allowed_rooms .stream() - .any(|room| async { - match identifier { - | Identifier::UserId(user) => - self.services.state_cache.is_joined(user, room).await, - | Identifier::ServerName(server) => - self.services.state_cache.server_in_room(server, room).await, - } + .any(async |room| 
match identifier { + | Identifier::UserId(user) => + self.services.state_cache.is_joined(user, room).await, + | Identifier::ServerName(server) => + self.services.state_cache.server_in_room(server, room).await, }) .await, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 7fff5935..f719fc7b 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use conduwuit::{Result, err}; use database::Map; use ruma::{ - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, RoomId, UserId, + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, RoomId, UserId, events::{ StateEventType, room::{ @@ -19,14 +19,13 @@ use ruma::{ encryption::RoomEncryptionEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, member::RoomMemberEventContent, name::RoomNameEventContent, topic::RoomTopicEventContent, }, }, room::RoomType, - space::SpaceRoomJoinRule, }; use crate::{Dep, rooms}; @@ -129,42 +128,12 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Returns the space join rule (`SpaceRoomJoinRule`) for a given room and - /// any allowed room IDs if available. Will default to Invite and empty vec - /// if doesnt exist or invalid, - pub async fn get_space_join_rule( - &self, - room_id: &RoomId, - ) -> (SpaceRoomJoinRule, Vec) { - self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or_else( - |_| (SpaceRoomJoinRule::Invite, vec![]), - |c: RoomJoinRulesEventContent| { - (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) - }, - ) - } - /// Returns the join rules for a given room (`JoinRule` type). 
Will default /// to Invite if doesnt exist or invalid pub async fn get_join_rules(&self, room_id: &RoomId) -> JoinRule { self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map_or_else(|_| JoinRule::Invite, |c: RoomJoinRulesEventContent| (c.join_rule)) - } - - /// Returns an empty vec if not a restricted room - pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { - let mut room_ids = Vec::with_capacity(1); // restricted rooms generally only have 1 allowed room ID - if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { - for rule in r.allow { - if let AllowRule::RoomMembership(RoomMembership { room_id: membership }) = rule { - room_ids.push(membership.clone()); - } - } - } - room_ids + .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule) } pub async fn get_room_type(&self, room_id: &RoomId) -> Result { From 9678948daf76b64368a6865d359ab162de1c5855 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 18:31:14 -0400 Subject: [PATCH 0820/1248] use patch of resolv-conf crate to allow no-aaaa resolv.conf option Signed-off-by: June Clementine Strawberry --- Cargo.lock | 3 +-- Cargo.toml | 9 ++++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2c5182f..8817af1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3625,8 +3625,7 @@ dependencies = [ [[package]] name = "resolv-conf" version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4" +source = "git+https://github.com/girlbossceo/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" dependencies = [ "hostname", ] diff --git a/Cargo.toml b/Cargo.toml index b1c5acb5..62350dee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = 
"https://github.com/girlbossceo/conduwuit" -rust-version = "1.85.0" +rust-version = "1.86.0" version = "0.5.0" [workspace.metadata.crane] @@ -580,6 +580,13 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da" git = "https://github.com/girlbossceo/hyper-util" rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" +# allows no-aaaa option in resolv.conf +# bumps rust edition and toolchain to 1.86.0 and 2024 +# use sat_add on line number errors +[patch.crates-io.resolv-conf] +git = "https://github.com/girlbossceo/resolv-conf" +rev = "200e958941d522a70c5877e3d846f55b5586c68d" + # # Our crates # From 3cc92b32ec97667bbabfb44edc305a972a7d3437 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 18:37:13 -0400 Subject: [PATCH 0821/1248] bump rust toolchain to 1.86.0 Signed-off-by: June Clementine Strawberry --- flake.nix | 2 +- rust-toolchain.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 9db2e90a..49e860ed 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU="; + sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 97b4a789..aadc8f99 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.85.0" +channel = "1.86.0" profile = "minimal" components = [ # For rust-analyzer From 6578b83bce71e9a232ff8531e80ab7d6d12a731c Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 20:09:22 -0400 Subject: [PATCH 0822/1248] parallelise IO of user searching, improve perf, raise max limit to 500 Signed-off-by: June Clementine Strawberry --- src/api/client/user_directory.rs | 121 ++++++++++++++----------------- 1 file changed, 55 insertions(+), 66 deletions(-) diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 8f564eed..99b3bb67 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,16 +1,20 @@ use axum::extract::State; -use conduwuit::{Result, utils::TryFutureExtExt}; -use futures::{StreamExt, pin_mut}; +use conduwuit::{ + Result, + utils::{future::BoolExt, stream::BroadbandExt}, +}; +use futures::{FutureExt, StreamExt, pin_mut}; use ruma::{ - api::client::user_directory::search_users, - events::{ - StateEventType, - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, - }, + api::client::user_directory::search_users::{self}, + events::room::join_rules::JoinRule, }; use crate::Ruma; +// conduwuit can handle a lot more results than synapse +const LIMIT_MAX: usize = 500; +const LIMIT_DEFAULT: usize = 10; + /// # `POST /_matrix/client/r0/user_directory/search` /// /// Searches all known users for a match. 
@@ -21,78 +25,63 @@ pub(crate) async fn search_users_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = usize::try_from(body.limit).map_or(10, usize::from).min(100); // default limit is 10 + let sender_user = body.sender_user(); + let limit = usize::try_from(body.limit) + .map_or(LIMIT_DEFAULT, usize::from) + .min(LIMIT_MAX); - let users = services.users.stream().filter_map(|user_id| async { - // Filter out buggy users (they should not exist, but you never know...) - let user = search_users::v3::User { - user_id: user_id.to_owned(), - display_name: services.users.displayname(user_id).await.ok(), - avatar_url: services.users.avatar_url(user_id).await.ok(), - }; + let mut users = services + .users + .stream() + .map(ToOwned::to_owned) + .broad_filter_map(async |user_id| { + let user = search_users::v3::User { + user_id: user_id.clone(), + display_name: services.users.displayname(&user_id).await.ok(), + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }; - let user_id_matches = user - .user_id - .to_string() - .to_lowercase() - .contains(&body.search_term.to_lowercase()); + let user_id_matches = user + .user_id + .as_str() + .to_lowercase() + .contains(&body.search_term.to_lowercase()); - let user_displayname_matches = user - .display_name - .as_ref() - .filter(|name| { + let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| { name.to_lowercase() .contains(&body.search_term.to_lowercase()) - }) - .is_some(); + }); - if !user_id_matches && !user_displayname_matches { - return None; - } + if !user_id_matches && !user_displayname_matches { + return None; + } - // It's a matching user, but is the sender allowed to see them? 
- let mut user_visible = false; - - let user_is_in_public_rooms = services - .rooms - .state_cache - .rooms_joined(&user.user_id) - .any(|room| { - services - .rooms - .state_accessor - .room_state_get_content::( - room, - &StateEventType::RoomJoinRules, - "", - ) - .map_ok_or(false, |content| content.join_rule == JoinRule::Public) - }) - .await; - - if user_is_in_public_rooms { - user_visible = true; - } else { - let user_is_in_shared_rooms = services + let user_in_public_room = services .rooms .state_cache - .user_sees_user(sender_user, &user.user_id) - .await; + .rooms_joined(&user_id) + .map(ToOwned::to_owned) + .any(|room| async move { + services + .rooms + .state_accessor + .get_join_rules(&room) + .map(|rule| matches!(rule, JoinRule::Public)) + .await + }); - if user_is_in_shared_rooms { - user_visible = true; - } - } + let user_sees_user = services + .rooms + .state_cache + .user_sees_user(sender_user, &user_id); - user_visible.then_some(user) - }); + pin_mut!(user_in_public_room, user_sees_user); - pin_mut!(users); + user_in_public_room.or(user_sees_user).await.then_some(user) + }); - let limited = users.by_ref().next().await.is_some(); - - let results = users.take(limit).collect().await; + let results = users.by_ref().take(limit).collect().await; + let limited = users.next().await.is_some(); Ok(search_users::v3::Response { results, limited }) } From 5f8c68ab842d66ecda70726e2f9726824d51b815 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 13:17:13 -0400 Subject: [PATCH 0823/1248] add trace logging for room summaries, use server_in_room instead of exists Signed-off-by: June Clementine Strawberry --- src/api/client/room/summary.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs index 2fa81bd2..67d2e2ad 100644 --- a/src/api/client/room/summary.rs +++ b/src/api/client/room/summary.rs @@ -1,7 +1,7 @@ use axum::extract::State; 
use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Result, debug_warn, + Err, Result, debug_warn, trace, utils::{IterStream, future::TryExtExt}, }; use futures::{ @@ -74,7 +74,12 @@ async fn room_summary_response( servers: &[OwnedServerName], sender_user: Option<&UserId>, ) -> Result { - if services.rooms.metadata.exists(room_id).await { + if services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await + { return local_room_summary_response(services, room_id, sender_user) .boxed() .await; @@ -106,14 +111,14 @@ async fn local_room_summary_response( room_id: &RoomId, sender_user: Option<&UserId>, ) -> Result { + trace!(?sender_user, "Sending local room summary response for {room_id:?}"); let join_rule = services.rooms.state_accessor.get_join_rules(room_id); - let world_readable = services.rooms.state_accessor.is_world_readable(room_id); - let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); let (join_rule, world_readable, guest_can_join) = join3(join_rule, world_readable, guest_can_join).await; + trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}"); user_can_see_summary( services, @@ -215,6 +220,7 @@ async fn remote_room_summary_hierarchy_response( servers: &[OwnedServerName], sender_user: Option<&UserId>, ) -> Result { + trace!(?sender_user, ?servers, "Sending remote room summary response for {room_id:?}"); if !services.config.allow_federation { return Err!(Request(Forbidden("Federation is disabled."))); } @@ -237,6 +243,7 @@ async fn remote_room_summary_hierarchy_response( .collect(); while let Some(Ok(response)) = requests.next().await { + trace!("{response:?}"); let room = response.room.clone(); if room.room_id != room_id { debug_warn!( @@ -278,6 +285,7 @@ async fn user_can_see_summary<'a, I>( where I: Iterator + Send, { + let is_public_room = matches!(join_rule, Public | Knock | KnockRestricted); match sender_user { | Some(sender_user) => { let user_can_see_state_events = 
services @@ -296,7 +304,7 @@ where if user_can_see_state_events || (is_guest && guest_can_join) - || matches!(&join_rule, &Public | &Knock | &KnockRestricted) + || is_public_room || user_in_allowed_restricted_room { return Ok(()); @@ -309,7 +317,7 @@ where ))) }, | None => { - if matches!(join_rule, Public | Knock | KnockRestricted) || world_readable { + if is_public_room || world_readable { return Ok(()); } From ff276a42a36cfe565ff541ce064db25bbb1946c8 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 13:19:09 -0400 Subject: [PATCH 0824/1248] drop unnecessary info log to debug Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index adbdd715..650c573f 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; +use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils}; use conduwuit_service::{Services, users::parse_master_key}; use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ @@ -177,7 +177,7 @@ pub(crate) async fn upload_signing_keys_route( body.master_key.as_ref(), ) .await - .inspect_err(|e| info!(?e)) + .inspect_err(|e| debug!(?e)) { | Ok(exists) => { if let Some(result) = exists { From d5ad973464168c567c3f9615380ced9e0067da4f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 15:25:11 -0400 Subject: [PATCH 0825/1248] change forbidden_server_names and etc to allow regex patterns for wildcards Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 27 ++++++++++------ src/api/client/directory.rs | 14 ++++++--- src/api/client/membership.rs | 6 ++-- src/api/client/message.rs | 3 +- src/api/router/auth.rs | 3 +- 
src/api/server/invite.rs | 6 ++-- src/api/server/make_join.rs | 6 ++-- src/api/server/make_knock.rs | 6 ++-- src/api/server/send_join.rs | 12 +++----- src/api/server/send_knock.rs | 6 ++-- src/core/config/mod.rs | 51 +++++++++++++++++-------------- src/service/federation/execute.rs | 2 +- src/service/media/remote.rs | 8 ++++- 13 files changed, 79 insertions(+), 71 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 46459547..118bc57d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -594,7 +594,7 @@ # Currently, conduwuit doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. # -# example: ["matrix.org", "envs.net", "tchncs.de"] +# example: ["matrix.org", "tchncs.de"] # #trusted_servers = ["matrix.org"] @@ -1186,13 +1186,16 @@ # #prune_missing_media = false -# Vector list of servers that conduwuit will refuse to download remote -# media from. +# Vector list of regex patterns of server names that conduwuit will refuse +# to download remote media from. +# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #prevent_media_downloads_from = [] -# List of forbidden server names that we will block incoming AND outgoing -# federation with, and block client room joins / remote user invites. +# List of forbidden server names via regex patterns that we will block +# incoming AND outgoing federation with, and block client room joins / +# remote user invites. # # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and @@ -1200,11 +1203,15 @@ # # Basically "global" ACLs. # +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] +# #forbidden_remote_server_names = [] -# List of forbidden server names that we will block all outgoing federated -# room directory requests for. Useful for preventing our users from -# wandering into bad servers or spaces. 
+# List of forbidden server names via regex patterns that we will block all +# outgoing federated room directory requests for. Useful for preventing +# our users from wandering into bad servers or spaces. +# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_room_directory_server_names = [] @@ -1315,7 +1322,7 @@ # used, and startup as warnings if any room aliases in your database have # a forbidden room alias/ID. # -# example: ["19dollarfortnitecards", "b[4a]droom"] +# example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] # #forbidden_alias_names = [] @@ -1328,7 +1335,7 @@ # startup as warnings if any local users in your database have a forbidden # username. # -# example: ["administrator", "b[a4]dusernam[3e]"] +# example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] # #forbidden_usernames = [] diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 9ca35537..b44b9f64 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -52,10 +52,13 @@ pub(crate) async fn get_public_rooms_filtered_route( ) -> Result { if let Some(server) = &body.server { if services - .server .config .forbidden_remote_room_directory_server_names - .contains(server) + .is_match(server.host()) + || services + .config + .forbidden_remote_server_names + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } @@ -90,10 +93,13 @@ pub(crate) async fn get_public_rooms_route( ) -> Result { if let Some(server) = &body.server { if services - .server .config .forbidden_remote_room_directory_server_names - .contains(server) + .is_match(server.host()) + || services + .config + .forbidden_remote_server_names + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d0345c8e..1eeacf83 100644 --- a/src/api/client/membership.rs +++ 
b/src/api/client/membership.rs @@ -79,10 +79,9 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .server .config .forbidden_remote_server_names - .contains(&room_id.server_name().unwrap().to_owned()) + .is_match(room_id.server_name().unwrap().host()) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ @@ -120,10 +119,9 @@ async fn banned_room_check( } } else if let Some(server_name) = server_name { if services - .server .config .forbidden_remote_server_names - .contains(&server_name.to_owned()) + .is_match(server_name.host()) { warn!( "User {user_id} who is not an admin tried joining a room which has the server \ diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 3e784a4a..db11ef4a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -261,10 +261,9 @@ pub(crate) async fn is_ignored_pdu( let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); let ignored_server = services - .server .config .forbidden_remote_server_names - .contains(pdu.sender().server_name()); + .is_match(pdu.sender().server_name().host()); if ignored_type && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 5cd7b831..0eb61ca6 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -317,10 +317,9 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { let origin = &x_matrix.origin; if services - .server .config .forbidden_remote_server_names - .contains(origin) + .is_match(origin.host()) { return Err!(Request(Forbidden(debug_warn!( "Federation requests from {origin} denied." 
diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index cda34fb5..edd6ac16 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -38,20 +38,18 @@ pub(crate) async fn create_invite_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Received federated/remote invite from banned server {} for room ID {}. Rejecting.", diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index 4664b904..ac2c5485 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -42,10 +42,9 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} for remote user {} tried joining room ID {} which has a server name that \ @@ -59,10 +58,9 @@ pub(crate) async fn create_join_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden(warn!( "Room ID server name {server} is banned on this homeserver." 
diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 6d71ab2a..511c13b2 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -33,10 +33,9 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} for remote user {} tried knocking room ID {} which has a server name \ @@ -50,10 +49,9 @@ pub(crate) async fn create_knock_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 2e2e89ee..a66d8890 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -268,10 +268,9 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} tried joining room ID {} through us who has a server name that is \ @@ -284,10 +283,9 @@ pub(crate) async fn create_join_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ @@ -316,20 +314,18 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { if services - .server .config 
.forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index c5ab0306..ee7b6cba 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -26,10 +26,9 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} tried knocking room ID {} who has a server name that is globally \ @@ -42,10 +41,9 @@ pub(crate) async fn create_knock_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried knocking room ID {} which has a server name that is globally \ diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index bb509a0d..0ca6bbaf 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -3,7 +3,7 @@ pub mod manager; pub mod proxy; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, path::{Path, PathBuf}, }; @@ -715,7 +715,7 @@ pub struct Config { /// Currently, conduwuit doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. /// - /// example: ["matrix.org", "envs.net", "tchncs.de"] + /// example: ["matrix.org", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] @@ -1361,15 +1361,18 @@ pub struct Config { #[serde(default)] pub prune_missing_media: bool, - /// Vector list of servers that conduwuit will refuse to download remote - /// media from. 
+ /// Vector list of regex patterns of server names that conduwuit will refuse + /// to download remote media from. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] - #[serde(default)] - pub prevent_media_downloads_from: HashSet, + #[serde(default, with = "serde_regex")] + pub prevent_media_downloads_from: RegexSet, - /// List of forbidden server names that we will block incoming AND outgoing - /// federation with, and block client room joins / remote user invites. + /// List of forbidden server names via regex patterns that we will block + /// incoming AND outgoing federation with, and block client room joins / + /// remote user invites. /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and @@ -1377,17 +1380,21 @@ pub struct Config { /// /// Basically "global" ACLs. /// - /// default: [] - #[serde(default)] - pub forbidden_remote_server_names: HashSet, - - /// List of forbidden server names that we will block all outgoing federated - /// room directory requests for. Useful for preventing our users from - /// wandering into bad servers or spaces. + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] - #[serde(default = "HashSet::new")] - pub forbidden_remote_room_directory_server_names: HashSet, + #[serde(default, with = "serde_regex")] + pub forbidden_remote_server_names: RegexSet, + + /// List of forbidden server names via regex patterns that we will block all + /// outgoing federated room directory requests for. Useful for preventing + /// our users from wandering into bad servers or spaces. 
+ /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub forbidden_remote_room_directory_server_names: RegexSet, /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you /// do not want conduwuit to send outbound requests to. Defaults to @@ -1508,11 +1515,10 @@ pub struct Config { /// used, and startup as warnings if any room aliases in your database have /// a forbidden room alias/ID. /// - /// example: ["19dollarfortnitecards", "b[4a]droom"] + /// example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] /// /// default: [] - #[serde(default)] - #[serde(with = "serde_regex")] + #[serde(default, with = "serde_regex")] pub forbidden_alias_names: RegexSet, /// List of forbidden username patterns/strings. @@ -1524,11 +1530,10 @@ pub struct Config { /// startup as warnings if any local users in your database have a forbidden /// username. /// - /// example: ["administrator", "b[a4]dusernam[3e]"] + /// example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] /// /// default: [] - #[serde(default)] - #[serde(with = "serde_regex")] + #[serde(default, with = "serde_regex")] pub forbidden_usernames: RegexSet, /// Retry failed and incomplete messages to remote servers immediately upon diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 63f2ccfb..97314ffb 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -69,7 +69,7 @@ where .server .config .forbidden_remote_server_names - .contains(dest) + .is_match(dest.host()) { return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index b6c853d2..cdcb429e 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -426,7 +426,13 @@ fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> { .server .config 
.prevent_media_downloads_from - .contains(mxc.server_name) + .is_match(mxc.server_name.host()) + || self + .services + .server + .config + .forbidden_remote_server_names + .is_match(mxc.server_name.host()) { // we'll lie to the client and say the blocked server's media was not found and // log. the client has no way of telling anyways so this is a security bonus. From 99868b166173d7bd510a7f2dd3a1b1e415a99682 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 15:30:01 -0400 Subject: [PATCH 0826/1248] update new complement flakes Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index c0e28750..97c2e1b1 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -491,7 +491,7 @@ {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Joining_room_twice_is_idempotent"} -{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} @@ -527,17 +527,17 @@ {"Action":"pass","Test":"TestRoomMessagesLazyLoadingLocalUser"} {"Action":"pass","Test":"TestRoomReadMarkers"} 
{"Action":"pass","Test":"TestRoomReceipts"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} {"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} 
{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} {"Action":"fail","Test":"TestRoomState"} {"Action":"fail","Test":"TestRoomState/Parallel"} @@ -589,7 +589,7 @@ {"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_has_correct_timeline_in_incremental_sync"} {"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_includes_presence_in_incremental_sync"} {"Action":"pass","Test":"TestSync/parallel/Newly_joined_room_is_included_in_an_incremental_sync"} -{"Action":"fail","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} +{"Action":"pass","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} {"Action":"pass","Test":"TestSyncFilter"} {"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} {"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} From 47f83454570a1d4338137708b4b042e8c49b7cb7 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 8 Apr 2025 09:05:49 -0400 Subject: [PATCH 0827/1248] bump tokio because of RUSTSEC-2025-0023 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8817af1a..c724e31e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4758,9 +4758,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.1" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 62350dee..f5ee3f0f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,7 @@ default-features = false features = 
["std", "async-await"] [workspace.dependencies.tokio] -version = "1.44.1" +version = "1.44.2" default-features = false features = [ "fs", From d8311a5ff672fdc4729d956af5e3af8646b0670d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 8 Apr 2025 23:38:54 -0400 Subject: [PATCH 0828/1248] bump crossbeam-channel bc yanked crate with potential double free Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c724e31e..d81fdbc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1119,9 +1119,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] From e054a56b3286a6fb3091bedd5261089435ed26d1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 14 Apr 2025 22:34:22 +0100 Subject: [PATCH 0829/1248] docs: New readme It's a continuwuation! --- README.md | 214 +++++++++++++++++++++--------------------------------- 1 file changed, 82 insertions(+), 132 deletions(-) diff --git a/README.md b/README.md index d8f99d45..89e1a299 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ -# conduwuit +# continuwuity + -### a very cool [Matrix](https://matrix.org/) chat homeserver written in Rust +## A community-driven [Matrix](https://matrix.org/) homeserver in Rust -Visit the [conduwuit documentation](https://conduwuit.puppyirl.gay/) for more -information and how to deploy/setup conduwuit. +[continuwuity] is a Matrix homeserver written in Rust. +It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver. -#### What is Matrix? + +### Why does this exist? 
+ +The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features. + +Our aim is to provide a stable, well-maintained alternative for current conduwuit users and to welcome new users looking for a lightweight, efficient Matrix homeserver. + + +### Who are we? + +We are a group of Matrix enthusiasts, developers and system administrators who have used conduwuit and believe in its potential. Our team includes both previous +contributors to the original project and new developers who want to help maintain and improve this important piece of Matrix infrastructure. + +We operate as an open community project, welcoming contributions from anyone interested in improving continuwuity. + +### What is Matrix? [Matrix](https://matrix.org) is an open, federated, and extensible network for -decentralised communication. Users from any Matrix homeserver can chat with users from all +decentralized communication. Users from any Matrix homeserver can chat with users from all other homeservers over federation. Matrix is designed to be extensible and built on top of. You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord. -#### What is the goal? +### What are the project's goals? -A high-performance, efficient, low-cost, and featureful Matrix homeserver that's -easy to set up and just works with minimal configuration needed. +Continuwuity aims to: -#### Can I try it out? 
+- Maintain a stable, reliable Matrix homeserver implementation in Rust +- Improve compatibility and specification compliance with the Matrix protocol +- Fix bugs and performance issues from the original conduwuit +- Add missing features needed by homeserver administrators +- Provide comprehensive documentation and easy deployment options +- Create a sustainable development model for long-term maintenance +- Keep a lightweight, efficient codebase that can run on modest hardware -An official conduwuit server ran by me is available at transfem.dev -([element.transfem.dev](https://element.transfem.dev) / -[cinny.transfem.dev](https://cinny.transfem.dev)) +### Can I try it out? -transfem.dev is a public homeserver that can be used, it is not a "test only -homeserver". This means there are rules, so please read the rules: -[https://transfem.dev/homeserver_rules.txt](https://transfem.dev/homeserver_rules.txt) +Not right now. We've still got work to do! -transfem.dev is also listed at -[servers.joinmatrix.org](https://servers.joinmatrix.org/), which is a list of -popular public Matrix homeservers, including some others that run conduwuit. -#### What is the current status? +### What are we working on? -conduwuit is technically a hard fork of [Conduit](https://conduit.rs/), which is in beta. -The beta status initially was inherited from Conduit, however the huge amount of -codebase divergance, changes, fixes, and improvements have effectively made this -beta status not entirely applicable to us anymore. +We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues). -conduwuit is very stable based on our rapidly growing userbase, has lots of features that users -expect, and very usable as a daily driver for small, medium, and upper-end medium sized homeservers. 
+- [Replacing old conduwuit links with working continuwuity links](https://forgejo.ellis.link/continuwuation/continuwuity/issues/742) +- [Getting CI and docs deployment working on the new Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues/740) +- [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747) +- [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0) +- [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119) +- Automated testing +- [Admin API](https://forgejo.ellis.link/continuwuation/continuwuity/issues/748) +- [Policy-list controlled moderation](https://forgejo.ellis.link/continuwuation/continuwuity/issues/750) -A lot of critical stability and performance issues have been fixed, and a lot of -necessary groundwork has finished; making this project way better than it was -back in the start at ~early 2024. +### Can I migrate my data from x? -#### Where is the differences page? +- Conduwuit: Yes +- Conduit: No, database is now incompatible +- Grapevine: No, database is now incompatible +- Dendrite: No +- Synapse: No -conduwuit historically had a "differences" page that listed each and every single -different thing about conduwuit from Conduit, as a way to promote and advertise -conduwuit by showing significant amounts of work done. While this was feasible to -maintain back when the project was new in early-2024, this became impossible -very quickly and has unfortunately became heavily outdated, missing tons of things, etc. - -It's difficult to list out what we do differently, what are our notable features, etc -when there's so many things and features and bug fixes and performance optimisations, -the list goes on. 
We simply recommend folks to just try out conduwuit, or ask us -what features you are looking for and if they're implemented in conduwuit. - -#### How is conduwuit funded? Is conduwuit sustainable? - -conduwuit has no external funding. This is made possible purely in my freetime with -contributors, also in their free time, and only by user-curated donations. - -conduwuit has existed since around November 2023, but [only became more publicly known -in March/April 2024](https://matrix.org/blog/2024/04/26/this-week-in-matrix-2024-04-26/#conduwuit-website) -and we have no plans in stopping or slowing down any time soon! - -#### Can I migrate or switch from Conduit? - -conduwuit had drop-in migration/replacement support for Conduit for about 12 months before -bugs somewhere along the line broke it. Maintaining this has been difficult and -the majority of Conduit users have already migrated, additionally debugging Conduit -is not one of our interests, and so Conduit migration no longer works. We also -feel that 12 months has been plenty of time for people to seamlessly migrate. - -If you are a Conduit user looking to migrate, you will have to wipe and reset -your database. We may fix seamless migration support at some point, but it's not an interest -from us. - -#### Can I migrate from Synapse or Dendrite? - -Currently there is no known way to seamlessly migrate all user data from the old -homeserver to conduwuit. However it is perfectly acceptable to replace the old -homeserver software with conduwuit using the same server name and there will not -be any issues with federation. - -There is an interest in developing a built-in seamless user data migration -method into conduwuit, however there is no concrete ETA or timeline for this. +Although you can't migrate your data from other homeservers, it is perfectly acceptable to set up continuwuity on the same domain as a previous homeserver. 
+## Contribution
+
+### Development flow
+
+- Features / changes must be developed in a separate branch
+- For each change, create a descriptive PR
+- Your code will be reviewed by one or more of the continuwuity developers
+- The branch will be deployed live on multiple testers' matrix servers to shake out bugs
+- Once all testers and reviewers have agreed, the PR will be merged to the main branch
+- The main branch will have nightly builds deployed to users on the cutting edge
+- Every week or two, a new release is cut.
+
+The main branch is always green!
+
+
+### Policy on pulling from other forks
+
+We welcome contributions from other forks of conduwuit, subject to our review process.
+When incorporating code from other forks:
+
+- All external contributions must go through our standard PR process
+- Code must meet our quality standards and pass tests
+- Code changes will require testing on multiple test servers before merging
+- Attribution will be given to original authors and forks
+- We prioritize stability and compatibility when evaluating external contributions
+- Features that align with our project goals will be given priority consideration
+
 #### Contact
-[`#conduwuit:puppygock.gay`](https://matrix.to/#/#conduwuit:puppygock.gay)
-is the official project Matrix room. You can get support here, ask questions or
-concerns, get assistance setting up conduwuit, etc.
-
-This room should stay relevant and focused on conduwuit. An offtopic general
-chatter room can be found in the room topic there as well.
-
-Please keep the issue trackers focused on *actual* bug reports and enhancement requests.
-
-General support is extremely difficult to be offered over an issue tracker, and
-simple questions should be asked directly in an interactive platform like our
-Matrix room above as they can turn into a relevant discussion and/or may not be
-simple to answer. If you're not sure, just ask in the Matrix room. 
- -If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) - -If you need to contact the primary maintainer, my contact methods are on my website: https://girlboss.ceo - -#### Donate - -conduwuit development is purely made possible by myself and contributors. I do -not get paid to work on this, and I work on it in my free time. Donations are -heavily appreciated! 💜🥺 - -- Liberapay: -- GitHub Sponsors: -- Ko-fi: - -I do not and will not accept cryptocurrency donations, including things related. - -Note that donations will NOT guarantee you or give you any kind of tangible product, -feature prioritisation, etc. By donating, you are agreeing that conduwuit is NOT -going to provide you any goods or services as part of your donation, and this -donation is purely a generous donation. We will not provide things like paid -personal/direct support, feature request priority, merchandise, etc. - -#### Logo - -Original repo and Matrix room picture was from bran (<3). Current banner image -and logo is directly from [this cohost -post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). - -An SVG logo made by [@nktnet1](https://github.com/nktnet1) is available here: - -#### Is it conduwuit or Conduwuit? - -Both, but I prefer conduwuit. 
- -#### Mirrors of conduwuit - -If GitHub is unavailable in your country, or has poor connectivity, conduwuit's -source code is mirrored onto the following additional platforms I maintain: - -- GitHub: -- GitLab: -- git.girlcock.ceo: -- git.gay: -- mau.dev: -- Codeberg: -- sourcehut: + + + +[continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity From 57d26dae0d35f8be9e66054479261ca33a1ea42c Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 14 Apr 2025 22:58:30 +0100 Subject: [PATCH 0830/1248] docs: Remove hidden conduwuit badges --- README.md | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/README.md b/README.md index 89e1a299..f61f6a87 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,5 @@ # continuwuity - - - ## A community-driven [Matrix](https://matrix.org/) homeserver in Rust From 3e54c7e69163ebfcc24414dad4888b4d8a4380b8 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 14 Apr 2025 23:01:22 +0100 Subject: [PATCH 0831/1248] docs: Phrasing --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index f61f6a87..24a34d18 100644 --- a/README.md +++ b/README.md @@ -16,8 +16,7 @@ It's a community continuation of the [conduwuit](https://github.com/girlbossceo/ The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features. -Our aim is to provide a stable, well-maintained alternative for current conduwuit users and to welcome new users looking for a lightweight, efficient Matrix homeserver. - +We aim to provide a stable, well-maintained alternative for current Conduit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver. ### Who are we? 
From 4f9e9174e2aaabcb45a81ec33ec9284159bc59fd Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 14 Apr 2025 23:06:39 +0100 Subject: [PATCH 0832/1248] docs: Mention future migration guide --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 24a34d18..deaed364 100644 --- a/README.md +++ b/README.md @@ -70,8 +70,7 @@ We're working our way through all of the issues in the [Forgejo project](https:/ - Dendrite: No - Synapse: No -Although you can't migrate your data from other homeservers, it is perfectly acceptable to set up continuwuity on the same domain as a previous homeserver. - +We haven't written up a guide on migrating from incompatible homeservers yet. Reach out to us if you need to do this! From 35bffa5970e311a2b2ffa7c23ecd77121c36dfbe Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 10:25:49 +0100 Subject: [PATCH 0833/1248] ci: Delete all old CI files Part of #753 --- .github/workflows/ci.yml | 717 ------------------- .github/workflows/docker-hub-description.yml | 41 -- .github/workflows/documentation.yml | 104 --- .github/workflows/release.yml | 118 --- .gitlab-ci.yml | 152 ---- .gitlab/merge_request_templates/MR.md | 8 - .gitlab/route-map.yml | 3 - 7 files changed, 1143 deletions(-) delete mode 100644 .github/workflows/ci.yml delete mode 100644 .github/workflows/docker-hub-description.yml delete mode 100644 .github/workflows/documentation.yml delete mode 100644 .github/workflows/release.yml delete mode 100644 .gitlab-ci.yml delete mode 100644 .gitlab/merge_request_templates/MR.md delete mode 100644 .gitlab/route-map.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 5043f23b..00000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,717 +0,0 @@ -name: CI and Artifacts - -on: - pull_request: - push: - paths-ignore: - - '.gitlab-ci.yml' - - '.gitignore' - - 'renovate.json' - - 'debian/**' - - 'docker/**' - branches: - - main - tags: - 
- '*' - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -concurrency: - group: ${{ github.head_ref || github.ref_name }} - cancel-in-progress: true - -env: - # Required to make some things output color - TERM: ansi - # Publishing to my nix binary cache - ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} - # conduwuit.cachix.org - CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - # Just in case incremental is still being set to true, speeds up CI - CARGO_INCREMENTAL: 0 - # Custom nix binary cache if fork is being used - ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} - ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} - # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps - NIX_CONFIG: | - show-trace = true - extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= - experimental-features = nix-command flakes - extra-experimental-features = nix-command flakes - accept-flake-config = true - WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} - GH_REF_NAME: ${{ github.ref_name }} - WEBSERVER_DIR_NAME: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - -permissions: {} - -jobs: - tests: - name: Test - runs-on: self-hosted - steps: - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 
'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Tag comparison check - if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - name: Prepare build environment - run: | - echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc" - direnv allow - nix develop .#all-features --command true - - - name: Cache CI dependencies - run: | - bin/nix-build-and-cache ci - bin/nix-build-and-cache just '.#devShells.x86_64-linux.default' - bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features' - bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic' - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - - name: Run CI tests - env: - CARGO_PROFILE: "test" - run: | - direnv exec . 
engage > >(tee -a test_output.log) - - - name: Run Complement tests - env: - CARGO_PROFILE: "test" - run: | - # the nix devshell sets $COMPLEMENT_SRC, so "/dev/null" is no-op - direnv exec . bin/complement "/dev/null" complement_test_logs.jsonl complement_test_results.jsonl > >(tee -a test_output.log) - cp -v -f result complement_oci_image.tar.gz - - - name: Upload Complement OCI image - uses: actions/upload-artifact@v4 - with: - name: complement_oci_image.tar.gz - path: complement_oci_image.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload Complement logs - uses: actions/upload-artifact@v4 - with: - name: complement_test_logs.jsonl - path: complement_test_logs.jsonl - if-no-files-found: error - - - name: Upload Complement results - uses: actions/upload-artifact@v4 - with: - name: complement_test_results.jsonl - path: complement_test_results.jsonl - if-no-files-found: error - - - name: Diff Complement results with checked-in repo results - run: | - diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log) - - - name: Update Job Summary - env: - GH_JOB_STATUS: ${{ job.status }} - if: success() || failure() - run: | - if [ ${GH_JOB_STATUS} == 'success' ]; then - echo '# ✅ CI completed suwuccessfully' >> $GITHUB_STEP_SUMMARY - else - echo '# ❌ CI failed (last 100 lines of output)' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - fi - - build: - name: Build - runs-on: self-hosted - strategy: - matrix: - include: - - target: aarch64-linux-musl - - target: x86_64-linux-musl - steps: - - 
name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - name: Prepare build environment - run: | - echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc" - direnv allow - nix develop .#all-features --command true --impure - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - - name: Build static ${{ matrix.target }}-all-features - run: | - if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl" - fi - - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features - - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . 
cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb - mv -v target/release/conduwuit static-${{ matrix.target }} - mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb - - - name: Build static x86_64-linux-musl-all-features-x86_64-haswell-optimised - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-x86_64-linux-musl-all-features-x86_64-haswell-optimised - - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb - mv -v target/release/conduwuit static-x86_64-linux-musl-x86_64-haswell-optimised - mv -v target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb x86_64-linux-musl-x86_64-haswell-optimised.deb - - # quick smoke test of the x86_64 static release binary - - name: Quick smoke test the x86_64 static release binary - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - # GH actions default runners are x86_64 only - if file result/bin/conduwuit | grep x86-64; then - result/bin/conduwuit --version - result/bin/conduwuit --help - result/bin/conduwuit -Oserver_name="'$(date -u +%s).local'" -Odatabase_path="'/tmp/$(date -u +%s)'" --execute "server admin-notice awawawawawawawawawawa" --execute "server memory-usage" --execute "server shutdown" - fi - - - name: Build static debug ${{ matrix.target }}-all-features - run: | - if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - elif [[ ${{ matrix.target }} == 
"aarch64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl" - fi - - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features-debug - - # > warning: dev profile is not supported and will be a hard error in the future. cargo-deb is for making releases, and it doesn't make sense to use it with dev profiles. - # so we need to coerce cargo-deb into thinking this is a release binary - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}-debug.deb - mv -v target/release/conduwuit static-${{ matrix.target }}-debug - mv -v target/release/${{ matrix.target }}-debug.deb ${{ matrix.target }}-debug.deb - - # quick smoke test of the x86_64 static debug binary - - name: Run x86_64 static debug binary - run: | - # GH actions default runners are x86_64 only - if file result/bin/conduwuit | grep x86-64; then - result/bin/conduwuit --version - fi - - # check validity of produced deb package, invalid debs will error on these commands - - name: Validate produced deb package - run: | - # List contents - dpkg-deb --contents ${{ matrix.target }}.deb - dpkg-deb --contents ${{ matrix.target }}-debug.deb - # List info - dpkg-deb --info ${{ matrix.target }}.deb - dpkg-deb --info ${{ matrix.target }}-debug.deb - - - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub - uses: actions/upload-artifact@v4 - if: ${{ matrix.target == 'x86_64-linux-musl' }} - with: - name: static-x86_64-linux-musl-x86_64-haswell-optimised - path: static-x86_64-linux-musl-x86_64-haswell-optimised - if-no-files-found: error - - - name: Upload static-${{ matrix.target }}-all-features to 
GitHub - uses: actions/upload-artifact@v4 - with: - name: static-${{ matrix.target }} - path: static-${{ matrix.target }} - if-no-files-found: error - - - name: Upload static deb ${{ matrix.target }}-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: deb-${{ matrix.target }} - path: ${{ matrix.target }}.deb - if-no-files-found: error - compression-level: 0 - - - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised - scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-x86_64-linux-musl-x86_64-haswell-optimised - fi - - - name: Upload static-${{ matrix.target }}-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x static-${{ matrix.target }} - scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }} - fi - - - name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/x86_64-linux-musl-x86_64-haswell-optimised.deb - fi - - - name: Upload static deb ${{ matrix.target }}-all-features to webserver - run: | - if [ ! 
-z $SSH_WEBSITE ]; then - scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}.deb - fi - - - name: Upload static-${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: static-${{ matrix.target }}-debug - path: static-${{ matrix.target }}-debug - if-no-files-found: error - - - name: Upload static deb ${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: deb-${{ matrix.target }}-debug - path: ${{ matrix.target }}-debug.deb - if-no-files-found: error - compression-level: 0 - - - name: Upload static-${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}-debug - fi - - - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! 
-z $SSH_WEBSITE ]; then - scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}-debug.deb - fi - - - name: Build OCI image ${{ matrix.target }}-all-features - run: | - bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features - - cp -v -f result oci-image-${{ matrix.target }}.tar.gz - - - name: Build OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - bin/nix-build-and-cache just .#oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised - - cp -v -f result oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - - - name: Build debug OCI image ${{ matrix.target }}-all-features - run: | - bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features-debug - - cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz - - - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub - if: ${{ matrix.target == 'x86_64-linux-musl' }} - uses: actions/upload-artifact@v4 - with: - name: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised - path: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - if-no-files-found: error - compression-level: 0 - - name: Upload OCI image ${{ matrix.target }}-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: oci-image-${{ matrix.target }} - path: oci-image-${{ matrix.target }}.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload OCI image ${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: oci-image-${{ matrix.target }}-debug - path: oci-image-${{ matrix.target }}-debug.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz to webserver - if: ${{ matrix.target == 
'x86_64-linux-musl' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - fi - - - name: Upload OCI image ${{ matrix.target }}-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}.tar.gz - fi - - - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz - fi - - variables: - outputs: - github_repository: ${{ steps.var.outputs.github_repository }} - runs-on: self-hosted - steps: - - name: Setting global variables - uses: actions/github-script@v7 - id: var - with: - script: | - core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase()) - docker: - name: Docker publish - runs-on: self-hosted - needs: [build, variables, tests] - permissions: - packages: write - contents: read - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' - env: - DOCKER_HUB_REPO: docker.io/${{ needs.variables.outputs.github_repository }} - GHCR_REPO: ghcr.io/${{ needs.variables.outputs.github_repository }} - GLCR_REPO: registry.gitlab.com/conduwuit/conduwuit - UNIQUE_TAG: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - BRANCH_TAG: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') 
|| (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} - - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} - GHCR_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}" - steps: - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Login to Docker Hub - if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} - uses: docker/login-action@v3 - with: - registry: docker.io - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to GitLab Container Registry - if: ${{ (vars.GITLAB_USERNAME != '') && (env.GITLAB_TOKEN != '') }} - uses: docker/login-action@v3 - with: - registry: registry.gitlab.com - username: ${{ vars.GITLAB_USERNAME }} - password: ${{ secrets.GITLAB_TOKEN }} - - - name: Download artifacts - uses: actions/download-artifact@v4 - with: - pattern: "oci*" - - - name: Move OCI images into position - run: | - mv -v oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised/*.tar.gz oci-image-amd64-haswell-optimised.tar.gz - mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz - mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz - mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz - mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz - - - name: Load and push amd64 haswell image - run: | - docker load -i oci-image-amd64-haswell-optimised.tar.gz - if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker push ${GHCR_REPO}:${UNIQUE_TAG}-haswell - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker push ${GLCR_REPO}:${UNIQUE_TAG}-haswell - fi - - - name: Load and push amd64 image - run: | - docker load -i oci-image-amd64.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - fi - - - name: Load and push arm64 image - run: | - docker load -i oci-image-arm64v8.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 - fi - - - name: Load and push amd64 debug image - run: | - docker load -i oci-image-amd64-debug.tar.gz - if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - - - name: Load and push arm64 debug image - run: | - docker load -i oci-image-arm64v8-debug.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - - - name: Create Docker haswell manifests - run: | - # Dockerhub Container Registry - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell - fi - # GitLab Container Registry - if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell - fi - - - name: Create Docker combined manifests - run: | - # Dockerhub Container Registry - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${GHCR_REPO}:${BRANCH_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - fi - # GitLab Container Registry - if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${GLCR_REPO}:${BRANCH_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - fi - - - name: Create Docker combined debug manifests - run: | - # Dockerhub Container Registry - if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - # GitLab Container Registry - if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - - - name: Push manifests to Docker registries - run: | - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG} - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG} - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell - fi - if [ $GHCR_ENABLED = "true" ]; then - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG} - docker manifest push ${GHCR_REPO}:${BRANCH_TAG} - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-debug - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-haswell - fi - if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG} - docker manifest push ${GLCR_REPO}:${BRANCH_TAG} - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-debug - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-haswell - fi - - - name: Add Image Links to Job Summary - run: | - if [ ! -z $DOCKERHUB_TOKEN ]; then - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi - if [ $GHCR_ENABLED = "true" ]; then - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi - if [ ! 
-z $GITLAB_TOKEN ]; then - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml deleted file mode 100644 index b4f142db..00000000 --- a/.github/workflows/docker-hub-description.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Update Docker Hub Description - -on: - push: - branches: - - main - paths: - - README.md - - .github/workflows/docker-hub-description.yml - - workflow_dispatch: - -jobs: - dockerHubDescription: - runs-on: ubuntu-latest - if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' && (vars.DOCKER_USERNAME != '') }} - steps: - - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setting variables - uses: actions/github-script@v7 - id: var - with: - script: | - const githubRepo = '${{ github.repository }}'.toLowerCase() - const repoId = githubRepo.split('/')[1] - - core.setOutput('github_repository', githubRepo) - const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId - core.setOutput('docker_repo', dockerRepo) - - - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v4 - with: - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - repository: ${{ steps.var.outputs.docker_repo }} - short-description: ${{ github.event.repository.description }} - enable-url-completion: true diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml deleted file mode 100644 index b5b4ff46..00000000 --- a/.github/workflows/documentation.yml +++ /dev/null @@ -1,104 +0,0 @@ -name: Documentation and GitHub Pages - -on: - 
pull_request: - push: - branches: - - main - tags: - - '*' - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -env: - # Required to make some things output color - TERM: ansi - # Publishing to my nix binary cache - ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} - # conduwuit.cachix.org - CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - # Custom nix binary cache if fork is being used - ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} - ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} - # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps - NIX_CONFIG: | - show-trace = true - extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= - experimental-features = nix-command flakes - extra-experimental-features = nix-command flakes - accept-flake-config = true - -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. -# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
-concurrency: - group: "pages" - cancel-in-progress: false - -permissions: {} - -jobs: - docs: - name: Documentation and GitHub Pages - runs-on: self-hosted - - permissions: - pages: write - id-token: write - - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - - steps: - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup GitHub Pages - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - uses: actions/configure-pages@v5 - - - name: Prepare build environment - run: | - echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc" - direnv allow - nix develop --command true - - - name: Cache CI dependencies - run: | - bin/nix-build-and-cache ci - - - name: Run lychee and markdownlint - run: | - direnv exec . engage just lints lychee - direnv exec . engage just lints markdownlint - - - name: Build documentation (book) - run: | - bin/nix-build-and-cache just .#book - - cp -r --dereference result public - chmod u+w -R public - - - name: Upload generated documentation (book) as normal artifact - uses: actions/upload-artifact@v4 - with: - name: public - path: public - if-no-files-found: error - # don't compress again - compression-level: 0 - - - name: Upload generated documentation (book) as GitHub Pages artifact - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - uses: actions/upload-pages-artifact@v3 - with: - path: public - - - name: Deploy to GitHub Pages - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - id: deployment - uses: actions/deploy-pages@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index cfe72d2a..00000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,118 +0,0 @@ -name: 
Upload Release Assets - -on: - release: - types: [published] - workflow_dispatch: - inputs: - tag: - description: 'Tag to release' - required: true - type: string - action_id: - description: 'Action ID of the CI run' - required: true - type: string - -permissions: {} - -jobs: - publish: - runs-on: ubuntu-latest - permissions: - contents: write - env: - GH_EVENT_NAME: ${{ github.event_name }} - GH_EVENT_INPUTS_ACTION_ID: ${{ github.event.inputs.action_id }} - GH_EVENT_INPUTS_TAG: ${{ github.event.inputs.tag }} - GH_REPOSITORY: ${{ github.repository }} - GH_SHA: ${{ github.sha }} - GH_TAG: ${{ github.event.release.tag_name }} - - steps: - - name: get latest ci id - id: get_ci_id - env: - GH_TOKEN: ${{ github.token }} - run: | - if [ "${GH_EVENT_NAME}" == "workflow_dispatch" ]; then - id="${GH_EVENT_INPUTS_ACTION_ID}" - tag="${GH_EVENT_INPUTS_TAG}" - else - # get all runs of the ci workflow - json=$(gh api "repos/${GH_REPOSITORY}/actions/workflows/ci.yml/runs") - - # find first run that is github sha and status is completed - id=$(echo "$json" | jq ".workflow_runs[] | select(.head_sha == \"${GH_SHA}\" and .status == \"completed\") | .id" | head -n 1) - - if [ ! "$id" ]; then - echo "No completed runs found" - echo "ci_id=0" >> "$GITHUB_OUTPUT" - exit 0 - fi - - tag="${GH_TAG}" - fi - - echo "ci_id=$id" >> "$GITHUB_OUTPUT" - echo "tag=$tag" >> "$GITHUB_OUTPUT" - - - name: get latest ci artifacts - if: steps.get_ci_id.outputs.ci_id != 0 - uses: actions/download-artifact@v4 - env: - GH_TOKEN: ${{ github.token }} - with: - merge-multiple: true - run-id: ${{ steps.get_ci_id.outputs.ci_id }} - github-token: ${{ github.token }} - - - run: | - ls - - - name: upload release assets - if: steps.get_ci_id.outputs.ci_id != 0 - env: - GH_TOKEN: ${{ github.token }} - TAG: ${{ steps.get_ci_id.outputs.tag }} - run: | - for file in $(find . 
-type f); do - case "$file" in - *json*) echo "Skipping $file...";; - *) echo "Uploading $file..."; gh release upload $TAG "$file" --clobber --repo="${GH_REPOSITORY}" || echo "Something went wrong, skipping.";; - esac - done - - - name: upload release assets to website - if: steps.get_ci_id.outputs.ci_id != 0 - env: - TAG: ${{ steps.get_ci_id.outputs.tag }} - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config < /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi - # Accept flake config from "untrusted" users - - if command -v nix > /dev/null; then echo "accept-flake-config = true" >> /etc/nix/nix.conf; fi - - # Add conduwuit binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduwuit" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE=" >> /etc/nix/nix.conf; fi - - - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduit" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk=" >> /etc/nix/nix.conf; fi - - # Add alternate binary cache - - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi - - # Add crane binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = 
https://crane.cachix.org" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi - - # Add nix-community binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi - - - if command -v nix > /dev/null; then echo "extra-substituters = https://aseipp-nix-cache.freetls.fastly.net" >> /etc/nix/nix.conf; fi - - # Install direnv and nix-direnv - - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi - - # Allow .envrc - - if command -v nix > /dev/null; then direnv allow; fi - - # Set CARGO_HOME to a cacheable path - - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" - -ci: - stage: ci - image: nixos/nix:2.24.9 - script: - # Cache CI dependencies - - ./bin/nix-build-and-cache ci - - - direnv exec . engage - cache: - key: nix - paths: - - target - - .gitlab-ci.d - rules: - # CI on upstream runners (only available for maintainers) - - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true" - # Manual CI on unprotected branches that are not MRs - - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false" - when: manual - # Manual CI on forks - - if: $IS_UPSTREAM_CI != "true" - when: manual - - if: $CI - interruptible: true - -artifacts: - stage: artifacts - image: nixos/nix:2.24.9 - script: - - ./bin/nix-build-and-cache just .#static-x86_64-linux-musl - - cp result/bin/conduit x86_64-linux-musl - - - mkdir -p target/release - - cp result/bin/conduit target/release - - direnv exec . 
cargo deb --no-build --no-strip - - mv target/debian/*.deb x86_64-linux-musl.deb - - # Since the OCI image package is based on the binary package, this has the - # fun side effect of uploading the normal binary too. Conduit users who are - # deploying with Nix can leverage this fact by adding our binary cache to - # their systems. - # - # Note that although we have an `oci-image-x86_64-linux-musl` - # output, we don't build it because it would be largely redundant to this - # one since it's all containerized anyway. - - ./bin/nix-build-and-cache just .#oci-image - - cp result oci-image-amd64.tar.gz - - - ./bin/nix-build-and-cache just .#static-aarch64-linux-musl - - cp result/bin/conduit aarch64-linux-musl - - - ./bin/nix-build-and-cache just .#oci-image-aarch64-linux-musl - - cp result oci-image-arm64v8.tar.gz - - - ./bin/nix-build-and-cache just .#book - # We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746 - - cp -r --dereference result public - artifacts: - paths: - - x86_64-linux-musl - - aarch64-linux-musl - - x86_64-linux-musl.deb - - oci-image-amd64.tar.gz - - oci-image-arm64v8.tar.gz - - public - rules: - # CI required for all MRs - - if: $CI_PIPELINE_SOURCE == "merge_request_event" - # Optional CI on forks - - if: $IS_UPSTREAM_CI != "true" - when: manual - allow_failure: true - - if: $CI - interruptible: true - -pages: - stage: publish - dependencies: - - artifacts - only: - - next - script: - - "true" - artifacts: - paths: - - public diff --git a/.gitlab/merge_request_templates/MR.md b/.gitlab/merge_request_templates/MR.md deleted file mode 100644 index 4210554b..00000000 --- a/.gitlab/merge_request_templates/MR.md +++ /dev/null @@ -1,8 +0,0 @@ - - - ------------------------------------------------------------------------------ - -- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test` -- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license - diff --git 
a/.gitlab/route-map.yml b/.gitlab/route-map.yml deleted file mode 100644 index cf31bd18..00000000 --- a/.gitlab/route-map.yml +++ /dev/null @@ -1,3 +0,0 @@ -# Docs: Map markdown to html files -- source: /docs/(.+)\.md/ - public: '\1.html' From 57779df66a5fa6891894d7e96acac3a50b9dfecc Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 12:05:04 +0100 Subject: [PATCH 0834/1248] chore: Add mailmap --- .mailmap | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .mailmap diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..1909b3f5 --- /dev/null +++ b/.mailmap @@ -0,0 +1,13 @@ +AlexPewMaster <68469103+AlexPewMaster@users.noreply.github.com> +Daniel Wiesenberg +Devin Ragotzy +Devin Ragotzy +Jonas Platte +Jonas Zohren +Jonathan de Jong +June Clementine Strawberry +June Clementine Strawberry +June Clementine Strawberry +Rudi Floren +Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> +x4u <14617923-x4u@users.noreply.gitlab.com> From 6e5392c2f50d110b40f724725843dd74513ad8f4 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 12:15:09 +0100 Subject: [PATCH 0835/1248] =?UTF-8?q?chore:=20Add=20Timo=20K=C3=B6sters=20?= =?UTF-8?q?to=20the=20mailmap?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 1909b3f5..0cd8d7ec 100644 --- a/.mailmap +++ b/.mailmap @@ -10,4 +10,5 @@ June Clementine Strawberry June Clementine Strawberry Rudi Floren Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> +Timo Kösters x4u <14617923-x4u@users.noreply.gitlab.com> From 17a04940fca6ab39731c5f8815d5029bbf30762d Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 21:58:39 +0100 Subject: [PATCH 0836/1248] chore: Update Olivia Lee in mailmap --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 0cd8d7ec..fa267e13 100644 --- a/.mailmap +++ 
b/.mailmap @@ -8,6 +8,7 @@ Jonathan de Jong June Clementine Strawberry June Clementine Strawberry June Clementine Strawberry +Olivia Lee Rudi Floren Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> Timo Kösters From dad407fb22a991238926ab65b685ec55d2272ab9 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 12:09:14 +0100 Subject: [PATCH 0837/1248] chore: Add words to cspell dictionary --- .vscode/settings.json | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..a4fad964 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,11 @@ +{ + "cSpell.words": [ + "Forgejo", + "appservice", + "appservices", + "conduwuit", + "continuwuity", + "homeserver", + "homeservers" + ] +} From f76f669d163778a1ec768773da5cdbefc78539ec Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 23:26:25 +0100 Subject: [PATCH 0838/1248] chore: Remove the default sentry endpoint --- conduwuit-example.toml | 2 +- src/core/config/mod.rs | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 118bc57d..af8da6bb 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1428,7 +1428,7 @@ # Sentry reporting URL, if a custom one is desired. # -#sentry_endpoint = "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" +#sentry_endpoint = "" # Report your conduwuit server_name in Sentry.io crash reports and # metrics. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 0ca6bbaf..a7205423 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1636,7 +1636,7 @@ pub struct Config { /// Sentry reporting URL, if a custom one is desired. 
/// /// display: sensitive - /// default: "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" + /// default: "" #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, @@ -2207,9 +2207,7 @@ fn default_url_preview_max_spider_size() -> usize { fn default_new_user_displayname_suffix() -> String { "🏳️‍⚧️".to_owned() } -fn default_sentry_endpoint() -> Option { - Url::parse("https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536").ok() -} +fn default_sentry_endpoint() -> Option { None } fn default_sentry_traces_sample_rate() -> f32 { 0.15 } From 90880e268966718c39b9970b0d564b0bad7f823d Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Wed, 16 Apr 2025 02:56:22 +0100 Subject: [PATCH 0839/1248] Update mdBook config for continuwuity --- book.toml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/book.toml b/book.toml index 7eb1983b..46d3a7b0 100644 --- a/book.toml +++ b/book.toml @@ -1,8 +1,8 @@ [book] -title = "conduwuit 🏳️‍⚧️ 💜 🦴" -description = "conduwuit, which is a well-maintained fork of Conduit, is a simple, fast and reliable chat server for the Matrix protocol" +title = "continuwuity" +description = "continuwuity is a community continuation of the conduwuit Matrix homeserver, written in Rust." 
language = "en" -authors = ["strawberry (June)"] +authors = ["The continuwuity Community"] text-direction = "ltr" multilingual = false src = "docs" @@ -16,12 +16,9 @@ extra-watch-dirs = ["debian", "docs"] edition = "2024" [output.html] -git-repository-url = "https://github.com/girlbossceo/conduwuit" -edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}" -git-repository-icon = "fa-github-square" - -[output.html.redirect] -"/differences.html" = "https://conduwuit.puppyirl.gay/#where-is-the-differences-page" +edit-url-template = "https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/{path}" +git-repository-url = "https://forgejo.ellis.link/continuwuation/continuwuity" +git-repository-icon = "fa-git-alt" [output.html.search] limit-results = 15 From 538347204fab75e9965ee4a60f00690f8a354ddf Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Wed, 16 Apr 2025 02:56:47 +0100 Subject: [PATCH 0840/1248] Add Matrix .well-known files --- docs/static/_headers | 3 +++ docs/static/client | 1 + docs/static/server | 1 + 3 files changed, 5 insertions(+) create mode 100644 docs/static/_headers create mode 100644 docs/static/client create mode 100644 docs/static/server diff --git a/docs/static/_headers b/docs/static/_headers new file mode 100644 index 00000000..5e960241 --- /dev/null +++ b/docs/static/_headers @@ -0,0 +1,3 @@ +/.well-known/matrix/* + Access-Control-Allow-Origin: * + Content-Type: application/json diff --git a/docs/static/client b/docs/static/client new file mode 100644 index 00000000..c2b70a14 --- /dev/null +++ b/docs/static/client @@ -0,0 +1 @@ +{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"}} diff --git a/docs/static/server b/docs/static/server new file mode 100644 index 00000000..a3099f6e --- /dev/null +++ b/docs/static/server @@ -0,0 +1 @@ +{"m.server":"matrix.continuwuity.org:443"} From b91af70e0b909ce7486bb88768d65f573bcfd9e8 Mon Sep 17 
00:00:00 2001 From: Tom Foster Date: Wed, 16 Apr 2025 15:47:12 +0100 Subject: [PATCH 0841/1248] Add Forgejo CI workflow for Cloudflare Pages --- .forgejo/workflows/documentation.yml | 68 ++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 .forgejo/workflows/documentation.yml diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml new file mode 100644 index 00000000..c08c1abb --- /dev/null +++ b/.forgejo/workflows/documentation.yml @@ -0,0 +1,68 @@ +name: Documentation + +on: + pull_request: + push: + branches: + - main + tags: + - "v*" + workflow_dispatch: + +concurrency: + group: "pages-${{ github.ref }}" + cancel-in-progress: true + +jobs: + docs: + name: Build and Deploy Documentation + runs-on: not-nexy + + steps: + - name: Sync repository + uses: https://github.com/actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Setup mdBook + uses: https://github.com/peaceiris/actions-mdbook@v2 + with: + mdbook-version: "latest" + + - name: Build mdbook + run: mdbook build + + - name: Prepare static files for deployment + run: | + mkdir -p ./public/.well-known/matrix + # Copy the Matrix .well-known files + cp ./docs/static/server ./public/.well-known/matrix/server + cp ./docs/static/client ./public/.well-known/matrix/client + # Copy the custom headers file + cp ./docs/static/_headers ./public/_headers + echo "Copied .well-known files and _headers to ./public" + + - name: Setup Node.js + uses: https://github.com/actions/setup-node@v4 + with: + node-version: 20 + + - name: Install dependencies + run: npm install --save-dev wrangler@latest + + - name: Deploy to Cloudflare Pages (Production) + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./public --branch=main 
--commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}" + + - name: Deploy to Cloudflare Pages (Preview) + if: ${{ github.event_name != 'push' || github.ref != 'refs/heads/main' }} + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}" From 773c3d457b214d60376780b0baaa4031e4352685 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Thu, 17 Apr 2025 07:48:54 -0700 Subject: [PATCH 0842/1248] fix space hierarchy pagination not respecting client-specified limit. --- src/api/client/space.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 4eee9d76..92768926 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -179,7 +179,7 @@ where (next_short_room_ids.iter().ne(short_room_ids) && !next_short_room_ids.is_empty()) .then_some(PaginationToken { short_room_ids: next_short_room_ids, - limit: max_depth.try_into().ok()?, + limit: limit.try_into().ok()?, max_depth: max_depth.try_into().ok()?, suggested_only, }) From 68d68a0645d52e4e5fe5a0c27d1c1b31f87f35aa Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 15 Apr 2025 17:35:33 +0100 Subject: [PATCH 0843/1248] fix: Do not panic when sender_device is None in `/messages` route The device ID is not always present when the appservice is the client. This was causing 500 errors for some users, as appservices can lazy load from `/messages`. 
Fixes #738 Co-authored-by: Jade Ellis --- src/api/client/message.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index db11ef4a..f85611ca 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -21,7 +21,7 @@ use conduwuit_service::{ }; use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; use ruma::{ - RoomId, UserId, + DeviceId, RoomId, UserId, api::{ Direction, client::{filter::RoomEventFilter, message::get_message_events}, @@ -67,8 +67,8 @@ pub(crate) async fn get_message_events_route( body: Ruma, ) -> Result { debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted"); - let sender = body.sender(); - let (sender_user, sender_device) = sender; + let sender_user = body.sender_user(); + let sender_device = body.sender_device.as_ref(); let room_id = &body.room_id; let filter = &body.filter; @@ -132,7 +132,7 @@ pub(crate) async fn get_message_events_route( let lazy_loading_context = lazy_loading::Context { user_id: sender_user, - device_id: sender_device, + device_id: sender_device.map_or_else(|| <&DeviceId>::from(""), AsRef::as_ref), room_id, token: Some(from.into_unsigned()), options: Some(&filter.lazy_load_options), From 3a95585f0edb529154a5a8e3f181f4b5e929b698 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 16 Apr 2025 13:47:35 +0100 Subject: [PATCH 0844/1248] fix: Disambiguate appservices in lazy loading context In the previous commit, app services would all appear to be the same device when accessing the same user. This sets the device ID to be the appservice ID when available to avoid possible clobbering. 
--- src/api/client/message.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index f85611ca..9c2c4057 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -26,7 +26,10 @@ use ruma::{ Direction, client::{filter::RoomEventFilter, message::get_message_events}, }, - events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, + events::{ + AnyStateEvent, StateEventType, + TimelineEventType::{self, *}, + }, serde::Raw, }; @@ -129,10 +132,20 @@ pub(crate) async fn get_message_events_route( .take(limit) .collect() .await; + // let appservice_id = body.appservice_info.map(|appservice| + // appservice.registration.id); let lazy_loading_context = lazy_loading::Context { user_id: sender_user, - device_id: sender_device.map_or_else(|| <&DeviceId>::from(""), AsRef::as_ref), + device_id: match sender_device { + | Some(device_id) => device_id, + | None => + if let Some(registration) = body.appservice_info.as_ref() { + <&DeviceId>::from(registration.registration.id.as_str()) + } else { + <&DeviceId>::from("") + }, + }, room_id, token: Some(from.into_unsigned()), options: Some(&filter.lazy_load_options), From dc599db19c48ad3cbae15fc419c4a531d217ed05 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 16 Apr 2025 13:52:28 +0100 Subject: [PATCH 0845/1248] chore: Change branding string to continuwuity --- src/core/info/version.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/info/version.rs b/src/core/info/version.rs index 37580210..6abb6e13 100644 --- a/src/core/info/version.rs +++ b/src/core/info/version.rs @@ -7,7 +7,7 @@ use std::sync::OnceLock; -static BRANDING: &str = "conduwuit"; +static BRANDING: &str = "continuwuity"; static SEMANTIC: &str = env!("CARGO_PKG_VERSION"); static VERSION: OnceLock = OnceLock::new(); From 6b92e965824924ea7f78399758cb4c4a1057e2fb Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 16 Apr 
2025 18:54:36 +0100 Subject: [PATCH 0846/1248] feat: Docker images built with Forgejo Actions --- .dockerignore | 4 +- .forgejo/workflows/release-image.yml | 223 +++++++++++++++++++++++++++ docker/Dockerfile | 216 ++++++++++++++++++++++++++ 3 files changed, 441 insertions(+), 2 deletions(-) create mode 100644 .forgejo/workflows/release-image.yml create mode 100644 docker/Dockerfile diff --git a/.dockerignore b/.dockerignore index 35d35e1b..453634df 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,9 +1,9 @@ # Local build and dev artifacts -target -tests +target/ # Docker files Dockerfile* +docker/ # IDE files .vscode diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml new file mode 100644 index 00000000..3a6c93eb --- /dev/null +++ b/.forgejo/workflows/release-image.yml @@ -0,0 +1,223 @@ +name: Release Docker Image + +on: + pull_request: + push: + paths-ignore: + - '.gitlab-ci.yml' + - '.gitignore' + - 'renovate.json' + - 'debian/**' + - 'docker/**' + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +env: + BUILTIN_REGISTRY: forgejo.ellis.link + BUILTIN_REGISTRY_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}" + +jobs: + define-variables: + runs-on: ubuntu-latest + + outputs: + images: ${{ steps.var.outputs.images }} + images_list: ${{ steps.var.outputs.images_list }} + build_matrix: ${{ steps.var.outputs.build_matrix }} + + steps: + - name: Setting variables + uses: https://github.com/actions/github-script@v7 + id: var + with: + script: | + const githubRepo = '${{ github.repository }}'.toLowerCase() + const repoId = githubRepo.split('/')[1] + + core.setOutput('github_repository', githubRepo) + const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo + let images = [] + if (process.env.BUILTIN_REGISTRY_ENABLED === "true") { + images.push(builtinImage) + } + core.setOutput('images', images.join("\n")) + 
core.setOutput('images_list', images.join(",")) + const platforms = ['linux/amd64', 'linux/arm64'] + core.setOutput('build_matrix', JSON.stringify({ + platform: platforms, + include: platforms.map(platform => { return { + platform, + slug: platform.replace('/', '-') + }}) + })) + + build-image: + runs-on: not-nexy + container: ghcr.io/catthehacker/ubuntu:act-latest + needs: define-variables + permissions: + contents: read + packages: write + attestations: write + id-token: write + strategy: + matrix: ${{ fromJSON(needs.define-variables.outputs.build_matrix) }} + steps: + - name: Echo strategy + run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' + - name: Echo matrix + run: echo '${{ toJSON(matrix) }}' + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. + - name: Login to builtin registry + uses: docker/login-action@v3 + with: + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. 
+ - name: Extract metadata (labels, annotations) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{needs.define-variables.outputs.images}} + # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 + env: + DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index + + # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. + # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. + # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. + # It will not push images generated from a pull request + - name: Get short git commit SHA + id: sha + run: | + calculatedSha=$(git rev-parse --short ${{ github.sha }}) + echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV + - name: Get Git commit timestamps + run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV + - uses: https://github.com/Swatinem/rust-cache@v2 + with: + prefix-key: v0-rust-linux/${{ matrix.slug }} + id: rust-cache + - name: Inject cache into Docker + uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.2 + with: + cache-map: | + { + "/home/runner/.cargo/registry": "/usr/local/cargo/registry", + "/home/runner/.cargo/git/db": "/usr/local/cargo/git/db", + "./target": "/app/target", + "./timelord": "/timelord" + } + - name: Cache timelord state + uses: actions/cache@v4 + with: + path: "./timelord" + key: ${{ runner.os }}-${{ matrix.slug }} + - name: Build and push Docker image by digest + id: build + uses: docker/build-push-action@v6 + with: + context: . 
+ file: "docker/Dockerfile" + build-args: | + CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }} + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + annotations: ${{ steps.meta.outputs.annotations }} + cache-from: type=gha + cache-to: type=gha,mode=max + sbom: true + outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true + env: + SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }} + + # For publishing multi-platform manifests + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ matrix.slug }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + merge: + runs-on: not-nexy + container: ghcr.io/catthehacker/ubuntu:act-latest + needs: [define-variables, build-image] + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. 
+ - name: Login to builtin registry + uses: docker/login-action@v3 + with: + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Extract metadata (tags) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + tags: | + type=semver,pattern=v{{version}} + type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} + type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} + type=ref,event=branch + type=ref,event=pr + type=sha,format=long + images: ${{needs.define-variables.outputs.images}} + # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 + env: + DOCKER_METADATA_ANNOTATIONS_LEVELS: index + + - name: Create manifest list and push + working-directory: /tmp/digests + env: + IMAGES: ${{needs.define-variables.outputs.images}} + shell: bash + run: | + IFS=$'\n' + IMAGES_LIST=($IMAGES) + ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS) + TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS) + for REPO in "${IMAGES_LIST[@]}"; do + docker buildx imagetools create \ + $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \ + $(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \ + $(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done) + done + + - name: Inspect image + env: + IMAGES: ${{needs.define-variables.outputs.images}} + shell: bash + run: | + IMAGES_LIST=($IMAGES) + for REPO in "${IMAGES_LIST[@]}"; do + docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }} + done diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..10f54d94 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,216 @@ +ARG RUST_VERSION=1 + +FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx +FROM 
--platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS base +FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS toolchain + +# Prevent deletion of apt cache +RUN rm -f /etc/apt/apt.conf.d/docker-clean + +# Match Rustc version as close as possible +# rustc -vV +ARG LLVM_VERSION=19 +# ENV RUSTUP_TOOLCHAIN=${RUST_VERSION} + +# Install repo tools +# Line one: compiler tools +# Line two: curl, for downloading binaries +# Line three: for xx-verify +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ +apt-get update && apt-get install -y \ + clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \ + curl git \ + file + +# Create symlinks for LLVM tools +RUN <> /etc/environment + +# Configure pkg-config +RUN <> /etc/environment + echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment + echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment +EOF + +# Configure cc to use clang version +RUN <> /etc/environment + echo "CXX=clang++" >> /etc/environment +EOF + +# Cross-language LTO +RUN <> /etc/environment + echo "CXXFLAGS=-flto" >> /etc/environment + # Linker is set to target-compatible clang by xx + echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment +EOF + +# Apply CPU-specific optimizations if TARGET_CPU is provided +ARG TARGET_CPU= +RUN <> /etc/environment + echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment + echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment + fi +EOF + +# Prepare output directories +RUN mkdir /out + +FROM toolchain AS builder + +# Conduwuit version info +ARG COMMIT_SHA= +ARG CONDUWUIT_VERSION_EXTRA= +ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA +RUN <> /etc/environment +fi +EOF + +ARG TARGETPLATFORM + +# Verify environment configuration +RUN cat /etc/environment +RUN xx-cargo --print-target-triple + +# Get source +COPY . . 
+ +# Timelord sync +RUN --mount=type=cache,target=/timelord/ \ + timelord sync --source-dir . --cache-dir /timelord/ + +# Build the binary +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git/db \ + --mount=type=cache,target=/app/target \ + bash <<'EOF' + set -o allexport + . /etc/environment + TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \ + jq -r ".target_directory")) + mkdir /out/sbin + PACKAGE=conduwuit + xx-cargo build --locked --release \ + -p $PACKAGE; + BINARIES=($(cargo metadata --no-deps --format-version 1 | \ + jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name")) + for BINARY in "${BINARIES[@]}"; do + echo $BINARY + xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY + cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY + done +EOF + +# Generate Software Bill of Materials (SBOM) +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git/db \ + bash <<'EOF' + mkdir /out/sbom + typeset -A PACKAGES + for BINARY in /out/sbin/*; do + BINARY_BASE=$(basename ${BINARY}) + package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. 
== \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name") + if [ -z "$package" ]; then + continue + fi + PACKAGES[$package]=1 + done + for PACKAGE in $(echo ${!PACKAGES[@]}); do + echo $PACKAGE + cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json + done +EOF + +# Extract dynamically linked dependencies +RUN < Date: Thu, 17 Apr 2025 10:33:19 +0100 Subject: [PATCH 0847/1248] build: Use hacks for a cached actions build - Use cache dance for github actions caching - Use timelord hack to avoid bad cache invalidation --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 3a6c93eb..fddb493c 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -107,7 +107,7 @@ jobs: run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - uses: https://github.com/Swatinem/rust-cache@v2 with: - prefix-key: v0-rust-linux/${{ matrix.slug }} + prefix-key: v0-rust-linux/${{ matrix.platform.slug }} id: rust-cache - name: Inject cache into Docker uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.2 @@ -123,7 +123,7 @@ jobs: uses: actions/cache@v4 with: path: "./timelord" - key: ${{ runner.os }}-${{ matrix.slug }} + key: ${{ runner.os }}-${{ matrix.platform.slug }} - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 From 70cee36041913c2d077ff427e89a341d5e5ac2e1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:02:59 +0100 Subject: [PATCH 0848/1248] fix: Allow specifying user & password for builtin registry --- .forgejo/workflows/release-image.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index fddb493c..65dfb43c 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ 
-81,8 +81,8 @@ jobs: uses: docker/login-action@v3 with: registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. - name: Extract metadata (labels, annotations) for Docker @@ -173,8 +173,8 @@ jobs: uses: docker/login-action@v3 with: registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 From 3ced2e2f9064200ffdcad18c49866bc96cf5c2a1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:06:45 +0100 Subject: [PATCH 0849/1248] fix: Use forgejo patched artifact actions --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 65dfb43c..8a178d36 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -150,7 +150,7 @@ jobs: touch "/tmp/digests/${digest#sha256:}" - name: Upload digest - uses: actions/upload-artifact@v4 + uses: forgejo/upload-artifact@v4 with: name: digests-${{ matrix.slug }} path: /tmp/digests/* @@ -163,7 +163,7 @@ jobs: needs: [define-variables, build-image] steps: - name: Download digests - uses: actions/download-artifact@v4 + uses: forgejo/download-artifact@v4 with: path: /tmp/digests pattern: digests-* From 
0ac1ce9996952b96a865c36a6cbda85df0db60da Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:13:19 +0100 Subject: [PATCH 0850/1248] fix: Hardcode matrix --- .forgejo/workflows/release-image.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 8a178d36..9fc50441 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -61,7 +61,22 @@ jobs: attestations: write id-token: write strategy: - matrix: ${{ fromJSON(needs.define-variables.outputs.build_matrix) }} + matrix: { + "include": [ + { + "platform": "linux/amd64", + "slug": "linux-amd64" + }, + { + "platform": "linux/arm64", + "slug": "linux-arm64" + } + ], + "platform": [ + "linux/amd64", + "linux/arm64" + ] + } steps: - name: Echo strategy run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' From 93253237e938984cdaf7a947f8b3a3d4c54c63e4 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:44:18 +0100 Subject: [PATCH 0851/1248] ci: Prefix branch builds with branch- --- .forgejo/workflows/release-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 9fc50441..3970ff9d 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -202,7 +202,7 @@ jobs: type=semver,pattern=v{{version}} type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} - type=ref,event=branch + type=ref,event=branch,prefix=branch- type=ref,event=pr type=sha,format=long images: ${{needs.define-variables.outputs.images}} From 10947f6f1a268b03f7a35b30e82679b98e7b9337 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:51:18 +0100 Subject: [PATCH 0852/1248] fix: 
Replace rust cache with direct cache use, as Rust is not installed on CI image --- .forgejo/workflows/release-image.yml | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 3970ff9d..7c72082a 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -120,10 +120,20 @@ jobs: echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - - uses: https://github.com/Swatinem/rust-cache@v2 + - name: Rust cache + uses: actions/cache@v4 with: - prefix-key: v0-rust-linux/${{ matrix.platform.slug }} + key: v0-rust-linux/${{ runner.os }}-${{ matrix.slug }} + path: | + ~/.cargo/registry + ~/.cargo/git/db + ./target id: rust-cache + - name: Cache timelord state + uses: actions/cache@v4 + with: + path: "./timelord" + key: ${{ runner.os }}-${{ matrix.slug }} - name: Inject cache into Docker uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.2 with: @@ -134,11 +144,6 @@ jobs: "./target": "/app/target", "./timelord": "/timelord" } - - name: Cache timelord state - uses: actions/cache@v4 - with: - path: "./timelord" - key: ${{ runner.os }}-${{ matrix.platform.slug }} - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 From fb793e8315bd0acb222b1ac53aef07f247afff38 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 14:35:16 +0100 Subject: [PATCH 0853/1248] ci: Limit concurrency Mainly to prevent runners from getting bogged down --- .forgejo/workflows/release-image.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 7c72082a..0eaf945a 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -1,4 +1,6 @@ name: Release Docker Image +concurrency: + 
group: "release-image-${{ github.ref }}" on: pull_request: From 71d2421f55a97aee5b2b31dc1889f3f688d5315c Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 15:44:56 +0100 Subject: [PATCH 0854/1248] ci: Only prefix non-default branches AKA, tag image:main as the latest commit --- .forgejo/workflows/release-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 0eaf945a..f03341a7 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -209,7 +209,7 @@ jobs: type=semver,pattern=v{{version}} type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} - type=ref,event=branch,prefix=branch- + type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) == github.ref && '' || 'branch-' }} type=ref,event=pr type=sha,format=long images: ${{needs.define-variables.outputs.images}} From d85aaabe9ee239e22bff1be5decbaeac51880ec1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 15:59:40 +0100 Subject: [PATCH 0855/1248] fix: Disable buildkit caching This is for tom's runners, whilst they're having network issues --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index f03341a7..194fa93a 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -157,8 +157,8 @@ jobs: platforms: ${{ matrix.platform }} labels: ${{ steps.meta.outputs.labels }} annotations: ${{ steps.meta.outputs.annotations }} - cache-from: type=gha - cache-to: type=gha,mode=max + # cache-from: type=gha + # cache-to: type=gha,mode=max sbom: true outputs: type=image,"name=${{ needs.define-variables.outputs.images_list 
}}",push-by-digest=true,name-canonical=true,push=true env: From 9e0530839d54f8ae8e58e8ff01c57e0deca72af8 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 14:08:21 +0100 Subject: [PATCH 0856/1248] ci: Remove non-functional cache steps --- .forgejo/workflows/release-image.yml | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 194fa93a..6bde932d 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -122,30 +122,6 @@ jobs: echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - - name: Rust cache - uses: actions/cache@v4 - with: - key: v0-rust-linux/${{ runner.os }}-${{ matrix.slug }} - path: | - ~/.cargo/registry - ~/.cargo/git/db - ./target - id: rust-cache - - name: Cache timelord state - uses: actions/cache@v4 - with: - path: "./timelord" - key: ${{ runner.os }}-${{ matrix.slug }} - - name: Inject cache into Docker - uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.2 - with: - cache-map: | - { - "/home/runner/.cargo/registry": "/usr/local/cargo/registry", - "/home/runner/.cargo/git/db": "/usr/local/cargo/git/db", - "./target": "/app/target", - "./timelord": "/timelord" - } - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 From b16e26952af74da3cdcbfbb72ec6786a55a37133 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 14:09:09 +0100 Subject: [PATCH 0857/1248] ci: Use dind label --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 6bde932d..142529ae 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -54,7 +54,7 @@ jobs: })) build-image: - runs-on: 
not-nexy + runs-on: dind container: ghcr.io/catthehacker/ubuntu:act-latest needs: define-variables permissions: @@ -156,7 +156,7 @@ jobs: retention-days: 1 merge: - runs-on: not-nexy + runs-on: dind container: ghcr.io/catthehacker/ubuntu:act-latest needs: [define-variables, build-image] steps: From 2e6ec2f89cd218f89489d7ca86997bce94d25064 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 17:38:42 +0100 Subject: [PATCH 0858/1248] chore: Update git links --- Cargo.lock | 54 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 42 +++++++++++++++++++++--------------------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d81fdbc0..def41f68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -118,7 +118,7 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-channel" version = "2.3.1" -source = "git+https://github.com/girlbossceo/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280" +source = "git+https://forgejo.ellis.link/continuwuation/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -1047,7 +1047,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_affinity" version = "0.8.1" -source = "git+https://github.com/girlbossceo/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" +source = "git+https://forgejo.ellis.link/continuwuation/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" dependencies = [ "libc", "num_cpus", @@ -1379,7 +1379,7 @@ dependencies = [ [[package]] name = "event-listener" version = "5.3.1" -source = 
"git+https://github.com/girlbossceo/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d" +source = "git+https://forgejo.ellis.link/continuwuation/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d" dependencies = [ "concurrent-queue", "parking", @@ -2030,7 +2030,7 @@ dependencies = [ [[package]] name = "hyper-util" version = "0.1.11" -source = "git+https://github.com/girlbossceo/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" +source = "git+https://forgejo.ellis.link/continuwuation/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" dependencies = [ "bytes", "futures-channel", @@ -3625,7 +3625,7 @@ dependencies = [ [[package]] name = "resolv-conf" version = "0.7.1" -source = "git+https://github.com/girlbossceo/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" +source = "git+https://forgejo.ellis.link/continuwuation/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" dependencies = [ "hostname", ] @@ -3653,7 +3653,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "assign", "js_int", @@ -3673,7 +3673,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "assign", @@ -3708,7 +3708,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "base64 0.22.1", @@ -3740,7 +3740,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3765,7 +3765,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "bytes", "headers", @@ -3787,7 +3787,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = 
"0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3796,7 +3796,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3806,7 +3806,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3821,7 +3821,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3833,7 +3833,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3849,7 +3849,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" +source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" dependencies = [ "bindgen 0.71.1", "bzip2-sys", @@ -3866,7 +3866,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" +source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" dependencies = [ "libc", "rust-librocksdb-sys", @@ -3979,7 +3979,7 @@ checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "rustyline-async" version = "0.4.3" -source = "git+https://github.com/girlbossceo/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" +source = "git+https://forgejo.ellis.link/continuwuation/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" dependencies = [ "crossterm", "futures-channel", @@ -4675,7 +4675,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = 
"git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "paste", @@ -4685,7 +4685,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "cc", "libc", @@ -4694,7 +4694,7 @@ dependencies = [ [[package]] name = "tikv-jemallocator" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -4980,7 +4980,7 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "pin-project-lite", "tracing-attributes", @@ -4990,7 +4990,7 @@ dependencies = [ [[package]] name = "tracing-attributes" version = "0.1.28" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" 
dependencies = [ "proc-macro2", "quote", @@ -5000,7 +5000,7 @@ dependencies = [ [[package]] name = "tracing-core" version = "0.1.33" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "once_cell", "valuable", @@ -5020,7 +5020,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "once_cell", @@ -5048,7 +5048,7 @@ dependencies = [ [[package]] name = "tracing-subscriber" version = "0.3.19" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "matchers", "nu-ansi-term", diff --git a/Cargo.toml b/Cargo.toml index f5ee3f0f..e9ae0007 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,12 +14,12 @@ authors = [ categories = ["network-programming"] description = "a very cool Matrix chat homeserver written in Rust" edition = "2024" -homepage = "https://conduwuit.puppyirl.gay/" +homepage = "https://continuwuity.org/" keywords = ["chat", "matrix", "networking", "server", "uwu"] license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" -repository = "https://github.com/girlbossceo/conduwuit" +repository = "https://forgejo.ellis.link/continuwuation/continuwuity" rust-version = "1.86.0" version = "0.5.0" @@ -348,7 +348,7 @@ 
version = "0.1.2" # Used for matrix spec type definitions and helpers [workspace.dependencies.ruma] -git = "https://github.com/girlbossceo/ruwuma" +git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4" features = [ @@ -388,8 +388,8 @@ features = [ ] [workspace.dependencies.rust-rocksdb] -git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" +git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1" +rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" default-features = false features = [ "multi-threaded-cf", @@ -449,7 +449,7 @@ version = "0.37.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] -git = "https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = [ @@ -457,7 +457,7 @@ features = [ "unprefixed_malloc_on_supported_platforms", ] [workspace.dependencies.tikv-jemallocator] -git = "https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = [ @@ -465,7 +465,7 @@ features = [ "unprefixed_malloc_on_supported_platforms", ] [workspace.dependencies.tikv-jemalloc-ctl] -git = "https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = ["use_std"] @@ -542,49 +542,49 @@ version = "1.0.2" # backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing. # we can switch back to upstream if #2956 is merged and backported in the upstream repo. 
-# https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c +# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] -git = "https://github.com/girlbossceo/tracing" +git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing] -git = "https://github.com/girlbossceo/tracing" +git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-core] -git = "https://github.com/girlbossceo/tracing" +git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-log] -git = "https://github.com/girlbossceo/tracing" +git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" -# adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 -# adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b +# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 +# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b [patch.crates-io.rustyline-async] -git = "https://github.com/girlbossceo/rustyline-async" +git = "https://forgejo.ellis.link/continuwuation/rustyline-async" rev = "deaeb0694e2083f53d363b648da06e10fc13900c" # adds LIFO queue scheduling; this should be updated with PR progress. 
[patch.crates-io.event-listener] -git = "https://github.com/girlbossceo/event-listener" +git = "https://forgejo.ellis.link/continuwuation/event-listener" rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d" [patch.crates-io.async-channel] -git = "https://github.com/girlbossceo/async-channel" +git = "https://forgejo.ellis.link/continuwuation/async-channel" rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280" # adds affinity masks for selecting more than one core at a time [patch.crates-io.core_affinity] -git = "https://github.com/girlbossceo/core_affinity_rs" +git = "https://forgejo.ellis.link/continuwuation/core_affinity_rs" rev = "9c8e51510c35077df888ee72a36b4b05637147da" # reverts hyperium#148 conflicting with our delicate federation resolver hooks [patch.crates-io.hyper-util] -git = "https://github.com/girlbossceo/hyper-util" +git = "https://forgejo.ellis.link/continuwuation/hyper-util" rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" # allows no-aaaa option in resolv.conf # bumps rust edition and toolchain to 1.86.0 and 2024 # use sat_add on line number errors [patch.crates-io.resolv-conf] -git = "https://github.com/girlbossceo/resolv-conf" +git = "https://forgejo.ellis.link/continuwuation/resolv-conf" rev = "200e958941d522a70c5877e3d846f55b5586c68d" # From c5b99fbccda7140d7232cca1ca456ff4b1124109 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 21:05:17 +0100 Subject: [PATCH 0859/1248] ci: Enable buildx caching --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 142529ae..ba94acc3 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -133,8 +133,8 @@ jobs: platforms: ${{ matrix.platform }} labels: ${{ steps.meta.outputs.labels }} annotations: ${{ steps.meta.outputs.annotations }} - # cache-from: type=gha - # cache-to: type=gha,mode=max + cache-from: type=gha + cache-to: 
type=gha,mode=max sbom: true outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true env: From 298e2af3d7aed47d5f070c5259f23a2a761673b3 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 22:24:35 +0100 Subject: [PATCH 0860/1248] ci: Try invert condition for branch prefix --- .forgejo/workflows/release-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index ba94acc3..1411db61 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -185,7 +185,7 @@ jobs: type=semver,pattern=v{{version}} type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} - type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) == github.ref && '' || 'branch-' }} + type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }} type=ref,event=pr type=sha,format=long images: ${{needs.define-variables.outputs.images}} From d4561e950b25f1120d15167f62431177c466202b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 22:25:10 +0100 Subject: [PATCH 0861/1248] ci: Run builtin registry whenever secret is available --- .forgejo/workflows/release-image.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 1411db61..0b764110 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -16,7 +16,8 @@ on: env: BUILTIN_REGISTRY: forgejo.ellis.link - BUILTIN_REGISTRY_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}" +
BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}" + jobs: define-variables: From 1b1198771f577f9dad4e34a00aff0e85ecc5879a Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 22:38:17 +0100 Subject: [PATCH 0862/1248] ci: Move timelord to actions to avoid bad cache invalidations from cargo --- .forgejo/workflows/release-image.yml | 20 +++++++++++++++++++- docker/Dockerfile | 8 -------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 0b764110..adf70594 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -89,7 +89,13 @@ jobs: uses: actions/checkout@v4 with: persist-credentials: false - + - run: | + if ! command -v rustup &> /dev/null ; then + curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y + echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH + fi + - uses: https://github.com/cargo-bins/cargo-binstall@main + - run: cargo binstall timelord-cli@3.0.1 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Set up QEMU @@ -123,6 +129,18 @@ jobs: echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV + - name: Set up timelord + uses: actions/cache/restore@v3 + with: + path: /timelord/ + key: timelord-v0 # Cache is already split per runner + - name: Run timelord to set timestamps + run: timelord sync --source-dir . 
--cache-dir /timelord/ + - name: Save timelord + uses: actions/cache/save@v3 + with: + path: /timelord/ + key: timelord-v0 - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 diff --git a/docker/Dockerfile b/docker/Dockerfile index 10f54d94..536af632 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -44,15 +44,11 @@ ENV CARGO_SBOM_VERSION=0.9.1 # renovate: datasource=crate depName=lddtree ENV LDDTREE_VERSION=0.3.7 -# renovate: datasource=crate depName=timelord-cli -ENV TIMELORD_VERSION=3.0.1 - # Install unpackaged tools RUN < Date: Sun, 6 Apr 2025 05:52:48 +0000 Subject: [PATCH 0863/1248] relax Send requirement on some drier stream extensions Signed-off-by: Jason Volk --- src/core/utils/stream/expect.rs | 2 +- src/core/utils/stream/ready.rs | 4 ++-- src/core/utils/stream/try_ready.rs | 8 ++++---- src/core/utils/stream/try_tools.rs | 8 ++++---- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/core/utils/stream/expect.rs b/src/core/utils/stream/expect.rs index 3509bb83..ec572714 100644 --- a/src/core/utils/stream/expect.rs +++ b/src/core/utils/stream/expect.rs @@ -10,7 +10,7 @@ pub trait TryExpect<'a, Item> { impl<'a, T, Item> TryExpect<'a, Item> for T where - T: Stream> + TryStream + Send + 'a, + T: Stream> + Send + TryStream + 'a, Item: 'a, { #[inline] diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index dce7d378..38feaf64 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -16,7 +16,7 @@ use futures::{ /// This interface is not necessarily complete; feel free to add as-needed. 
pub trait ReadyExt where - Self: Stream + Send + Sized, + Self: Stream + Sized, { fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> where @@ -93,7 +93,7 @@ where impl ReadyExt for S where - S: Stream + Send + Sized, + S: Stream + Sized, { #[inline] fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 611c177f..287fa1e1 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -13,8 +13,8 @@ use crate::Result; /// This interface is not necessarily complete; feel free to add as-needed. pub trait TryReadyExt where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { fn ready_and_then( self, @@ -67,8 +67,8 @@ where impl TryReadyExt for S where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { #[inline] fn ready_and_then( diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs index ea3b50fc..417806fc 100644 --- a/src/core/utils/stream/try_tools.rs +++ b/src/core/utils/stream/try_tools.rs @@ -8,8 +8,8 @@ use crate::Result; /// TryStreamTools pub trait TryTools where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { fn try_take( self, @@ -23,8 +23,8 @@ where impl TryTools for S where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { #[inline] fn try_take( From 75fb19a5cacf853740fb2cfd016c435c319e5e5f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 05:42:27 +0000 Subject: [PATCH 0864/1248] add ready_find() stream extension Signed-off-by: Jason Volk --- src/core/utils/stream/ready.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/core/utils/stream/ready.rs 
b/src/core/utils/stream/ready.rs index 38feaf64..be4d1b25 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{Ready, ready}, + future::{FutureExt, Ready, ready}, stream::{ All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile, }, @@ -26,6 +26,12 @@ where where F: Fn(Item) -> bool; + fn ready_find<'a, F>(self, f: F) -> impl Future> + Send + where + Self: Send + Unpin + 'a, + F: Fn(&Item) -> bool + Send + 'a, + Item: Send; + fn ready_filter<'a, F>( self, f: F, @@ -111,6 +117,19 @@ where self.any(move |t| ready(f(t))) } + #[inline] + fn ready_find<'a, F>(self, f: F) -> impl Future> + Send + where + Self: Send + Unpin + 'a, + F: Fn(&Item) -> bool + Send + 'a, + Item: Send, + { + self.ready_filter(f) + .take(1) + .into_future() + .map(|(curr, _next)| curr) + } + #[inline] fn ready_filter<'a, F>( self, From d8b56c9c35a953aed756f4c6374109219dfdaf77 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 19:09:07 +0000 Subject: [PATCH 0865/1248] add ReadyEq future extension Signed-off-by: Jason Volk --- src/core/utils/future/bool_ext.rs | 48 +++++++++++++-------------- src/core/utils/future/mod.rs | 2 ++ src/core/utils/future/ready_eq_ext.rs | 25 ++++++++++++++ src/core/utils/mod.rs | 2 +- 4 files changed, 52 insertions(+), 25 deletions(-) create mode 100644 src/core/utils/future/ready_eq_ext.rs diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs index c93c7dbc..24f239ff 100644 --- a/src/core/utils/future/bool_ext.rs +++ b/src/core/utils/future/bool_ext.rs @@ -22,30 +22,6 @@ where Self: Sized + Unpin; } -pub async fn and(args: I) -> impl Future + Send -where - I: Iterator + Send, - F: Future + Send, -{ - type Result = crate::Result<(), ()>; - - let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); - - try_join_all(args).map(|result| result.is_ok()) -} - -pub async fn 
or(args: I) -> impl Future + Send -where - I: Iterator + Send, - F: Future + Send + Unpin, -{ - type Result = crate::Result<(), ()>; - - let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); - - select_ok(args).map(|result| result.is_ok()) -} - impl BoolExt for Fut where Fut: Future + Send, @@ -80,3 +56,27 @@ where try_select(a, b).map(|result| result.is_ok()) } } + +pub async fn and(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + try_join_all(args).map(|result| result.is_ok()) +} + +pub async fn or(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send + Unpin, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + select_ok(args).map(|result| result.is_ok()) +} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 4edd0102..d896e66d 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -2,10 +2,12 @@ mod bool_ext; mod ext_ext; mod option_ext; mod option_stream; +mod ready_eq_ext; mod try_ext_ext; pub use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; pub use option_stream::OptionStream; +pub use ready_eq_ext::ReadyEqExt; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/ready_eq_ext.rs b/src/core/utils/future/ready_eq_ext.rs new file mode 100644 index 00000000..1625adae --- /dev/null +++ b/src/core/utils/future/ready_eq_ext.rs @@ -0,0 +1,25 @@ +//! 
Future extension for Partial Equality against present value + +use futures::{Future, FutureExt}; + +pub trait ReadyEqExt +where + Self: Future + Send + Sized, + T: PartialEq + Send + Sync, +{ + fn eq(self, t: &T) -> impl Future + Send; + + fn ne(self, t: &T) -> impl Future + Send; +} + +impl ReadyEqExt for Fut +where + Fut: Future + Send + Sized, + T: PartialEq + Send + Sync, +{ + #[inline] + fn eq(self, t: &T) -> impl Future + Send { self.map(move |r| r.eq(t)) } + + #[inline] + fn ne(self, t: &T) -> impl Future + Send { self.map(move |r| r.ne(t)) } +} diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 117fb739..5e6f2868 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -28,7 +28,7 @@ pub use self::{ bool::BoolExt, bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, debug::slice_truncated as debug_slice_truncated, - future::TryExtExt as TryFutureExtExt, + future::{BoolExt as FutureBoolExt, OptionStream, TryExtExt as TryFutureExtExt}, hash::sha256::delimited as calculate_hash, html::Escape as HtmlEscape, json::{deserialize_from_str, to_canonical_object}, From e7c3f783775cde5b606005e24220c008f657b970 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 06:39:45 +0000 Subject: [PATCH 0866/1248] modernize state_res w/ stream extensions Signed-off-by: Jason Volk --- src/core/matrix/state_res/mod.rs | 220 ++++++++++++++++--------------- 1 file changed, 111 insertions(+), 109 deletions(-) diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 93c00d15..ce6b7e89 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -15,11 +15,10 @@ use std::{ borrow::Borrow, cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap, HashSet}, - fmt::Debug, hash::{BuildHasher, Hash}, }; -use futures::{Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt, future, stream}; +use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future}; use 
ruma::{ EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, events::{ @@ -37,9 +36,13 @@ pub use self::{ room_version::RoomVersion, }; use crate::{ - debug, + debug, debug_error, matrix::{event::Event, pdu::StateKey}, - trace, warn, + trace, + utils::stream::{ + BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryReadyExt, WidebandExt, + }, + warn, }; /// A mapping of event type and state_key to some value `T`, usually an @@ -112,20 +115,16 @@ where debug!(count = conflicting.len(), "conflicting events"); trace!(map = ?conflicting, "conflicting events"); - let auth_chain_diff = - get_auth_chain_diff(auth_chain_sets).chain(conflicting.into_values().flatten()); + let conflicting_values = conflicting.into_values().flatten().stream(); // `all_conflicted` contains unique items // synapse says `full_set = {eid for eid in full_conflicted_set if eid in // event_map}` - let all_conflicted: HashSet<_> = stream::iter(auth_chain_diff) - // Don't honor events we cannot "verify" - .map(|id| event_exists(id.clone()).map(move |exists| (id, exists))) - .buffer_unordered(parallel_fetches) - .filter_map(|(id, exists)| future::ready(exists.then_some(id))) - .collect() - .boxed() - .await; + let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets) + .chain(conflicting_values) + .broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id)) + .collect() + .await; debug!(count = all_conflicted.len(), "full conflicted set"); trace!(set = ?all_conflicted, "full conflicted set"); @@ -135,12 +134,15 @@ where // Get only the control events with a state_key: "" or ban/kick event (sender != // state_key) - let control_events: Vec<_> = stream::iter(all_conflicted.iter()) - .map(|id| is_power_event_id(id, &event_fetch).map(move |is| (id, is))) - .buffer_unordered(parallel_fetches) - .filter_map(|(id, is)| future::ready(is.then_some(id.clone()))) + let control_events: Vec<_> = all_conflicted + .iter() + .stream() + .wide_filter_map(async |id| { + 
is_power_event_id(id, &event_fetch) + .await + .then_some(id.clone()) + }) .collect() - .boxed() .await; // Sort the control events based on power_level/clock/event_id and @@ -160,10 +162,9 @@ where // Sequentially auth check each control event. let resolved_control = iterative_auth_check( &room_version, - sorted_control_levels.iter(), + sorted_control_levels.iter().stream(), clean.clone(), &event_fetch, - parallel_fetches, ) .await?; @@ -172,36 +173,35 @@ where // At this point the control_events have been resolved we now have to // sort the remaining events using the mainline of the resolved power level. - let deduped_power_ev = sorted_control_levels.into_iter().collect::>(); + let deduped_power_ev: HashSet<_> = sorted_control_levels.into_iter().collect(); // This removes the control events that passed auth and more importantly those // that failed auth - let events_to_resolve = all_conflicted + let events_to_resolve: Vec<_> = all_conflicted .iter() .filter(|&id| !deduped_power_ev.contains(id.borrow())) .cloned() - .collect::>(); + .collect(); debug!(count = events_to_resolve.len(), "events left to resolve"); trace!(list = ?events_to_resolve, "events left to resolve"); // This "epochs" power level event - let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, StateKey::new())); + let power_levels_ty_sk = (StateEventType::RoomPowerLevels, StateKey::new()); + let power_event = resolved_control.get(&power_levels_ty_sk); debug!(event_id = ?power_event, "power event"); let sorted_left_events = - mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) - .await?; + mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?; trace!(list = ?sorted_left_events, "events left, sorted"); let mut resolved_state = iterative_auth_check( &room_version, - sorted_left_events.iter(), + sorted_left_events.iter().stream(), resolved_control, // The control events are added to the final resolved state &event_fetch, - 
parallel_fetches, ) .await?; @@ -265,7 +265,7 @@ where #[allow(clippy::arithmetic_side_effects)] fn get_auth_chain_diff( auth_chain_sets: &[HashSet], -) -> impl Iterator + Send + use +) -> impl Stream + Send + use where Id: Clone + Eq + Hash + Send, Hasher: BuildHasher + Send + Sync, @@ -279,6 +279,7 @@ where id_counts .into_iter() .filter_map(move |(id, count)| (count < num_sets).then_some(id)) + .stream() } /// Events are sorted from "earliest" to "latest". @@ -310,13 +311,15 @@ where } // This is used in the `key_fn` passed to the lexico_topo_sort fn - let event_to_pl = stream::iter(graph.keys()) + let event_to_pl = graph + .keys() + .stream() .map(|event_id| { - get_power_level_for_sender(event_id.clone(), fetch_event, parallel_fetches) + get_power_level_for_sender(event_id.clone(), fetch_event) .map(move |res| res.map(|pl| (event_id, pl))) }) .buffer_unordered(parallel_fetches) - .try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { + .ready_try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { debug!( event_id = event_id.borrow().as_str(), power_level = i64::from(pl), @@ -324,7 +327,7 @@ where ); event_to_pl.insert(event_id.clone(), pl); - future::ok(event_to_pl) + Ok(event_to_pl) }) .boxed() .await?; @@ -475,7 +478,6 @@ where async fn get_power_level_for_sender( event_id: E::Id, fetch_event: &F, - parallel_fetches: usize, ) -> serde_json::Result where F: Fn(E::Id) -> Fut + Sync, @@ -485,19 +487,17 @@ where { debug!("fetch event ({event_id}) senders power level"); - let event = fetch_event(event_id.clone()).await; + let event = fetch_event(event_id).await; - let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + let auth_events = event.as_ref().map(Event::auth_events); - let pl = stream::iter(auth_events) - .map(|aid| fetch_event(aid.clone())) - .buffer_unordered(parallel_fetches.min(5)) - .filter_map(future::ready) - .collect::>() - .boxed() - .await + let pl = auth_events .into_iter() - .find(|aev| 
is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")); + .flatten() + .stream() + .broadn_filter_map(5, |aid| fetch_event(aid.clone())) + .ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")) + .await; let content: PowerLevelsContentFields = match pl { | None => return Ok(int!(0)), @@ -525,34 +525,28 @@ where /// For each `events_to_check` event we gather the events needed to auth it from /// the the `fetch_event` closure and verify each event using the /// `event_auth::auth_check` function. -async fn iterative_auth_check<'a, E, F, Fut, I>( +async fn iterative_auth_check<'a, E, F, Fut, S>( room_version: &RoomVersion, - events_to_check: I, + events_to_check: S, unconflicted_state: StateMap, fetch_event: &F, - parallel_fetches: usize, ) -> Result> where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, E::Id: Borrow + Clone + Eq + Ord + Send + Sync + 'a, - I: Iterator + Debug + Send + 'a, + S: Stream + Send + 'a, E: Event + Clone + Send + Sync, { debug!("starting iterative auth check"); - trace!( - list = ?events_to_check, - "events to check" - ); - let events_to_check: Vec<_> = stream::iter(events_to_check) + let events_to_check: Vec<_> = events_to_check .map(Result::Ok) - .map_ok(|event_id| { - fetch_event(event_id.clone()).map(move |result| { - result.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) - }) + .broad_and_then(async |event_id| { + fetch_event(event_id.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) }) - .try_buffer_unordered(parallel_fetches) .try_collect() .boxed() .await?; @@ -562,10 +556,10 @@ where .flat_map(|event: &E| event.auth_events().map(Clone::clone)) .collect(); - let auth_events: HashMap = stream::iter(auth_event_ids.into_iter()) - .map(fetch_event) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) + let auth_events: HashMap = auth_event_ids + .into_iter() + .stream() + .broad_filter_map(fetch_event) .map(|auth_event| 
(auth_event.event_id().clone(), auth_event)) .collect() .boxed() @@ -574,7 +568,6 @@ where let auth_events = &auth_events; let mut resolved_state = unconflicted_state; for event in &events_to_check { - let event_id = event.event_id(); let state_key = event .state_key() .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; @@ -603,24 +596,22 @@ where } } - stream::iter( - auth_types - .iter() - .filter_map(|key| Some((key, resolved_state.get(key)?))), - ) - .filter_map(|(key, ev_id)| async move { - if let Some(event) = auth_events.get(ev_id.borrow()) { - Some((key, event.clone())) - } else { - Some((key, fetch_event(ev_id.clone()).await?)) - } - }) - .for_each(|(key, event)| { - //TODO: synapse checks "rejected_reason" is None here - auth_state.insert(key.to_owned(), event); - future::ready(()) - }) - .await; + auth_types + .iter() + .stream() + .ready_filter_map(|key| Some((key, resolved_state.get(key)?))) + .filter_map(|(key, ev_id)| async move { + if let Some(event) = auth_events.get(ev_id.borrow()) { + Some((key, event.clone())) + } else { + Some((key, fetch_event(ev_id.clone()).await?)) + } + }) + .ready_for_each(|(key, event)| { + //TODO: synapse checks "rejected_reason" is None here + auth_state.insert(key.to_owned(), event); + }) + .await; debug!("event to check {:?}", event.event_id()); @@ -634,12 +625,25 @@ where future::ready(auth_state.get(&ty.with_state_key(key))) }; - if auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await? { - // add event to resolved state map - resolved_state.insert(event.event_type().with_state_key(state_key), event_id.clone()); - } else { - // synapse passes here on AuthError. We do not add this event to resolved_state. 
- warn!("event {event_id} failed the authentication check"); + let auth_result = + auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await; + + match auth_result { + | Ok(true) => { + // add event to resolved state map + resolved_state.insert( + event.event_type().with_state_key(state_key), + event.event_id().clone(), + ); + }, + | Ok(false) => { + // synapse passes here on AuthError. We do not add this event to resolved_state. + warn!("event {} failed the authentication check", event.event_id()); + }, + | Err(e) => { + debug_error!("event {} failed the authentication check: {e}", event.event_id()); + return Err(e); + }, } } @@ -659,7 +663,6 @@ async fn mainline_sort( to_sort: &[E::Id], resolved_power_level: Option, fetch_event: &F, - parallel_fetches: usize, ) -> Result> where F: Fn(E::Id) -> Fut + Sync, @@ -682,11 +685,13 @@ where let event = fetch_event(p.clone()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?; + pl = None; for aid in event.auth_events() { let ev = fetch_event(aid.clone()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") { pl = Some(aid.to_owned()); break; @@ -694,36 +699,32 @@ where } } - let mainline_map = mainline + let mainline_map: HashMap<_, _> = mainline .iter() .rev() .enumerate() .map(|(idx, eid)| ((*eid).clone(), idx)) - .collect::>(); + .collect(); - let order_map = stream::iter(to_sort.iter()) - .map(|ev_id| { - fetch_event(ev_id.clone()).map(move |event| event.map(|event| (event, ev_id))) + let order_map: HashMap<_, _> = to_sort + .iter() + .stream() + .broad_filter_map(async |ev_id| { + fetch_event(ev_id.clone()).await.map(|event| (event, ev_id)) }) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) - .map(|(event, ev_id)| { + .broad_filter_map(|(event, ev_id)| { get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event) - .map_ok(move |depth| (depth, event, ev_id)) + 
.map_ok(move |depth| (ev_id, (depth, event.origin_server_ts(), ev_id))) .map(Result::ok) }) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) - .fold(HashMap::new(), |mut order_map, (depth, event, ev_id)| { - order_map.insert(ev_id, (depth, event.origin_server_ts(), ev_id)); - future::ready(order_map) - }) + .collect() .boxed() .await; // Sort the event_ids by their depth, timestamp and EventId // unwrap is OK order map and sort_event_ids are from to_sort (the same Vec) - let mut sort_event_ids = order_map.keys().map(|&k| k.clone()).collect::>(); + let mut sort_event_ids: Vec<_> = order_map.keys().map(|&k| k.clone()).collect(); + sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]); Ok(sort_event_ids) @@ -744,6 +745,7 @@ where { while let Some(sort_ev) = event { debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); + let id = sort_ev.event_id(); if let Some(depth) = mainline_map.get(id.borrow()) { return Ok(*depth); @@ -754,6 +756,7 @@ where let aev = fetch_event(aid.clone()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") { event = Some(aev); break; @@ -884,7 +887,7 @@ mod tests { zara, }, }; - use crate::debug; + use crate::{debug, utils::stream::IterStream}; async fn test_event_sort() { use futures::future::ready; @@ -915,10 +918,9 @@ mod tests { let resolved_power = super::iterative_auth_check( &RoomVersion::V6, - sorted_power_events.iter(), + sorted_power_events.iter().stream(), HashMap::new(), // unconflicted events &fetcher, - 1, ) .await .expect("iterative auth check failed on resolved events"); @@ -932,7 +934,7 @@ mod tests { .get(&(StateEventType::RoomPowerLevels, "".into())) .cloned(); - let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher) .await .unwrap(); From 05e65936fa8a522667035b12774ef788944303e7 Mon 
Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 20:30:15 +0000 Subject: [PATCH 0867/1248] modest cleanup of snake sync service related Signed-off-by: Jason Volk --- src/api/client/sync/v4.rs | 83 +++++++-------- src/api/client/sync/v5.rs | 55 ++++------ src/service/sync/mod.rs | 205 +++++++++++++++++--------------------- 3 files changed, 143 insertions(+), 200 deletions(-) diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index f7edb8c0..55faf420 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,22 +6,23 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduCount, PduEvent, Result, debug, error, extract_variant, + Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, }, warn, }; +use conduwuit_service::{ + rooms::read_receipt::pack_receipts, + sync::{into_db_key, into_snake_key}, +}; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, - api::client::{ - error::ErrorKind, - sync::sync_events::{ - self, DeviceLists, UnreadNotificationsCount, - v4::{SlidingOp, SlidingSyncRoomHero}, - }, + api::client::sync::sync_events::{ + self, DeviceLists, UnreadNotificationsCount, + v4::{SlidingOp, SlidingSyncRoomHero}, }, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, @@ -31,7 +32,6 @@ use ruma::{ serde::Raw, uint, }; -use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; use crate::{ @@ -50,10 +50,11 @@ pub(crate) async fn sync_events_v4_route( ) -> Result { debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); + let sender_device = 
body.sender_device.as_ref().expect("user is authenticated"); let mut body = body.body; + // Setup watchers, so if there's no response, we can wait for them - let watcher = services.sync.watch(sender_user, &sender_device); + let watcher = services.sync.watch(sender_user, sender_device); let next_batch = services.globals.next_count()?; @@ -68,33 +69,21 @@ pub(crate) async fn sync_events_v4_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - if globalsince != 0 - && !services - .sync - .remembered(sender_user.clone(), sender_device.clone(), conn_id.clone()) - { + let db_key = into_db_key(sender_user, sender_device, conn_id.clone()); + if globalsince != 0 && !services.sync.remembered(&db_key) { debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - "Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); + return Err!(Request(UnknownPos("Connection data lost since last time"))); } if globalsince == 0 { - services.sync.forget_sync_request_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ); + services.sync.forget_sync_request_connection(&db_key); } // Get sticky parameters from cache - let known_rooms = services.sync.update_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let snake_key = into_snake_key(sender_user, sender_device, conn_id.clone()); + let known_rooms = services + .sync + .update_sync_request_with_cache(&snake_key, &mut body); let all_joined_rooms: Vec<_> = services .rooms @@ -136,7 +125,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.to_device.enabled.unwrap_or(false) { services .users - .remove_to_device_events(sender_user, &sender_device, globalsince) + .remove_to_device_events(sender_user, sender_device, globalsince) .await; } @@ -261,7 +250,7 @@ pub(crate) async fn sync_events_v4_route( if let Some(Ok(user_id)) = 
pdu.state_key.as_deref().map(UserId::parse) { - if user_id == *sender_user { + if user_id == sender_user { continue; } @@ -299,7 +288,7 @@ pub(crate) async fn sync_events_v4_route( .state_cache .room_members(room_id) // Don't send key updates from the sender to the sender - .ready_filter(|user_id| sender_user != user_id) + .ready_filter(|&user_id| sender_user != user_id) // Only send keys if the sender doesn't share an encrypted room with the target // already .filter_map(|user_id| { @@ -425,10 +414,9 @@ pub(crate) async fn sync_events_v4_route( }); if let Some(conn_id) = &body.conn_id { + let db_key = into_db_key(sender_user, sender_device, conn_id); services.sync.update_sync_known_rooms( - sender_user, - &sender_device, - conn_id.clone(), + &db_key, list_id.clone(), new_known_rooms, globalsince, @@ -478,23 +466,20 @@ pub(crate) async fn sync_events_v4_route( } if let Some(conn_id) = &body.conn_id { + let db_key = into_db_key(sender_user, sender_device, conn_id); services.sync.update_sync_known_rooms( - sender_user, - &sender_device, - conn_id.clone(), + &db_key, "subscriptions".to_owned(), known_subscription_rooms, globalsince, ); } - if let Some(conn_id) = &body.conn_id { - services.sync.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); + if let Some(conn_id) = body.conn_id.clone() { + let db_key = into_db_key(sender_user, sender_device, conn_id); + services + .sync + .update_sync_subscriptions(&db_key, body.room_subscriptions); } let mut rooms = BTreeMap::new(); @@ -648,7 +633,7 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_cache .room_members(room_id) - .ready_filter(|member| member != sender_user) + .ready_filter(|&member| member != sender_user) .filter_map(|user_id| { services .rooms @@ -787,7 +772,7 @@ pub(crate) async fn sync_events_v4_route( .users .get_to_device_events( sender_user, - &sender_device, + sender_device, Some(globalsince), Some(next_batch), ) @@ -805,7 
+790,7 @@ pub(crate) async fn sync_events_v4_route( }, device_one_time_keys_count: services .users - .count_one_time_keys(sender_user, &sender_device) + .count_one_time_keys(sender_user, sender_device) .await, // Fallback keys are not yet supported device_unused_fallback_key_types: None, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 684752ec..00a2d18d 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, Result, debug, error, extract_variant, + Err, Error, Result, error, extract_variant, matrix::{ TypeStateKey, pdu::{PduCount, PduEvent}, @@ -18,14 +18,11 @@ use conduwuit::{ }, warn, }; -use conduwuit_service::rooms::read_receipt::pack_receipts; +use conduwuit_service::{rooms::read_receipt::pack_receipts, sync::into_snake_key}; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, - api::client::{ - error::ErrorKind, - sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, - }, + api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, room::member::{MembershipState, RoomMemberEventContent}, @@ -74,35 +71,23 @@ pub(crate) async fn sync_events_v5_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - if globalsince != 0 - && !services.sync.snake_connection_cached( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ) { - debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - "Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); + let snake_key = into_snake_key(sender_user, sender_device, conn_id); + + if globalsince != 0 && !services.sync.snake_connection_cached(&snake_key) { + return Err!(Request(UnknownPos( + "Connection 
data unknown to server; restarting sync stream." + ))); } // Client / User requested an initial sync if globalsince == 0 { - services.sync.forget_snake_sync_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ); + services.sync.forget_snake_sync_connection(&snake_key); } // Get sticky parameters from cache - let known_rooms = services.sync.update_snake_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let known_rooms = services + .sync + .update_snake_sync_request_with_cache(&snake_key, &mut body); let all_joined_rooms: Vec<_> = services .rooms @@ -254,11 +239,10 @@ async fn fetch_subscriptions( // body.room_subscriptions.remove(&r); //} - if let Some(conn_id) = &body.conn_id { + if let Some(conn_id) = body.conn_id.clone() { + let snake_key = into_snake_key(sender_user, sender_device, conn_id); services.sync.update_snake_sync_known_rooms( - sender_user, - sender_device, - conn_id.clone(), + &snake_key, "subscriptions".to_owned(), known_subscription_rooms, globalsince, @@ -340,11 +324,10 @@ async fn handle_lists<'a>( count: ruma_from_usize(active_rooms.len()), }); - if let Some(conn_id) = &body.conn_id { + if let Some(conn_id) = body.conn_id.clone() { + let snake_key = into_snake_key(sender_user, sender_device, conn_id); services.sync.update_snake_sync_known_rooms( - sender_user, - sender_device, - conn_id.clone(), + &snake_key, list_id.clone(), new_known_rooms, globalsince, diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index bf2bc142..b095d2c1 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -8,7 +8,7 @@ use std::{ use conduwuit::{Result, Server}; use database::Map; use ruma::{ - DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, + OwnedDeviceId, OwnedRoomId, OwnedUserId, api::client::sync::sync_events::{ self, v4::{ExtensionsConfig, SyncRequestList}, @@ -49,8 +49,8 @@ struct Services { struct SlidingSyncCache { lists: BTreeMap, subscriptions: 
BTreeMap, - known_rooms: BTreeMap>, /* For every room, the - * roomsince number */ + // For every room, the roomsince number + known_rooms: BTreeMap>, extensions: ExtensionsConfig, } @@ -98,79 +98,35 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -/// load params from cache if body doesn't contain it, as long as it's allowed -/// in some cases we may need to allow an empty list as an actual value -fn list_or_sticky(target: &mut Vec, cached: &Vec) { - if target.is_empty() { - target.clone_from(cached); - } -} -fn some_or_sticky(target: &mut Option, cached: Option) { - if target.is_none() { - *target = cached; - } -} - impl Service { - pub fn snake_connection_cached( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, - ) -> bool { - self.snake_connections - .lock() - .unwrap() - .contains_key(&(user_id, device_id, conn_id)) - } - - pub fn forget_snake_sync_connection( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, - ) { + pub fn snake_connection_cached(&self, key: &SnakeConnectionsKey) -> bool { self.snake_connections .lock() .expect("locked") - .remove(&(user_id, device_id, conn_id)); + .contains_key(key) } - pub fn remembered( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - ) -> bool { - self.connections - .lock() - .unwrap() - .contains_key(&(user_id, device_id, conn_id)) + pub fn forget_snake_sync_connection(&self, key: &SnakeConnectionsKey) { + self.snake_connections.lock().expect("locked").remove(key); } - pub fn forget_sync_request_connection( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - ) { - self.connections - .lock() - .expect("locked") - .remove(&(user_id, device_id, conn_id)); + pub fn remembered(&self, key: &DbConnectionsKey) -> bool { + self.connections.lock().expect("locked").contains_key(key) + } + + pub fn forget_sync_request_connection(&self, key: 
&DbConnectionsKey) { + self.connections.lock().expect("locked").remove(key); } pub fn update_snake_sync_request_with_cache( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, + snake_key: &SnakeConnectionsKey, request: &mut v5::Request, ) -> BTreeMap> { - let conn_id = request.conn_id.clone(); let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id, device_id, conn_id)) + .entry(snake_key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); @@ -268,25 +224,23 @@ impl Service { pub fn update_sync_request_with_cache( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, + key: &SnakeConnectionsKey, request: &mut sync_events::v4::Request, ) -> BTreeMap> { let Some(conn_id) = request.conn_id.clone() else { return BTreeMap::new(); }; + let key = into_db_key(key.0.clone(), key.1.clone(), conn_id); let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone(cache.entry(key).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -371,22 +325,18 @@ impl Service { pub fn update_sync_subscriptions( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, + key: &DbConnectionsKey, subscriptions: BTreeMap, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - 
Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -395,90 +345,81 @@ impl Service { pub fn update_sync_known_rooms( &self, - user_id: &UserId, - device_id: &DeviceId, - conn_id: String, + key: &DbConnectionsKey, list_id: String, new_cached_rooms: BTreeSet, globalsince: u64, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry((user_id.to_owned(), device_id.to_owned(), conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); + let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); - for (roomid, lastsince) in cached + for (room_id, lastsince) in cached .known_rooms .entry(list_id.clone()) .or_default() .iter_mut() { - if !new_cached_rooms.contains(roomid) { + if !new_cached_rooms.contains(room_id) { *lastsince = 0; } } let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); + for room_id in new_cached_rooms { + list.insert(room_id, globalsince); } } pub fn update_snake_sync_known_rooms( &self, - user_id: &UserId, - device_id: &DeviceId, - conn_id: String, + 
key: &SnakeConnectionsKey, list_id: String, new_cached_rooms: BTreeSet, globalsince: u64, ) { + assert!(key.2.is_some(), "Some(conn_id) required for this call"); let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id.to_owned(), device_id.to_owned(), Some(conn_id))) + .entry(key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); drop(cache); - for (roomid, lastsince) in cached + for (room_id, lastsince) in cached .known_rooms .entry(list_id.clone()) .or_default() .iter_mut() { - if !new_cached_rooms.contains(roomid) { + if !new_cached_rooms.contains(room_id) { *lastsince = 0; } } let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); + for room_id in new_cached_rooms { + list.insert(room_id, globalsince); } } pub fn update_snake_sync_subscriptions( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, + key: &SnakeConnectionsKey, subscriptions: BTreeMap, ) { let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id, device_id, conn_id)) + .entry(key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); @@ -487,3 +428,37 @@ impl Service { cached.subscriptions = subscriptions; } } + +#[inline] +pub fn into_snake_key(user_id: U, device_id: D, conn_id: C) -> SnakeConnectionsKey +where + U: Into, + D: Into, + C: Into>, +{ + (user_id.into(), device_id.into(), conn_id.into()) +} + +#[inline] +pub fn into_db_key(user_id: U, device_id: D, conn_id: C) -> DbConnectionsKey +where + U: Into, + D: Into, + C: Into, +{ + (user_id.into(), device_id.into(), conn_id.into()) +} + +/// load params from cache if body doesn't contain it, as long as it's allowed +/// in some cases we may need to allow an empty list as an 
actual value +fn list_or_sticky(target: &mut Vec, cached: &Vec) { + if target.is_empty() { + target.clone_from(cached); + } +} + +fn some_or_sticky(target: &mut Option, cached: Option) { + if target.is_none() { + *target = cached; + } +} From e8a64bb59d13aad7588a1ca28422f463e0f1a302 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 21:59:18 +0000 Subject: [PATCH 0868/1248] increase snake sync asynchronicity Signed-off-by: Jason Volk --- src/api/client/sync/mod.rs | 36 +----- src/api/client/sync/v3.rs | 17 ++- src/api/client/sync/v4.rs | 38 +++++- src/api/client/sync/v5.rs | 256 ++++++++++++++++++++++++------------- 4 files changed, 212 insertions(+), 135 deletions(-) diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 14459acf..40370160 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -5,16 +5,12 @@ mod v5; use conduwuit::{ Error, PduCount, Result, matrix::pdu::PduEvent, - utils::{ - IterStream, - stream::{BroadbandExt, ReadyExt, TryIgnore}, - }, + utils::stream::{BroadbandExt, ReadyExt, TryIgnore}, }; use conduwuit_service::Services; use futures::{StreamExt, pin_mut}; use ruma::{ RoomId, UserId, - directory::RoomTypeFilter, events::TimelineEventType::{ self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, }, @@ -87,33 +83,3 @@ async fn share_encrypted_room( }) .await } - -pub(crate) async fn filter_rooms<'a>( - services: &Services, - rooms: &[&'a RoomId], - filter: &[RoomTypeFilter], - negate: bool, -) -> Vec<&'a RoomId> { - rooms - .iter() - .stream() - .filter_map(|r| async move { - let room_type = services.rooms.state_accessor.get_room_type(r).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; - - include.then_some(r) - }) - .collect() - 
.await -} diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 24930941..8eac6b66 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -14,8 +14,8 @@ use conduwuit::{ pair_of, ref_at, result::FlatOk, utils::{ - self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, - future::OptionStream, + self, BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::{OptionStream, ReadyEqExt}, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, @@ -32,6 +32,7 @@ use conduwuit_service::{ use futures::{ FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::{OptionFuture, join, join3, join4, join5, try_join, try_join4}, + pin_mut, }; use ruma::{ DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, @@ -433,10 +434,14 @@ async fn handle_left_room( return Ok(None); } - if !services.rooms.metadata.exists(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - || services.rooms.metadata.is_banned(room_id).await - { + let is_not_found = services.rooms.metadata.exists(room_id).eq(&false); + + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + let is_banned = services.rooms.metadata.is_banned(room_id); + + pin_mut!(is_not_found, is_disabled, is_banned); + if is_not_found.or(is_disabled).or(is_banned).await { // This is just a rejected invite, not a room we know // Insert a leave event anyways for the client let event = PduEvent { diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 55faf420..f153b2da 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -7,6 +7,7 @@ use std::{ use axum::extract::State; use conduwuit::{ Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant, + matrix::TypeStateKey, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, @@ -14,6 +15,7 @@ use conduwuit::{ warn, }; use conduwuit_service::{ + 
Services, rooms::read_receipt::pack_receipts, sync::{into_db_key, into_snake_key}, }; @@ -24,6 +26,7 @@ use ruma::{ self, DeviceLists, UnreadNotificationsCount, v4::{SlidingOp, SlidingSyncRoomHero}, }, + directory::RoomTypeFilter, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, @@ -36,10 +39,11 @@ use ruma::{ use super::{load_timeline, share_encrypted_room}; use crate::{ Ruma, - client::{DEFAULT_BUMP_TYPES, filter_rooms, ignored_filter, sync::v5::TodoRooms}, + client::{DEFAULT_BUMP_TYPES, ignored_filter}, }; -pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; +type TodoRooms = BTreeMap, usize, u64)>; +const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` /// @@ -802,3 +806,33 @@ pub(crate) async fn sync_events_v4_route( delta_token: None, }) } + +async fn filter_rooms<'a>( + services: &Services, + rooms: &[&'a RoomId], + filter: &[RoomTypeFilter], + negate: bool, +) -> Vec<&'a RoomId> { + rooms + .iter() + .stream() + .filter_map(|r| async move { + let room_type = services.rooms.state_accessor.get_room_type(r).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(r) + }) + .collect() + .await +} diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 00a2d18d..f3fc0f44 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -1,28 +1,35 @@ use std::{ cmp::{self, Ordering}, collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + ops::Deref, time::Duration, }; use axum::extract::State; use conduwuit::{ - Err, Error, Result, error, extract_variant, + Err, Error, Result, error, extract_variant, is_equal_to, matrix::{ TypeStateKey, 
pdu::{PduCount, PduEvent}, }, trace, utils::{ - BoolExt, IterStream, ReadyExt, TryFutureExtExt, + BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::ReadyEqExt, math::{ruma_from_usize, usize_from_ruma}, }, warn, }; -use conduwuit_service::{rooms::read_receipt::pack_receipts, sync::into_snake_key}; -use futures::{FutureExt, StreamExt, TryFutureExt}; +use conduwuit_service::{Services, rooms::read_receipt::pack_receipts, sync::into_snake_key}; +use futures::{ + FutureExt, Stream, StreamExt, TryFutureExt, + future::{OptionFuture, join3, try_join4}, + pin_mut, +}; use ruma::{ DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + directory::RoomTypeFilter, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, room::member::{MembershipState, RoomMemberEventContent}, @@ -31,13 +38,15 @@ use ruma::{ uint, }; -use super::{filter_rooms, share_encrypted_room}; +use super::share_encrypted_room; use crate::{ Ruma, client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline}, }; type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); +type TodoRooms = BTreeMap, usize, u64)>; +type KnownRooms = BTreeMap>; /// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync` /// ([MSC4186]) @@ -50,7 +59,7 @@ type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request /// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 /// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186 pub(crate) async fn sync_events_v5_route( - State(services): State, + State(ref services): State, body: Ruma, ) -> Result { debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); @@ -89,65 +98,77 @@ pub(crate) async fn sync_events_v5_route( .sync .update_snake_sync_request_with_cache(&snake_key, &mut body); - let all_joined_rooms: Vec<_> = 
services + let all_joined_rooms = services .rooms .state_cache .rooms_joined(sender_user) .map(ToOwned::to_owned) - .collect() - .await; + .collect::>(); - let all_invited_rooms: Vec<_> = services + let all_invited_rooms = services .rooms .state_cache .rooms_invited(sender_user) .map(|r| r.0) - .collect() - .await; + .collect::>(); - let all_knocked_rooms: Vec<_> = services + let all_knocked_rooms = services .rooms .state_cache .rooms_knocked(sender_user) .map(|r| r.0) - .collect() - .await; + .collect::>(); - let all_rooms: Vec<&RoomId> = all_joined_rooms - .iter() - .map(AsRef::as_ref) - .chain(all_invited_rooms.iter().map(AsRef::as_ref)) - .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) - .collect(); + let (all_joined_rooms, all_invited_rooms, all_knocked_rooms) = + join3(all_joined_rooms, all_invited_rooms, all_knocked_rooms).await; - let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); - let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref); + let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref); + let all_knocked_rooms = all_knocked_rooms.iter().map(AsRef::as_ref); + let all_rooms = all_joined_rooms + .clone() + .chain(all_invited_rooms.clone()) + .chain(all_knocked_rooms.clone()); let pos = next_batch.clone().to_string(); let mut todo_rooms: TodoRooms = BTreeMap::new(); let sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body); + + let account_data = collect_account_data(services, sync_info).map(Ok); + + let e2ee = collect_e2ee(services, sync_info, all_joined_rooms.clone()); + + let to_device = collect_to_device(services, sync_info, next_batch).map(Ok); + + let receipts = collect_receipts(services).map(Ok); + + let (account_data, e2ee, to_device, receipts) = + try_join4(account_data, e2ee, to_device, receipts).await?; + + let extensions = sync_events::v5::response::Extensions { + account_data, + e2ee, + 
to_device, + receipts, + typing: sync_events::v5::response::Typing::default(), + }; + let mut response = sync_events::v5::Response { txn_id: body.txn_id.clone(), pos, lists: BTreeMap::new(), rooms: BTreeMap::new(), - extensions: sync_events::v5::response::Extensions { - account_data: collect_account_data(services, sync_info).await, - e2ee: collect_e2ee(services, sync_info, &all_joined_rooms).await?, - to_device: collect_to_device(services, sync_info, next_batch).await, - receipts: collect_receipts(services).await, - typing: sync_events::v5::response::Typing::default(), - }, + extensions, }; handle_lists( services, sync_info, - &all_invited_rooms, - &all_joined_rooms, - &all_rooms, + all_invited_rooms.clone(), + all_joined_rooms.clone(), + all_rooms, &mut todo_rooms, &known_rooms, &mut response, @@ -160,7 +181,7 @@ pub(crate) async fn sync_events_v5_route( services, sender_user, next_batch, - &all_invited_rooms, + all_invited_rooms.clone(), &todo_rooms, &mut response, &body, @@ -185,31 +206,33 @@ pub(crate) async fn sync_events_v5_route( } trace!( - rooms=?response.rooms.len(), - account_data=?response.extensions.account_data.rooms.len(), - receipts=?response.extensions.receipts.rooms.len(), + rooms = ?response.rooms.len(), + account_data = ?response.extensions.account_data.rooms.len(), + receipts = ?response.extensions.receipts.rooms.len(), "responding to request with" ); Ok(response) } -type KnownRooms = BTreeMap>; -pub(crate) type TodoRooms = BTreeMap, usize, u64)>; - async fn fetch_subscriptions( - services: crate::State, + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, known_rooms: &KnownRooms, todo_rooms: &mut TodoRooms, ) { let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - || services.rooms.metadata.is_banned(room_id).await - { + let not_exists = 
services.rooms.metadata.exists(room_id).eq(&false); + + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + let is_banned = services.rooms.metadata.is_banned(room_id); + + pin_mut!(not_exists, is_disabled, is_banned); + if not_exists.or(is_disabled).or(is_banned).await { continue; } + let todo_room = todo_rooms .entry(room_id.clone()) @@ -251,27 +274,39 @@ async fn fetch_subscriptions( } #[allow(clippy::too_many_arguments)] -async fn handle_lists<'a>( - services: crate::State, +async fn handle_lists<'a, Rooms, AllRooms>( + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, - all_invited_rooms: &Vec<&'a RoomId>, - all_joined_rooms: &Vec<&'a RoomId>, - all_rooms: &Vec<&'a RoomId>, + all_invited_rooms: Rooms, + all_joined_rooms: Rooms, + all_rooms: AllRooms, todo_rooms: &'a mut TodoRooms, known_rooms: &'a KnownRooms, response: &'_ mut sync_events::v5::Response, -) -> KnownRooms { +) -> KnownRooms +where + Rooms: Iterator + Clone + Send + 'a, + AllRooms: Iterator + Clone + Send + 'a, +{ for (list_id, list) in &body.lists { - let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { - | Some(true) => all_invited_rooms, - | Some(false) => all_joined_rooms, - | None => all_rooms, + let active_rooms: Vec<_> = match list.filters.as_ref().and_then(|f| f.is_invite) { + | None => all_rooms.clone().collect(), + | Some(true) => all_invited_rooms.clone().collect(), + | Some(false) => all_joined_rooms.clone().collect(), }; - let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { - | Some(filter) if filter.is_empty() => active_rooms, - | Some(value) => &filter_rooms(&services, active_rooms, &value, true).await, + let active_rooms = match list.filters.as_ref().map(|f| &f.not_room_types) { | None => active_rooms, + | Some(filter) if filter.is_empty() => active_rooms, + | Some(value) => + filter_rooms( + services, + value, + &true, + active_rooms.iter().stream().map(Deref::deref), + ) + .collect() + 
.await, }; let mut new_known_rooms: BTreeSet = BTreeSet::new(); @@ -289,6 +324,7 @@ async fn handle_lists<'a>( let new_rooms: BTreeSet = room_ids.clone().into_iter().map(From::from).collect(); + new_known_rooms.extend(new_rooms); //new_known_rooms.extend(room_ids..cloned()); for room_id in room_ids { @@ -334,18 +370,22 @@ async fn handle_lists<'a>( ); } } + BTreeMap::default() } -async fn process_rooms( - services: crate::State, +async fn process_rooms<'a, Rooms>( + services: &Services, sender_user: &UserId, next_batch: u64, - all_invited_rooms: &[&RoomId], + all_invited_rooms: Rooms, todo_rooms: &TodoRooms, response: &mut sync_events::v5::Response, body: &sync_events::v5::Request, -) -> Result> { +) -> Result> +where + Rooms: Iterator + Clone + Send + 'a, +{ let mut rooms = BTreeMap::new(); for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms { let roomsincecount = PduCount::Normal(*roomsince); @@ -354,7 +394,7 @@ async fn process_rooms( let mut invite_state = None; let (timeline_pdus, limited); let new_room_id: &RoomId = (*room_id).as_ref(); - if all_invited_rooms.contains(&new_room_id) { + if all_invited_rooms.clone().any(is_equal_to!(new_room_id)) { // TODO: figure out a timestamp we can use for remote invites invite_state = services .rooms @@ -366,7 +406,7 @@ async fn process_rooms( (timeline_pdus, limited) = (Vec::new(), true); } else { (timeline_pdus, limited) = match load_timeline( - &services, + services, sender_user, room_id, roomsincecount, @@ -399,18 +439,17 @@ async fn process_rooms( .rooms .read_receipt .last_privateread_update(sender_user, room_id) - .await > *roomsince; + .await; - let private_read_event = if last_privateread_update { - services - .rooms - .read_receipt - .private_read_get(room_id, sender_user) - .await - .ok() - } else { - None - }; + let private_read_event: OptionFuture<_> = (last_privateread_update > *roomsince) + .then(|| { + services + .rooms + .read_receipt + .private_read_get(room_id, sender_user) 
+ .ok() + }) + .into(); let mut receipts: Vec> = services .rooms @@ -426,7 +465,7 @@ async fn process_rooms( .collect() .await; - if let Some(private_read_event) = private_read_event { + if let Some(private_read_event) = private_read_event.await.flatten() { receipts.push(private_read_event); } @@ -475,7 +514,7 @@ async fn process_rooms( let room_events: Vec<_> = timeline_pdus .iter() .stream() - .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) + .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) .map(|(_, pdu)| pdu.to_sync_room_event()) .collect() .await; @@ -627,7 +666,7 @@ async fn process_rooms( Ok(rooms) } async fn collect_account_data( - services: crate::State, + services: &Services, (sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request), ) -> sync_events::v5::response::AccountData { let mut account_data = sync_events::v5::response::AccountData { @@ -663,16 +702,19 @@ async fn collect_account_data( account_data } -async fn collect_e2ee<'a>( - services: crate::State, +async fn collect_e2ee<'a, Rooms>( + services: &Services, (sender_user, sender_device, globalsince, body): ( &UserId, &DeviceId, u64, &sync_events::v5::Request, ), - all_joined_rooms: &'a Vec<&'a RoomId>, -) -> Result { + all_joined_rooms: Rooms, +) -> Result +where + Rooms: Iterator + Send + 'a, +{ if !body.extensions.e2ee.enabled.unwrap_or(false) { return Ok(sync_events::v5::response::E2EE::default()); } @@ -773,7 +815,7 @@ async fn collect_e2ee<'a>( | MembershipState::Join => { // A new user joined an encrypted room if !share_encrypted_room( - &services, + services, sender_user, user_id, Some(room_id), @@ -806,7 +848,7 @@ async fn collect_e2ee<'a>( // Only send keys if the sender doesn't share an encrypted room with the target // already .filter_map(|user_id| { - share_encrypted_room(&services, sender_user, user_id, Some(room_id)) + share_encrypted_room(services, sender_user, user_id, Some(room_id)) .map(|res| 
res.or_some(user_id.to_owned())) }) .collect::>() @@ -829,7 +871,7 @@ async fn collect_e2ee<'a>( for user_id in left_encrypted_users { let dont_share_encrypted_room = - !share_encrypted_room(&services, sender_user, &user_id, None).await; + !share_encrypted_room(services, sender_user, &user_id, None).await; // If the user doesn't share an encrypted room with the target anymore, we need // to tell them @@ -839,20 +881,22 @@ async fn collect_e2ee<'a>( } Ok(sync_events::v5::response::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, + device_unused_fallback_key_types: None, + device_one_time_keys_count: services .users .count_one_time_keys(sender_user, sender_device) .await, - device_unused_fallback_key_types: None, + + device_lists: DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, }) } async fn collect_to_device( - services: crate::State, + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, next_batch: u64, ) -> Option { @@ -875,7 +919,35 @@ async fn collect_to_device( }) } -async fn collect_receipts(_services: crate::State) -> sync_events::v5::response::Receipts { +async fn collect_receipts(_services: &Services) -> sync_events::v5::response::Receipts { sync_events::v5::response::Receipts { rooms: BTreeMap::new() } // TODO: get explicitly requested read receipts } + +fn filter_rooms<'a, Rooms>( + services: &'a Services, + filter: &'a [RoomTypeFilter], + negate: &'a bool, + rooms: Rooms, +) -> impl Stream + Send + 'a +where + Rooms: Stream + Send + 'a, +{ + rooms.filter_map(async |room_id| { + let room_type = services.rooms.state_accessor.get_room_type(room_id).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if *negate { + 
!filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(room_id) + }) +} From eac713a2a91569ed0736aa8c88cfe543b44ec4fb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 22:50:01 +0000 Subject: [PATCH 0869/1248] slightly optimize user directory search loop Signed-off-by: Jason Volk --- src/api/client/user_directory.rs | 43 ++++++++++++++++---------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 99b3bb67..748fc049 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,7 +1,10 @@ use axum::extract::State; use conduwuit::{ Result, - utils::{future::BoolExt, stream::BroadbandExt}, + utils::{ + future::BoolExt, + stream::{BroadbandExt, ReadyExt}, + }, }; use futures::{FutureExt, StreamExt, pin_mut}; use ruma::{ @@ -30,29 +33,21 @@ pub(crate) async fn search_users_route( .map_or(LIMIT_DEFAULT, usize::from) .min(LIMIT_MAX); + let search_term = body.search_term.to_lowercase(); let mut users = services .users .stream() + .ready_filter(|user_id| user_id.as_str().to_lowercase().contains(&search_term)) .map(ToOwned::to_owned) .broad_filter_map(async |user_id| { - let user = search_users::v3::User { - user_id: user_id.clone(), - display_name: services.users.displayname(&user_id).await.ok(), - avatar_url: services.users.avatar_url(&user_id).await.ok(), - }; + let display_name = services.users.displayname(&user_id).await.ok(); - let user_id_matches = user - .user_id - .as_str() - .to_lowercase() - .contains(&body.search_term.to_lowercase()); + let display_name_matches = display_name + .as_deref() + .map(str::to_lowercase) + .is_some_and(|display_name| display_name.contains(&search_term)); - let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| { - name.to_lowercase() - .contains(&body.search_term.to_lowercase()) - }); - - if 
!user_id_matches && !user_displayname_matches { + if !display_name_matches { return None; } @@ -61,11 +56,11 @@ pub(crate) async fn search_users_route( .state_cache .rooms_joined(&user_id) .map(ToOwned::to_owned) - .any(|room| async move { + .broad_any(async |room_id| { services .rooms .state_accessor - .get_join_rules(&room) + .get_join_rules(&room_id) .map(|rule| matches!(rule, JoinRule::Public)) .await }); @@ -76,8 +71,14 @@ pub(crate) async fn search_users_route( .user_sees_user(sender_user, &user_id); pin_mut!(user_in_public_room, user_sees_user); - - user_in_public_room.or(user_sees_user).await.then_some(user) + user_in_public_room + .or(user_sees_user) + .await + .then_some(search_users::v3::User { + user_id: user_id.clone(), + display_name, + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }) }); let results = users.by_ref().take(limit).collect().await; From 83126cc66765541c88168e0ada63c41ed9f07058 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 9 Apr 2025 03:35:40 +0000 Subject: [PATCH 0870/1248] propagate better message from RustlsConfig load error. (#734) Signed-off-by: Jason Volk --- src/router/serve/tls.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index dd46ab53..20b58601 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -31,12 +31,14 @@ pub(super) async fn serve( .install_default() .expect("failed to initialise aws-lc-rs rustls crypto provider"); - debug!("Using direct TLS. Certificate path {certs} and certificate private key path {key}",); info!( "Note: It is strongly recommended that you use a reverse proxy instead of running \ conduwuit directly with TLS." ); - let conf = RustlsConfig::from_pem_file(certs, key).await?; + debug!("Using direct TLS. 
Certificate path {certs} and certificate private key path {key}",); + let conf = RustlsConfig::from_pem_file(certs, key) + .await + .map_err(|e| err!(Config("tls", "Failed to load certificates or key: {e}")))?; let mut join_set = JoinSet::new(); let app = app.into_make_service_with_connect_info::(); From b3e5d2f683d633c948cc66b6cec77bbca6952f91 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 8 Apr 2025 04:39:01 +0000 Subject: [PATCH 0871/1248] remove box ids from admin room command arguments Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 33 +++++++++++----------- src/admin/debug/mod.rs | 32 ++++++++++----------- src/admin/federation/commands.rs | 10 +++---- src/admin/federation/mod.rs | 10 +++---- src/admin/media/commands.rs | 8 +++--- src/admin/media/mod.rs | 8 +++--- src/admin/query/account_data.rs | 18 ++++++------ src/admin/query/appservice.rs | 2 +- src/admin/query/globals.rs | 4 +-- src/admin/query/presence.rs | 4 +-- src/admin/query/pusher.rs | 4 +-- src/admin/query/room_alias.rs | 6 ++-- src/admin/query/room_state_cache.rs | 44 +++++++++++++++-------------- src/admin/query/sending.rs | 20 ++++++------- src/admin/room/alias.rs | 8 ++---- src/admin/room/directory.rs | 6 ++-- src/admin/room/info.rs | 10 +++---- src/admin/room/moderation.rs | 10 +++---- src/admin/user/commands.rs | 10 +++---- src/admin/user/mod.rs | 10 +++---- 20 files changed, 128 insertions(+), 129 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 87ca03a0..6d0e375a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -17,10 +17,9 @@ use conduwuit::{ }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, - ServerName, - api::{client::error::ErrorKind, federation::event::get_room_state}, - events::room::message::RoomMessageEventContent, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, 
+ OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, + api::federation::event::get_room_state, events::room::message::RoomMessageEventContent, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -40,7 +39,7 @@ pub(super) async fn echo(&self, message: Vec) -> Result, + event_id: OwnedEventId, ) -> Result { let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else { return Ok(RoomMessageEventContent::notice_plain("Event not found.")); @@ -109,7 +108,7 @@ pub(super) async fn parse_pdu(&self) -> Result { } #[admin_command] -pub(super) async fn get_pdu(&self, event_id: Box) -> Result { +pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { let mut outlier = false; let mut pdu_json = self .services @@ -173,7 +172,7 @@ pub(super) async fn get_short_pdu( #[admin_command] pub(super) async fn get_remote_pdu_list( &self, - server: Box, + server: OwnedServerName, force: bool, ) -> Result { if !self.services.server.config.allow_federation { @@ -359,7 +358,7 @@ pub(super) async fn get_room_state( } #[admin_command] -pub(super) async fn ping(&self, server: Box) -> Result { +pub(super) async fn ping(&self, server: OwnedServerName) -> Result { if server == self.services.globals.server_name() { return Ok(RoomMessageEventContent::text_plain( "Not allowed to send federation requests to ourselves.", @@ -538,7 +537,7 @@ pub(super) async fn verify_json(&self) -> Result { } #[admin_command] -pub(super) async fn verify_pdu(&self, event_id: Box) -> Result { +pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result { let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; event.remove("event_id"); @@ -556,7 +555,7 @@ pub(super) async fn verify_pdu(&self, event_id: Box) -> Result, + room_id: OwnedRoomId, ) -> Result { if !self .services @@ -585,7 +584,7 @@ pub(super) async fn first_pdu_in_room( #[tracing::instrument(skip(self))] pub(super) async fn latest_pdu_in_room( &self, - room_id: Box, + 
room_id: OwnedRoomId, ) -> Result { if !self .services @@ -614,8 +613,8 @@ pub(super) async fn latest_pdu_in_room( #[tracing::instrument(skip(self))] pub(super) async fn force_set_room_state_from_server( &self, - room_id: Box, - server_name: Box, + room_id: OwnedRoomId, + server_name: OwnedServerName, ) -> Result { if !self .services @@ -763,8 +762,8 @@ pub(super) async fn force_set_room_state_from_server( #[admin_command] pub(super) async fn get_signing_keys( &self, - server_name: Option>, - notary: Option>, + server_name: Option, + notary: Option, query: bool, ) -> Result { let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); @@ -801,7 +800,7 @@ pub(super) async fn get_signing_keys( #[admin_command] pub(super) async fn get_verify_keys( &self, - server_name: Option>, + server_name: Option, ) -> Result { let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); @@ -824,7 +823,7 @@ pub(super) async fn get_verify_keys( #[admin_command] pub(super) async fn resolve_true_destination( &self, - server_name: Box, + server_name: OwnedServerName, no_cache: bool, ) -> Result { if !self.services.server.config.allow_federation { diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index db04ccf4..9b86f18c 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -3,7 +3,7 @@ pub(crate) mod tester; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName}; +use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName}; use service::rooms::short::{ShortEventId, ShortRoomId}; use self::tester::TesterCommand; @@ -20,7 +20,7 @@ pub(super) enum DebugCommand { /// - Get the auth_chain of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, }, /// - Parse and print a PDU from a JSON @@ -35,7 +35,7 @@ pub(super) enum DebugCommand { /// - Retrieve and print a 
PDU by EventID from the conduwuit database GetPdu { /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, }, /// - Retrieve and print a PDU by PduId from the conduwuit database @@ -52,11 +52,11 @@ pub(super) enum DebugCommand { /// (following normal event auth rules, handles it as an incoming PDU). GetRemotePdu { /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, /// Argument for us to attempt to fetch the event from the /// specified remote server. - server: Box, + server: OwnedServerName, }, /// - Same as `get-remote-pdu` but accepts a codeblock newline delimited @@ -64,7 +64,7 @@ pub(super) enum DebugCommand { GetRemotePduList { /// Argument for us to attempt to fetch all the events from the /// specified remote server. - server: Box, + server: OwnedServerName, /// If set, ignores errors, else stops at the first error/failure. #[arg(short, long)] @@ -88,10 +88,10 @@ pub(super) enum DebugCommand { /// - Get and display signing keys from local cache or remote server. GetSigningKeys { - server_name: Option>, + server_name: Option, #[arg(long)] - notary: Option>, + notary: Option, #[arg(short, long)] query: bool, @@ -99,14 +99,14 @@ pub(super) enum DebugCommand { /// - Get and display signing keys from local cache or remote server. GetVerifyKeys { - server_name: Option>, + server_name: Option, }, /// - Sends a federation request to the remote server's /// `/_matrix/federation/v1/version` endpoint and measures the latency it /// took for the server to respond Ping { - server: Box, + server: OwnedServerName, }, /// - Forces device lists for all local and remote users to be updated (as @@ -141,21 +141,21 @@ pub(super) enum DebugCommand { /// /// This re-verifies a PDU existing in the database found by ID. 
VerifyPdu { - event_id: Box, + event_id: OwnedEventId, }, /// - Prints the very first PDU in the specified room (typically /// m.room.create) FirstPduInRoom { /// The room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Prints the latest ("last") PDU in the specified room (typically a /// message) LatestPduInRoom { /// The room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Forcefully replaces the room state of our local copy of the specified @@ -174,9 +174,9 @@ pub(super) enum DebugCommand { /// `/_matrix/federation/v1/state/{roomId}`. ForceSetRoomStateFromServer { /// The impacted room ID - room_id: Box, + room_id: OwnedRoomId, /// The server we will use to query the room state for - server_name: Box, + server_name: OwnedServerName, }, /// - Runs a server name through conduwuit's true destination resolution @@ -184,7 +184,7 @@ pub(super) enum DebugCommand { /// /// Useful for debugging well-known issues ResolveTrueDestination { - server_name: Box, + server_name: OwnedServerName, #[arg(short, long)] no_cache: bool, diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 240ffa6a..12ed9c25 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -3,19 +3,19 @@ use std::fmt::Write; use conduwuit::Result; use futures::StreamExt; use ruma::{ - OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent, + OwnedRoomId, OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent, }; use crate::{admin_command, get_room_info}; #[admin_command] -pub(super) async fn disable_room(&self, room_id: Box) -> Result { +pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, true); Ok(RoomMessageEventContent::text_plain("Room disabled.")) } #[admin_command] -pub(super) async fn enable_room(&self, room_id: Box) -> Result { +pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { 
self.services.rooms.metadata.disable_room(&room_id, false); Ok(RoomMessageEventContent::text_plain("Room enabled.")) } @@ -42,7 +42,7 @@ pub(super) async fn incoming_federation(&self) -> Result, + server_name: OwnedServerName, ) -> Result { let response = self .services @@ -90,7 +90,7 @@ pub(super) async fn fetch_support_well_known( #[admin_command] pub(super) async fn remote_user_in_rooms( &self, - user_id: Box, + user_id: OwnedUserId, ) -> Result { if user_id.server_name() == self.services.server.name { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/admin/federation/mod.rs b/src/admin/federation/mod.rs index 3adfd459..2c539adc 100644 --- a/src/admin/federation/mod.rs +++ b/src/admin/federation/mod.rs @@ -2,7 +2,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{RoomId, ServerName, UserId}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::admin_command_dispatch; @@ -14,12 +14,12 @@ pub(super) enum FederationCommand { /// - Disables incoming federation handling for a room. DisableRoom { - room_id: Box, + room_id: OwnedRoomId, }, /// - Enables incoming federation handling for a room again. EnableRoom { - room_id: Box, + room_id: OwnedRoomId, }, /// - Fetch `/.well-known/matrix/support` from the specified server @@ -32,11 +32,11 @@ pub(super) enum FederationCommand { /// moderation, and security inquiries. This command provides a way to /// easily fetch that information. 
FetchSupportWellKnown { - server_name: Box, + server_name: OwnedServerName, }, /// - Lists all the rooms we share/track with the specified *remote* user RemoteUserInRooms { - user_id: Box, + user_id: OwnedUserId, }, } diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index aeefa9f2..c8364969 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -5,7 +5,7 @@ use conduwuit::{ }; use conduwuit_service::media::Dim; use ruma::{ - EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, + Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName, events::room::message::RoomMessageEventContent, }; @@ -14,8 +14,8 @@ use crate::{admin_command, utils::parse_local_user_id}; #[admin_command] pub(super) async fn delete( &self, - mxc: Option>, - event_id: Option>, + mxc: Option, + event_id: Option, ) -> Result { if event_id.is_some() && mxc.is_some() { return Ok(RoomMessageEventContent::text_plain( @@ -282,7 +282,7 @@ pub(super) async fn delete_all_from_user( #[admin_command] pub(super) async fn delete_all_from_server( &self, - server_name: Box, + server_name: OwnedServerName, yes_i_want_to_delete_local_media: bool, ) -> Result { if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media { diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index 641834b2..d1e6cd3a 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -3,7 +3,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName}; +use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName}; use crate::admin_command_dispatch; @@ -15,12 +15,12 @@ pub(super) enum MediaCommand { Delete { /// The MXC URL to delete #[arg(long)] - mxc: Option>, + mxc: Option, /// - The message event ID which contains the media and thumbnail MXC /// URLs #[arg(long)] - event_id: Option>, + event_id: Option, }, /// - Deletes a codeblock list of MXC URLs from our database and on the @@ 
-57,7 +57,7 @@ pub(super) enum MediaCommand { /// - Deletes all remote media from the specified remote server. This will /// always ignore errors by default. DeleteAllFromServer { - server_name: Box, + server_name: OwnedServerName, /// Long argument to delete local media #[arg(long)] diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index b2bf5e6d..879aed16 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; @@ -12,30 +12,30 @@ pub(crate) enum AccountDataCommand { /// - Returns all changes to the account data that happened after `since`. ChangesSince { /// Full user ID - user_id: Box, + user_id: OwnedUserId, /// UNIX timestamp since (u64) since: u64, /// Optional room ID of the account data - room_id: Option>, + room_id: Option, }, /// - Searches the account data for a specific kind. 
AccountDataGet { /// Full user ID - user_id: Box, + user_id: OwnedUserId, /// Account data event type kind: String, /// Optional room ID of the account data - room_id: Option>, + room_id: Option, }, } #[admin_command] async fn changes_since( &self, - user_id: Box, + user_id: OwnedUserId, since: u64, - room_id: Option>, + room_id: Option, ) -> Result { let timer = tokio::time::Instant::now(); let results: Vec<_> = self @@ -54,9 +54,9 @@ async fn changes_since( #[admin_command] async fn account_data_get( &self, - user_id: Box, + user_id: OwnedUserId, kind: String, - room_id: Option>, + room_id: Option, ) -> Result { let timer = tokio::time::Instant::now(); let results = self diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index f9e1fd2c..93c76a7e 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -9,7 +9,7 @@ pub(crate) enum AppserviceCommand { /// - Gets the appservice registration info/details from the ID as a string GetRegistration { /// Appservice registration ID - appservice_id: Box, + appservice_id: String, }, /// - Gets all appservice registrations with their ID and registration info diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 1642f7cd..33810704 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::ServerName; +use ruma::OwnedServerName; use crate::Command; @@ -16,7 +16,7 @@ pub(crate) enum GlobalsCommand { /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. 
SigningKeysFor { - origin: Box, + origin: OwnedServerName, }, } diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 38272749..65164802 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::UserId; +use ruma::OwnedUserId; use crate::Command; @@ -11,7 +11,7 @@ pub(crate) enum PresenceCommand { /// - Returns the latest presence event for the given user. GetPresence { /// Full user ID - user_id: Box, + user_id: OwnedUserId, }, /// - Iterator of the most recent presence updates that happened after the diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 34edf4db..583c4999 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::UserId; +use ruma::OwnedUserId; use crate::Command; @@ -9,7 +9,7 @@ pub(crate) enum PusherCommand { /// - Returns all the pushers for the user. 
GetPushers { /// Full user ID - user_id: Box, + user_id: OwnedUserId, }, } diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs index 2d4d8104..f58f6717 100644 --- a/src/admin/query/room_alias.rs +++ b/src/admin/query/room_alias.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{RoomAliasId, RoomId}; +use ruma::{OwnedRoomAliasId, OwnedRoomId}; use crate::Command; @@ -10,13 +10,13 @@ use crate::Command; pub(crate) enum RoomAliasCommand { ResolveLocalAlias { /// Full room alias - alias: Box, + alias: OwnedRoomAliasId, }, /// - Iterator of all our local room aliases for the room ID LocalAliasesForRoom { /// Full room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Iterator of all our local aliases in our database with their room IDs diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 1de5c02d..7f5e2536 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,78 +1,80 @@ use clap::Subcommand; use conduwuit::{Error, Result}; use futures::StreamExt; -use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{ + OwnedRoomId, OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent, +}; use crate::Command; #[derive(Debug, Subcommand)] pub(crate) enum RoomStateCacheCommand { ServerInRoom { - server: Box, - room_id: Box, + server: OwnedServerName, + room_id: OwnedRoomId, }, RoomServers { - room_id: Box, + room_id: OwnedRoomId, }, ServerRooms { - server: Box, + server: OwnedServerName, }, RoomMembers { - room_id: Box, + room_id: OwnedRoomId, }, LocalUsersInRoom { - room_id: Box, + room_id: OwnedRoomId, }, ActiveLocalUsersInRoom { - room_id: Box, + room_id: OwnedRoomId, }, RoomJoinedCount { - room_id: Box, + room_id: OwnedRoomId, }, RoomInvitedCount { - room_id: Box, + room_id: OwnedRoomId, }, RoomUserOnceJoined { - room_id: Box, + room_id: OwnedRoomId, }, 
RoomMembersInvited { - room_id: Box, + room_id: OwnedRoomId, }, GetInviteCount { - room_id: Box, - user_id: Box, + room_id: OwnedRoomId, + user_id: OwnedUserId, }, GetLeftCount { - room_id: Box, - user_id: Box, + room_id: OwnedRoomId, + user_id: OwnedUserId, }, RoomsJoined { - user_id: Box, + user_id: OwnedUserId, }, RoomsLeft { - user_id: Box, + user_id: OwnedUserId, }, RoomsInvited { - user_id: Box, + user_id: OwnedUserId, }, InviteState { - user_id: Box, - room_id: Box, + user_id: OwnedUserId, + room_id: OwnedRoomId, }, } diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index a148f718..860bca4a 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent}; use service::sending::Destination; use crate::Command; @@ -27,9 +27,9 @@ pub(crate) enum SendingCommand { #[arg(short, long)] appservice_id: Option, #[arg(short, long)] - server_name: Option>, + server_name: Option, #[arg(short, long)] - user_id: Option>, + user_id: Option, #[arg(short, long)] push_key: Option, }, @@ -49,15 +49,15 @@ pub(crate) enum SendingCommand { #[arg(short, long)] appservice_id: Option, #[arg(short, long)] - server_name: Option>, + server_name: Option, #[arg(short, long)] - user_id: Option>, + user_id: Option, #[arg(short, long)] push_key: Option, }, GetLatestEduCount { - server_name: Box, + server_name: OwnedServerName, }, } @@ -120,7 +120,7 @@ pub(super) async fn reprocess( | (None, Some(server_name), None, None) => services .sending .db - .queued_requests(&Destination::Federation(server_name.into())), + .queued_requests(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { return Ok(RoomMessageEventContent::text_plain( @@ -132,7 +132,7 @@ 
pub(super) async fn reprocess( services .sending .db - .queued_requests(&Destination::Push(user_id.into(), push_key)) + .queued_requests(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { return Ok(RoomMessageEventContent::text_plain( @@ -190,7 +190,7 @@ pub(super) async fn reprocess( | (None, Some(server_name), None, None) => services .sending .db - .active_requests_for(&Destination::Federation(server_name.into())), + .active_requests_for(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { return Ok(RoomMessageEventContent::text_plain( @@ -202,7 +202,7 @@ pub(super) async fn reprocess( services .sending .db - .active_requests_for(&Destination::Push(user_id.into(), push_key)) + .active_requests_for(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index ab21170c..4cfff2e5 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -3,9 +3,7 @@ use std::fmt::Write; use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{ - OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedRoomAliasId, OwnedRoomId, events::room::message::RoomMessageEventContent}; use crate::{Command, escape_html}; @@ -18,7 +16,7 @@ pub(crate) enum RoomAliasCommand { force: bool, /// The room id to set the alias on - room_id: Box, + room_id: OwnedRoomId, /// The alias localpart to use (`alias`, not `#alias:servername.tld`) room_alias_localpart: String, @@ -40,7 +38,7 @@ pub(crate) enum RoomAliasCommand { /// - List aliases currently being used List { /// If set, only list the aliases for this room - room_id: Option>, + room_id: Option, }, } diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index ca036825..179131e4 100644 --- 
a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{RoomId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; use crate::{Command, PAGE_SIZE, get_room_info}; @@ -10,13 +10,13 @@ pub(crate) enum RoomDirectoryCommand { /// - Publish a room to the room directory Publish { /// The room id of the room to publish - room_id: Box, + room_id: OwnedRoomId, }, /// - Unpublish a room to the room directory Unpublish { /// The room id of the room to unpublish - room_id: Box, + room_id: OwnedRoomId, }, /// - List rooms that are published diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index a39728fe..35a92b6a 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{RoomId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; @@ -10,7 +10,7 @@ use crate::{admin_command, admin_command_dispatch}; pub(crate) enum RoomInfoCommand { /// - List joined members in a room ListJoinedMembers { - room_id: Box, + room_id: OwnedRoomId, /// Lists only our local users in the specified room #[arg(long)] @@ -22,14 +22,14 @@ pub(crate) enum RoomInfoCommand { /// Room topics can be huge, so this is in its /// own separate command ViewRoomTopic { - room_id: Box, + room_id: OwnedRoomId, }, } #[admin_command] async fn list_joined_members( &self, - room_id: Box, + room_id: OwnedRoomId, local_only: bool, ) -> Result { let room_name = self @@ -79,7 +79,7 @@ async fn list_joined_members( } #[admin_command] -async fn view_room_topic(&self, room_id: Box) -> Result { +async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { let Ok(room_topic) = self .services .rooms 
diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index dd5ea627..41458622 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -7,7 +7,7 @@ use conduwuit::{ }; use futures::StreamExt; use ruma::{ - OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId, + OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId, events::room::message::RoomMessageEventContent, }; @@ -24,7 +24,7 @@ pub(crate) enum RoomModerationCommand { BanRoom { /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` - room: Box, + room: OwnedRoomOrAliasId, }, /// - Bans a list of rooms (room IDs and room aliases) from a newline @@ -36,7 +36,7 @@ pub(crate) enum RoomModerationCommand { UnbanRoom { /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` - room: Box, + room: OwnedRoomOrAliasId, }, /// - List of all rooms we have banned @@ -49,7 +49,7 @@ pub(crate) enum RoomModerationCommand { } #[admin_command] -async fn ban_room(&self, room: Box) -> Result { +async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { debug!("Got room alias or ID: {}", room); let admin_room_alias = &self.services.globals.admin_alias; @@ -363,7 +363,7 @@ async fn ban_list_of_rooms(&self) -> Result { } #[admin_command] -async fn unban_room(&self, room: Box) -> Result { +async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 45e550be..84795f9b 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -10,7 +10,7 @@ use conduwuit::{ use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ - EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, + OwnedEventId, 
OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId, events::{ RoomAccountDataEventType, StateEventType, room::{ @@ -802,7 +802,7 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result, + room_id: OwnedRoomId, tag: String, ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; @@ -840,7 +840,7 @@ pub(super) async fn put_room_tag( pub(super) async fn delete_room_tag( &self, user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; @@ -876,7 +876,7 @@ pub(super) async fn delete_room_tag( pub(super) async fn get_room_tags( &self, user_id: String, - room_id: Box, + room_id: OwnedRoomId, ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; @@ -898,7 +898,7 @@ pub(super) async fn get_room_tags( #[admin_command] pub(super) async fn redact_event( &self, - event_id: Box, + event_id: OwnedEventId, ) -> Result { let Ok(event) = self .services diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index 1494ea8f..e789376a 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -2,7 +2,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, OwnedRoomOrAliasId, RoomId}; +use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId}; use crate::admin_command_dispatch; @@ -102,21 +102,21 @@ pub(super) enum UserCommand { /// room's internal ID, and the tag name `m.server_notice`. 
PutRoomTag { user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, }, /// - Deletes the room tag for the specified user and room ID DeleteRoomTag { user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, }, /// - Gets all the room tags for the specified user and room ID GetRoomTags { user_id: String, - room_id: Box, + room_id: OwnedRoomId, }, /// - Attempts to forcefully redact the specified event ID from the sender @@ -124,7 +124,7 @@ pub(super) enum UserCommand { /// /// This is only valid for local users RedactEvent { - event_id: Box, + event_id: OwnedEventId, }, /// - Force joins a specified list of local users to join the specified From ecf20f7ebbcaeff2e5adedf4b7c45b6d9cbb49b7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 7 Apr 2025 03:28:51 +0000 Subject: [PATCH 0872/1248] improve appservice service async interfaces Signed-off-by: Jason Volk --- src/admin/query/appservice.rs | 3 +- src/service/appservice/mod.rs | 78 +++++++++++++++-------------------- 2 files changed, 36 insertions(+), 45 deletions(-) diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 93c76a7e..0359261a 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -1,5 +1,6 @@ use clap::Subcommand; use conduwuit::Result; +use futures::TryStreamExt; use crate::Command; @@ -31,7 +32,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_> }, | AppserviceCommand::All => { let timer = tokio::time::Instant::now(); - let results = services.appservice.all().await; + let results: Vec<_> = services.appservice.iter_db_ids().try_collect().await?; let query_time = timer.elapsed(); write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 50a60033..7be8a471 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,20 +1,20 @@ mod namespace_regex; mod 
registration_info; -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc}; use async_trait::async_trait; -use conduwuit::{Result, err, utils::stream::TryIgnore}; +use conduwuit::{Result, err, utils::stream::IterStream}; use database::Map; -use futures::{Future, StreamExt, TryStreamExt}; +use futures::{Future, FutureExt, Stream, TryStreamExt}; use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration}; -use tokio::sync::RwLock; +use tokio::sync::{RwLock, RwLockReadGuard}; pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; use crate::{Dep, sending}; pub struct Service { - registration_info: RwLock>, + registration_info: RwLock, services: Services, db: Data, } @@ -27,6 +27,8 @@ struct Data { id_appserviceregistrations: Arc, } +type Registrations = BTreeMap; + #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -41,19 +43,18 @@ impl crate::Service for Service { })) } - async fn worker(self: Arc) -> Result<()> { + async fn worker(self: Arc) -> Result { // Inserting registrations into cache - for appservice in self.iter_db_ids().await? 
{ - self.registration_info.write().await.insert( - appservice.0, - appservice - .1 - .try_into() - .expect("Should be validated on registration"), - ); - } + self.iter_db_ids() + .try_for_each(async |appservice| { + self.registration_info + .write() + .await + .insert(appservice.0, appservice.1.try_into()?); - Ok(()) + Ok(()) + }) + .await } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } @@ -84,7 +85,7 @@ impl Service { /// # Arguments /// /// * `service_name` - the registration ID of the appservice - pub async fn unregister_appservice(&self, appservice_id: &str) -> Result<()> { + pub async fn unregister_appservice(&self, appservice_id: &str) -> Result { // removes the appservice registration info self.registration_info .write() @@ -112,15 +113,6 @@ impl Service { .map(|info| info.registration) } - pub async fn iter_ids(&self) -> Vec { - self.registration_info - .read() - .await - .keys() - .cloned() - .collect() - } - pub async fn find_from_token(&self, token: &str) -> Option { self.read() .await @@ -156,15 +148,22 @@ impl Service { .any(|info| info.rooms.is_exclusive_match(room_id.as_str())) } - pub fn read( - &self, - ) -> impl Future>> - { - self.registration_info.read() + pub fn iter_ids(&self) -> impl Stream + Send { + self.read() + .map(|info| info.keys().cloned().collect::>()) + .map(IntoIterator::into_iter) + .map(IterStream::stream) + .flatten_stream() } - #[inline] - pub async fn all(&self) -> Result> { self.iter_db_ids().await } + pub fn iter_db_ids(&self) -> impl Stream> + Send { + self.db + .id_appserviceregistrations + .keys() + .and_then(move |id: &str| async move { + Ok((id.to_owned(), self.get_db_registration(id).await?)) + }) + } pub async fn get_db_registration(&self, id: &str) -> Result { self.db @@ -175,16 +174,7 @@ impl Service { .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) } - async fn iter_db_ids(&self) -> Result> { - self.db - .id_appserviceregistrations - .keys() - .ignore_err() 
- .then(|id: String| async move { - let reg = self.get_db_registration(&id).await?; - Ok((id, reg)) - }) - .try_collect() - .await + pub fn read(&self) -> impl Future> + Send { + self.registration_info.read() } } From fb3020d8da5b221cba042053a65ce67034622973 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 9 Apr 2025 01:50:13 +0000 Subject: [PATCH 0873/1248] misc async optimizations; macro reformatting Signed-off-by: Jason Volk --- src/api/client/membership.rs | 139 ++++++++++++++++++----------------- 1 file changed, 72 insertions(+), 67 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1eeacf83..18a1c741 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,6 +1,6 @@ use std::{ borrow::Borrow, - collections::{BTreeMap, HashMap, HashSet}, + collections::{HashMap, HashSet}, iter::once, net::IpAddr, sync::Arc, @@ -9,7 +9,7 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Result, at, debug, debug_info, debug_warn, err, error, info, + Err, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, is_matching, matrix::{ StateKey, pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, @@ -17,7 +17,12 @@ use conduwuit::{ }, result::{FlatOk, NotFound}, trace, - utils::{self, IterStream, ReadyExt, shuffle}, + utils::{ + self, FutureBoolExt, + future::ReadyEqExt, + shuffle, + stream::{BroadbandExt, IterStream, ReadyExt}, + }, warn, }; use conduwuit_service::{ @@ -28,7 +33,7 @@ use conduwuit_service::{ state_compressor::{CompressedState, HashSetCompressStateEvent}, }, }; -use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; +use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, @@ -52,7 +57,6 @@ use ruma::{ room::{ 
join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, }, }, }; @@ -81,7 +85,7 @@ async fn banned_room_check( || services .config .forbidden_remote_server_names - .is_match(room_id.server_name().unwrap().host()) + .is_match(room_id.server_name().expect("legacy room mxid").host()) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ @@ -96,12 +100,11 @@ async fn banned_room_check( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "Automatically deactivating user {user_id} due to attempted banned \ room join from IP {client_ip}" - ))) - .await - .ok(); + )) + .await; } let all_joined_rooms: Vec = services @@ -136,12 +139,11 @@ async fn banned_room_check( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "Automatically deactivating user {user_id} due to attempted banned \ room join from IP {client_ip}" - ))) - .await - .ok(); + )) + .await; } let all_joined_rooms: Vec = services @@ -366,10 +368,10 @@ pub(crate) async fn knock_room_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let body = body.body; + let sender_user = body.sender_user(); + let body = &body.body; - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { | Ok(room_id) => { banned_room_check( &services, @@ -493,7 +495,7 @@ pub(crate) async fn invite_user_route( let sender_user = body.sender_user(); if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { - info!( + debug_error!( "User {sender_user} is not an admin and attempted to send an 
invite to room {}", &body.room_id ); @@ -722,12 +724,10 @@ pub(crate) async fn forget_room_route( let joined = services.rooms.state_cache.is_joined(user_id, room_id); let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); - let left = services.rooms.state_cache.is_left(user_id, room_id); let invited = services.rooms.state_cache.is_invited(user_id, room_id); - let (joined, knocked, left, invited) = join4(joined, knocked, left, invited).await; - - if joined || knocked || invited { + pin_mut!(joined, knocked, invited); + if joined.or(knocked).or(invited).await { return Err!(Request(Unknown("You must leave the room before forgetting it"))); } @@ -741,11 +741,11 @@ pub(crate) async fn forget_room_route( return Err!(Request(Unknown("No membership event was found, room was never joined"))); } - if left - || membership.is_ok_and(|member| { - member.membership == MembershipState::Leave - || member.membership == MembershipState::Ban - }) { + let non_membership = membership + .map(|member| member.membership) + .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban)); + + if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await { services.rooms.state_cache.forget(room_id, user_id); } @@ -866,32 +866,32 @@ pub(crate) async fn joined_members_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user(); - if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) + .user_can_see_state_events(body.sender_user(), &body.room_id) .await { return Err!(Request(Forbidden("You don't have permission to view this room."))); } - let joined: BTreeMap = services - .rooms - .state_cache - .room_members(&body.room_id) - .map(ToOwned::to_owned) - .then(|user| async move { - (user.clone(), RoomMember { - display_name: services.users.displayname(&user).await.ok(), - avatar_url: services.users.avatar_url(&user).await.ok(), - }) - }) - .collect() - .await; + 
Ok(joined_members::v3::Response { + joined: services + .rooms + .state_cache + .room_members(&body.room_id) + .map(ToOwned::to_owned) + .broad_then(|user_id| async move { + let member = RoomMember { + display_name: services.users.displayname(&user_id).await.ok(), + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }; - Ok(joined_members::v3::Response { joined }) + (user_id, member) + }) + .collect() + .await, + }) } pub async fn join_room_by_id_helper( @@ -1118,9 +1118,10 @@ async fn join_room_by_id_helper_remote( })?; if signed_event_id != event_id { - return Err!(Request(BadJson( - warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") - ))); + return Err!(Request(BadJson(warn!( + %signed_event_id, %event_id, + "Server {remote_server} sent event with wrong event ID" + )))); } match signed_value["signatures"] @@ -1696,19 +1697,18 @@ pub(crate) async fn invite_helper( })?; if pdu.event_id != event_id { - return Err!(Request(BadJson( - warn!(%pdu.event_id, %event_id, "Server {} sent event with wrong event ID", user_id.server_name()) - ))); + return Err!(Request(BadJson(warn!( + %pdu.event_id, %event_id, + "Server {} sent event with wrong event ID", + user_id.server_name() + )))); } - let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, - ) - .expect("CanonicalJson is valid json value"), - ) + let origin: OwnedServerName = serde_json::from_value(serde_json::to_value( + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, + )?) 
.map_err(|e| { err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) })?; @@ -1818,9 +1818,11 @@ pub async fn leave_room( blurhash: None, }; - if services.rooms.metadata.is_banned(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - { + let is_banned = services.rooms.metadata.is_banned(room_id); + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + pin_mut!(is_banned, is_disabled); + if is_banned.or(is_disabled).await { // the room is banned/disabled, the room must be rejected locally since we // cant/dont want to federate with this server services @@ -1840,18 +1842,21 @@ pub async fn leave_room( return Ok(()); } - // Ask a remote server if we don't have this room and are not knocking on it - if !services + let dont_have_room = services .rooms .state_cache .server_in_room(services.globals.server_name(), room_id) - .await && !services + .eq(&false); + + let not_knocked = services .rooms .state_cache .is_knocked(user_id, room_id) - .await - { - if let Err(e) = remote_leave_room(services, user_id, room_id).await { + .eq(&false); + + // Ask a remote server if we don't have this room and are not knocking on it + if dont_have_room.and(not_knocked).await { + if let Err(e) = remote_leave_room(services, user_id, room_id).boxed().await { warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); // Don't tell the client about this error } From 4f8fec7e5a5631ef7b679a00838219e7926040dd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 23:41:58 +0000 Subject: [PATCH 0874/1248] replace admin command branches returning RoomMessageEventContent rename admin Command back to Context Signed-off-by: Jason Volk --- src/admin/admin.rs | 24 +- src/admin/appservice/commands.rs | 82 +++-- src/admin/check/commands.rs | 14 +- src/admin/{command.rs => context.rs} | 14 +- src/admin/debug/commands.rs | 481 +++++++++++---------------- src/admin/debug/tester.rs | 17 +- src/admin/federation/commands.rs | 103 
+++--- src/admin/media/commands.rs | 119 +++---- src/admin/mod.rs | 8 +- src/admin/processor.rs | 8 +- src/admin/query/account_data.rs | 16 +- src/admin/query/appservice.rs | 4 +- src/admin/query/globals.rs | 4 +- src/admin/query/presence.rs | 4 +- src/admin/query/pusher.rs | 4 +- src/admin/query/raw.rs | 120 ++----- src/admin/query/resolver.rs | 13 +- src/admin/query/room_alias.rs | 4 +- src/admin/query/room_state_cache.rs | 146 ++++---- src/admin/query/room_timeline.rs | 10 +- src/admin/query/sending.rs | 90 +++-- src/admin/query/short.rs | 16 +- src/admin/query/users.rs | 175 ++++------ src/admin/room/alias.rs | 92 ++--- src/admin/room/commands.rs | 33 +- src/admin/room/directory.rs | 43 +-- src/admin/room/info.rs | 38 +-- src/admin/room/moderation.rs | 91 +++-- src/admin/server/commands.rs | 73 ++-- src/admin/user/commands.rs | 344 ++++++++----------- src/admin/utils.rs | 2 + src/macros/admin.rs | 17 +- 32 files changed, 903 insertions(+), 1306 deletions(-) rename src/admin/{command.rs => context.rs} (67%) diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 9e010a59..0d636c72 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -2,7 +2,7 @@ use clap::Parser; use conduwuit::Result; use crate::{ - appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command, + appservice, appservice::AppserviceCommand, check, check::CheckCommand, context::Context, debug, debug::DebugCommand, federation, federation::FederationCommand, media, media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server, server::ServerCommand, user, user::UserCommand, @@ -49,20 +49,18 @@ pub(super) enum AdminCommand { } #[tracing::instrument(skip_all, name = "command")] -pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result { use AdminCommand::*; match command { - | Appservices(command) => appservice::process(command, 
context).await?, - | Media(command) => media::process(command, context).await?, - | Users(command) => user::process(command, context).await?, - | Rooms(command) => room::process(command, context).await?, - | Federation(command) => federation::process(command, context).await?, - | Server(command) => server::process(command, context).await?, - | Debug(command) => debug::process(command, context).await?, - | Query(command) => query::process(command, context).await?, - | Check(command) => check::process(command, context).await?, + | Appservices(command) => appservice::process(command, context).await, + | Media(command) => media::process(command, context).await, + | Users(command) => user::process(command, context).await, + | Rooms(command) => room::process(command, context).await, + | Federation(command) => federation::process(command, context).await, + | Server(command) => server::process(command, context).await, + | Debug(command) => debug::process(command, context).await, + | Query(command) => query::process(command, context).await, + | Check(command) => check::process(command, context).await, } - - Ok(()) } diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 88f28431..3575e067 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -1,84 +1,80 @@ -use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; +use conduwuit::{Err, Result, checked}; +use futures::{FutureExt, StreamExt, TryFutureExt}; -use crate::{Result, admin_command}; +use crate::admin_command; #[admin_command] -pub(super) async fn register(&self) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" +pub(super) async fn register(&self) -> Result { + let body = &self.body; + let body_len = self.body.len(); + if body_len < 2 + || !body[0].trim().starts_with("```") + || body.last().unwrap_or(&"").trim() != "```" { - return 
Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details."); } - let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config_body); + let range = 1..checked!(body_len - 1)?; + let appservice_config_body = body[range].join("\n"); + let parsed_config = serde_yaml::from_str(&appservice_config_body); match parsed_config { + | Err(e) => return Err!("Could not parse appservice config as YAML: {e}"), | Ok(registration) => match self .services .appservice .register_appservice(®istration, &appservice_config_body) .await + .map(|()| registration.id) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {}", - registration.id - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {e}" - ))), + | Err(e) => return Err!("Failed to register appservice: {e}"), + | Ok(id) => write!(self, "Appservice registered with ID: {id}"), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config as YAML: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn unregister( - &self, - appservice_identifier: String, -) -> Result { +pub(super) async fn unregister(&self, appservice_identifier: String) -> Result { match self .services .appservice .unregister_appservice(&appservice_identifier) .await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {e}" - ))), + | Err(e) => return Err!("Failed to unregister appservice: {e}"), + | Ok(()) => write!(self, "Appservice unregistered."), } + .await } #[admin_command] -pub(super) async fn show_appservice_config( - &self, - appservice_identifier: String, -) -> 
Result { +pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result { match self .services .appservice .get_registration(&appservice_identifier) .await { + | None => return Err!("Appservice does not exist."), | Some(config) => { - let config_str = serde_yaml::to_string(&config) - .expect("config should've been validated on register"); - let output = - format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",); - Ok(RoomMessageEventContent::notice_markdown(output)) + let config_str = serde_yaml::to_string(&config)?; + write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```") }, - | None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), } + .await } #[admin_command] -pub(super) async fn list_registered(&self) -> Result { - let appservices = self.services.appservice.iter_ids().await; - let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", ")); - Ok(RoomMessageEventContent::text_plain(output)) +pub(super) async fn list_registered(&self) -> Result { + self.services + .appservice + .iter_ids() + .collect() + .map(Ok) + .and_then(|appservices: Vec<_>| { + let len = appservices.len(); + let list = appservices.join(", "); + write!(self, "Appservices ({len}): {list}") + }) + .await } diff --git a/src/admin/check/commands.rs b/src/admin/check/commands.rs index 7e27362f..1ffc3ae5 100644 --- a/src/admin/check/commands.rs +++ b/src/admin/check/commands.rs @@ -1,15 +1,14 @@ use conduwuit::Result; use conduwuit_macros::implement; use futures::StreamExt; -use ruma::events::room::message::RoomMessageEventContent; -use crate::Command; +use crate::Context; /// Uses the iterator in `src/database/key_value/users.rs` to iterator over /// every user in our database (remote and local). 
Reports total count, any /// errors if there were any, etc -#[implement(Command, params = "<'_>")] -pub(super) async fn check_all_users(&self) -> Result { +#[implement(Context, params = "<'_>")] +pub(super) async fn check_all_users(&self) -> Result { let timer = tokio::time::Instant::now(); let users = self.services.users.iter().collect::>().await; let query_time = timer.elapsed(); @@ -18,11 +17,10 @@ pub(super) async fn check_all_users(&self) -> Result { let err_count = users.iter().filter(|_user| false).count(); let ok_count = users.iter().filter(|_user| true).count(); - let message = format!( + self.write_str(&format!( "Database query completed in {query_time:?}:\n\n```\nTotal entries: \ {total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \ {ok_count:?}\n```" - ); - - Ok(RoomMessageEventContent::notice_markdown(message)) + )) + .await } diff --git a/src/admin/command.rs b/src/admin/context.rs similarity index 67% rename from src/admin/command.rs rename to src/admin/context.rs index 5df980d6..270537be 100644 --- a/src/admin/command.rs +++ b/src/admin/context.rs @@ -3,13 +3,13 @@ use std::{fmt, time::SystemTime}; use conduwuit::Result; use conduwuit_service::Services; use futures::{ - Future, FutureExt, + Future, FutureExt, TryFutureExt, io::{AsyncWriteExt, BufWriter}, lock::Mutex, }; use ruma::EventId; -pub(crate) struct Command<'a> { +pub(crate) struct Context<'a> { pub(crate) services: &'a Services, pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, @@ -17,14 +17,14 @@ pub(crate) struct Command<'a> { pub(crate) output: Mutex>>, } -impl Command<'_> { +impl Context<'_> { pub(crate) fn write_fmt( &self, arguments: fmt::Arguments<'_>, ) -> impl Future + Send + '_ + use<'_> { let buf = format!("{arguments}"); - self.output.lock().then(|mut output| async move { - output.write_all(buf.as_bytes()).await.map_err(Into::into) + self.output.lock().then(async move |mut output| { + 
output.write_all(buf.as_bytes()).map_err(Into::into).await }) } @@ -32,8 +32,8 @@ impl Command<'_> { &'a self, s: &'a str, ) -> impl Future + Send + 'a { - self.output.lock().then(move |mut output| async move { - output.write_all(s.as_bytes()).await.map_err(Into::into) + self.output.lock().then(async move |mut output| { + output.write_all(s.as_bytes()).map_err(Into::into).await }) } } diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 6d0e375a..d0debc2a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,7 @@ use std::{ }; use conduwuit::{ - Error, Result, debug_error, err, info, + Err, Result, debug_error, err, info, matrix::pdu::{PduEvent, PduId, RawPduId}, trace, utils, utils::{ @@ -19,7 +19,7 @@ use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, - api::federation::event::get_room_state, events::room::message::RoomMessageEventContent, + api::federation::event::get_room_state, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -30,28 +30,24 @@ use tracing_subscriber::EnvFilter; use crate::admin_command; #[admin_command] -pub(super) async fn echo(&self, message: Vec) -> Result { +pub(super) async fn echo(&self, message: Vec) -> Result { let message = message.join(" "); - - Ok(RoomMessageEventContent::notice_plain(message)) + self.write_str(&message).await } #[admin_command] -pub(super) async fn get_auth_chain( - &self, - event_id: OwnedEventId, -) -> Result { +pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result { let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else { - return Ok(RoomMessageEventContent::notice_plain("Event not found.")); + return Err!("Event not found."); }; let room_id_str = event .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event 
in database"))?; + .and_then(CanonicalJsonValue::as_str) + .ok_or_else(|| err!(Database("Invalid event in database")))?; let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + .map_err(|_| err!(Database("Invalid room id field in event in database")))?; let start = Instant::now(); let count = self @@ -64,51 +60,39 @@ pub(super) async fn get_auth_chain( .await; let elapsed = start.elapsed(); - Ok(RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {count} in {elapsed:?}" - ))) + let out = format!("Loaded auth chain with length {count} in {elapsed:?}"); + + self.write_str(&out).await } #[admin_command] -pub(super) async fn parse_pdu(&self) -> Result { +pub(super) async fn parse_pdu(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&EMPTY).trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details."); } let string = self.body[1..self.body.len().saturating_sub(1)].join("\n"); match serde_json::from_str(&string) { + | Err(e) => return Err!("Invalid json in command body: {e}"), | Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + | Err(e) => return Err!("Could not parse PDU JSON: {e:?}"), | Ok(hash) => { let event_id = OwnedEventId::parse(format!("${hash}")); - - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - | Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\n{pdu:#?}" - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\nCould not parse event: {e}" - ))), + match serde_json::from_value::(serde_json::to_value(value)?) 
{ + | Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"), + | Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"), } }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {e:?}" - ))), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { +pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { let mut outlier = false; let mut pdu_json = self .services @@ -123,21 +107,18 @@ pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result return Err!("PDU not found locally."), | Ok(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::notice_markdown(format!( - "{}\n```json\n{}\n```", - if outlier { - "Outlier (Rejected / Soft Failed) PDU found in our database" - } else { - "PDU found in our database" - }, - json_text - ))) + let text = serde_json::to_string_pretty(&json)?; + let msg = if outlier { + "Outlier (Rejected / Soft Failed) PDU found in our database" + } else { + "PDU found in our database" + }; + write!(self, "{msg}\n```json\n{text}\n```",) }, - | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } + .await } #[admin_command] @@ -145,7 +126,7 @@ pub(super) async fn get_short_pdu( &self, shortroomid: ShortRoomId, shorteventid: ShortEventId, -) -> Result { +) -> Result { let pdu_id: RawPduId = PduId { shortroomid, shorteventid: shorteventid.into(), @@ -160,41 +141,33 @@ pub(super) async fn get_short_pdu( .await; match pdu_json { + | Err(_) => return Err!("PDU not found locally."), | Ok(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",))) + let json_text = 
serde_json::to_string_pretty(&json)?; + write!(self, "```json\n{json_text}\n```") }, - | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } + .await } #[admin_command] -pub(super) async fn get_remote_pdu_list( - &self, - server: OwnedServerName, - force: bool, -) -> Result { +pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver.",); } if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs from the database.", - )); + ); } if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&EMPTY).trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } let list = self @@ -208,18 +181,19 @@ pub(super) async fn get_remote_pdu_list( let mut failed_count: usize = 0; let mut success_count: usize = 0; - for pdu in list { + for event_id in list { if force { - match self.get_remote_pdu(Box::from(pdu), server.clone()).await { + match self + .get_remote_pdu(event_id.to_owned(), server.clone()) + .await + { | Err(e) => { failed_count = failed_count.saturating_add(1); self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to get remote PDU, ignoring error: {e}" - ))) - .await - .ok(); + .send_text(&format!("Failed to get remote PDU, ignoring error: {e}")) + .await; + warn!("Failed to get remote PDU, ignoring error: {e}"); }, | _ => { @@ -227,44 +201,48 @@ pub(super) async fn get_remote_pdu_list( }, } } else { - self.get_remote_pdu(Box::from(pdu), server.clone()).await?; + self.get_remote_pdu(event_id.to_owned(), server.clone()) + .await?; success_count = success_count.saturating_add(1); } } - Ok(RoomMessageEventContent::text_plain(format!( - "Fetched {success_count} remote PDUs successfully with {failed_count} failures" - ))) + let out = + format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures"); + + self.write_str(&out).await } #[admin_command] pub(super) async fn get_remote_pdu( &self, - event_id: Box, - server: Box, -) -> Result { + event_id: OwnedEventId, + server: OwnedServerName, +) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver."); } if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ fetching local PDUs.", - )); + ); } match self .services .sending .send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request { - event_id: event_id.clone().into(), + event_id: event_id.clone(), include_unredacted_content: None, }) .await { + | Err(e) => + return Err!( + "Remote server did not have PDU or failed sending request to remote server: {e}" + ), | Ok(response) => { let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { @@ -272,10 +250,9 @@ pub(super) async fn get_remote_pdu( "Requested event ID {event_id} from server but failed to convert from \ RawValue to CanonicalJsonObject (malformed event/response?): {e}" ); - Error::BadRequest( - ErrorKind::Unknown, - "Received response from server but failed to parse PDU", - ) + err!(Request(Unknown( + "Received response from server but failed to parse PDU" + ))) })?; trace!("Attempting to parse PDU: {:?}", &response.pdu); @@ -285,6 +262,7 @@ pub(super) async fn get_remote_pdu( .rooms .event_handler .parse_incoming_pdu(&response.pdu) + .boxed() .await; let (event_id, value, room_id) = match parsed_result { @@ -292,9 +270,7 @@ pub(super) async fn get_remote_pdu( | Err(e) => { warn!("Failed to parse PDU: {e}"); info!("Full PDU: {:?}", &response.pdu); - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse PDU remote server {server} sent us: {e}" - ))); + return Err!("Failed to parse PDU remote server {server} sent us: {e}"); }, }; @@ -306,30 +282,18 @@ pub(super) async fn get_remote_pdu( .rooms .timeline .backfill_pdu(&server, response.pdu) - .boxed() .await?; - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "{}\n```json\n{}\n```", - "Got PDU from specified server and handled as backfilled PDU successfully. 
\ - Event body:", - json_text - ))) + let text = serde_json::to_string_pretty(&json)?; + let msg = "Got PDU from specified server and handled as backfilled"; + write!(self, "{msg}. Event body:\n```json\n{text}\n```") }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Remote server did not have PDU or failed sending request to remote server: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn get_room_state( - &self, - room: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room).await?; let room_state: Vec<_> = self .services @@ -341,28 +305,24 @@ pub(super) async fn get_room_state( .await?; if room_state.is_empty() { - return Ok(RoomMessageEventContent::text_plain( - "Unable to find room state in our database (vector is empty)", - )); + return Err!("Unable to find room state in our database (vector is empty)",); } let json = serde_json::to_string_pretty(&room_state).map_err(|e| { - warn!("Failed converting room state vector in our database to pretty JSON: {e}"); - Error::bad_database( + err!(Database( "Failed to convert room state events to pretty JSON, possible invalid room state \ - events in our database", - ) + events in our database {e}", + )) })?; - Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json}\n```"))) + let out = format!("```json\n{json}\n```"); + self.write_str(&out).await } #[admin_command] -pub(super) async fn ping(&self, server: OwnedServerName) -> Result { +pub(super) async fn ping(&self, server: OwnedServerName) -> Result { if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves.", - )); + return Err!("Not allowed to send federation requests to ourselves."); } let timer = tokio::time::Instant::now(); @@ -376,35 +336,27 @@ pub(super) async fn ping(&self, server: OwnedServerName) -> Result { 
+ return Err!("Failed sending federation request to specified server:\n\n{e}"); + }, | Ok(response) => { let ping_time = timer.elapsed(); - let json_text_res = serde_json::to_string_pretty(&response.server); - if let Ok(json) = json_text_res { - return Ok(RoomMessageEventContent::notice_markdown(format!( - "Got response which took {ping_time:?} time:\n```json\n{json}\n```" - ))); - } + let out = if let Ok(json) = json_text_res { + format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```") + } else { + format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}") + }; - Ok(RoomMessageEventContent::text_plain(format!( - "Got non-JSON response which took {ping_time:?} time:\n{response:?}" - ))) - }, - | Err(e) => { - warn!( - "Failed sending federation request to specified server from ping debug command: \ - {e}" - ); - Ok(RoomMessageEventContent::text_plain(format!( - "Failed sending federation request to specified server:\n\n{e}", - ))) + write!(self, "{out}") }, } + .await } #[admin_command] -pub(super) async fn force_device_list_updates(&self) -> Result { +pub(super) async fn force_device_list_updates(&self) -> Result { // Force E2EE device list updates for all users self.services .users @@ -412,27 +364,17 @@ pub(super) async fn force_device_list_updates(&self) -> Result, - reset: bool, -) -> Result { +pub(super) async fn change_log_level(&self, filter: Option, reset: bool) -> Result { let handles = &["console"]; if reset { let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) { | Ok(s) => s, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Log level from config appears to be invalid now: {e}" - ))); - }, + | Err(e) => return Err!("Log level from config appears to be invalid now: {e}"), }; match self @@ -442,16 +384,12 @@ pub(super) async fn change_log_level( .reload .reload(&old_filter_layer, Some(handles)) { + | Err(e) => + return Err!("Failed to modify and reload the global 
tracing log level: {e}"), | Ok(()) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Successfully changed log level back to config value {}", - self.services.server.config.log - ))); - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); + let value = &self.services.server.config.log; + let out = format!("Successfully changed log level back to config value {value}"); + return self.write_str(&out).await; }, } } @@ -459,11 +397,7 @@ pub(super) async fn change_log_level( if let Some(filter) = filter { let new_filter_layer = match EnvFilter::try_new(filter) { | Ok(s) => s, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Invalid log level filter specified: {e}" - ))); - }, + | Err(e) => return Err!("Invalid log level filter specified: {e}"), }; match self @@ -473,90 +407,75 @@ pub(super) async fn change_log_level( .reload .reload(&new_filter_layer, Some(handles)) { - | Ok(()) => { - return Ok(RoomMessageEventContent::text_plain("Successfully changed log level")); - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); - }, + | Ok(()) => return self.write_str("Successfully changed log level").await, + | Err(e) => + return Err!("Failed to modify and reload the global tracing log level: {e}"), } } - Ok(RoomMessageEventContent::text_plain("No log level was specified.")) + Err!("No log level was specified.") } #[admin_command] -pub(super) async fn sign_json(&self) -> Result { +pub(super) async fn sign_json(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str(&string) { + | Err(e) => return Err!("Invalid json: {e}"), | Ok(mut value) => { - self.services - .server_keys - .sign_json(&mut value) - .expect("our request json is what ruma expects"); - let json_text = - serde_json::to_string_pretty(&value).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::text_plain(json_text)) + self.services.server_keys.sign_json(&mut value)?; + let json_text = serde_json::to_string_pretty(&value)?; + write!(self, "{json_text}") }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } + .await } #[admin_command] -pub(super) async fn verify_json(&self) -> Result { +pub(super) async fn verify_json(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str::(&string) { + | Err(e) => return Err!("Invalid json: {e}"), | Ok(value) => match self.services.server_keys.verify_json(&value, None).await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Signature verification failed: {e}" - ))), + | Err(e) => return Err!("Signature verification failed: {e}"), + | Ok(()) => write!(self, "Signature correct"), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } + .await } #[admin_command] -pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result { +pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result { + use ruma::signatures::Verified; + let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; event.remove("event_id"); let msg = match self.services.server_keys.verify_event(&event, None).await { - | Ok(ruma::signatures::Verified::Signatures) => - "signatures OK, but content hash failed (redaction).", - | Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.", | Err(e) => return Err(e), + | Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).", + | Ok(Verified::All) => "signatures and hashes OK.", }; - Ok(RoomMessageEventContent::notice_plain(msg)) + self.write_str(msg).await } #[admin_command] #[tracing::instrument(skip(self))] -pub(super) async fn first_pdu_in_room( - &self, - room_id: OwnedRoomId, -) -> Result { +pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { if !self .services .rooms @@ -564,9 +483,7 @@ pub(super) async fn first_pdu_in_room( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return 
Err!("We are not participating in the room / we don't know about the room ID.",); } let first_pdu = self @@ -575,17 +492,15 @@ pub(super) async fn first_pdu_in_room( .timeline .first_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the first PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the first PDU in database")))?; - Ok(RoomMessageEventContent::text_plain(format!("{first_pdu:?}"))) + let out = format!("{first_pdu:?}"); + self.write_str(&out).await } #[admin_command] #[tracing::instrument(skip(self))] -pub(super) async fn latest_pdu_in_room( - &self, - room_id: OwnedRoomId, -) -> Result { +pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { if !self .services .rooms @@ -593,9 +508,7 @@ pub(super) async fn latest_pdu_in_room( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID."); } let latest_pdu = self @@ -604,9 +517,10 @@ pub(super) async fn latest_pdu_in_room( .timeline .latest_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; - Ok(RoomMessageEventContent::text_plain(format!("{latest_pdu:?}"))) + let out = format!("{latest_pdu:?}"); + self.write_str(&out).await } #[admin_command] @@ -615,7 +529,7 @@ pub(super) async fn force_set_room_state_from_server( &self, room_id: OwnedRoomId, server_name: OwnedServerName, -) -> Result { +) -> Result { if !self .services .rooms @@ -623,9 +537,7 @@ pub(super) async fn force_set_room_state_from_server( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room 
ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID."); } let first_pdu = self @@ -634,7 +546,7 @@ pub(super) async fn force_set_room_state_from_server( .timeline .latest_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; let room_version = self.services.rooms.state.get_room_version(&room_id).await?; @@ -644,10 +556,9 @@ pub(super) async fn force_set_room_state_from_server( .services .sending .send_federation_request(&server_name, get_room_state::v1::Request { - room_id: room_id.clone().into(), + room_id: room_id.clone(), event_id: first_pdu.event_id.clone(), }) - .boxed() .await?; for pdu in remote_state_response.pdus.clone() { @@ -656,7 +567,6 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .parse_incoming_pdu(&pdu) - .boxed() .await { | Ok(t) => t, @@ -720,7 +630,6 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .resolve_state(&room_id, &room_version, state) - .boxed() .await?; info!("Forcing new room state"); @@ -736,6 +645,7 @@ pub(super) async fn force_set_room_state_from_server( .await?; let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await; + self.services .rooms .state @@ -752,11 +662,8 @@ pub(super) async fn force_set_room_state_from_server( .update_joined_count(&room_id) .await; - drop(state_lock); - - Ok(RoomMessageEventContent::text_plain( - "Successfully forced the room state from the requested remote server.", - )) + self.write_str("Successfully forced the room state from the requested remote server.") + .await } #[admin_command] @@ -765,8 +672,8 @@ pub(super) async fn get_signing_keys( server_name: Option, notary: Option, query: bool, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); +) -> Result { + let server_name = 
server_name.unwrap_or_else(|| self.services.server.name.clone()); if let Some(notary) = notary { let signing_keys = self @@ -775,9 +682,8 @@ pub(super) async fn get_signing_keys( .notary_request(¬ary, &server_name) .await?; - return Ok(RoomMessageEventContent::notice_markdown(format!( - "```rs\n{signing_keys:#?}\n```" - ))); + let out = format!("```rs\n{signing_keys:#?}\n```"); + return self.write_str(&out).await; } let signing_keys = if query { @@ -792,17 +698,13 @@ pub(super) async fn get_signing_keys( .await? }; - Ok(RoomMessageEventContent::notice_markdown(format!( - "```rs\n{signing_keys:#?}\n```" - ))) + let out = format!("```rs\n{signing_keys:#?}\n```"); + self.write_str(&out).await } #[admin_command] -pub(super) async fn get_verify_keys( - &self, - server_name: Option, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); +pub(super) async fn get_verify_keys(&self, server_name: Option) -> Result { + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); let keys = self .services @@ -817,7 +719,7 @@ pub(super) async fn get_verify_keys( writeln!(out, "| {key_id} | {key:?} |")?; } - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&out).await } #[admin_command] @@ -825,18 +727,16 @@ pub(super) async fn resolve_true_destination( &self, server_name: OwnedServerName, no_cache: bool, -) -> Result { +) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver.",); } if server_name == self.services.server.name { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ fetching local PDUs.", - )); + ); } let actual = self @@ -845,13 +745,12 @@ pub(super) async fn resolve_true_destination( .resolve_actual_dest(&server_name, !no_cache) .await?; - let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host,); - - Ok(RoomMessageEventContent::text_markdown(msg)) + let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host); + self.write_str(&msg).await } #[admin_command] -pub(super) async fn memory_stats(&self, opts: Option) -> Result { +pub(super) async fn memory_stats(&self, opts: Option) -> Result { const OPTS: &str = "abcdefghijklmnopqrstuvwxyz"; let opts: String = OPTS @@ -870,13 +769,12 @@ pub(super) async fn memory_stats(&self, opts: Option) -> Result Result { +pub(super) async fn runtime_metrics(&self) -> Result { let out = self.services.server.metrics.runtime_metrics().map_or_else( || "Runtime metrics are not available.".to_owned(), |metrics| { @@ -889,51 +787,51 @@ pub(super) async fn runtime_metrics(&self) -> Result { }, ); - Ok(RoomMessageEventContent::text_markdown(out)) + self.write_str(&out).await } #[cfg(not(tokio_unstable))] #[admin_command] -pub(super) async fn runtime_metrics(&self) -> Result { - Ok(RoomMessageEventContent::text_markdown( - "Runtime metrics require building with `tokio_unstable`.", - )) +pub(super) async fn runtime_metrics(&self) -> Result { + self.write_str("Runtime metrics require building with `tokio_unstable`.") + .await } #[cfg(tokio_unstable)] #[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { +pub(super) async fn runtime_interval(&self) -> Result { let out = self.services.server.metrics.runtime_interval().map_or_else( || "Runtime metrics are not available.".to_owned(), |metrics| format!("```rs\n{metrics:#?}\n```"), ); - Ok(RoomMessageEventContent::text_markdown(out)) + self.write_str(&out).await } #[cfg(not(tokio_unstable))] #[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { - 
Ok(RoomMessageEventContent::text_markdown( - "Runtime metrics require building with `tokio_unstable`.", - )) +pub(super) async fn runtime_interval(&self) -> Result { + self.write_str("Runtime metrics require building with `tokio_unstable`.") + .await } #[admin_command] -pub(super) async fn time(&self) -> Result { +pub(super) async fn time(&self) -> Result { let now = SystemTime::now(); - Ok(RoomMessageEventContent::text_markdown(utils::time::format(now, "%+"))) + let now = utils::time::format(now, "%+"); + + self.write_str(&now).await } #[admin_command] -pub(super) async fn list_dependencies(&self, names: bool) -> Result { +pub(super) async fn list_dependencies(&self, names: bool) -> Result { if names { let out = info::cargo::dependencies_names().join(" "); - return Ok(RoomMessageEventContent::notice_markdown(out)); + return self.write_str(&out).await; } - let deps = info::cargo::dependencies(); let mut out = String::new(); + let deps = info::cargo::dependencies(); writeln!(out, "| name | version | features |")?; writeln!(out, "| ---- | ------- | -------- |")?; for (name, dep) in deps { @@ -944,10 +842,11 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result, map: Option, -) -> Result { +) -> Result { let map_name = map.as_ref().map_or(EMPTY, String::as_str); let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); self.services @@ -967,17 +866,11 @@ pub(super) async fn database_stats( let res = map.property(&property).expect("invalid property"); writeln!(self, "##### {name}:\n```\n{}\n```", res.trim()) }) - .await?; - - Ok(RoomMessageEventContent::notice_plain("")) + .await } #[admin_command] -pub(super) async fn database_files( - &self, - map: Option, - level: Option, -) -> Result { +pub(super) async fn database_files(&self, map: Option, level: Option) -> Result { let mut files: Vec<_> = self.services.db.db.file_list().collect::>()?; files.sort_by_key(|f| f.name.clone()); @@ -1004,16 +897,12 @@ pub(super) async fn database_files( 
file.column_family_name, ) }) - .await?; - - Ok(RoomMessageEventContent::notice_plain("")) + .await } #[admin_command] -pub(super) async fn trim_memory(&self) -> Result { +pub(super) async fn trim_memory(&self) -> Result { conduwuit::alloc::trim(None)?; - writeln!(self, "done").await?; - - Ok(RoomMessageEventContent::notice_plain("")) + writeln!(self, "done").await } diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 005ee775..0a2b1516 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,7 +1,6 @@ -use conduwuit::Err; -use ruma::events::room::message::RoomMessageEventContent; +use conduwuit::{Err, Result}; -use crate::{Result, admin_command, admin_command_dispatch}; +use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] @@ -14,14 +13,14 @@ pub(crate) enum TesterCommand { #[rustfmt::skip] #[admin_command] -async fn panic(&self) -> Result { +async fn panic(&self) -> Result { panic!("panicked") } #[rustfmt::skip] #[admin_command] -async fn failure(&self) -> Result { +async fn failure(&self) -> Result { Err!("failed") } @@ -29,20 +28,20 @@ async fn failure(&self) -> Result { #[inline(never)] #[rustfmt::skip] #[admin_command] -async fn tester(&self) -> Result { +async fn tester(&self) -> Result { - Ok(RoomMessageEventContent::notice_plain("legacy")) + self.write_str("Ok").await } #[inline(never)] #[rustfmt::skip] #[admin_command] -async fn timer(&self) -> Result { +async fn timer(&self) -> Result { let started = std::time::Instant::now(); timed(self.body); let elapsed = started.elapsed(); - Ok(RoomMessageEventContent::notice_plain(format!("completed in {elapsed:#?}"))) + self.write_str(&format!("completed in {elapsed:#?}")).await } #[inline(never)] diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 12ed9c25..545dcbca 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -1,49 +1,48 @@ use 
std::fmt::Write; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{ - OwnedRoomId, OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::{admin_command, get_room_info}; #[admin_command] -pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { +pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, true); - Ok(RoomMessageEventContent::text_plain("Room disabled.")) + self.write_str("Room disabled.").await } #[admin_command] -pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { +pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, false); - Ok(RoomMessageEventContent::text_plain("Room enabled.")) + self.write_str("Room enabled.").await } #[admin_command] -pub(super) async fn incoming_federation(&self) -> Result { - let map = self - .services - .rooms - .event_handler - .federation_handletime - .read() - .expect("locked"); - let mut msg = format!("Handling {} incoming pdus:\n", map.len()); +pub(super) async fn incoming_federation(&self) -> Result { + let msg = { + let map = self + .services + .rooms + .event_handler + .federation_handletime + .read() + .expect("locked"); - for (r, (e, i)) in map.iter() { - let elapsed = i.elapsed(); - writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; - } + let mut msg = format!("Handling {} incoming pdus:\n", map.len()); + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; + } - Ok(RoomMessageEventContent::text_plain(&msg)) + msg + }; + + self.write_str(&msg).await } #[admin_command] -pub(super) async fn fetch_support_well_known( - &self, - server_name: OwnedServerName, -) -> Result { 
+pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName) -> Result { let response = self .services .client @@ -55,54 +54,44 @@ pub(super) async fn fetch_support_well_known( let text = response.text().await?; if text.is_empty() { - return Ok(RoomMessageEventContent::text_plain("Response text/body is empty.")); + return Err!("Response text/body is empty."); } if text.len() > 1500 { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Response text/body is over 1500 characters, assuming no support well-known.", - )); + ); } let json: serde_json::Value = match serde_json::from_str(&text) { | Ok(json) => json, | Err(_) => { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is not valid JSON.", - )); + return Err!("Response text/body is not valid JSON.",); }, }; let pretty_json: String = match serde_json::to_string_pretty(&json) { | Ok(json) => json, | Err(_) => { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is not valid JSON.", - )); + return Err!("Response text/body is not valid JSON.",); }, }; - Ok(RoomMessageEventContent::notice_markdown(format!( - "Got JSON response:\n\n```json\n{pretty_json}\n```" - ))) + self.write_str(&format!("Got JSON response:\n\n```json\n{pretty_json}\n```")) + .await } #[admin_command] -pub(super) async fn remote_user_in_rooms( - &self, - user_id: OwnedUserId, -) -> Result { +pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result { if user_id.server_name() == self.services.server.name { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "User belongs to our server, please use `list-joined-rooms` user admin command \ instead.", - )); + ); } if !self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain( - "Remote user does not exist in our database.", - )); + return Err!("Remote user does not exist in our database.",); } let mut rooms: Vec<(OwnedRoomId, u64, String)> = self @@ -115,21 
+104,19 @@ pub(super) async fn remote_user_in_rooms( .await; if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("User is not in any rooms.")); + return Err!("User is not in any rooms."); } rooms.sort_by_key(|r| r.1); rooms.reverse(); - let output = format!( - "Rooms {user_id} shares with us ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) - .collect::>() - .join("\n") - ); + let num = rooms.len(); + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::text_markdown(output)) + self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",)) + .await } diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index c8364969..7aed28db 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,13 +1,11 @@ use std::time::Duration; use conduwuit::{ - Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, + Err, Result, debug, debug_info, debug_warn, error, info, trace, + utils::time::parse_timepoint_ago, warn, }; use conduwuit_service::media::Dim; -use ruma::{ - Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName, - events::room::message::RoomMessageEventContent, -}; +use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName}; use crate::{admin_command, utils::parse_local_user_id}; @@ -16,11 +14,9 @@ pub(super) async fn delete( &self, mxc: Option, event_id: Option, -) -> Result { +) -> Result { if event_id.is_some() && mxc.is_some() { - return Ok(RoomMessageEventContent::text_plain( - "Please specify either an MXC or an event ID, not both.", - )); + return Err!("Please specify either an MXC or an event ID, not both.",); } if let Some(mxc) = mxc { @@ -30,9 +26,7 @@ pub(super) async fn delete( .delete(&mxc.as_str().try_into()?) 
.await?; - return Ok(RoomMessageEventContent::text_plain( - "Deleted the MXC from our database and on our filesystem.", - )); + return Err!("Deleted the MXC from our database and on our filesystem.",); } if let Some(event_id) = event_id { @@ -113,41 +107,36 @@ pub(super) async fn delete( let final_url = url.to_string().replace('"', ""); mxc_urls.push(final_url); } else { - info!( + warn!( "Found a URL in the event ID {event_id} but did not \ start with mxc://, ignoring" ); } } else { - info!("No \"url\" key in \"file\" key."); + error!("No \"url\" key in \"file\" key."); } } } } else { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Event ID does not have a \"content\" key or failed parsing the \ event ID JSON.", - )); + ); } } else { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Event ID does not have a \"content\" key, this is not a message or an \ event type that contains media.", - )); + ); } }, | _ => { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not exist or is not known to us.", - )); + return Err!("Event ID does not exist or is not known to us.",); }, } if mxc_urls.is_empty() { - info!("Parsed event ID {event_id} but did not contain any MXC URLs."); - return Ok(RoomMessageEventContent::text_plain( - "Parsed event ID but found no MXC URLs.", - )); + return Err!("Parsed event ID but found no MXC URLs.",); } let mut mxc_deletion_count: usize = 0; @@ -170,27 +159,27 @@ pub(super) async fn delete( } } - return Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from \ - event ID {event_id}." - ))); + return self + .write_str(&format!( + "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \ + from event ID {event_id}." + )) + .await; } - Ok(RoomMessageEventContent::text_plain( + Err!( "Please specify either an MXC using --mxc or an event ID using --event-id of the \ - message containing an image. 
See --help for details.", - )) + message containing an image. See --help for details." + ) } #[admin_command] -pub(super) async fn delete_list(&self) -> Result { +pub(super) async fn delete_list(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let mut failed_parsed_mxcs: usize = 0; @@ -204,7 +193,6 @@ pub(super) async fn delete_list(&self) -> Result { .try_into() .inspect_err(|e| { debug_warn!("Failed to parse user-provided MXC URI: {e}"); - failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1); }) .ok() @@ -227,10 +215,11 @@ pub(super) async fn delete_list(&self) -> Result { } } - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \ and the filesystem. 
{failed_parsed_mxcs} MXCs failed to be parsed from the database.", - ))) + )) + .await } #[admin_command] @@ -240,11 +229,9 @@ pub(super) async fn delete_past_remote_media( before: bool, after: bool, yes_i_want_to_delete_local_media: bool, -) -> Result { +) -> Result { if before && after { - return Ok(RoomMessageEventContent::text_plain( - "Please only pick one argument, --before or --after.", - )); + return Err!("Please only pick one argument, --before or --after.",); } assert!(!(before && after), "--before and --after should not be specified together"); @@ -260,23 +247,18 @@ pub(super) async fn delete_past_remote_media( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] -pub(super) async fn delete_all_from_user( - &self, - username: String, -) -> Result { +pub(super) async fn delete_all_from_user(&self, username: String) -> Result { let user_id = parse_local_user_id(self.services, &username)?; let deleted_count = self.services.media.delete_from_user(&user_id).await?; - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] @@ -284,11 +266,9 @@ pub(super) async fn delete_all_from_server( &self, server_name: OwnedServerName, yes_i_want_to_delete_local_media: bool, -) -> Result { +) -> Result { if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media { - return Ok(RoomMessageEventContent::text_plain( - "This command only works for remote media by default.", - )); + return Err!("This command only works for remote media by default.",); } let Ok(all_mxcs) = self @@ -298,9 +278,7 @@ pub(super) async fn delete_all_from_server( .await .inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}")) else { - return 
Ok(RoomMessageEventContent::text_plain( - "Failed to get MXC URIs from our database", - )); + return Err!("Failed to get MXC URIs from our database",); }; let mut deleted_count: usize = 0; @@ -336,17 +314,16 @@ pub(super) async fn delete_all_from_server( } } - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] -pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { +pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let metadata = self.services.media.get_metadata(&mxc).await; - Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```"))) + self.write_str(&format!("```\n{metadata:#?}\n```")).await } #[admin_command] @@ -355,7 +332,7 @@ pub(super) async fn get_remote_file( mxc: OwnedMxcUri, server: Option, timeout: u32, -) -> Result { +) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); let mut result = self @@ -368,8 +345,8 @@ pub(super) async fn get_remote_file( let len = result.content.as_ref().expect("content").len(); result.content.as_mut().expect("content").clear(); - let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) + .await } #[admin_command] @@ -380,7 +357,7 @@ pub(super) async fn get_remote_thumbnail( timeout: u32, width: u32, height: u32, -) -> Result { +) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); let dim = Dim::new(width, height, None); @@ -394,6 +371,6 @@ pub(super) async fn get_remote_thumbnail( let len = result.content.as_ref().expect("content").len(); result.content.as_mut().expect("content").clear(); - let out = 
format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) + .await } diff --git a/src/admin/mod.rs b/src/admin/mod.rs index 695155e8..1f777fa9 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -4,7 +4,7 @@ #![allow(clippy::too_many_arguments)] pub(crate) mod admin; -pub(crate) mod command; +pub(crate) mod context; pub(crate) mod processor; mod tests; pub(crate) mod utils; @@ -23,13 +23,9 @@ extern crate conduwuit_api as api; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::Result; pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch}; -pub(crate) use crate::{ - command::Command, - utils::{escape_html, get_room_info}, -}; +pub(crate) use crate::{context::Context, utils::get_room_info}; pub(crate) const PAGE_SIZE: usize = 100; diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 53a15098..8282a846 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -33,7 +33,7 @@ use service::{ use tracing::Level; use tracing_subscriber::{EnvFilter, filter::LevelFilter}; -use crate::{Command, admin, admin::AdminCommand}; +use crate::{admin, admin::AdminCommand, context::Context}; #[must_use] pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } @@ -58,7 +58,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce | Ok(parsed) => parsed, }; - let context = Command { + let context = Context { services: &services, body: &body, timer: SystemTime::now(), @@ -103,7 +103,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { /// Parse and process a message from the admin room async fn process( - context: &Command<'_>, + context: &Context<'_>, command: AdminCommand, args: &[String], ) -> (Result, String) { @@ -132,7 +132,7 @@ async 
fn process( (result, output) } -fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { +fn capture_create(context: &Context<'_>) -> (Arc, Arc>) { let env_config = &context.services.server.config.admin_log_capture; let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| { warn!("admin_log_capture filter invalid: {e:?}"); diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index 879aed16..228d2120 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, OwnedUserId}; use crate::{admin_command, admin_command_dispatch}; @@ -36,7 +36,7 @@ async fn changes_since( user_id: OwnedUserId, since: u64, room_id: Option, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let results: Vec<_> = self .services @@ -46,9 +46,8 @@ async fn changes_since( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await } #[admin_command] @@ -57,7 +56,7 @@ async fn account_data_get( user_id: OwnedUserId, kind: String, room_id: Option, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let results = self .services @@ -66,7 +65,6 @@ async fn account_data_get( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await } diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 0359261a..28bf6451 100644 --- a/src/admin/query/appservice.rs +++ 
b/src/admin/query/appservice.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::TryStreamExt; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/appservice.rs @@ -18,7 +18,7 @@ pub(crate) enum AppserviceCommand { } /// All the getters and iterators from src/database/key_value/appservice.rs -pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 33810704..3681acfd 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use ruma::OwnedServerName; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/globals.rs @@ -21,7 +21,7 @@ pub(crate) enum GlobalsCommand { } /// All the getters and iterators from src/database/key_value/globals.rs -pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 65164802..5b7ead4b 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -3,7 +3,7 @@ use conduwuit::Result; use futures::StreamExt; use ruma::OwnedUserId; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/presence.rs @@ -23,7 +23,7 @@ pub(crate) enum PresenceCommand { } /// All the getters and iterators in key_value/presence.rs -pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) 
-> Result { +pub(super) async fn process(subcommand: PresenceCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 583c4999..0d0e6cc9 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use ruma::OwnedUserId; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum PusherCommand { @@ -13,7 +13,7 @@ pub(crate) enum PusherCommand { }, } -pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: PusherCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index c503eee5..0e248c65 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -11,7 +11,6 @@ use conduwuit::{ use conduwuit_database::Map; use conduwuit_service::Services; use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; -use ruma::events::room::message::RoomMessageEventContent; use tokio::time::Instant; use crate::{admin_command, admin_command_dispatch}; @@ -170,7 +169,7 @@ pub(super) async fn compact( into: Option, parallelism: Option, exhaustive: bool, -) -> Result { +) -> Result { use conduwuit_database::compact::Options; let default_all_maps: Option<_> = map.is_none().then(|| { @@ -221,17 +220,11 @@ pub(super) async fn compact( let results = results.await; let query_time = timer.elapsed(); self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_count( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_count(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = 
Instant::now(); @@ -242,17 +235,11 @@ pub(super) async fn raw_count( let query_time = timer.elapsed(); self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_keys( - &self, - map: String, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys(&self, map: String, prefix: Option) -> Result { writeln!(self, "```").boxed().await?; let map = self.services.db.get(map.as_str())?; @@ -266,18 +253,12 @@ pub(super) async fn raw_keys( .await?; let query_time = timer.elapsed(); - let out = format!("\n```\n\nQuery completed in {query_time:?}"); - self.write_str(out.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_keys_sizes( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys_sizes(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -294,18 +275,12 @@ pub(super) async fn raw_keys_sizes( .await; let query_time = timer.elapsed(); - let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); - self.write_str(result.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_keys_total( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys_total(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -318,19 +293,12 @@ pub(super) async fn raw_keys_total( .await; let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await?; - - 
Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_vals_sizes( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_vals_sizes(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -348,18 +316,12 @@ pub(super) async fn raw_vals_sizes( .await; let query_time = timer.elapsed(); - let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); - self.write_str(result.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_vals_total( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_vals_total(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -373,19 +335,12 @@ pub(super) async fn raw_vals_total( .await; let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_iter( - &self, - map: String, - prefix: Option, -) -> Result { +pub(super) async fn raw_iter(&self, map: String, prefix: Option) -> Result { writeln!(self, "```").await?; let map = self.services.db.get(&map)?; @@ -401,9 +356,7 @@ pub(super) async fn raw_iter( let query_time = timer.elapsed(); self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] @@ -412,7 +365,7 @@ pub(super) async fn raw_keys_from( map: String, start: String, limit: Option, -) -> Result { +) -> Result { writeln!(self, "```").await?; let map = self.services.db.get(&map)?; @@ -426,9 +379,7 @@ pub(super) async fn raw_keys_from( let query_time = 
timer.elapsed(); self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] @@ -437,7 +388,7 @@ pub(super) async fn raw_iter_from( map: String, start: String, limit: Option, -) -> Result { +) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); let result = map @@ -449,41 +400,38 @@ pub(super) async fn raw_iter_from( .await?; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -pub(super) async fn raw_del(&self, map: String, key: String) -> Result { +pub(super) async fn raw_del(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); map.remove(&key); - let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Operation completed in {query_time:?}" - ))) + let query_time = timer.elapsed(); + self.write_str(&format!("Operation completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_get(&self, map: String, key: String) -> Result { +pub(super) async fn raw_get(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); let handle = map.get(&key).await?; + let query_time = timer.elapsed(); let result = String::from_utf8_lossy(&handle); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) + .await } #[admin_command] -pub(super) async fn raw_maps(&self) -> Result { +pub(super) async fn raw_maps(&self) -> Result { let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect(); - 
Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) + self.write_str(&format!("{list:#?}")).await } fn with_maps_or<'a>( diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 10748d88..4a39a40e 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{Result, utils::time}; use futures::StreamExt; -use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent}; +use ruma::OwnedServerName; use crate::{admin_command, admin_command_dispatch}; @@ -21,10 +21,7 @@ pub(crate) enum ResolverCommand { } #[admin_command] -async fn destinations_cache( - &self, - server_name: Option, -) -> Result { +async fn destinations_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedDest; writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?; @@ -44,11 +41,11 @@ async fn destinations_cache( .await?; } - Ok(RoomMessageEventContent::notice_plain("")) + Ok(()) } #[admin_command] -async fn overrides_cache(&self, server_name: Option) -> Result { +async fn overrides_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedOverride; writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?; @@ -70,5 +67,5 @@ async fn overrides_cache(&self, server_name: Option) -> Result) -> Result { +pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 7f5e2536..c64cd173 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,11 +1,9 @@ use clap::Subcommand; -use conduwuit::{Error, Result}; +use conduwuit::Result; use futures::StreamExt; -use ruma::{ - OwnedRoomId, OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedRoomId, 
OwnedServerName, OwnedUserId}; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum RoomStateCacheCommand { @@ -78,10 +76,10 @@ pub(crate) enum RoomStateCacheCommand { }, } -pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context<'_>) -> Result { let services = context.services; - let c = match subcommand { + match subcommand { | RoomStateCacheCommand::ServerInRoom { server, room_id } => { let timer = tokio::time::Instant::now(); let result = services @@ -91,9 +89,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomServers { room_id } => { let timer = tokio::time::Instant::now(); @@ -106,9 +106,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::ServerRooms { server } => { let timer = tokio::time::Instant::now(); @@ -121,9 +123,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in 
{query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomMembers { room_id } => { let timer = tokio::time::Instant::now(); @@ -136,9 +140,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::LocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -151,9 +157,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -166,18 +174,22 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomJoinedCount { room_id } => { let timer = tokio::time::Instant::now(); let results = services.rooms.state_cache.room_joined_count(&room_id).await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed 
in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomInvitedCount { room_id } => { let timer = tokio::time::Instant::now(); @@ -188,9 +200,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomUserOnceJoined { room_id } => { let timer = tokio::time::Instant::now(); @@ -203,9 +217,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomMembersInvited { room_id } => { let timer = tokio::time::Instant::now(); @@ -218,9 +234,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::GetInviteCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); @@ -231,9 +249,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + 
context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::GetLeftCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); @@ -244,9 +264,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsJoined { user_id } => { let timer = tokio::time::Instant::now(); @@ -259,9 +281,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsInvited { user_id } => { let timer = tokio::time::Instant::now(); @@ -273,9 +297,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsLeft { user_id } => { let timer = tokio::time::Instant::now(); @@ -287,9 +313,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in 
{query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::InviteState { user_id, room_id } => { let timer = tokio::time::Instant::now(); @@ -300,13 +328,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, - }?; - - context.write_str(c.body()).await?; - - Ok(()) + } } diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 6f08aee9..0fd22ca7 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{PduCount, Result, utils::stream::TryTools}; use futures::TryStreamExt; -use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomOrAliasId; use crate::{admin_command, admin_command_dispatch}; @@ -24,7 +24,7 @@ pub(crate) enum RoomTimelineCommand { } #[admin_command] -pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { +pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let result = self @@ -34,7 +34,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result, limit: Option, -) -> Result { +) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let from: Option = from.as_deref().map(str::parse).transpose()?; @@ -57,5 +57,5 @@ pub(super) async fn pdus( .try_collect() .await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}"))) + self.write_str(&format!("{result:#?}")).await } diff --git 
a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 860bca4a..8b1676bc 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,10 +1,10 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedServerName, OwnedUserId}; use service::sending::Destination; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/sending.rs @@ -62,17 +62,7 @@ pub(crate) enum SendingCommand { } /// All the getters and iterators in key_value/sending.rs -pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result { - let c = reprocess(subcommand, context).await?; - context.write_str(c.body()).await?; - Ok(()) -} - -/// All the getters and iterators in key_value/sending.rs -pub(super) async fn reprocess( - subcommand: SendingCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { @@ -82,9 +72,11 @@ pub(super) async fn reprocess( let active_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" + )) + .await }, | SendingCommand::QueuedRequests { appservice_id, @@ -97,19 +89,19 @@ pub(super) async fn reprocess( && user_id.is_none() && push_key.is_none() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
See --help for more details.", - )); + ); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -123,10 +115,10 @@ pub(super) async fn reprocess( .queued_requests(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -135,25 +127,27 @@ pub(super) async fn reprocess( .queued_requests(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. Not all of them See --help for more details.", - )); + ); }, | _ => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
See --help for more details.", - )); + ); }, }; let queued_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" + )) + .await }, | SendingCommand::ActiveRequestsFor { appservice_id, @@ -166,20 +160,20 @@ pub(super) async fn reprocess( && user_id.is_none() && push_key.is_none() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -193,10 +187,10 @@ pub(super) async fn reprocess( .active_requests_for(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -205,34 +199,38 @@ pub(super) async fn reprocess( .active_requests_for(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
Not all of them See --help for more details.", - )); + ); }, | _ => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); }, }; let active_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" + )) + .await }, | SendingCommand::GetLatestEduCount { server_name } => { let timer = tokio::time::Instant::now(); let results = services.sending.db.get_latest_educount(&server_name).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, } } diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs index 0957c15e..aa7c8666 100644 --- a/src/admin/query/short.rs +++ b/src/admin/query/short.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedEventId, OwnedRoomOrAliasId}; use crate::{admin_command, admin_command_dispatch}; @@ -18,10 +18,7 @@ pub(crate) enum ShortCommand { } #[admin_command] -pub(super) async fn short_event_id( - &self, - event_id: OwnedEventId, -) -> Result { +pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result { let shortid = self .services .rooms @@ -29,17 +26,14 @@ pub(super) async fn short_event_id( .get_shorteventid(&event_id) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) + self.write_str(&format!("{shortid:#?}")).await } #[admin_command] 
-pub(super) async fn short_room_id( - &self, - room_id: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn short_room_id(&self, room_id: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) + self.write_str(&format!("{shortid:#?}")).await } diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 5995bc62..0f34d13f 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -1,9 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::stream::StreamExt; -use ruma::{ - OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedDeviceId, OwnedRoomId, OwnedUserId}; use crate::{admin_command, admin_command_dispatch}; @@ -99,11 +97,7 @@ pub(crate) enum UsersCommand { } #[admin_command] -async fn get_shared_rooms( - &self, - user_a: OwnedUserId, - user_b: OwnedUserId, -) -> Result { +async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result: Vec<_> = self .services @@ -115,9 +109,8 @@ async fn get_shared_rooms( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] @@ -127,7 +120,7 @@ async fn get_backup_session( version: String, room_id: OwnedRoomId, session_id: String, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -136,9 +129,8 @@ async fn get_backup_session( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + 
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] @@ -147,7 +139,7 @@ async fn get_room_backups( user_id: OwnedUserId, version: String, room_id: OwnedRoomId, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -156,32 +148,22 @@ async fn get_room_backups( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_all_backups( - &self, - user_id: OwnedUserId, - version: String, -) -> Result { +async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.key_backups.get_all(&user_id, &version).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_backup_algorithm( - &self, - user_id: OwnedUserId, - version: String, -) -> Result { +async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -190,16 +172,12 @@ async fn get_backup_algorithm( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_latest_backup_version( - &self, - user_id: OwnedUserId, -) -> Result { +async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result { 
let timer = tokio::time::Instant::now(); let result = self .services @@ -208,36 +186,33 @@ async fn get_latest_backup_version( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { +async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.key_backups.get_latest_backup(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn iter_users(&self) -> Result { +async fn iter_users(&self) -> Result { let timer = tokio::time::Instant::now(); let result: Vec = self.services.users.stream().map(Into::into).collect().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn iter_users2(&self) -> Result { +async fn iter_users2(&self) -> Result { let timer = tokio::time::Instant::now(); let result: Vec<_> = self.services.users.stream().collect().await; let result: Vec<_> = result @@ -248,35 +223,32 @@ async fn iter_users2(&self) -> Result { let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) + .await } #[admin_command] -async 
fn count_users(&self) -> Result { +async fn count_users(&self) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.count().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn password_hash(&self, user_id: OwnedUserId) -> Result { +async fn password_hash(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.password_hash(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn list_devices(&self, user_id: OwnedUserId) -> Result { +async fn list_devices(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let devices = self .services @@ -288,13 +260,12 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result Result { +async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let devices = self .services @@ -304,17 +275,12 @@ async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result Result { +async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let device = self .services @@ -323,28 +289,22 @@ async fn get_device_metadata( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) + .await } 
#[admin_command] -async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { +async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let device = self.services.users.get_devicelist_version(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) + .await } #[admin_command] -async fn count_one_time_keys( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, -) -> Result { +async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -353,17 +313,12 @@ async fn count_one_time_keys( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_device_keys( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, -) -> Result { +async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -372,24 +327,22 @@ async fn get_device_keys( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { +async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = 
self.services.users.get_user_signing_key(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_master_key(&self, user_id: OwnedUserId) -> Result { +async fn get_master_key(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -398,17 +351,12 @@ async fn get_master_key(&self, user_id: OwnedUserId) -> Result Result { +async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -418,7 +366,6 @@ async fn get_to_device_events( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 4cfff2e5..6b37ffe4 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -1,11 +1,11 @@ use std::fmt::Write; use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedRoomAliasId, OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomAliasId, OwnedRoomId}; -use crate::{Command, escape_html}; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum RoomAliasCommand { @@ -42,17 +42,7 @@ pub(crate) enum RoomAliasCommand { }, } -pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result { - let c = reprocess(command, context).await?; - context.write_str(c.body()).await?; - - Ok(()) -} - -pub(super) async fn reprocess( - command: RoomAliasCommand, - context: 
&Command<'_>, -) -> Result { +pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) -> Result { let services = context.services; let server_user = &services.globals.server_user; @@ -65,9 +55,7 @@ pub(super) async fn reprocess( let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { | Ok(alias) => alias, | Err(err) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse alias: {err}" - ))); + return Err!("Failed to parse alias: {err}"); }, }; match command { @@ -79,60 +67,50 @@ pub(super) async fn reprocess( &room_id, server_user, ) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully overwrote alias (formerly {id})" - ))), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => + context + .write_str(&format!( + "Successfully overwrote alias (formerly {id})" + )) + .await, } }, - | (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!( + | (false, Ok(id)) => Err!( "Refusing to overwrite in use alias for {id}, use -f or --force to \ overwrite" - ))), + ), | (_, Err(_)) => { match services.rooms.alias.set_alias( &room_alias, &room_id, server_user, ) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain( - "Successfully set alias", - )), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => context.write_str("Successfully set alias").await, } }, } }, | RoomAliasCommand::Remove { .. 
} => { match services.rooms.alias.resolve_local_alias(&room_alias).await { + | Err(_) => Err!("Alias isn't in use."), | Ok(id) => match services .rooms .alias .remove_alias(&room_alias, server_user) .await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Removed alias from {id}" - ))), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => + context.write_str(&format!("Removed alias from {id}")).await, }, - | Err(_) => - Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), } }, | RoomAliasCommand::Which { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { - | Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( - "Alias resolves to {id}" - ))), - | Err(_) => - Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), + | Err(_) => Err!("Alias isn't in use."), + | Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await, } }, | RoomAliasCommand::List { .. } => unreachable!(), @@ -154,15 +132,8 @@ pub(super) async fn reprocess( output }); - let html_list = aliases.iter().fold(String::new(), |mut output, alias| { - writeln!(output, "

  • {}
  • ", escape_html(alias.as_ref())) - .expect("should be able to write to string buffer"); - output - }); - let plain = format!("Aliases for {room_id}:\n{plain_list}"); - let html = format!("Aliases for {room_id}:\n
      {html_list}
    "); - Ok(RoomMessageEventContent::text_html(plain, html)) + context.write_str(&plain).await } else { let aliases = services .rooms @@ -181,23 +152,8 @@ pub(super) async fn reprocess( output }); - let html_list = aliases - .iter() - .fold(String::new(), |mut output, (alias, id)| { - writeln!( - output, - "
  • {} -> #{}:{}
  • ", - escape_html(alias.as_ref()), - escape_html(id), - server_name - ) - .expect("should be able to write to string buffer"); - output - }); - let plain = format!("Aliases:\n{plain_list}"); - let html = format!("Aliases:\n
      {html_list}
    "); - Ok(RoomMessageEventContent::text_html(plain, html)) + context.write_str(&plain).await }, } } diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index 6dd31b48..81f36f15 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,6 +1,6 @@ -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; use crate::{PAGE_SIZE, admin_command, get_room_info}; @@ -11,7 +11,7 @@ pub(super) async fn list_rooms( exclude_disabled: bool, exclude_banned: bool, no_details: bool, -) -> Result { +) -> Result { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); let mut rooms = self @@ -41,29 +41,28 @@ pub(super) async fn list_rooms( .collect::>(); if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("No more rooms.")); + return Err!("No more rooms."); } - let output_plain = format!( - "Rooms ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| if no_details { + let body = rooms + .iter() + .map(|(id, members, name)| { + if no_details { format!("{id}") } else { format!("{id}\tMembers: {members}\tName: {name}") - }) - .collect::>() - .join("\n") - ); + } + }) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),)) + .await } #[admin_command] -pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { +pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { let result = self.services.rooms.metadata.exists(&room_id).await; - Ok(RoomMessageEventContent::notice_markdown(format!("{result}"))) + self.write_str(&format!("{result}")).await } diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 179131e4..a6be9a15 100644 --- a/src/admin/room/directory.rs +++ 
b/src/admin/room/directory.rs @@ -1,9 +1,9 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; -use crate::{Command, PAGE_SIZE, get_room_info}; +use crate::{Context, PAGE_SIZE, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomDirectoryCommand { @@ -25,25 +25,16 @@ pub(crate) enum RoomDirectoryCommand { }, } -pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result { - let c = reprocess(command, context).await?; - context.write_str(c.body()).await?; - Ok(()) -} - -pub(super) async fn reprocess( - command: RoomDirectoryCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> Result { let services = context.services; match command { | RoomDirectoryCommand::Publish { room_id } => { services.rooms.directory.set_public(&room_id); - Ok(RoomMessageEventContent::notice_plain("Room published")) + context.write_str("Room published").await }, | RoomDirectoryCommand::Unpublish { room_id } => { services.rooms.directory.set_not_public(&room_id); - Ok(RoomMessageEventContent::notice_plain("Room unpublished")) + context.write_str("Room unpublished").await }, | RoomDirectoryCommand::List { page } => { // TODO: i know there's a way to do this with clap, but i can't seem to find it @@ -66,20 +57,18 @@ pub(super) async fn reprocess( .collect(); if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("No more rooms.")); + return Err!("No more rooms."); } - let output = format!( - "Rooms (page {page}):\n```\n{}\n```", - rooms - .iter() - .map(|(id, members, name)| format!( - "{id} | Members: {members} | Name: {name}" - )) - .collect::>() - .join("\n") - ); - Ok(RoomMessageEventContent::text_markdown(output)) + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id} | Members: {members} 
| Name: {name}")) + .collect::>() + .join("\n"); + + context + .write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",)) + .await }, } } diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 35a92b6a..1278e820 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{Result, utils::ReadyExt}; +use conduwuit::{Err, Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; use crate::{admin_command, admin_command_dispatch}; @@ -27,11 +27,7 @@ pub(crate) enum RoomInfoCommand { } #[admin_command] -async fn list_joined_members( - &self, - room_id: OwnedRoomId, - local_only: bool, -) -> Result { +async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> Result { let room_name = self .services .rooms @@ -64,22 +60,19 @@ async fn list_joined_members( .collect() .await; - let output_plain = format!( - "{} Members in Room \"{}\":\n```\n{}\n```", - member_info.len(), - room_name, - member_info - .into_iter() - .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) - .collect::>() - .join("\n") - ); + let num = member_info.len(); + let body = member_info + .into_iter() + .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",)) + .await } #[admin_command] -async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { +async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { let Ok(room_topic) = self .services .rooms @@ -87,10 +80,9 @@ async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result Result { +async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { debug!("Got room alias or ID: {}", room); let admin_room_alias = &self.services.globals.admin_alias; if let 
Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) { - return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room.")); + return Err!("Not allowed to ban the admin room."); } } @@ -64,11 +61,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result room_id, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -80,11 +77,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result room_alias, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -123,9 +120,9 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { - return Ok(RoomMessageEventContent::notice_plain(format!( + return Err!( "Failed to resolve room alias {room_alias} to a room ID: {e}" - ))); + ); }, } }, @@ -135,11 +132,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result Result Result { +async fn ban_list_of_rooms(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } let rooms_s = self @@ -356,23 +352,24 @@ async fn ban_list_of_rooms(&self) -> Result { self.services.rooms.metadata.disable_room(&room_id, true); } - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \ disabled incoming federation with the room." - ))) + )) + .await } #[admin_command] -async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { +async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -384,11 +381,11 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result room_alias, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -427,9 +424,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" - ))); + return Err!("Failed to resolve room alias {room} to a room ID: {e}"); }, } }, @@ -439,19 +434,20 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result Result { +async fn list_banned_rooms(&self, no_details: bool) -> Result { let room_ids: Vec = self .services .rooms @@ -462,7 +458,7 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result Result>() - .join("\n") - ); + } + }) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",)) + .await } diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 17bf9ec0..b01e9296 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,12 +1,11 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; use conduwuit::{Err, Result, info, utils::time, warn}; -use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; #[admin_command] -pub(super) async fn uptime(&self) -> Result { +pub(super) async fn uptime(&self) -> Result { let elapsed = self .services .server @@ -15,47 +14,36 @@ pub(super) async fn uptime(&self) -> Result { .expect("standard duration"); let result = time::pretty(elapsed); - Ok(RoomMessageEventContent::notice_plain(format!("{result}."))) + self.write_str(&format!("{result}.")).await } #[admin_command] -pub(super) async fn show_config(&self) -> Result { - // Construct and send the response - Ok(RoomMessageEventContent::text_markdown(format!( - "{}", - *self.services.server.config - ))) +pub(super) async fn show_config(&self) -> Result { + self.write_str(&format!("{}", 
*self.services.server.config)) + .await } #[admin_command] -pub(super) async fn reload_config( - &self, - path: Option, -) -> Result { +pub(super) async fn reload_config(&self, path: Option) -> Result { let path = path.as_deref().into_iter(); self.services.config.reload(path)?; - Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) + self.write_str("Successfully reconfigured.").await } #[admin_command] -pub(super) async fn list_features( - &self, - available: bool, - enabled: bool, - comma: bool, -) -> Result { +pub(super) async fn list_features(&self, available: bool, enabled: bool, comma: bool) -> Result { let delim = if comma { "," } else { " " }; if enabled && !available { let features = info::rustc::features().join(delim); let out = format!("`\n{features}\n`"); - return Ok(RoomMessageEventContent::text_markdown(out)); + return self.write_str(&out).await; } if available && !enabled { let features = info::cargo::features().join(delim); let out = format!("`\n{features}\n`"); - return Ok(RoomMessageEventContent::text_markdown(out)); + return self.write_str(&out).await; } let mut features = String::new(); @@ -68,41 +56,42 @@ pub(super) async fn list_features( writeln!(features, "{emoji} {feature} {remark}")?; } - Ok(RoomMessageEventContent::text_markdown(features)) + self.write_str(&features).await } #[admin_command] -pub(super) async fn memory_usage(&self) -> Result { +pub(super) async fn memory_usage(&self) -> Result { let services_usage = self.services.memory_usage().await?; let database_usage = self.services.db.db.memory_usage()?; let allocator_usage = conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}")); - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}", - ))) + )) + .await } #[admin_command] -pub(super) async fn clear_caches(&self) -> Result { +pub(super) async fn clear_caches(&self) -> Result { 
self.services.clear_cache().await; - Ok(RoomMessageEventContent::text_plain("Done.")) + self.write_str("Done.").await } #[admin_command] -pub(super) async fn list_backups(&self) -> Result { +pub(super) async fn list_backups(&self) -> Result { let result = self.services.db.db.backup_list()?; if result.is_empty() { - Ok(RoomMessageEventContent::text_plain("No backups found.")) - } else { - Ok(RoomMessageEventContent::text_plain(result)) + return Err!("No backups found."); } + + self.write_str(&result).await } #[admin_command] -pub(super) async fn backup_database(&self) -> Result { +pub(super) async fn backup_database(&self) -> Result { let db = Arc::clone(&self.services.db); let mut result = self .services @@ -118,27 +107,27 @@ pub(super) async fn backup_database(&self) -> Result { result = self.services.db.db.backup_list()?; } - Ok(RoomMessageEventContent::notice_markdown(result)) + self.write_str(&result).await } #[admin_command] -pub(super) async fn admin_notice(&self, message: Vec) -> Result { +pub(super) async fn admin_notice(&self, message: Vec) -> Result { let message = message.join(" "); self.services.admin.send_text(&message).await; - Ok(RoomMessageEventContent::notice_plain("Notice was sent to #admins")) + self.write_str("Notice was sent to #admins").await } #[admin_command] -pub(super) async fn reload_mods(&self) -> Result { +pub(super) async fn reload_mods(&self) -> Result { self.services.server.reload()?; - Ok(RoomMessageEventContent::notice_plain("Reloading server...")) + self.write_str("Reloading server...").await } #[admin_command] #[cfg(unix)] -pub(super) async fn restart(&self, force: bool) -> Result { +pub(super) async fn restart(&self, force: bool) -> Result { use conduwuit::utils::sys::current_exe_deleted; if !force && current_exe_deleted() { @@ -150,13 +139,13 @@ pub(super) async fn restart(&self, force: bool) -> Result Result { +pub(super) async fn shutdown(&self) -> Result { warn!("shutdown command"); self.services.server.shutdown()?; - 
Ok(RoomMessageEventContent::notice_plain("Shutting down server...")) + self.write_str("Shutting down server...").await } diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 84795f9b..e5e481e5 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - Result, debug, debug_warn, error, info, is_equal_to, + Err, Result, debug, debug_warn, error, info, is_equal_to, matrix::pdu::PduBuilder, utils::{self, ReadyExt}, warn, @@ -14,7 +14,6 @@ use ruma::{ events::{ RoomAccountDataEventType, StateEventType, room::{ - message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, redaction::RoomRedactionEventContent, }, @@ -31,7 +30,7 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25; const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin."; #[admin_command] -pub(super) async fn list_users(&self) -> Result { +pub(super) async fn list_users(&self) -> Result { let users: Vec<_> = self .services .users @@ -44,30 +43,22 @@ pub(super) async fn list_users(&self) -> Result { plain_msg += users.join("\n").as_str(); plain_msg += "\n```"; - self.write_str(plain_msg.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&plain_msg).await } #[admin_command] -pub(super) async fn create_user( - &self, - username: String, - password: Option, -) -> Result { +pub(super) async fn create_user(&self, username: String, password: Option) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &username)?; if let Err(e) = user_id.validate_strict() { if self.services.config.emergency_password.is_none() { - return Ok(RoomMessageEventContent::text_plain(format!( - "Username {user_id} contains disallowed characters or spaces: {e}" - ))); + return Err!("Username {user_id} contains 
disallowed characters or spaces: {e}"); } } if self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists"))); + return Err!("User {user_id} already exists"); } let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -89,8 +80,7 @@ pub(super) async fn create_user( .new_user_displayname_suffix .is_empty() { - write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix) - .expect("should be able to write to string buffer"); + write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?; } self.services @@ -110,15 +100,17 @@ pub(super) async fn create_user( content: ruma::events::push_rules::PushRulesEventContent { global: ruma::push::Ruleset::server_default(&user_id), }, - }) - .expect("to json value always works"), + })?, ) .await?; if !self.services.server.config.auto_join_rooms.is_empty() { for room in &self.services.server.config.auto_join_rooms { let Ok(room_id) = self.services.rooms.alias.resolve(room).await else { - error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"); + error!( + %user_id, + "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping" + ); continue; }; @@ -154,18 +146,17 @@ pub(super) async fn create_user( info!("Automatically joined room {room} for user {user_id}"); }, | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to automatically join room {room} for user {user_id}: \ - {e}" - ))) - .await - .ok(); // don't return this error so we don't fail registrations error!( "Failed to automatically join room {room} for user {user_id}: {e}" ); + self.services + .admin + .send_text(&format!( + "Failed to automatically join room {room} for user {user_id}: \ + {e}" + )) + .await; }, } } @@ -192,25 +183,18 @@ pub(super) async fn create_user( 
debug!("create_user admin command called without an admin room being available"); } - Ok(RoomMessageEventContent::text_plain(format!( - "Created user with user_id: {user_id} and password: `{password}`" - ))) + self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`")) + .await } #[admin_command] -pub(super) async fn deactivate( - &self, - no_leave_rooms: bool, - user_id: String, -) -> Result { +pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; // don't deactivate the server service account if user_id == self.services.globals.server_user { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to deactivate the server service account.", - )); + return Err!("Not allowed to deactivate the server service account.",); } self.services.users.deactivate_account(&user_id).await?; @@ -218,11 +202,8 @@ pub(super) async fn deactivate( if !no_leave_rooms { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Making {user_id} leave all rooms after deactivation..." 
- ))) - .await - .ok(); + .send_text(&format!("Making {user_id} leave all rooms after deactivation...")) + .await; let all_joined_rooms: Vec = self .services @@ -239,24 +220,19 @@ pub(super) async fn deactivate( leave_all_rooms(self.services, &user_id).await; } - Ok(RoomMessageEventContent::text_plain(format!( - "User {user_id} has been deactivated" - ))) + self.write_str(&format!("User {user_id} has been deactivated")) + .await } #[admin_command] -pub(super) async fn reset_password( - &self, - username: String, - password: Option, -) -> Result { +pub(super) async fn reset_password(&self, username: String, password: Option) -> Result { let user_id = parse_local_user_id(self.services, &username)?; if user_id == self.services.globals.server_user { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to set the password for the server account. Please use the emergency \ password config option.", - )); + ); } let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -266,28 +242,20 @@ pub(super) async fn reset_password( .users .set_password(&user_id, Some(new_password.as_str())) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {user_id}: `{new_password}`" - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {user_id}: {e}" - ))), + | Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"), + | Ok(()) => + write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"), } + .await } #[admin_command] -pub(super) async fn deactivate_all( - &self, - no_leave_rooms: bool, - force: bool, -) -> Result { +pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - 
"Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let usernames = self @@ -301,15 +269,23 @@ pub(super) async fn deactivate_all( for username in usernames { match parse_active_local_user_id(self.services, username).await { + | Err(e) => { + self.services + .admin + .send_text(&format!("{username} is not a valid username, skipping over: {e}")) + .await; + + continue; + }, | Ok(user_id) => { if self.services.users.is_admin(&user_id).await && !force { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is an admin and --force is not set, skipping over" - ))) - .await - .ok(); + )) + .await; + admins.push(username); continue; } @@ -318,26 +294,16 @@ pub(super) async fn deactivate_all( if user_id == self.services.globals.server_user { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is the server service account, skipping over" - ))) - .await - .ok(); + )) + .await; + continue; } user_ids.push(user_id); }, - | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username, skipping over: {e}" - ))) - .await - .ok(); - continue; - }, } } @@ -345,6 +311,12 @@ pub(super) async fn deactivate_all( for user_id in user_ids { match self.services.users.deactivate_account(&user_id).await { + | Err(e) => { + self.services + .admin + .send_text(&format!("Failed deactivating user: {e}")) + .await; + }, | Ok(()) => { deactivation_count = deactivation_count.saturating_add(1); if !no_leave_rooms { @@ -365,33 +337,24 @@ pub(super) async fn deactivate_all( leave_all_rooms(self.services, &user_id).await; } }, - | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed deactivating user: {e}" - ))) - .await - .ok(); - }, } } if 
admins.is_empty() { - Ok(RoomMessageEventContent::text_plain(format!( - "Deactivated {deactivation_count} accounts." - ))) + write!(self, "Deactivated {deactivation_count} accounts.") } else { - Ok(RoomMessageEventContent::text_plain(format!( + write!( + self, "Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \ --force to deactivate admin accounts", admins.join(", ") - ))) + ) } + .await } #[admin_command] -pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { +pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; @@ -405,23 +368,20 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result>() - .join("\n") - ); + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),)) + .await } #[admin_command] @@ -429,27 +389,23 @@ pub(super) async fn force_join_list_of_local_users( &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, -) -> Result { +) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } if !yes_i_want_to_do_this { - return Ok(RoomMessageEventContent::notice_markdown( + return Err!( "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ bulk join all specified local users.", - )); + ); } let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not an admin room to check for server admins.", - )); + return Err!("There is not an admin room to check for server admins.",); }; let (room_id, servers) = self @@ -466,7 +422,7 @@ pub(super) async fn force_join_list_of_local_users( .server_in_room(self.services.globals.server_name(), &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + return Err!("We are not joined in this room."); } let server_admins: Vec<_> = self @@ -486,9 +442,7 @@ pub(super) async fn force_join_list_of_local_users( .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) .await { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not a single server admin in the room.", - )); + return Err!("There is not a single server admin in the room.",); } let usernames = self @@ -506,11 +460,11 @@ pub(super) async fn force_join_list_of_local_users( if user_id == self.services.globals.server_user { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is the server service account, skipping over" - ))) - .await - .ok(); + )) + .await; + continue; } @@ -519,11 +473,9 @@ pub(super) async fn force_join_list_of_local_users( | Err(e) => { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username, skipping over: {e}" - ))) - .await - .ok(); + .send_text(&format!("{username} is not a valid username, skipping over: {e}")) + .await; + continue; }, } @@ -554,10 +506,11 @@ pub(super) async fn 
force_join_list_of_local_users( } } - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \ failed.", - ))) + )) + .await } #[admin_command] @@ -565,18 +518,16 @@ pub(super) async fn force_join_all_local_users( &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, -) -> Result { +) -> Result { if !yes_i_want_to_do_this { - return Ok(RoomMessageEventContent::notice_markdown( + return Err!( "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ bulk join all local users.", - )); + ); } let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not an admin room to check for server admins.", - )); + return Err!("There is not an admin room to check for server admins.",); }; let (room_id, servers) = self @@ -593,7 +544,7 @@ pub(super) async fn force_join_all_local_users( .server_in_room(self.services.globals.server_name(), &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + return Err!("We are not joined in this room."); } let server_admins: Vec<_> = self @@ -613,9 +564,7 @@ pub(super) async fn force_join_all_local_users( .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) .await { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not a single server admin in the room.", - )); + return Err!("There is not a single server admin in the room.",); } let mut failed_joins: usize = 0; @@ -650,10 +599,11 @@ pub(super) async fn force_join_all_local_users( } } - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "{successful_joins} local users have been joined to {room_id}. 
{failed_joins} joins \ failed.", - ))) + )) + .await } #[admin_command] @@ -661,7 +611,7 @@ pub(super) async fn force_join_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, -) -> Result { +) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let (room_id, servers) = self .services @@ -677,9 +627,8 @@ pub(super) async fn force_join_room( join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has been joined to {room_id}.", - ))) + self.write_str(&format!("{user_id} has been joined to {room_id}.",)) + .await } #[admin_command] @@ -687,7 +636,7 @@ pub(super) async fn force_leave_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, -) -> Result { +) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -703,24 +652,17 @@ pub(super) async fn force_leave_room( .is_joined(&user_id, &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} is not joined in the room" - ))); + return Err!("{user_id} is not joined in the room"); } leave_room(self.services, &user_id, &room_id, None).await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has left {room_id}.", - ))) + self.write_str(&format!("{user_id} has left {room_id}.",)) + .await } #[admin_command] -pub(super) async fn force_demote( - &self, - user_id: String, - room_id: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAliasId) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -731,15 +673,11 @@ pub(super) async fn force_demote( let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; - let room_power_levels = self + let room_power_levels: Option = self .services .rooms 
.state_accessor - .room_state_get_content::( - &room_id, - &StateEventType::RoomPowerLevels, - "", - ) + .room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "") .await .ok(); @@ -757,9 +695,7 @@ pub(super) async fn force_demote( .is_ok_and(|event| event.sender == user_id); if !user_can_demote_self { - return Ok(RoomMessageEventContent::notice_markdown( - "User is not allowed to modify their own power levels in the room.", - )); + return Err!("User is not allowed to modify their own power levels in the room.",); } let mut power_levels_content = room_power_levels.unwrap_or_default(); @@ -777,25 +713,25 @@ pub(super) async fn force_demote( ) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "User {user_id} demoted themselves to the room default power level in {room_id} - \ {event_id}" - ))) + )) + .await } #[admin_command] -pub(super) async fn make_user_admin(&self, user_id: String) -> Result { +pub(super) async fn make_user_admin(&self, user_id: String) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; - assert!( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); + self.services.admin.make_user_admin(&user_id).await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has been granted admin privileges.", - ))) + self.write_str(&format!("{user_id} has been granted admin privileges.",)) + .await } #[admin_command] @@ -804,7 +740,7 @@ pub(super) async fn put_room_tag( user_id: String, room_id: OwnedRoomId, tag: String, -) -> Result { +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let mut tags_event = self @@ -831,9 +767,10 @@ pub(super) async fn put_room_tag( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Successfully updated room account data for {user_id} and room {room_id} with tag {tag}" - ))) + )) + .await } #[admin_command] @@ -842,7 +779,7 @@ 
pub(super) async fn delete_room_tag( user_id: String, room_id: OwnedRoomId, tag: String, -) -> Result { +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let mut tags_event = self @@ -866,18 +803,15 @@ pub(super) async fn delete_room_tag( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Successfully updated room account data for {user_id} and room {room_id}, deleting room \ tag {tag}" - ))) + )) + .await } #[admin_command] -pub(super) async fn get_room_tags( - &self, - user_id: String, - room_id: OwnedRoomId, -) -> Result { +pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let tags_event = self @@ -889,17 +823,12 @@ pub(super) async fn get_room_tags( content: TagEventContent { tags: BTreeMap::new() }, }); - Ok(RoomMessageEventContent::notice_markdown(format!( - "```\n{:#?}\n```", - tags_event.content.tags - ))) + self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags)) + .await } #[admin_command] -pub(super) async fn redact_event( - &self, - event_id: OwnedEventId, -) -> Result { +pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result { let Ok(event) = self .services .rooms @@ -907,20 +836,18 @@ pub(super) async fn redact_event( .get_non_outlier_pdu(&event_id) .await else { - return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database.")); + return Err!("Event does not exist in our database."); }; if event.is_redacted() { - return Ok(RoomMessageEventContent::text_plain("Event is already redacted.")); + return Err!("Event is already redacted."); } let room_id = event.room_id; let sender_user = event.sender; if !self.services.globals.user_is_local(&sender_user) { - return Ok(RoomMessageEventContent::text_plain( - "This command only works on local users.", - )); + return Err!("This command only works on local users."); } 
let reason = format!( @@ -949,9 +876,8 @@ pub(super) async fn redact_event( .await? }; - let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}"); - - self.write_str(out.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!( + "Successfully redacted event. Redaction event ID: {redaction_event_id}" + )) + .await } diff --git a/src/admin/utils.rs b/src/admin/utils.rs index a2696c50..ea9696b2 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -1,3 +1,5 @@ +#![allow(dead_code)] + use conduwuit_core::{Err, Result, err}; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use service::Services; diff --git a/src/macros/admin.rs b/src/macros/admin.rs index bf1586a0..fe227b43 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -8,7 +8,7 @@ use crate::{Result, utils::camel_to_snake_string}; pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { let attr: Attribute = parse_quote! { - #[conduwuit_macros::implement(crate::Command, params = "<'_>")] + #[conduwuit_macros::implement(crate::Context, params = "<'_>")] }; item.attrs.push(attr); @@ -19,15 +19,16 @@ pub(super) fn command_dispatch(item: ItemEnum, _args: &[Meta]) -> Result = item.variants.iter().map(dispatch_arm).try_collect()?; let switch = quote! { + #[allow(clippy::large_stack_frames)] //TODO: fixme pub(super) async fn process( command: #name, - context: &crate::Command<'_> + context: &crate::Context<'_> ) -> Result { use #name::*; #[allow(non_snake_case)] - Ok(match command { + match command { #( #arm )* - }) + } } }; @@ -47,8 +48,7 @@ fn dispatch_arm(v: &Variant) -> Result { let arg = field.clone(); quote! { #name { #( #field ),* } => { - let c = Box::pin(context.#handler(#( #arg ),*)).await?; - Box::pin(context.write_str(c.body())).await?; + Box::pin(context.#handler(#( #arg ),*)).await }, } }, @@ -58,15 +58,14 @@ fn dispatch_arm(v: &Variant) -> Result { }; quote! 
{ #name ( #field ) => { - Box::pin(#handler::process(#field, context)).await?; + Box::pin(#handler::process(#field, context)).await } } }, | Fields::Unit => { quote! { #name => { - let c = Box::pin(context.#handler()).await?; - Box::pin(context.write_str(c.body())).await?; + Box::pin(context.#handler()).await }, } }, From 3c5bbd4f0505bb1faf6cc5985f0f43fc76cd94b6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 10 Apr 2025 20:55:41 +0000 Subject: [PATCH 0875/1248] simplify database backup interface related Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 35 ++++++++------- src/database/engine/backup.rs | 80 ++++++++++++++++++++++------------- src/service/globals/data.rs | 6 --- 3 files changed, 69 insertions(+), 52 deletions(-) diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index b01e9296..6027a9eb 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,11 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{Err, Result, info, utils::time, warn}; +use conduwuit::{ + Err, Result, info, + utils::{stream::IterStream, time}, + warn, +}; +use futures::TryStreamExt; use crate::admin_command; @@ -81,33 +86,31 @@ pub(super) async fn clear_caches(&self) -> Result { #[admin_command] pub(super) async fn list_backups(&self) -> Result { - let result = self.services.db.db.backup_list()?; - - if result.is_empty() { - return Err!("No backups found."); - } - - self.write_str(&result).await + self.services + .db + .db + .backup_list()? 
+ .try_stream() + .try_for_each(|result| write!(self, "{result}")) + .await } #[admin_command] pub(super) async fn backup_database(&self) -> Result { let db = Arc::clone(&self.services.db); - let mut result = self + let result = self .services .server .runtime() .spawn_blocking(move || match db.db.backup() { - | Ok(()) => String::new(), - | Err(e) => e.to_string(), + | Ok(()) => "Done".to_owned(), + | Err(e) => format!("Failed: {e}"), }) .await?; - if result.is_empty() { - result = self.services.db.db.backup_list()?; - } - - self.write_str(&result).await + let count = self.services.db.db.backup_count()?; + self.write_str(&format!("{result}. Currently have {count} backups.")) + .await } #[admin_command] diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs index bb110630..ac72e6d4 100644 --- a/src/database/engine/backup.rs +++ b/src/database/engine/backup.rs @@ -1,24 +1,16 @@ -use std::fmt::Write; +use std::{ffi::OsString, path::PathBuf}; -use conduwuit::{Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; +use conduwuit::{Err, Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; use super::Engine; -use crate::{or_else, util::map_err}; +use crate::util::map_err; #[implement(Engine)] #[tracing::instrument(skip(self))] pub fn backup(&self) -> Result { - let server = &self.ctx.server; - let config = &server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok(()); - } - - let options = - BackupEngineOptions::new(path.expect("valid database backup path")).map_err(map_err)?; - let mut engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err)?; + let mut engine = self.backup_engine()?; + let config = &self.ctx.server.config; if config.database_backups_to_keep > 0 { let flush = !self.is_read_only(); engine @@ -40,34 +32,62 @@ pub fn 
backup(&self) -> Result { } } + if config.database_backups_to_keep == 0 { + warn!("Configuration item `database_backups_to_keep` is set to 0."); + } + Ok(()) } #[implement(Engine)] -pub fn backup_list(&self) -> Result { - let server = &self.ctx.server; - let config = &server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok("Configure database_backup_path to enable backups, or the path specified is \ - not valid" - .to_owned()); +pub fn backup_list(&self) -> Result + Send> { + let info = self.backup_engine()?.get_backup_info(); + + if info.is_empty() { + return Err!("No backups found."); } - let mut res = String::new(); - let options = - BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?; - let engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).or_else(or_else)?; - for info in engine.get_backup_info() { - writeln!( - res, + let list = info.into_iter().map(|info| { + format!( "#{} {}: {} bytes, {} files", info.backup_id, rfc2822_from_seconds(info.timestamp), info.size, info.num_files, - )?; + ) + }); + + Ok(list) +} + +#[implement(Engine)] +pub fn backup_count(&self) -> Result { + let info = self.backup_engine()?.get_backup_info(); + + Ok(info.len()) +} + +#[implement(Engine)] +fn backup_engine(&self) -> Result { + let path = self.backup_path()?; + let options = BackupEngineOptions::new(path).map_err(map_err)?; + BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err) +} + +#[implement(Engine)] +fn backup_path(&self) -> Result { + let path = self + .ctx + .server + .config + .database_backup_path + .clone() + .map(PathBuf::into_os_string) + .unwrap_or_default(); + + if path.is_empty() { + return Err!(Config("database_backup_path", "Configure path to enable backups")); } - Ok(res) + Ok(path) } diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index b43b7c5f..21c09252 100644 --- 
a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -72,10 +72,4 @@ impl Data { pub fn bump_database_version(&self, new_version: u64) { self.global.raw_put(b"version", new_version); } - - #[inline] - pub fn backup(&self) -> Result { self.db.db.backup() } - - #[inline] - pub fn backup_list(&self) -> Result { self.db.db.backup_list() } } From 21ec2551598247dc5f081aae73748861085d0ce0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 11 Apr 2025 01:29:26 +0000 Subject: [PATCH 0876/1248] eliminate Arc impl for trait Event Signed-off-by: Jason Volk --- src/core/matrix/event.rs | 29 ------------------- src/core/matrix/state_res/benches.rs | 33 ++++++++++------------ src/core/matrix/state_res/event_auth.rs | 14 ++++------ src/core/matrix/state_res/mod.rs | 11 +++----- src/core/matrix/state_res/test_utils.rs | 37 ++++++++++++------------- 5 files changed, 42 insertions(+), 82 deletions(-) diff --git a/src/core/matrix/event.rs b/src/core/matrix/event.rs index ac9e29d6..29153334 100644 --- a/src/core/matrix/event.rs +++ b/src/core/matrix/event.rs @@ -2,7 +2,6 @@ use std::{ borrow::Borrow, fmt::{Debug, Display}, hash::Hash, - sync::Arc, }; use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; @@ -72,31 +71,3 @@ impl Event for &T { fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } } - -impl Event for Arc { - type Id = T::Id; - - fn event_id(&self) -> &Self::Id { (**self).event_id() } - - fn room_id(&self) -> &RoomId { (**self).room_id() } - - fn sender(&self) -> &UserId { (**self).sender() } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (**self).origin_server_ts() } - - fn event_type(&self) -> &TimelineEventType { (**self).event_type() } - - fn content(&self) -> &RawJsonValue { (**self).content() } - - fn state_key(&self) -> Option<&str> { (**self).state_key() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (**self).prev_events() - } - - fn auth_events(&self) -> impl 
DoubleEndedIterator + Send + '_ { - (**self).auth_events() - } - - fn redacts(&self) -> Option<&Self::Id> { (**self).redacts() } -} diff --git a/src/core/matrix/state_res/benches.rs b/src/core/matrix/state_res/benches.rs index 7a1ae5bf..01218b01 100644 --- a/src/core/matrix/state_res/benches.rs +++ b/src/core/matrix/state_res/benches.rs @@ -4,10 +4,7 @@ extern crate test; use std::{ borrow::Borrow, collections::{HashMap, HashSet}, - sync::{ - Arc, - atomic::{AtomicU64, Ordering::SeqCst}, - }, + sync::atomic::{AtomicU64, Ordering::SeqCst}, }; use futures::{future, future::ready}; @@ -64,7 +61,7 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) { c.iter(|| async { let ev_map = store.0.clone(); let state_sets = [&state_at_bob, &state_at_charlie]; - let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).clone()); let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); let auth_chain_sets: Vec> = state_sets .iter() @@ -148,7 +145,7 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { }) .collect(); - let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let fetch = |id: OwnedEventId| ready(inner.get(&id).clone()); let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); let _ = match state_res::resolve( &RoomVersionId::V6, @@ -171,20 +168,20 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { // IMPLEMENTATION DETAILS AHEAD // /////////////////////////////////////////////////////////////////////*/ -struct TestStore(HashMap>); +struct TestStore(HashMap); #[allow(unused)] -impl TestStore { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result { self.0 .get(event_id) - .map(Arc::clone) + .cloned() .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) } /// Returns the events that correspond to the `event_ids` sorted in the same /// 
order. - fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result> { let mut events = vec![]; for id in event_ids { events.push(self.get_event(room_id, id)?); @@ -264,7 +261,7 @@ impl TestStore { &[], ); let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); + self.0.insert(cre.clone(), create_event.clone()); let alice_mem = to_pdu_event( "IMA", @@ -276,7 +273,7 @@ impl TestStore { &[cre.clone()], ); self.0 - .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); let join_rules = to_pdu_event( "IJR", @@ -383,7 +380,7 @@ fn to_pdu_event( content: Box, auth_events: &[S], prev_events: &[S], -) -> Arc +) -> PduEvent where S: AsRef, { @@ -407,7 +404,7 @@ where .collect::>(); let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -424,12 +421,12 @@ where hashes: EventHash::new(String::new()), signatures: Signatures::new(), }), - }) + } } // all graphs start with these input events #[allow(non_snake_case)] -fn INITIAL_EVENTS() -> HashMap> { +fn INITIAL_EVENTS() -> HashMap { vec![ to_pdu_event::<&EventId>( "CREATE", @@ -511,7 +508,7 @@ fn INITIAL_EVENTS() -> HashMap> { // all graphs start with these input events #[allow(non_snake_case)] -fn BAN_STATE_SET() -> HashMap> { +fn BAN_STATE_SET() -> HashMap { vec![ to_pdu_event( "PA", diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 65bec802..8c9339ec 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -1112,8 +1112,6 @@ fn verify_third_party_invite( #[cfg(test)] mod tests { - use std::sync::Arc; - use ruma::events::{ StateEventType, TimelineEventType, room::{ @@ -1143,7 +1141,7 
@@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1188,7 +1186,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1233,7 +1231,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1278,7 +1276,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1340,7 +1338,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1412,7 +1410,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index ce6b7e89..2ab7cb64 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -861,10 +861,7 @@ where #[cfg(test)] mod tests { - use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - }; + use 
std::collections::{HashMap, HashSet}; use maplit::{hashmap, hashset}; use rand::seq::SliceRandom; @@ -906,7 +903,7 @@ mod tests { let power_events = event_map .values() - .filter(|&pdu| is_power_event(&**pdu)) + .filter(|&pdu| is_power_event(&*pdu)) .map(|pdu| pdu.event_id.clone()) .collect::>(); @@ -1489,7 +1486,7 @@ mod tests { } #[allow(non_snake_case)] - fn BAN_STATE_SET() -> HashMap> { + fn BAN_STATE_SET() -> HashMap { vec![ to_pdu_event( "PA", @@ -1534,7 +1531,7 @@ mod tests { } #[allow(non_snake_case)] - fn JOIN_RULE() -> HashMap> { + fn JOIN_RULE() -> HashMap { vec![ to_pdu_event( "JR", diff --git a/src/core/matrix/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs index f2ee4238..a666748a 100644 --- a/src/core/matrix/state_res/test_utils.rs +++ b/src/core/matrix/state_res/test_utils.rs @@ -1,10 +1,7 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, - sync::{ - Arc, - atomic::{AtomicU64, Ordering::SeqCst}, - }, + sync::atomic::{AtomicU64, Ordering::SeqCst}, }; use futures::future::ready; @@ -36,7 +33,7 @@ use crate::{ static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); pub(crate) async fn do_check( - events: &[Arc], + events: &[PduEvent], edges: Vec>, expected_state_ids: Vec, ) { @@ -85,7 +82,7 @@ pub(crate) async fn do_check( } // event_id -> PduEvent - let mut event_map: HashMap> = HashMap::new(); + let mut event_map: HashMap = HashMap::new(); // event_id -> StateMap let mut state_at_event: HashMap> = HashMap::new(); @@ -194,7 +191,7 @@ pub(crate) async fn do_check( store.0.insert(ev_id.to_owned(), event.clone()); state_at_event.insert(node, state_after); - event_map.insert(event_id.to_owned(), Arc::clone(store.0.get(ev_id).unwrap())); + event_map.insert(event_id.to_owned(), store.0.get(ev_id).unwrap().clone()); } let mut expected_state = StateMap::new(); @@ -235,10 +232,10 @@ pub(crate) async fn do_check( } #[allow(clippy::exhaustive_structs)] -pub(crate) struct TestStore(pub(crate) HashMap>); +pub(crate) struct 
TestStore(pub(crate) HashMap); -impl TestStore { - pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result> { +impl TestStore { + pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result { self.0 .get(event_id) .cloned() @@ -288,7 +285,7 @@ impl TestStore { &[], ); let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); + self.0.insert(cre.clone(), create_event.clone()); let alice_mem = to_pdu_event( "IMA", @@ -300,7 +297,7 @@ impl TestStore { &[cre.clone()], ); self.0 - .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); let join_rules = to_pdu_event( "IJR", @@ -399,7 +396,7 @@ pub(crate) fn to_init_pdu_event( ev_type: TimelineEventType, state_key: Option<&str>, content: Box, -) -> Arc { +) -> PduEvent { let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); let id = if id.contains('$') { id.to_owned() @@ -408,7 +405,7 @@ pub(crate) fn to_init_pdu_event( }; let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -425,7 +422,7 @@ pub(crate) fn to_init_pdu_event( hashes: EventHash::new("".to_owned()), signatures: ServerSignatures::default(), }), - }) + } } pub(crate) fn to_pdu_event( @@ -436,7 +433,7 @@ pub(crate) fn to_pdu_event( content: Box, auth_events: &[S], prev_events: &[S], -) -> Arc +) -> PduEvent where S: AsRef, { @@ -458,7 +455,7 @@ where .collect::>(); let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -475,12 +472,12 @@ where hashes: EventHash::new("".to_owned()), signatures: ServerSignatures::default(), }), - }) + } } // all graphs start with these input events #[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS() -> HashMap> { +pub(crate) fn 
INITIAL_EVENTS() -> HashMap { vec![ to_pdu_event::<&EventId>( "CREATE", @@ -562,7 +559,7 @@ pub(crate) fn INITIAL_EVENTS() -> HashMap> { // all graphs start with these input events #[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap> { +pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap { vec![to_pdu_event::<&EventId>( "CREATE", alice(), From 576a783a6f98bde5c04171f881c8a18e70222ac3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Apr 2025 08:39:41 +0000 Subject: [PATCH 0877/1248] add missing feature-projections between intra-workspace crates Signed-off-by: Jason Volk --- Cargo.lock | 1 - src/admin/Cargo.toml | 49 +++++++++++++++++++++++++++ src/api/Cargo.toml | 50 +++++++++++++++++++++------- src/core/Cargo.toml | 38 ++++++++++----------- src/core/info/cargo.rs | 2 +- src/database/Cargo.toml | 30 ++++++++++++----- src/main/Cargo.toml | 2 ++ src/router/Cargo.toml | 73 +++++++++++++++++++++++++++++++++-------- src/service/Cargo.toml | 34 +++++++++++++++++-- 9 files changed, 221 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index def41f68..00aeca81 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -784,7 +784,6 @@ dependencies = [ "base64 0.22.1", "bytes", "conduwuit_core", - "conduwuit_database", "conduwuit_service", "const-str", "futures", diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml index ca865969..7896ef97 100644 --- a/src/admin/Cargo.toml +++ b/src/admin/Cargo.toml @@ -17,12 +17,61 @@ crate-type = [ ] [features] +brotli_compression = [ + "conduwuit-api/brotli_compression", + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", +] +gzip_compression = [ + "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", +] +io_uring = [ + "conduwuit-api/io_uring", + "conduwuit-database/io_uring", + "conduwuit-service/io_uring", +] +jemalloc = [ + "conduwuit-api/jemalloc", + "conduwuit-core/jemalloc", + 
"conduwuit-database/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-api/jemalloc_conf", + "conduwuit-core/jemalloc_conf", + "conduwuit-database/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-api/jemalloc_prof", + "conduwuit-core/jemalloc_prof", + "conduwuit-database/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-api/jemalloc_stats", + "conduwuit-core/jemalloc_stats", + "conduwuit-database/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] release_max_log_level = [ + "conduwuit-api/release_max_log_level", + "conduwuit-core/release_max_log_level", + "conduwuit-database/release_max_log_level", + "conduwuit-service/release_max_log_level", "tracing/max_level_trace", "tracing/release_max_level_info", "log/max_level_trace", "log/release_max_level_info", ] +zstd_compression = [ + "conduwuit-api/zstd_compression", + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", + "conduwuit-service/zstd_compression", +] [dependencies] clap.workspace = true diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 7890561c..15ada812 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,21 +17,50 @@ crate-type = [ ] [features] -element_hacks = [] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", +brotli_compression = [ + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", + "reqwest/brotli", ] -zstd_compression = [ - "reqwest/zstd", +element_hacks = [ + "conduwuit-service/element_hacks", ] gzip_compression = [ + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", "reqwest/gzip", ] -brotli_compression = [ - "reqwest/brotli", +io_uring = [ + "conduwuit-service/io_uring", +] +jemalloc = [ + "conduwuit-core/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + 
"conduwuit-core/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] +release_max_log_level = [ + "conduwuit-core/release_max_log_level", + "conduwuit-service/release_max_log_level", + "log/max_level_trace", + "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", +] +zstd_compression = [ + "conduwuit-core/zstd_compression", + "conduwuit-service/zstd_compression", + "reqwest/zstd", ] [dependencies] @@ -42,7 +71,6 @@ axum.workspace = true base64.workspace = true bytes.workspace = true conduwuit-core.workspace = true -conduwuit-database.workspace = true conduwuit-service.workspace = true const-str.workspace = true futures.workspace = true diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 4848e742..f42b049b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -17,17 +17,24 @@ crate-type = [ ] [features] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", +brotli_compression = [ + "reqwest/brotli", +] +conduwuit_mods = [ + "dep:libloading" +] +gzip_compression = [ + "reqwest/gzip", +] +hardened_malloc = [ + "dep:hardened_malloc-rs" ] jemalloc = [ "dep:tikv-jemalloc-sys", "dep:tikv-jemalloc-ctl", "dep:tikv-jemallocator", ] +jemalloc_conf = [] jemalloc_prof = [ "tikv-jemalloc-sys/profiling", ] @@ -36,24 +43,17 @@ jemalloc_stats = [ "tikv-jemalloc-ctl/stats", "tikv-jemallocator/stats", ] -jemalloc_conf = [] -hardened_malloc = [ - "dep:hardened_malloc-rs" -] -gzip_compression = [ - "reqwest/gzip", -] -brotli_compression = [ - "reqwest/brotli", +perf_measurements = [] +release_max_log_level = [ + "tracing/max_level_trace", + "tracing/release_max_level_info", + "log/max_level_trace", + "log/release_max_level_info", ] 
+sentry_telemetry = [] zstd_compression = [ "reqwest/zstd", ] -perf_measurements = [] -sentry_telemetry = [] -conduwuit_mods = [ - "dep:libloading" -] [dependencies] argon2.workspace = true diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs index c5a1d167..28c6590e 100644 --- a/src/core/info/cargo.rs +++ b/src/core/info/cargo.rs @@ -31,7 +31,7 @@ const ROUTER_MANIFEST: &'static str = (); #[cargo_manifest(crate = "main")] const MAIN_MANIFEST: &'static str = (); -/// Processed list of features access all project crates. This is generated from +/// Processed list of features across all project crates. This is generated from /// the data in the MANIFEST strings and contains all possible project features. /// For *enabled* features see the info::rustc module instead. static FEATURES: OnceLock> = OnceLock::new(); diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 067c6f5f..55d4793f 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -17,19 +17,31 @@ crate-type = [ ] [features] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", -] -jemalloc = [ - "rust-rocksdb/jemalloc", -] io_uring = [ "rust-rocksdb/io-uring", ] +jemalloc = [ + "conduwuit-core/jemalloc", + "rust-rocksdb/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", +] +release_max_log_level = [ + "conduwuit-core/release_max_log_level", + "log/max_level_trace", + "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", +] zstd_compression = [ + "conduwuit-core/zstd_compression", "rust-rocksdb/zstd", ] diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 87ca48c8..e2fed5d5 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -70,6 +70,7 @@ element_hacks = [ ] gzip_compression = [ 
"conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", "conduwuit-router/gzip_compression", "conduwuit-service/gzip_compression", ] @@ -141,6 +142,7 @@ zstd_compression = [ "conduwuit-core/zstd_compression", "conduwuit-database/zstd_compression", "conduwuit-router/zstd_compression", + "conduwuit-service/zstd_compression", ] conduwuit_mods = [ "conduwuit-core/conduwuit_mods", diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 51e15aed..31a44983 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -17,34 +17,79 @@ crate-type = [ ] [features] +brotli_compression = [ + "conduwuit-admin/brotli_compression", + "conduwuit-api/brotli_compression", + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", + "tower-http/compression-br", +] +direct_tls = [ + "axum-server/tls-rustls", + "dep:rustls", + "dep:axum-server-dual-protocol", +] +gzip_compression = [ + "conduwuit-admin/gzip_compression", + "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", + "tower-http/compression-gzip", +] +io_uring = [ + "conduwuit-admin/io_uring", + "conduwuit-api/io_uring", + "conduwuit-service/io_uring", + "conduwuit-api/io_uring", +] +jemalloc = [ + "conduwuit-admin/jemalloc", + "conduwuit-api/jemalloc", + "conduwuit-core/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-admin/jemalloc_conf", + "conduwuit-api/jemalloc_conf", + "conduwuit-core/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-admin/jemalloc_prof", + "conduwuit-api/jemalloc_prof", + "conduwuit-core/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-admin/jemalloc_stats", + "conduwuit-api/jemalloc_stats", + "conduwuit-core/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] release_max_log_level = [ + "conduwuit-admin/release_max_log_level", + "conduwuit-api/release_max_log_level", + 
"conduwuit-core/release_max_log_level", + "conduwuit-service/release_max_log_level", "tracing/max_level_trace", "tracing/release_max_level_info", "log/max_level_trace", "log/release_max_level_info", ] sentry_telemetry = [ + "conduwuit-core/sentry_telemetry", "dep:sentry", "dep:sentry-tracing", "dep:sentry-tower", ] -zstd_compression = [ - "tower-http/compression-zstd", -] -gzip_compression = [ - "tower-http/compression-gzip", -] -brotli_compression = [ - "tower-http/compression-br", -] systemd = [ "dep:sd-notify", ] - -direct_tls = [ - "axum-server/tls-rustls", - "dep:rustls", - "dep:axum-server-dual-protocol", +zstd_compression = [ + "conduwuit-api/zstd_compression", + "conduwuit-core/zstd_compression", + "conduwuit-service/zstd_compression", + "tower-http/compression-zstd", ] [dependencies] diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index caeea318..8b0d1405 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -17,7 +17,12 @@ crate-type = [ ] [features] +blurhashing = [ + "dep:image", + "dep:blurhash", +] brotli_compression = [ + "conduwuit-core/brotli_compression", "reqwest/brotli", ] console = [ @@ -26,25 +31,48 @@ console = [ ] element_hacks = [] gzip_compression = [ + "conduwuit-core/gzip_compression", "reqwest/gzip", ] +io_uring = [ + "conduwuit-database/io_uring", +] +jemalloc = [ + "conduwuit-core/jemalloc", + "conduwuit-database/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", + "conduwuit-database/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", + "conduwuit-database/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", + "conduwuit-database/jemalloc_stats", +] media_thumbnail = [ "dep:image", ] release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", + "conduwuit-core/release_max_log_level", + "conduwuit-database/release_max_log_level", "log/max_level_trace", "log/release_max_level_info", + "tracing/max_level_trace", + 
"tracing/release_max_level_info", ] url_preview = [ "dep:image", "dep:webpage", ] zstd_compression = [ + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", "reqwest/zstd", ] -blurhashing = ["dep:image","dep:blurhash"] [dependencies] async-trait.workspace = true From 8e7373c02790a4e48e29346f678a0181de6c42f6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 23:04:13 +0000 Subject: [PATCH 0878/1248] mitigate additional debuginfo expansions Signed-off-by: Jason Volk --- src/core/debug.rs | 1 + src/core/error/err.rs | 3 +++ src/core/log/mod.rs | 1 + src/core/utils/math.rs | 4 ++++ src/core/utils/mod.rs | 4 ---- src/core/utils/string.rs | 2 ++ src/core/utils/sys/storage.rs | 2 +- src/service/mod.rs | 2 +- 8 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/core/debug.rs b/src/core/debug.rs index b9a53038..21a5ada4 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -12,6 +12,7 @@ pub use crate::{result::DebugInspect, utils::debug::*}; /// Log event at given level in debug-mode (when debug-assertions are enabled). /// In release-mode it becomes DEBUG level, and possibly subject to elision. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! debug_event { ( $level:expr_2021, $($x:tt)+ ) => { if $crate::debug::logging() { diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 9c24d3b4..2eb6823a 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -33,6 +33,7 @@ //! option of replacing `error!` with `debug_error!`. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! Err { ($($args:tt)*) => { Err($crate::err!($($args)*)) @@ -40,6 +41,7 @@ macro_rules! Err { } #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err { (Request(Forbidden($level:ident!($($args:tt)+)))) => {{ let mut buf = String::new(); @@ -109,6 +111,7 @@ macro_rules! err { /// can share the same callsite metadata for the source of our Error and the /// associated logging and tracing event dispatches. 
#[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err_log { ($out:ident, $level:ident, $($fields:tt)+) => {{ use $crate::tracing::{ diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 5ac374e8..f7b2521a 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -33,6 +33,7 @@ pub struct Log { // the crate namespace like these. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! event { ( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } } diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index 488f2a13..9316731c 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -10,6 +10,7 @@ use crate::{Err, Error, Result, debug::type_name, err}; /// Checked arithmetic expression. Returns a Result #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! checked { ($($input:tt)+) => { $crate::utils::math::checked_ops!($($input)+) @@ -22,6 +23,7 @@ macro_rules! checked { /// has no realistic expectation for error and no interest in cluttering the /// callsite with result handling from checked!. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! expected { ($msg:literal, $($input:tt)+) => { $crate::checked!($($input)+).expect($msg) @@ -37,6 +39,7 @@ macro_rules! expected { /// regression analysis. #[cfg(not(debug_assertions))] #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! validated { ($($input:tt)+) => { //#[allow(clippy::arithmetic_side_effects)] { @@ -53,6 +56,7 @@ macro_rules! validated { /// the expression is obviously safe. The check is elided in release-mode. #[cfg(debug_assertions)] #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! validated { ($($input:tt)+) => { $crate::expected!($($input)+) } } diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 5e6f2868..54404e4c 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -173,7 +173,6 @@ macro_rules! is_equal { /// Functor for |x| *x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! 
deref_at { ($idx:tt) => { |t| *t.$idx @@ -182,7 +181,6 @@ macro_rules! deref_at { /// Functor for |ref x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! ref_at { ($idx:tt) => { |ref t| &t.$idx @@ -191,7 +189,6 @@ macro_rules! ref_at { /// Functor for |&x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! val_at { ($idx:tt) => { |&t| t.$idx @@ -200,7 +197,6 @@ macro_rules! val_at { /// Functor for |x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! at { ($idx:tt) => { |t| t.$idx diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index d8fa3f95..7d81903d 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -14,6 +14,7 @@ pub const EMPTY: &str = ""; /// returned otherwise the input (i.e. &'static str) is returned. If multiple /// arguments are provided the first is assumed to be a format string. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! format_maybe { ($s:literal $(,)?) => { if $crate::is_format!($s) { std::format!($s).into() } else { $s.into() } @@ -27,6 +28,7 @@ macro_rules! format_maybe { /// Constant expression to decide if a literal is a format string. Note: could /// use some improvement. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! is_format { ($s:literal) => { ::const_str::contains!($s, "{") && ::const_str::contains!($s, "}") diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index 452b04b2..b71c3437 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -117,7 +117,7 @@ pub fn name_from_path(path: &Path) -> Result { /// Get the (major, minor) of the block device on which Path is mounted. 
#[allow(clippy::useless_conversion, clippy::unnecessary_fallible_conversions)] -pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { +fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { #[cfg(target_family = "unix")] use std::os::unix::fs::MetadataExt; diff --git a/src/service/mod.rs b/src/service/mod.rs index 63a51213..2be16f79 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,4 +1,4 @@ -#![type_length_limit = "2048"] +#![type_length_limit = "8192"] #![allow(refining_impl_trait)] mod manager; From e71138ab6ffbea621120c41bafb1c65c7b1a3e39 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 8 Apr 2025 03:17:23 +0000 Subject: [PATCH 0879/1248] reduce large stack frames --- src/service/admin/create.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 4de37092..cd0fc5a9 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use conduwuit::{Result, pdu::PduBuilder}; +use futures::FutureExt; use ruma::{ RoomId, RoomVersionId, events::room::{ @@ -63,6 +64,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 2. Make server user/bot join @@ -78,6 +80,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 3. 
Power levels @@ -95,6 +98,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.1 Join Rules @@ -107,6 +111,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.2 History Visibility @@ -122,6 +127,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.3 Guest Access @@ -137,6 +143,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 5. Events implied by name and topic @@ -150,6 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; services @@ -163,6 +171,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 6. Room alias @@ -180,6 +189,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; services @@ -197,6 +207,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; Ok(()) From 0eb9e4f3d2284a9c96b4c781e25328f1a6e9f9e2 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Apr 2025 23:02:43 +0100 Subject: [PATCH 0880/1248] refactor: Centralize server forbidden checks into moderation module This moves all checks related to `forbidden_remote_server_names`, `forbidden_remote_room_directory_server_names` and `prevent_media_downloads_from` to a new `moderation` module. This is useful for implementing more complicated logic globally. 
Mostly the changes from #673, but is also relevant for #750 --- conduwuit-example.toml | 3 ++ src/api/client/directory.rs | 19 ++-------- src/api/client/membership.rs | 5 +-- src/api/client/message.rs | 5 +-- src/api/router/auth.rs | 8 +--- src/api/server/invite.rs | 11 ++---- src/api/server/make_join.rs | 11 ++---- src/api/server/make_knock.rs | 11 ++---- src/api/server/send_join.rs | 22 +++-------- src/api/server/send_knock.rs | 11 ++---- src/core/config/mod.rs | 5 ++- src/service/federation/execute.rs | 8 +--- src/service/federation/mod.rs | 4 +- src/service/media/mod.rs | 4 +- src/service/media/remote.rs | 12 +----- src/service/mod.rs | 1 + src/service/moderation.rs | 62 +++++++++++++++++++++++++++++++ src/service/services.rs | 4 +- 18 files changed, 109 insertions(+), 97 deletions(-) create mode 100644 src/service/moderation.rs diff --git a/conduwuit-example.toml b/conduwuit-example.toml index af8da6bb..5a4b7b3f 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1197,6 +1197,9 @@ # incoming AND outgoing federation with, and block client room joins / # remote user invites. # +# Additionally, it will hide messages from these servers for all users +# on this server. +# # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and # outbound federation handler. 
diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index b44b9f64..aa6ae168 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -52,13 +52,8 @@ pub(crate) async fn get_public_rooms_filtered_route( ) -> Result { if let Some(server) = &body.server { if services - .config - .forbidden_remote_room_directory_server_names - .is_match(server.host()) - || services - .config - .forbidden_remote_server_names - .is_match(server.host()) + .moderation + .is_remote_server_room_directory_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } @@ -92,15 +87,7 @@ pub(crate) async fn get_public_rooms_route( body: Ruma, ) -> Result { if let Some(server) = &body.server { - if services - .config - .forbidden_remote_room_directory_server_names - .is_match(server.host()) - || services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 18a1c741..b1b85b81 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -83,9 +83,8 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .config - .forbidden_remote_server_names - .is_match(room_id.server_name().expect("legacy room mxid").host()) + .moderation + .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 9c2c4057..08887e18 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -274,9 +274,8 @@ pub(crate) async fn is_ignored_pdu( let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); let ignored_server = 
services - .config - .forbidden_remote_server_names - .is_match(pdu.sender().server_name().host()); + .moderation + .is_remote_server_forbidden(pdu.sender().server_name()); if ignored_type && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 0eb61ca6..01254c32 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -306,7 +306,7 @@ async fn auth_server( } fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { - if !services.server.config.allow_federation { + if !services.config.allow_federation { return Err!(Config("allow_federation", "Federation is disabled.")); } @@ -316,11 +316,7 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { } let origin = &x_matrix.origin; - if services - .config - .forbidden_remote_server_names - .is_match(origin.host()) - { + if services.moderation.is_remote_server_forbidden(origin) { return Err!(Request(Forbidden(debug_warn!( "Federation requests from {origin} denied." )))); diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index edd6ac16..f53e1a15 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -37,19 +37,14 @@ pub(crate) async fn create_invite_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Received federated/remote invite from banned server {} for room ID {}. 
Rejecting.", diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index ac2c5485..3204c30c 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -42,9 +42,8 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} for remote user {} tried joining room ID {} which has a server name that \ @@ -57,11 +56,7 @@ pub(crate) async fn create_join_event_template_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden(warn!( "Room ID server name {server} is banned on this homeserver." )))); diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 511c13b2..423c8e81 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -33,9 +33,8 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} for remote user {} tried knocking room ID {} which has a server name \ @@ -48,11 +47,7 @@ pub(crate) async fn create_knock_event_template_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index a66d8890..895eca81 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -268,9 +268,8 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> 
Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} tried joining room ID {} through us who has a server name that is \ @@ -282,11 +281,7 @@ pub(crate) async fn create_join_event_v1_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ globally forbidden. Rejecting.", @@ -314,19 +309,14 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ globally forbidden. 
Rejecting.", diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index ee7b6cba..8d3697d2 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -26,9 +26,8 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} tried knocking room ID {} who has a server name that is globally \ @@ -40,11 +39,7 @@ pub(crate) async fn create_knock_event_v1_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried knocking room ID {} which has a server name that is globally \ forbidden. Rejecting.", diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a7205423..2de3b710 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1374,6 +1374,9 @@ pub struct Config { /// incoming AND outgoing federation with, and block client room joins / /// remote user invites. /// + /// Additionally, it will hide messages from these servers for all users + /// on this server. + /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and /// outbound federation handler. 
@@ -1954,7 +1957,7 @@ impl Config { let mut addrs = Vec::with_capacity( self.get_bind_hosts() .len() - .saturating_add(self.get_bind_ports().len()), + .saturating_mul(self.get_bind_ports().len()), ); for host in &self.get_bind_hosts() { for port in &self.get_bind_ports() { diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 97314ffb..1d1d1154 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -64,13 +64,7 @@ where return Err!(Config("allow_federation", "Federation is disabled.")); } - if self - .services - .server - .config - .forbidden_remote_server_names - .is_match(dest.host()) - { + if self.services.moderation.is_remote_server_forbidden(dest) { return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs index ce7765ee..15521875 100644 --- a/src/service/federation/mod.rs +++ b/src/service/federation/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use conduwuit::{Result, Server}; -use crate::{Dep, client, resolver, server_keys}; +use crate::{Dep, client, moderation, resolver, server_keys}; pub struct Service { services: Services, @@ -15,6 +15,7 @@ struct Services { client: Dep, resolver: Dep, server_keys: Dep, + moderation: Dep, } impl crate::Service for Service { @@ -25,6 +26,7 @@ impl crate::Service for Service { client: args.depend::("client"), resolver: args.depend::("resolver"), server_keys: args.depend::("server_keys"), + moderation: args.depend::("moderation"), }, })) } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 5c26efe8..d053ba54 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -22,7 +22,7 @@ use tokio::{ use self::data::{Data, Metadata}; pub use self::thumbnail::Dim; -use crate::{Dep, client, globals, sending}; +use crate::{Dep, client, globals, moderation, sending}; #[derive(Debug)] pub struct FileMeta { @@ -42,6 +42,7 @@ struct 
Services { client: Dep, globals: Dep, sending: Dep, + moderation: Dep, } /// generated MXC ID (`media-id`) length @@ -64,6 +65,7 @@ impl crate::Service for Service { client: args.depend::("client"), globals: args.depend::("globals"), sending: args.depend::("sending"), + moderation: args.depend::("moderation"), }, })) } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index cdcb429e..a1e874d8 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -423,16 +423,8 @@ pub async fn fetch_remote_content_legacy( fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> { if self .services - .server - .config - .prevent_media_downloads_from - .is_match(mxc.server_name.host()) - || self - .services - .server - .config - .forbidden_remote_server_names - .is_match(mxc.server_name.host()) + .moderation + .is_remote_server_media_downloads_forbidden(mxc.server_name) { // we'll lie to the client and say the blocked server's media was not found and // log. the client has no way of telling anyways so this is a security bonus. 
diff --git a/src/service/mod.rs b/src/service/mod.rs index 2be16f79..a3214408 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -16,6 +16,7 @@ pub mod federation; pub mod globals; pub mod key_backups; pub mod media; +pub mod moderation; pub mod presence; pub mod pusher; pub mod resolver; diff --git a/src/service/moderation.rs b/src/service/moderation.rs new file mode 100644 index 00000000..bd2616f6 --- /dev/null +++ b/src/service/moderation.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use conduwuit::{Result, Server, implement}; +use ruma::ServerName; + +pub struct Service { + services: Services, +} + +struct Services { + pub server: Arc, +} + +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + services: Services { server: args.server.clone() }, + })) + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { + // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) + // OR forbidden contains server + self.services + .server + .config + .forbidden_remote_server_names + .is_match(server_name.host()) +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_room_directory_forbidden(&self, server_name: &ServerName) -> bool { + // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) + // OR forbidden contains server + self.is_remote_server_forbidden(server_name) + || self + .services + .server + .config + .forbidden_remote_room_directory_server_names + .is_match(server_name.host()) +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_media_downloads_forbidden(&self, server_name: &ServerName) -> bool { + // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) + // OR forbidden contains server + self.is_remote_server_forbidden(server_name) + || self + .services + .server + .config + 
.prevent_media_downloads_from + .is_match(server_name.host()) +} diff --git a/src/service/services.rs b/src/service/services.rs index dc390054..5dcc120e 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -12,7 +12,7 @@ use tokio::sync::Mutex; use crate::{ account_data, admin, appservice, client, config, emergency, federation, globals, key_backups, manager::Manager, - media, presence, pusher, resolver, rooms, sending, server_keys, service, + media, moderation, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, sync, transaction_ids, uiaa, updates, users, }; @@ -39,6 +39,7 @@ pub struct Services { pub uiaa: Arc, pub updates: Arc, pub users: Arc, + pub moderation: Arc, manager: Mutex>>, pub(crate) service: Arc, @@ -106,6 +107,7 @@ impl Services { uiaa: build!(uiaa::Service), updates: build!(updates::Service), users: build!(users::Service), + moderation: build!(moderation::Service), manager: Mutex::new(None), service, From 9e62076baa2fb4a6bb46f8a763e38240c98be5ee Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Apr 2025 23:29:33 +0100 Subject: [PATCH 0881/1248] feat: Add `allowed_remote_server_names` This allows explicitly allowing servers. Can be combined with the opposite to create allowlist-only federation. See also #31 Closes #673 --- conduwuit-example.toml | 10 ++++++++++ src/core/config/mod.rs | 12 ++++++++++++ src/service/moderation.rs | 19 +++++++++++++++++-- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 5a4b7b3f..326127c3 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1206,10 +1206,20 @@ # # Basically "global" ACLs. # +# You can set this to ["*"] to block all servers by default, and then +# use `allowed_remote_server_names` to allow only specific servers. 
+# # example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_server_names = [] +# List of allowed server names via regex patterns that we will allow, +# regardless of if they match `forbidden_remote_server_names`. +# +# example: ["goodserver\.tld$", "goodphrase"] +# +#allowed_remote_server_names = [] + # List of forbidden server names via regex patterns that we will block all # outgoing federated room directory requests for. Useful for preventing # our users from wandering into bad servers or spaces. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 2de3b710..22e09956 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1383,12 +1383,24 @@ pub struct Config { /// /// Basically "global" ACLs. /// + /// You can set this to ["*"] to block all servers by default, and then + /// use `allowed_remote_server_names` to allow only specific servers. + /// /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] #[serde(default, with = "serde_regex")] pub forbidden_remote_server_names: RegexSet, + /// List of allowed server names via regex patterns that we will allow, + /// regardless of if they match `forbidden_remote_server_names`. + /// + /// example: ["goodserver\.tld$", "goodphrase"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub allowed_remote_server_names: RegexSet, + /// List of forbidden server names via regex patterns that we will block all /// outgoing federated room directory requests for. Useful for preventing /// our users from wandering into bad servers or spaces. 
diff --git a/src/service/moderation.rs b/src/service/moderation.rs index bd2616f6..d571de88 100644 --- a/src/service/moderation.rs +++ b/src/service/moderation.rs @@ -24,8 +24,23 @@ impl crate::Service for Service { #[implement(Service)] #[must_use] pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { - // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) - // OR forbidden contains server + // We must never block federating with ourselves + if server_name == self.services.server.config.server_name { + return false; + } + + // Check if server is explicitly allowed + if self + .services + .server + .config + .allowed_remote_server_names + .is_match(server_name.host()) + { + return false; + } + + // Check if server is explicitly forbidden self.services .server .config From 84445b84580720b5f296f525d7df655f4195d833 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 00:16:29 +0100 Subject: [PATCH 0882/1248] docs: Document backfill bypassing federation restrictions --- conduwuit-example.toml | 4 ++++ src/core/config/mod.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 326127c3..8f86fdd0 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1200,6 +1200,10 @@ # Additionally, it will hide messages from these servers for all users # on this server. # +# Note that your messages can still make it to forbidden servers through +# backfilling. Events we receive from forbidden servers via backfill will +# be stored in the database, but will not be sent to the client. +# # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and # outbound federation handler. 
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 22e09956..cde5c313 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1377,6 +1377,10 @@ pub struct Config { /// Additionally, it will hide messages from these servers for all users /// on this server. /// + /// Note that your messages can still make it to forbidden servers through + /// backfilling. Events we receive from forbidden servers via backfill will + /// be stored in the database, but will not be sent to the client. + /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and /// outbound federation handler. From fe7963d30648addbc4ecfd1df3798cf0f5c0c8fa Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 00:31:08 +0100 Subject: [PATCH 0883/1248] docs: Clarify --- src/core/config/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index cde5c313..800ffc8d 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1385,8 +1385,6 @@ pub struct Config { /// sender user's server name, inbound federation X-Matrix origin, and /// outbound federation handler. /// - /// Basically "global" ACLs. - /// /// You can set this to ["*"] to block all servers by default, and then /// use `allowed_remote_server_names` to allow only specific servers. /// @@ -1399,6 +1397,8 @@ pub struct Config { /// List of allowed server names via regex patterns that we will allow, /// regardless of if they match `forbidden_remote_server_names`. /// + /// This option has no effect if `forbidden_remote_server_names` is empty. 
+ /// /// example: ["goodserver\.tld$", "goodphrase"] /// /// default: [] From 6920814da9867a74fa1b87fb4776c2587fe3bd54 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 20 Apr 2025 02:31:58 +0100 Subject: [PATCH 0884/1248] Support fi.mau.room_id, and fully qualified room_id in /createRoom --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- src/api/client/room/create.rs | 25 +++++++++++-------------- 3 files changed, 23 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00aeca81..cf3ac6db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3652,7 +3652,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "assign", "js_int", @@ -3672,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "as_variant", "assign", @@ -3707,7 +3707,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "as_variant", "base64 0.22.1", @@ -3739,7 +3739,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3764,7 +3764,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "bytes", "headers", @@ -3786,7 +3786,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3795,7 +3795,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "js_int", "ruma-common", @@ -3805,7 +3805,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3820,7 +3820,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "js_int", "ruma-common", @@ -3832,7 +3832,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index e9ae0007..1ad11256 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4" +rev = "fa3c868e5a1c049dc9472310dc4955289a96bb35" features = [ "compat", "rand", diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 
4ce53f15..bba5939e 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -606,23 +606,20 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Date: Sun, 20 Apr 2025 02:46:16 +0100 Subject: [PATCH 0885/1248] Prevent creating custom room IDs belonging to other servers --- src/api/client/room/create.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index bba5939e..2bc6033c 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -621,6 +621,11 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Date: Sun, 20 Apr 2025 15:41:19 +0100 Subject: [PATCH 0886/1248] Fix invalid room ID check & prevent room IDs being prefixed with ! --- src/api/client/room/create.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 2bc6033c..f5f61784 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -107,7 +107,6 @@ pub(crate) async fn create_room_route( return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed"))); } - let _short_id = services .rooms .short @@ -615,17 +614,26 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Date: Wed, 9 Apr 2025 19:17:21 +0200 Subject: [PATCH 0887/1248] config: rocksdb_compaction help was inverted :-) You seem to have replaced `disable_rocksdb_compaction` with `rocksdb_compaction`, since the help is blackmailing me never to set it to `true`, except **true is the default**. I have tried to make it say what you possibly meant. 
--- conduwuit-example.toml | 8 ++++---- src/core/config/mod.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 8f86fdd0..273d5ea5 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -970,8 +970,8 @@ # #rocksdb_compaction_ioprio_idle = true -# Disables RocksDB compaction. You should never ever have to set this -# option to true. If you for some reason find yourself needing to use this +# Enables RocksDB compaction. You should never ever have to set this +# option to false. If you for some reason find yourself needing to use this # option as part of troubleshooting or a bug, please reach out to us in # the conduwuit Matrix room with information and details. # @@ -1208,8 +1208,6 @@ # sender user's server name, inbound federation X-Matrix origin, and # outbound federation handler. # -# Basically "global" ACLs. -# # You can set this to ["*"] to block all servers by default, and then # use `allowed_remote_server_names` to allow only specific servers. # @@ -1220,6 +1218,8 @@ # List of allowed server names via regex patterns that we will allow, # regardless of if they match `forbidden_remote_server_names`. # +# This option has no effect if `forbidden_remote_server_names` is empty. +# # example: ["goodserver\.tld$", "goodphrase"] # #allowed_remote_server_names = [] diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 800ffc8d..bdfcee41 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1133,8 +1133,8 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_compaction_ioprio_idle: bool, - /// Disables RocksDB compaction. You should never ever have to set this - /// option to true. If you for some reason find yourself needing to use this + /// Enables RocksDB compaction. You should never ever have to set this + /// option to false. 
If you for some reason find yourself needing to use this /// option as part of troubleshooting or a bug, please reach out to us in /// the conduwuit Matrix room with information and details. /// From 2d9bdc0979ecb1102ca2cc3f6b33d1090bd08025 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 19:30:02 +0100 Subject: [PATCH 0888/1248] refactor: The update checker has become the announcements checker Replaces June's endpoint with a continuwuity endpoint. Adds a JSON schema. Closes #89 Closes #760 --- .forgejo/workflows/documentation.yml | 4 + conduwuit-example.toml | 10 +- docs/static/_headers | 3 + docs/static/announcements.json | 9 ++ docs/static/announcements.schema.json | 31 +++++ src/admin/query/globals.rs | 9 +- src/core/config/mod.rs | 12 +- src/service/announcements/mod.rs | 169 ++++++++++++++++++++++++++ src/service/globals/mod.rs | 4 +- src/service/mod.rs | 2 +- src/service/services.rs | 9 +- src/service/updates/mod.rs | 142 ---------------------- 12 files changed, 238 insertions(+), 166 deletions(-) create mode 100644 docs/static/announcements.json create mode 100644 docs/static/announcements.schema.json create mode 100644 src/service/announcements/mod.rs delete mode 100644 src/service/updates/mod.rs diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index c08c1abb..c84c566b 100644 --- a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -36,9 +36,13 @@ jobs: - name: Prepare static files for deployment run: | mkdir -p ./public/.well-known/matrix + mkdir -p ./public/.well-known/continuwuity + mkdir -p ./public/schema # Copy the Matrix .well-known files cp ./docs/static/server ./public/.well-known/matrix/server cp ./docs/static/client ./public/.well-known/matrix/client + cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements + cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json # Copy the custom headers file cp 
./docs/static/_headers ./public/_headers echo "Copied .well-known files and _headers to ./public" diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 273d5ea5..b6bfd092 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -113,14 +113,10 @@ #new_user_displayname_suffix = "🏳️‍⚧️" # If enabled, conduwuit will send a simple GET request periodically to -# `https://pupbrain.dev/check-for-updates/stable` for any new -# announcements made. Despite the name, this is not an update check -# endpoint, it is simply an announcement check endpoint. +# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new +# announcements or major updates. This is not an update check endpoint. # -# This is disabled by default as this is rarely used except for security -# updates or major updates. -# -#allow_check_for_updates = false +#allow_announcements_check = true # Set this to any float value to multiply conduwuit's in-memory LRU caches # with such as "auth_chain_cache_capacity". diff --git a/docs/static/_headers b/docs/static/_headers index 5e960241..6e52de9f 100644 --- a/docs/static/_headers +++ b/docs/static/_headers @@ -1,3 +1,6 @@ /.well-known/matrix/* Access-Control-Allow-Origin: * Content-Type: application/json +/.well-known/continuwuity/* + Access-Control-Allow-Origin: * + Content-Type: application/json \ No newline at end of file diff --git a/docs/static/announcements.json b/docs/static/announcements.json new file mode 100644 index 00000000..9b97d091 --- /dev/null +++ b/docs/static/announcements.json @@ -0,0 +1,9 @@ +{ + "$schema": "https://continuwuity.org/schema/announcements.schema.json", + "announcements": [ + { + "id": 1, + "message": "Welcome to Continuwuity! Important announcements about the project will appear here." 
+ } + ] +} \ No newline at end of file diff --git a/docs/static/announcements.schema.json b/docs/static/announcements.schema.json new file mode 100644 index 00000000..95b1d153 --- /dev/null +++ b/docs/static/announcements.schema.json @@ -0,0 +1,31 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "$id": "https://continuwuity.org/schema/announcements.schema.json", + "type": "object", + "properties": { + "announcements": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "integer" + }, + "message": { + "type": "string" + }, + "date": { + "type": "string" + } + }, + "required": [ + "id", + "message" + ] + } + } + }, + "required": [ + "announcements" + ] + } \ No newline at end of file diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 3681acfd..c8c1f512 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -11,7 +11,7 @@ pub(crate) enum GlobalsCommand { CurrentCount, - LastCheckForUpdatesId, + LastCheckForAnnouncementsId, /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. 
@@ -39,9 +39,12 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, - | GlobalsCommand::LastCheckForUpdatesId => { + | GlobalsCommand::LastCheckForAnnouncementsId => { let timer = tokio::time::Instant::now(); - let results = services.updates.last_check_for_updates_id().await; + let results = services + .announcements + .last_check_for_announcements_id() + .await; let query_time = timer.elapsed(); write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index bdfcee41..033be40a 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -161,14 +161,10 @@ pub struct Config { pub new_user_displayname_suffix: String, /// If enabled, conduwuit will send a simple GET request periodically to - /// `https://pupbrain.dev/check-for-updates/stable` for any new - /// announcements made. Despite the name, this is not an update check - /// endpoint, it is simply an announcement check endpoint. - /// - /// This is disabled by default as this is rarely used except for security - /// updates or major updates. - #[serde(default, alias = "allow_announcements_check")] - pub allow_check_for_updates: bool, + /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new + /// announcements or major updates. This is not an update check endpoint. + #[serde(alias = "allow_check_for_updates", default = "true_fn")] + pub allow_announcements_check: bool, /// Set this to any float value to multiply conduwuit's in-memory LRU caches /// with such as "auth_chain_cache_capacity". diff --git a/src/service/announcements/mod.rs b/src/service/announcements/mod.rs new file mode 100644 index 00000000..4df8971b --- /dev/null +++ b/src/service/announcements/mod.rs @@ -0,0 +1,169 @@ +//! # Announcements service +//! +//! 
This service is responsible for checking for announcements and sending them +//! to the client. +//! +//! It is used to send announcements to the admin room and logs. +//! Announcements are stored in /docs/static/announcements right now. +//! The highest seen announcement id is stored in the database. When the +//! announcement check is run, all announcements with an ID higher than those +//! seen before are printed to the console and sent to the admin room. +//! +//! Old announcements should be deleted to avoid spamming the room on first +//! install. +//! +//! Announcements are displayed as markdown in the admin room, but plain text in +//! the console. + +use std::{sync::Arc, time::Duration}; + +use async_trait::async_trait; +use conduwuit::{Result, Server, debug, info, warn}; +use database::{Deserialized, Map}; +use ruma::events::room::message::RoomMessageEventContent; +use serde::Deserialize; +use tokio::{ + sync::Notify, + time::{MissedTickBehavior, interval}, +}; + +use crate::{Dep, admin, client, globals}; + +pub struct Service { + interval: Duration, + interrupt: Notify, + db: Arc, + services: Services, +} + +struct Services { + admin: Dep, + client: Dep, + globals: Dep, + server: Arc, +} + +#[derive(Debug, Deserialize)] +struct CheckForAnnouncementsResponse { + announcements: Vec, +} + +#[derive(Debug, Deserialize)] +struct CheckForAnnouncementsResponseEntry { + id: u64, + date: Option, + message: String, +} + +const CHECK_FOR_ANNOUNCEMENTS_URL: &str = + "https://continuwuity.org/.well-known/continuwuity/announcements"; +const CHECK_FOR_ANNOUNCEMENTS_INTERVAL: u64 = 7200; // 2 hours +const LAST_CHECK_FOR_ANNOUNCEMENTS_ID: &[u8; 25] = b"last_seen_announcement_id"; +// In conduwuit, this was under b"a" + +#[async_trait] +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + interval: Duration::from_secs(CHECK_FOR_ANNOUNCEMENTS_INTERVAL), + interrupt: Notify::new(), + db: args.db["global"].clone(), + services: 
Services { + globals: args.depend::("globals"), + admin: args.depend::("admin"), + client: args.depend::("client"), + server: args.server.clone(), + }, + })) + } + + #[tracing::instrument(skip_all, name = "announcements", level = "debug")] + async fn worker(self: Arc) -> Result<()> { + if !self.services.globals.allow_announcements_check() { + debug!("Disabling announcements check"); + return Ok(()); + } + + let mut i = interval(self.interval); + i.set_missed_tick_behavior(MissedTickBehavior::Delay); + i.reset_after(self.interval); + loop { + tokio::select! { + () = self.interrupt.notified() => break, + _ = i.tick() => (), + } + + if let Err(e) = self.check().await { + warn!(%e, "Failed to check for announcements"); + } + } + + Ok(()) + } + + fn interrupt(&self) { self.interrupt.notify_waiters(); } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +impl Service { + #[tracing::instrument(skip_all)] + async fn check(&self) -> Result<()> { + debug_assert!(self.services.server.running(), "server must not be shutting down"); + + let response = self + .services + .client + .default + .get(CHECK_FOR_ANNOUNCEMENTS_URL) + .send() + .await? 
+ .text() + .await?; + + let response = serde_json::from_str::(&response)?; + for announcement in &response.announcements { + if announcement.id > self.last_check_for_announcements_id().await { + self.handle(announcement).await; + self.update_check_for_announcements_id(announcement.id); + } + } + + Ok(()) + } + + #[tracing::instrument(skip_all)] + async fn handle(&self, announcement: &CheckForAnnouncementsResponseEntry) { + if let Some(date) = &announcement.date { + info!("[announcements] {date} {:#}", announcement.message); + } else { + info!("[announcements] {:#}", announcement.message); + } + + self.services + .admin + .send_message(RoomMessageEventContent::text_markdown(format!( + "### New announcement{}\n\n{}", + announcement + .date + .as_ref() + .map_or_else(String::new, |date| format!(" - `{date}`")), + announcement.message + ))) + .await + .ok(); + } + + #[inline] + pub fn update_check_for_announcements_id(&self, id: u64) { + self.db.raw_put(LAST_CHECK_FOR_ANNOUNCEMENTS_ID, id); + } + + pub async fn last_check_for_announcements_id(&self) -> u64 { + self.db + .get(LAST_CHECK_FOR_ANNOUNCEMENTS_ID) + .await + .deserialized() + .unwrap_or(0_u64) + } +} diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index a7a9be9d..a23a4c21 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -127,7 +127,9 @@ impl Service { &self.server.config.new_user_displayname_suffix } - pub fn allow_check_for_updates(&self) -> bool { self.server.config.allow_check_for_updates } + pub fn allow_announcements_check(&self) -> bool { + self.server.config.allow_announcements_check + } pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.server.config.trusted_servers } diff --git a/src/service/mod.rs b/src/service/mod.rs index a3214408..eb15e5ec 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -8,6 +8,7 @@ pub mod services; pub mod account_data; pub mod admin; +pub mod announcements; pub mod appservice; pub mod client; pub mod config; 
@@ -26,7 +27,6 @@ pub mod server_keys; pub mod sync; pub mod transaction_ids; pub mod uiaa; -pub mod updates; pub mod users; extern crate conduwuit_core as conduwuit; diff --git a/src/service/services.rs b/src/service/services.rs index 5dcc120e..daece245 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -10,11 +10,12 @@ use futures::{Stream, StreamExt, TryStreamExt}; use tokio::sync::Mutex; use crate::{ - account_data, admin, appservice, client, config, emergency, federation, globals, key_backups, + account_data, admin, announcements, appservice, client, config, emergency, federation, + globals, key_backups, manager::Manager, media, moderation, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, - sync, transaction_ids, uiaa, updates, users, + sync, transaction_ids, uiaa, users, }; pub struct Services { @@ -37,9 +38,9 @@ pub struct Services { pub sync: Arc, pub transaction_ids: Arc, pub uiaa: Arc, - pub updates: Arc, pub users: Arc, pub moderation: Arc, + pub announcements: Arc, manager: Mutex>>, pub(crate) service: Arc, @@ -105,9 +106,9 @@ impl Services { sync: build!(sync::Service), transaction_ids: build!(transaction_ids::Service), uiaa: build!(uiaa::Service), - updates: build!(updates::Service), users: build!(users::Service), moderation: build!(moderation::Service), + announcements: build!(announcements::Service), manager: Mutex::new(None), service, diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs deleted file mode 100644 index 28bee65a..00000000 --- a/src/service/updates/mod.rs +++ /dev/null @@ -1,142 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use async_trait::async_trait; -use conduwuit::{Result, Server, debug, info, warn}; -use database::{Deserialized, Map}; -use ruma::events::room::message::RoomMessageEventContent; -use serde::Deserialize; -use tokio::{ - sync::Notify, - time::{MissedTickBehavior, interval}, -}; - -use crate::{Dep, admin, client, globals}; - -pub 
struct Service { - interval: Duration, - interrupt: Notify, - db: Arc, - services: Services, -} - -struct Services { - admin: Dep, - client: Dep, - globals: Dep, - server: Arc, -} - -#[derive(Debug, Deserialize)] -struct CheckForUpdatesResponse { - updates: Vec, -} - -#[derive(Debug, Deserialize)] -struct CheckForUpdatesResponseEntry { - id: u64, - date: String, - message: String, -} - -const CHECK_FOR_UPDATES_URL: &str = "https://pupbrain.dev/check-for-updates/stable"; -const CHECK_FOR_UPDATES_INTERVAL: u64 = 7200; // 2 hours -const LAST_CHECK_FOR_UPDATES_COUNT: &[u8; 1] = b"u"; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - interval: Duration::from_secs(CHECK_FOR_UPDATES_INTERVAL), - interrupt: Notify::new(), - db: args.db["global"].clone(), - services: Services { - globals: args.depend::("globals"), - admin: args.depend::("admin"), - client: args.depend::("client"), - server: args.server.clone(), - }, - })) - } - - #[tracing::instrument(skip_all, name = "updates", level = "debug")] - async fn worker(self: Arc) -> Result<()> { - if !self.services.globals.allow_check_for_updates() { - debug!("Disabling update check"); - return Ok(()); - } - - let mut i = interval(self.interval); - i.set_missed_tick_behavior(MissedTickBehavior::Delay); - i.reset_after(self.interval); - loop { - tokio::select! { - () = self.interrupt.notified() => break, - _ = i.tick() => (), - } - - if let Err(e) = self.check().await { - warn!(%e, "Failed to check for updates"); - } - } - - Ok(()) - } - - fn interrupt(&self) { self.interrupt.notify_waiters(); } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -impl Service { - #[tracing::instrument(skip_all)] - async fn check(&self) -> Result<()> { - debug_assert!(self.services.server.running(), "server must not be shutting down"); - - let response = self - .services - .client - .default - .get(CHECK_FOR_UPDATES_URL) - .send() - .await? 
- .text() - .await?; - - let response = serde_json::from_str::(&response)?; - for update in &response.updates { - if update.id > self.last_check_for_updates_id().await { - self.handle(update).await; - self.update_check_for_updates_id(update.id); - } - } - - Ok(()) - } - - #[tracing::instrument(skip_all)] - async fn handle(&self, update: &CheckForUpdatesResponseEntry) { - info!("{} {:#}", update.date, update.message); - self.services - .admin - .send_message(RoomMessageEventContent::text_markdown(format!( - "### the following is a message from the conduwuit puppy\n\nit was sent on \ - `{}`:\n\n@room: {}", - update.date, update.message - ))) - .await - .ok(); - } - - #[inline] - pub fn update_check_for_updates_id(&self, id: u64) { - self.db.raw_put(LAST_CHECK_FOR_UPDATES_COUNT, id); - } - - pub async fn last_check_for_updates_id(&self) -> u64 { - self.db - .get(LAST_CHECK_FOR_UPDATES_COUNT) - .await - .deserialized() - .unwrap_or(0_u64) - } -} From b7b7d3a9e70007efb7128ee7d108ed15fe5362db Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 23:07:01 +0100 Subject: [PATCH 0889/1248] chore: Add the current prerelease to cargo.toml --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf3ac6db..616b1034 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -725,7 +725,7 @@ dependencies = [ [[package]] name = "conduwuit" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "clap", "conduwuit_admin", @@ -754,7 +754,7 @@ dependencies = [ [[package]] name = "conduwuit_admin" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "clap", "conduwuit_api", @@ -775,7 +775,7 @@ dependencies = [ [[package]] name = "conduwuit_api" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "async-trait", "axum", @@ -807,7 +807,7 @@ dependencies = [ [[package]] name = "conduwuit_core" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "argon2", "arrayvec", @@ -865,7 
+865,7 @@ dependencies = [ [[package]] name = "conduwuit_database" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "async-channel", "conduwuit_core", @@ -883,7 +883,7 @@ dependencies = [ [[package]] name = "conduwuit_macros" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "itertools 0.14.0", "proc-macro2", @@ -893,7 +893,7 @@ dependencies = [ [[package]] name = "conduwuit_router" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "axum", "axum-client-ip", @@ -926,7 +926,7 @@ dependencies = [ [[package]] name = "conduwuit_service" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "async-trait", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index 1ad11256..5feba474 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ license = "Apache-2.0" readme = "README.md" repository = "https://forgejo.ellis.link/continuwuation/continuwuity" rust-version = "1.86.0" -version = "0.5.0" +version = "0.5.0-rc.4" [workspace.metadata.crane] name = "conduwuit" From 22e7617362880ba9723fc239e5bd7b978599c866 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 23:07:20 +0100 Subject: [PATCH 0890/1248] chore: Release --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- src/router/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 616b1034..afaa5622 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -725,7 +725,7 @@ dependencies = [ [[package]] name = "conduwuit" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "clap", "conduwuit_admin", @@ -754,7 +754,7 @@ dependencies = [ [[package]] name = "conduwuit_admin" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "clap", "conduwuit_api", @@ -775,7 +775,7 @@ dependencies = [ [[package]] name = "conduwuit_api" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "async-trait", "axum", @@ -807,7 +807,7 @@ dependencies = [ [[package]] name = "conduwuit_core" -version = "0.5.0-rc.4" 
+version = "0.5.0-rc.5" dependencies = [ "argon2", "arrayvec", @@ -865,7 +865,7 @@ dependencies = [ [[package]] name = "conduwuit_database" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "async-channel", "conduwuit_core", @@ -883,7 +883,7 @@ dependencies = [ [[package]] name = "conduwuit_macros" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "itertools 0.14.0", "proc-macro2", @@ -893,7 +893,7 @@ dependencies = [ [[package]] name = "conduwuit_router" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "axum", "axum-client-ip", @@ -926,7 +926,7 @@ dependencies = [ [[package]] name = "conduwuit_service" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "async-trait", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index 5feba474..1517cfc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ license = "Apache-2.0" readme = "README.md" repository = "https://forgejo.ellis.link/continuwuation/continuwuity" rust-version = "1.86.0" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" [workspace.metadata.crane] name = "conduwuit" diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 31a44983..e4ddcb9b 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -114,11 +114,11 @@ ruma.workspace = true rustls.workspace = true rustls.optional = true sentry.optional = true +sentry.workspace = true sentry-tower.optional = true sentry-tower.workspace = true sentry-tracing.optional = true sentry-tracing.workspace = true -sentry.workspace = true serde_json.workspace = true tokio.workspace = true tower.workspace = true From ff93cfdc6454b990f35424d3d7c17fae14df2c4d Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 20 Apr 2025 23:50:48 +0100 Subject: [PATCH 0891/1248] Fix up the docs, replace a lot of conduwuit references --- docs/appservices.md | 10 ++-- docs/conduwuit_coc.md | 28 +++++----- docs/configuration.md | 10 ++-- docs/deploying.md | 2 +- docs/deploying/arch-linux.md | 16 +----- 
docs/deploying/docker-compose.for-traefik.yml | 2 +- docs/deploying/docker-compose.with-caddy.yml | 2 +- .../deploying/docker-compose.with-traefik.yml | 2 +- docs/deploying/docker-compose.yml | 2 +- docs/deploying/docker.md | 44 +++++---------- docs/deploying/freebsd.md | 6 +- docs/deploying/generic.md | 52 +++++++++--------- docs/deploying/nixos.md | 55 ++++--------------- docs/development.md | 20 +++---- docs/introduction.md | 4 +- docs/maintenance.md | 26 ++++----- docs/troubleshooting.md | 38 ++++++------- docs/turn.md | 8 +-- 18 files changed, 133 insertions(+), 194 deletions(-) diff --git a/docs/appservices.md b/docs/appservices.md index 28ea9717..57cd031c 100644 --- a/docs/appservices.md +++ b/docs/appservices.md @@ -3,8 +3,8 @@ ## Getting help If you run into any problems while setting up an Appservice: ask us in -[#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay) or -[open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new). +[#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or +[open an issue on Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). ## Set up the appservice - general instructions @@ -14,7 +14,7 @@ later starting it. At some point the appservice guide should ask you to add a registration yaml file to the homeserver. In Synapse you would do this by adding the path to the -homeserver.yaml, but in conduwuit you can do this from within Matrix: +homeserver.yaml, but in Continuwuity you can do this from within Matrix: First, go into the `#admins` room of your homeserver. The first person that registered on the homeserver automatically joins it. Then send a message into @@ -37,9 +37,9 @@ You can confirm it worked by sending a message like this: The server bot should answer with `Appservices (1): your-bridge` -Then you are done. conduwuit will send messages to the appservices and the +Then you are done. 
Continuwuity will send messages to the appservices and the appservice can send requests to the homeserver. You don't need to restart -conduwuit, but if it doesn't work, restarting while the appservice is running +Continuwuity, but if it doesn't work, restarting while the appservice is running could help. ## Appservice-specific instructions diff --git a/docs/conduwuit_coc.md b/docs/conduwuit_coc.md index 0fce2fe3..19262765 100644 --- a/docs/conduwuit_coc.md +++ b/docs/conduwuit_coc.md @@ -1,17 +1,17 @@ -# conduwuit Community Code of Conduct +# Continuwuity Community Code of Conduct -Welcome to the conduwuit community! We’re excited to have you here. conduwuit is +Welcome to the Continuwuity community! We’re excited to have you here. Continuwuity is a hard-fork of the Conduit homeserver, aimed at making Matrix more accessible and inclusive for everyone. This space is dedicated to fostering a positive, supportive, and inclusive -environment for everyone. This Code of Conduct applies to all conduwuit spaces, +environment for everyone. This Code of Conduct applies to all Continuwuity spaces, including any further community rooms that reference this CoC. Here are our -guidelines to help maintain the welcoming atmosphere that sets conduwuit apart. +guidelines to help maintain the welcoming atmosphere that sets Continuwuity apart. For the general foundational rules, please refer to the [Contributor's -Covenant](https://github.com/girlbossceo/conduwuit/blob/main/CODE_OF_CONDUCT.md). -Below are additional guidelines specific to the conduwuit community. +Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md). +Below are additional guidelines specific to the Continuwuity community. ## Our Values and Guidelines @@ -48,25 +48,25 @@ members. 
## Matrix Community -This Code of Conduct applies to the entire [conduwuit Matrix -Space](https://matrix.to/#/#conduwuit-space:puppygock.gay) and its rooms, +This Code of Conduct applies to the entire [Continuwuity Matrix +Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including: -### [#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay) +### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) -This room is for support and discussions about conduwuit. Ask questions, share +This room is for support and discussions about Continuwuity. Ask questions, share insights, and help each other out. -### [#conduwuit-offtopic:girlboss.ceo](https://matrix.to/#/#conduwuit-offtopic:girlboss.ceo) +### [#continuwuity-offtopic:continuwuity.org](https://matrix.to/#/#continuwuity-offtopic:continuwuity.org) For off-topic community conversations about any subject. While this room allows for a wide range of topics, the same CoC applies. Keep discussions respectful and inclusive, and avoid divisive subjects like country/world politics. General topics, such as world events, are welcome as long as they follow the CoC. -### [#conduwuit-dev:puppygock.gay](https://matrix.to/#/#conduwuit-dev:puppygock.gay) +### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org) -This room is dedicated to discussing active development of conduwuit. Posting +This room is dedicated to discussing active development of Continuwuity. Posting requires an elevated power level, which can be requested in one of the other rooms. Use this space to collaborate and innovate. @@ -90,4 +90,4 @@ comfortable doing that, then please send a DM to one of the moderators directly. Together, let’s build a community where everyone feels valued and respected. 
-— The conduwuit Moderation Team +— The Continuwuity Moderation Team diff --git a/docs/configuration.md b/docs/configuration.md index 0c670210..778e5c56 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,10 +1,10 @@ # Configuration -This chapter describes various ways to configure conduwuit. +This chapter describes various ways to configure Continuwuity. ## Basics -conduwuit uses a config file for the majority of the settings, but also supports +Continuwuity uses a config file for the majority of the settings, but also supports setting individual config options via commandline. Please refer to the [example config @@ -12,13 +12,13 @@ file](./configuration/examples.md#example-configuration) for all of those settings. The config file to use can be specified on the commandline when running -conduwuit by specifying the `-c`, `--config` flag. Alternatively, you can use +Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use the environment variable `CONDUWUIT_CONFIG` to specify the config file to used. Conduit's environment variables are supported for backwards compatibility. ## Option commandline flag -conduwuit supports setting individual config options in TOML format from the +Continuwuity supports setting individual config options in TOML format from the `-O` / `--option` flag. For example, you can set your server name via `-O server_name=\"example.com\"`. @@ -33,7 +33,7 @@ string. This does not apply to options that take booleans or numbers: ## Execute commandline flag -conduwuit supports running admin commands on startup using the commandline +Continuwuity supports running admin commands on startup using the commandline argument `--execute`. The most notable use for this is to create an admin user on first startup. 
diff --git a/docs/deploying.md b/docs/deploying.md index 86277aba..be1bf736 100644 --- a/docs/deploying.md +++ b/docs/deploying.md @@ -1,3 +1,3 @@ # Deploying -This chapter describes various ways to deploy conduwuit. +This chapter describes various ways to deploy Continuwuity. diff --git a/docs/deploying/arch-linux.md b/docs/deploying/arch-linux.md index 7436e5bf..a14201e3 100644 --- a/docs/deploying/arch-linux.md +++ b/docs/deploying/arch-linux.md @@ -1,15 +1,3 @@ -# conduwuit for Arch Linux +# Continuwuity for Arch Linux -Currently conduwuit is only on the Arch User Repository (AUR). - -The conduwuit AUR packages are community maintained and are not maintained by -conduwuit development team, but the AUR package maintainers are in the Matrix -room. Please attempt to verify your AUR package's PKGBUILD file looks fine -before asking for support. - -- [conduwuit](https://aur.archlinux.org/packages/conduwuit) - latest tagged -conduwuit -- [conduwuit-git](https://aur.archlinux.org/packages/conduwuit-git) - latest git -conduwuit from `main` branch -- [conduwuit-bin](https://aur.archlinux.org/packages/conduwuit-bin) - latest -tagged conduwuit static binary +Continuwuity does not have any Arch Linux packages at this time. diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 366f6999..694bd112 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -4,7 +4,7 @@ services: homeserver: ### If you already built the conduduwit image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. 
- image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 431cf2d4..8ff8076a 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -22,7 +22,7 @@ services: homeserver: ### If you already built the conduwuit image with 'docker build' or want to use a registry image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 89118c74..842bf945 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -4,7 +4,7 @@ services: homeserver: ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index ca33b5f5..ca56d0b0 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -4,7 +4,7 @@ services: homeserver: ### If you already built the conduwuit image with 'docker build' or want to use a registry image, ### then you are ready to go. 
- image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped ports: - 8448:6167 diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index bdbfb59c..bd6eff1d 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -1,31 +1,20 @@ -# conduwuit for Docker +# Continuwuity for Docker ## Docker -To run conduwuit with Docker you can either build the image yourself or pull it +To run Continuwuity with Docker you can either build the image yourself or pull it from a registry. ### Use a registry -OCI images for conduwuit are available in the registries listed below. +OCI images for Continuwuity are available in the registries listed below. -| Registry | Image | Size | Notes | -| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable latest tagged image. | -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable latest tagged image. | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Stable main branch. | -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl] | ![Image Size][shield-main] | Stable main branch. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Stable main branch. | +| Registry | Image | Notes | +| --------------- | --------------------------------------------------------------- | -----------------------| +| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest][fj] | Latest tagged image. | +| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main][fj] | Main branch image. 
| -[dh]: https://hub.docker.com/r/girlbossceo/conduwuit -[gh]: https://github.com/girlbossceo/conduwuit/pkgs/container/conduwuit -[gl]: https://gitlab.com/conduwuit/conduwuit/container_registry/6369729 -[shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest -[shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main - -OCI image `.tar.gz` files are also hosted directly at when uploaded by CI with a -commit hash/revision or a tagged release: +[fj]: https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity Use @@ -52,11 +41,11 @@ or you can use [docker compose](#docker-compose). The `-d` flag lets the container run in detached mode. You may supply an optional `conduwuit.toml` config file, the example config can be found [here](../configuration/examples.md). You can pass in different env vars to -change config values on the fly. You can even configure conduwuit completely by +change config values on the fly. You can even configure Continuwuity completely by using env vars. For an overview of possible values, please take a look at the [`docker-compose.yml`](docker-compose.yml) file. -If you just want to test conduwuit for a short time, you can use the `--rm` +If you just want to test Continuwuity for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. @@ -91,11 +80,11 @@ docker network create caddy After that, you can rename it so it matches `docker-compose.yml` and spin up the containers! -Additional info about deploying conduwuit can be found [here](generic.md). +Additional info about deploying Continuwuity can be found [here](generic.md). ### Build -Official conduwuit images are built using Nix's +Official Continuwuity images are built using Nix's [`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are repeatable and reproducible by anyone, keeps the images lightweight, and can be built offline. 
@@ -104,13 +93,11 @@ This also ensures portability of our images because `buildLayeredImage` builds OCI images, not Docker images, and works with other container software. The OCI images are OS-less with only a very minimal environment of the `tini` -init system, CA certificates, and the conduwuit binary. This does mean there is +init system, CA certificates, and the Continuwuity binary. This does mean there is not a shell, but in theory you can get a shell by adding the necessary layers to the layered image. However it's very unlikely you will need a shell for any real troubleshooting. -The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def]. - To build an OCI image using Nix, the following outputs can be built: - `nix build -L .#oci-image` (default features, x86_64 glibc) - `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl) @@ -138,10 +125,10 @@ web. With the two provided files, [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy -to deploy and use conduwuit, with a little caveat. If you already took a look at +to deploy and use Continuwuity, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and loadbalancer and is not able to -serve any kind of content, but for conduwuit to federate, we need to either +serve any kind of content, but for Continuwuity to federate, we need to either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`. @@ -153,4 +140,3 @@ those two files. See the [TURN](../turn.md) page. 
[nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage -[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix diff --git a/docs/deploying/freebsd.md b/docs/deploying/freebsd.md index 65b40204..3764ffa8 100644 --- a/docs/deploying/freebsd.md +++ b/docs/deploying/freebsd.md @@ -1,5 +1,5 @@ -# conduwuit for FreeBSD +# Continuwuity for FreeBSD -conduwuit at the moment does not provide FreeBSD builds or have FreeBSD packaging, however conduwuit does build and work on FreeBSD using the system-provided RocksDB. +Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB. -Contributions for getting conduwuit packaged are welcome. +Contributions for getting Continuwuity packaged are welcome. diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index a07da560..46b9b439 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -2,11 +2,11 @@ > ### Getting help > -> If you run into any problems while setting up conduwuit, ask us in -> `#conduwuit:puppygock.gay` or [open an issue on -> GitHub](https://github.com/girlbossceo/conduwuit/issues/new). +> If you run into any problems while setting up Continuwuity, ask us in +> `#continuwuity:continuwuity.org` or [open an issue on +> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). -## Installing conduwuit +## Installing Continuwuity ### Static prebuilt binary @@ -14,12 +14,10 @@ You may simply download the binary that fits your machine architecture (x86_64 or aarch64). Run `uname -m` to see what you need. 
Prebuilt fully static musl binaries can be downloaded from the latest tagged -release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or +release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or `main` CI branch workflow artifact output. These also include Debian/Ubuntu packages. -Binaries are also available on my website directly at: - These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit hash/revision, and `releases` are tagged releases. Sort by descending last modified for the latest. @@ -37,7 +35,7 @@ for performance. ### Compiling Alternatively, you may compile the binary yourself. We recommend using -Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most +Nix (or [Lix](https://lix.systems)) to build Continuwuity as this has the most guaranteed reproducibiltiy and easiest to get a build environment and output going. This also allows easy cross-compilation. @@ -51,35 +49,35 @@ If wanting to build using standard Rust toolchains, make sure you install: - `liburing-dev` on the compiling machine, and `liburing` on the target host - LLVM and libclang for RocksDB -You can build conduwuit using `cargo build --release --all-features` +You can build Continuwuity using `cargo build --release --all-features` -## Adding a conduwuit user +## Adding a Continuwuity user -While conduwuit can run as any user it is better to use dedicated users for +While Continuwuity can run as any user it is better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. 
-In Debian, you can use this command to create a conduwuit user: +In Debian, you can use this command to create a Continuwuity user: ```bash -sudo adduser --system conduwuit --group --disabled-login --no-create-home +sudo adduser --system continuwuity --group --disabled-login --no-create-home ``` For distros without `adduser` (or where it's a symlink to `useradd`): ```bash -sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit +sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity ``` ## Forwarding ports in the firewall or the router Matrix's default federation port is port 8448, and clients must be using port 443. If you would like to use only port 443, or a different port, you will need to setup -delegation. conduwuit has config options for doing delegation, or you can configure +delegation. Continuwuity has config options for doing delegation, or you can configure your reverse proxy to manually serve the necessary JSON files to do delegation (see the `[global.well_known]` config section). -If conduwuit runs behind a router or in a container and has a different public +If Continuwuity runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. @@ -94,9 +92,9 @@ on the network level, consider something like NextDNS or Pi-Hole. ## Setting up a systemd service -Two example systemd units for conduwuit can be found +Two example systemd units for Continuwuity can be found [on the configuration page](../configuration/examples.md#debian-systemd-unit-file). -You may need to change the `ExecStart=` path to where you placed the conduwuit +You may need to change the `ExecStart=` path to where you placed the Continuwuity binary if it is not `/usr/bin/conduwuit`. On systems where rsyslog is used alongside journald (i.e. 
Red Hat-based distros
@@ -114,9 +112,9 @@ and entering the following:
 ReadWritePaths=/path/to/custom/database/path
 ```
 
-## Creating the conduwuit configuration file
+## Creating the Continuwuity configuration file
 
-Now we need to create the conduwuit's config file in
+Now we need to create Continuwuity's config file in
 `/etc/conduwuit/conduwuit.toml`. The example config can be found at
 [conduwuit-example.toml](../configuration/examples.md).
 
@@ -127,7 +125,7 @@ RocksDB is the only supported database backend.
 
 ## Setting the correct file permissions
 
-If you are using a dedicated user for conduwuit, you will need to allow it to
+If you are using a dedicated user for Continuwuity, you will need to allow it to
 read the config. To do that you can run this:
 
 ```bash
@@ -139,7 +137,7 @@ If you use the default database path you also need to run this:
 
 ```bash
 sudo mkdir -p /var/lib/conduwuit/
-sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/
+sudo chown -R continuwuity:continuwuity /var/lib/conduwuit/
 sudo chmod 700 /var/lib/conduwuit/
 ```
 
@@ -174,13 +172,13 @@ As we would prefer our users to use Caddy, we will not provide configuration fil
 
 You will need to reverse proxy everything under following routes:
 - `/_matrix/` - core Matrix C-S and S-S APIs
-- `/_conduwuit/` - ad-hoc conduwuit routes such as `/local_user_count` and
+- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and
 `/server_version`
 
 You can optionally reverse proxy the following individual routes:
 - `/.well-known/matrix/client` and `/.well-known/matrix/server` if using
-conduwuit to perform delegation (see the `[global.well_known]` config section)
-- `/.well-known/matrix/support` if using conduwuit to send the homeserver admin
+Continuwuity to perform delegation (see the `[global.well_known]` config section)
+- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin
 contact and support page (formerly known as MSC1929)
 - `/` if you would like to see `hewwo
from conduwuit woof!` at the root @@ -200,7 +198,7 @@ header, making federation non-functional. If a workaround is found, feel free to If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). -If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: +If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, or like so: - `proxy_pass http://127.0.0.1:6167$request_uri;` - `proxy_pass http://127.0.0.1:6167;` @@ -209,7 +207,7 @@ Nginx users need to increase `client_max_body_size` (default is 1M) to match ## You're done -Now you can start conduwuit with: +Now you can start Continuwuity with: ```bash sudo systemctl start conduwuit diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index 3c5b0e69..cf2c09e4 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -1,66 +1,33 @@ -# conduwuit for NixOS +# Continuwuity for NixOS -conduwuit can be acquired by Nix (or [Lix][lix]) from various places: +Continuwuity can be acquired by Nix (or [Lix][lix]) from various places: * The `flake.nix` at the root of the repo * The `default.nix` at the root of the repo -* From conduwuit's binary cache - -A community maintained NixOS package is available at [`conduwuit`](https://search.nixos.org/packages?channel=unstable&show=conduwuit&from=0&size=50&sort=relevance&type=packages&query=conduwuit) - -### Binary cache - -A binary cache for conduwuit that the CI/CD publishes to is available at the -following places (both are the same just different names): - -``` -https://attic.kennel.juneis.dog/conduit -conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= - -https://attic.kennel.juneis.dog/conduwuit -conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= -``` - -The binary caches were recreated some months ago due to attic issues. 
The old public -keys were: - -``` -conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg= -conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw= -``` - -If needed, we have a binary cache on Cachix but it is only limited to 5GB: - -``` -https://conduwuit.cachix.org -conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= -``` - -If specifying a Git remote URL in your flake, you can use any remotes that -are specified on the README (the mirrors), such as the GitHub: `github:girlbossceo/conduwuit` +* From Continuwuity's binary cache ### NixOS module The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure -conduwuit. +Continuwuity. ### Conduit NixOS Config Module and SQLite Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend. -Conduwuit dropped SQLite support in favor of exclusively supporting the much faster RocksDB. +Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB. Make sure that you are using the RocksDB backend before migrating! There is a [tool to migrate a Conduit SQLite database to RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/). -If you want to run the latest code, you should get conduwuit from the `flake.nix` +If you want to run the latest code, you should get Continuwuity from the `flake.nix` or `default.nix` and set [`services.matrix-conduit.package`][package] -appropriately to use conduwuit instead of Conduit. +appropriately to use Continuwuity instead of Conduit. ### UNIX sockets -Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module +Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module a workaround like the one below is necessary to use UNIX sockets. 
This is because the UNIX socket option does not exist in Conduit, and the module forcibly sets the `address` and `port` config options. @@ -84,13 +51,13 @@ disallows the namespace from accessing or creating UNIX sockets and has to be en systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ]; ``` -Even though those workarounds are feasible a conduwuit NixOS configuration module, developed and +Even though those workarounds are feasible a Continuwuity NixOS configuration module, developed and published by the community, would be appreciated. ### jemalloc and hardened profile -conduwuit uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] -due to them using `scudo` by default. You must either disable/hide `scudo` from conduwuit, or +Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] +due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or disable jemalloc like so: ```nix diff --git a/docs/development.md b/docs/development.md index fa7519c0..1e344f41 100644 --- a/docs/development.md +++ b/docs/development.md @@ -4,9 +4,9 @@ Information about developing the project. If you are only interested in using it, you can safely ignore this page. If you plan on contributing, see the [contributor's guide](./contributing.md). -## conduwuit project layout +## Continuwuity project layout -conduwuit uses a collection of sub-crates, packages, or workspace members +Continuwuity uses a collection of sub-crates, packages, or workspace members that indicate what each general area of code is for. All of the workspace members are under `src/`. The workspace definition is at the top level / root `Cargo.toml`. @@ -14,11 +14,11 @@ members are under `src/`. 
The workspace definition is at the top level / root The crate names are generally self-explanatory: - `admin` is the admin room - `api` is the HTTP API, Matrix C-S and S-S endpoints, etc -- `core` is core conduwuit functionality like config loading, error definitions, +- `core` is core Continuwuity functionality like config loading, error definitions, global utilities, logging infrastructure, etc - `database` is RocksDB methods, helpers, RocksDB config, and general database definitions, utilities, or functions -- `macros` are conduwuit Rust [macros][macros] like general helper macros, logging +- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging and error handling macros, and [syn][syn] and [procedural macros][proc-macro] used for admin room commands and others - `main` is the "primary" sub-crate. This is where the `main()` function lives, @@ -35,7 +35,7 @@ if you truly find yourself needing to, we recommend reaching out to us in the Matrix room for discussions about it beforehand. The primary inspiration for this design was apart of hot reloadable development, -to support "conduwuit as a library" where specific parts can simply be swapped out. +to support "Continuwuity as a library" where specific parts can simply be swapped out. There is evidence Conduit wanted to go this route too as `axum` is technically an optional feature in Conduit, and can be compiled without the binary or axum library for handling inbound web requests; but it was never completed or worked. @@ -68,10 +68,10 @@ do this if Rust supported workspace-level features to begin with. ## List of forked dependencies -During conduwuit development, we have had to fork +During Continuwuity development, we have had to fork some dependencies to support our use-cases in some areas. 
This ranges from things said upstream project won't accept for any reason, faster-paced -development (unresponsive or slow upstream), conduwuit-specific usecases, or +development (unresponsive or slow upstream), Continuwuity-specific usecases, or lack of time to upstream some things. - [ruma/ruma][1]: - various performance @@ -84,7 +84,7 @@ builds seem to be broken on upstream, fixes some broken/suspicious code in places, additional safety measures, and support redzones for Valgrind - [zyansheep/rustyline-async][4]: - tab completion callback and -`CTRL+\` signal quit event for conduwuit console CLI +`CTRL+\` signal quit event for Continuwuity console CLI - [rust-rocksdb/rust-rocksdb][5]: - [`@zaidoon1`][8]'s fork has quicker updates, more up to date dependencies, etc. Our fork fixes musl build @@ -97,7 +97,7 @@ alongside other logging/metrics things ## Debugging with `tokio-console` [`tokio-console`][7] can be a useful tool for debugging and profiling. To make a -`tokio-console`-enabled build of conduwuit, enable the `tokio_console` feature, +`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature, disable the default `release_max_log_level` feature, and set the `--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might look like this: @@ -109,7 +109,7 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \ --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console ``` -You will also need to enable the `tokio_console` config option in conduwuit when +You will also need to enable the `tokio_console` config option in Continuwuity when starting it. This was due to tokio-console causing gradual memory leak/usage if left enabled. 
diff --git a/docs/introduction.md b/docs/introduction.md
index 9d3a294a..d193f7c7 100644
--- a/docs/introduction.md
+++ b/docs/introduction.md
@@ -1,4 +1,4 @@
-# conduwuit
+# Continuwuity
 
 {{#include ../README.md:catchphrase}}
 
@@ -8,7 +8,7 @@
 
 - [Deployment options](deploying.md)
 
-If you want to connect an appservice to conduwuit, take a look at the
+If you want to connect an appservice to Continuwuity, take a look at the
 [appservices documentation](appservices.md).
 
 #### How can I contribute?
diff --git a/docs/maintenance.md b/docs/maintenance.md
index 5c8c853a..b85a1971 100644
--- a/docs/maintenance.md
+++ b/docs/maintenance.md
@@ -1,14 +1,14 @@
-# Maintaining your conduwuit setup
+# Maintaining your Continuwuity setup
 
 ## Moderation
 
-conduwuit has moderation through admin room commands. "binary commands" (medium
+Continuwuity has moderation through admin room commands. "binary commands" (medium
 priority) and an admin API (low priority) is planned. Some moderation-related
 config options are available in the example config such as "global ACLs" and
 blocking media requests to certain servers. See the example config for the
 moderation config options under the "Moderation / Privacy / Security" section.
 
-conduwuit has moderation admin commands for:
+Continuwuity has moderation admin commands for:
 
 - managing room aliases (`!admin rooms alias`)
 - managing room directory (`!admin rooms directory`)
@@ -36,7 +36,7 @@ each object being newline delimited. An example of doing this is:
 
 ## Database (RocksDB)
 
 Generally there is very little you need to do. [Compaction][rocksdb-compaction]
-is ran automatically based on various defined thresholds tuned for conduwuit to
+is run automatically based on various defined thresholds tuned for Continuwuity to
 be high performance with the least I/O amplifcation or overhead. Manually
 running compaction is not recommended, or compaction via a timer, due to
 creating unnecessary I/O amplification.
RocksDB is built with io_uring support @@ -50,7 +50,7 @@ Some RocksDB settings can be adjusted such as the compression method chosen. See the RocksDB section in the [example config](configuration/examples.md). btrfs users have reported that database compression does not need to be disabled -on conduwuit as the filesystem already does not attempt to compress. This can be +on Continuwuity as the filesystem already does not attempt to compress. This can be validated by using `filefrag -v` on a `.SST` file in your database, and ensure the `physical_offset` matches (no filesystem compression). It is very important to ensure no additional filesystem compression takes place as this can render @@ -70,7 +70,7 @@ they're server logs or database logs, however they are critical RocksDB files related to WAL tracking. The only safe files that can be deleted are the `LOG` files (all caps). These -are the real RocksDB telemetry/log files, however conduwuit has already +are the real RocksDB telemetry/log files, however Continuwuity has already configured to only store up to 3 RocksDB `LOG` files due to generall being useless for average users unless troubleshooting something low-level. If you would like to store nearly none at all, see the `rocksdb_max_log_files` @@ -88,7 +88,7 @@ still be joined together. 
To restore a backup from an online RocksDB backup: -- shutdown conduwuit +- shutdown Continuwuity - create a new directory for merging together the data - in the online backup created, copy all `.sst` files in `$DATABASE_BACKUP_PATH/shared_checksum` to your new directory @@ -99,9 +99,9 @@ To restore a backup from an online RocksDB backup: if you have multiple) to your new directory - set your `database_path` config option to your new directory, or replace your old one with the new one you crafted -- start up conduwuit again and it should open as normal +- start up Continuwuity again and it should open as normal -If you'd like to do an offline backup, shutdown conduwuit and copy your +If you'd like to do an offline backup, shutdown Continuwuity and copy your `database_path` directory elsewhere. This can be restored with no modifications needed. @@ -110,7 +110,7 @@ directory. ## Media -Media still needs various work, however conduwuit implements media deletion via: +Media still needs various work, however Continuwuity implements media deletion via: - MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the event) @@ -118,17 +118,17 @@ event) - Delete remote media in the past `N` seconds/minutes via filesystem metadata on the file created time (`btime`) or file modified time (`mtime`) -See the `!admin media` command for further information. All media in conduwuit +See the `!admin media` command for further information. All media in Continuwuity is stored at `$DATABASE_DIR/media`. This will be configurable soon. If you are finding yourself needing extensive granular control over media, we recommend looking into [Matrix Media -Repo](https://github.com/t2bot/matrix-media-repo). conduwuit intends to +Repo](https://github.com/t2bot/matrix-media-repo). Continuwuity intends to implement various utilities for media, but MMR is dedicated to extensive media management. Built-in S3 support is also planned, but for now using a "S3 filesystem" on -`media/` works. 
conduwuit also sends a `Cache-Control` header of 1 year and +`media/` works. Continuwuity also sends a `Cache-Control` header of 1 year and immutable for all media requests (download and thumbnail) to reduce unnecessary media requests from browsers, reduce bandwidth usage, and reduce load. diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index d25c9762..37b1a5cd 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,9 +1,9 @@ -# Troubleshooting conduwuit +# Troubleshooting Continuwuity > ## Docker users ⚠️ > > Docker is extremely UX unfriendly. Because of this, a ton of issues or support -> is actually Docker support, not conduwuit support. We also cannot document the +> is actually Docker support, not Continuwuity support. We also cannot document the > ever-growing list of Docker issues here. > > If you intend on asking for support and you are using Docker, **PLEASE** @@ -13,14 +13,14 @@ > If there are things like Compose file issues or Dockerhub image issues, those > can still be mentioned as long as they're something we can fix. 
-## conduwuit and Matrix issues +## Continuwuity and Matrix issues #### Lost access to admin room You can reinvite yourself to the admin room through the following methods: -- Use the `--execute "users make_user_admin "` conduwuit binary +- Use the `--execute "users make_user_admin "` Continuwuity binary argument once to invite yourslf to the admin room on startup -- Use the conduwuit console/CLI to run the `users make_user_admin` command +- Use the Continuwuity console/CLI to run the `users make_user_admin` command - Or specify the `emergency_password` config option to allow you to temporarily log into the server account (`@conduit`) from a web client @@ -29,12 +29,12 @@ log into the server account (`@conduit`) from a web client #### Potential DNS issues when using Docker Docker has issues with its default DNS setup that may cause DNS to not be -properly functional when running conduwuit, resulting in federation issues. The +properly functional when running Continuwuity, resulting in federation issues. The symptoms of this have shown in excessively long room joins (30+ minutes) from very long DNS timeouts, log entries of "mismatching responding nameservers", and/or partial or non-functional inbound/outbound federation. -This is **not** a conduwuit issue, and is purely a Docker issue. It is not +This is **not** a Continuwuity issue, and is purely a Docker issue. It is not sustainable for heavy DNS activity which is normal for Matrix federation. The workarounds for this are: - Use DNS over TCP via the config option `query_over_tcp_only = true` @@ -64,7 +64,7 @@ very computationally expensive, and is extremely susceptible to denial of service, especially on Matrix. Many servers also strangely have broken DNSSEC setups and will result in non-functional federation. 
-conduwuit cannot provide a "works-for-everyone" Unbound DNS setup guide, but +Continuwuity cannot provide a "works-for-everyone" Unbound DNS setup guide, but the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch] may be of interest. Disabling DNSSEC on Unbound is commenting out trust-anchors config options and removing the `validator` module. @@ -75,9 +75,9 @@ high load, and we have identified its DNS caching to not be very effective. dnsmasq can possibly work, but it does **not** support TCP fallback which can be problematic when receiving large DNS responses such as from large SRV records. If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback` -in conduwuit config. +in Continuwuity config. -Raising `dns_cache_entries` in conduwuit config from the default can also assist +Raising `dns_cache_entries` in Continuwuity config from the default can also assist in DNS caching, but a full-fledged external caching resolver is better and more reliable. @@ -97,7 +97,7 @@ If your database is corrupted *and* is failing to start (e.g. checksum mismatch), it may be recoverable but careful steps must be taken, and there is no guarantee it may be recoverable. -The first thing that can be done is launching conduwuit with the +The first thing that can be done is launching Continuwuity with the `rocksdb_repair` config option set to true. This will tell RocksDB to attempt to repair itself at launch. If this does not work, disable the option and continue reading. @@ -109,7 +109,7 @@ RocksDB has the following recovery modes: - `PointInTime` - `SkipAnyCorruptedRecord` -By default, conduwuit uses `TolerateCorruptedTailRecords` as generally these may +By default, Continuwuity uses `TolerateCorruptedTailRecords` as generally these may be due to bad federation and we can re-fetch the correct data over federation. 
The RocksDB default is `PointInTime` which will attempt to restore a "snapshot" of the data when it was last known to be good. This data can be either a few @@ -126,12 +126,12 @@ if `PointInTime` does not work as a last ditch effort. With this in mind: -- First start conduwuit with the `PointInTime` recovery method. See the [example +- First start Continuwuity with the `PointInTime` recovery method. See the [example config](configuration/examples.md) for how to do this using `rocksdb_recovery_mode` - If your database successfully opens, clients are recommended to clear their client cache to account for the rollback -- Leave your conduwuit running in `PointInTime` for at least 30-60 minutes so as +- Leave your Continuwuity running in `PointInTime` for at least 30-60 minutes so as much possible corruption is restored - If all goes will, you should be able to restore back to using `TolerateCorruptedTailRecords` and you have successfully recovered your database @@ -144,14 +144,14 @@ Various debug commands can be found in `!admin debug`. #### Debug/Trace log level -conduwuit builds without debug or trace log levels at compile time by default +Continuwuity builds without debug or trace log levels at compile time by default for substantial performance gains in CPU usage and improved compile times. If you need to access debug/trace log levels, you will need to build without the `release_max_log_level` feature or use our provided static debug binaries. #### Changing log level dynamically -conduwuit supports changing the tracing log environment filter on-the-fly using +Continuwuity supports changing the tracing log environment filter on-the-fly using the admin command `!admin debug change-log-level `. This accepts a string **without quotes** the same format as the `log` config option. @@ -168,7 +168,7 @@ load, simply pass the `--reset` flag. #### Pinging servers -conduwuit can ping other servers using `!admin debug ping `. 
This takes +Continuwuity can ping other servers using `!admin debug ping `. This takes a server name and goes through the server discovery process and queries `/_matrix/federation/v1/version`. Errors are outputted. @@ -180,12 +180,12 @@ bandwidth and computationally. #### Allocator memory stats When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you -can see conduwuit's high-level allocator stats by using +can see Continuwuity's high-level allocator stats by using `!admin server memory-usage` at the bottom. If you are a developer, you can also view the raw jemalloc statistics with `!admin debug memory-stats`. Please note that this output is extremely large -which may only be visible in the conduwuit console CLI due to PDU size limits, +which may only be visible in the Continuwuity console CLI due to PDU size limits, and is not easy for non-developers to understand. [unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html diff --git a/docs/turn.md b/docs/turn.md index 287f2545..5dba823c 100644 --- a/docs/turn.md +++ b/docs/turn.md @@ -1,6 +1,6 @@ # Setting up TURN/STURN -In order to make or receive calls, a TURN server is required. conduwuit suggests +In order to make or receive calls, a TURN server is required. Continuwuity suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. @@ -17,9 +17,9 @@ realm= A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`. -These same values need to be set in conduwuit. See the [example +These same values need to be set in Continuwuity. See the [example config](configuration/examples.md) in the TURN section for configuring these and -restart conduwuit after. +restart Continuwuity after. 
`turn_secret` or a path to `turn_secret_file` must have a value of your coturn `static-auth-secret`, or use `turn_username` and `turn_password` @@ -34,7 +34,7 @@ If you are using TURN over TLS, you can replace `turn:` with `turns:` in the TURN over TLS. This is highly recommended. If you need unauthenticated access to the TURN URIs, or some clients may be -having trouble, you can enable `turn_guest_access` in conduwuit which disables +having trouble, you can enable `turn_guest_access` in Continuwuity which disables authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer` ### Run From fbd404fa84647c1e4794852028a1b1e5cc27c97d Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 00:19:08 +0100 Subject: [PATCH 0892/1248] docs: Update docker documentation --- docs/deploying/docker.md | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index bd6eff1d..08a0dc4f 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -84,26 +84,28 @@ Additional info about deploying Continuwuity can be found [here](generic.md). ### Build -Official Continuwuity images are built using Nix's -[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are -repeatable and reproducible by anyone, keeps the images lightweight, and can be -built offline. +Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently. -This also ensures portability of our images because `buildLayeredImage` builds -OCI images, not Docker images, and works with other container software. +The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd. 
-The OCI images are OS-less with only a very minimal environment of the `tini` -init system, CA certificates, and the Continuwuity binary. This does mean there is -not a shell, but in theory you can get a shell by adding the necessary layers -to the layered image. However it's very unlikely you will need a shell for any -real troubleshooting. +The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition. -To build an OCI image using Nix, the following outputs can be built: -- `nix build -L .#oci-image` (default features, x86_64 glibc) -- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl) -- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl) -- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl) -- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl) +To build an image locally using Docker Buildx, you can typically run a command like: + +```bash +# Build for the current platform and load into the local Docker daemon +docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile . + +# Example: Build for specific platforms and push to a registry. +# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push + +# Example: Build binary optimized for the current CPU +# docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=native -f docker/Dockerfile . +``` + +Refer to the Docker Buildx documentation for more advanced build options. 
+ +[dockerfile-path]: ../../docker/Dockerfile ### Run From c68378ffe34574d8e876e98ec6b961f08a0041a8 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 00:38:47 +0100 Subject: [PATCH 0893/1248] docs: Update 'Try it out' section --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index deaed364..9b8f142b 100644 --- a/README.md +++ b/README.md @@ -46,8 +46,9 @@ Continuwuity aims to: ### Can I try it out? -Not right now. We've still got work to do! +Check out the [documentation](introduction) for installation instructions. +There are currently no open registration Continuwuity instances available. ### What are we working on? @@ -111,3 +112,4 @@ When incorporating code from other forks: [continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity +F From c7ac2483a968560b922ae7d37adc564d828ed9d1 Mon Sep 17 00:00:00 2001 From: n Date: Tue, 22 Apr 2025 01:27:50 +0000 Subject: [PATCH 0894/1248] Fix offtopic room link Signed-off-by: n --- docs/conduwuit_coc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conduwuit_coc.md b/docs/conduwuit_coc.md index 19262765..9a084150 100644 --- a/docs/conduwuit_coc.md +++ b/docs/conduwuit_coc.md @@ -57,7 +57,7 @@ including: This room is for support and discussions about Continuwuity. Ask questions, share insights, and help each other out. -### [#continuwuity-offtopic:continuwuity.org](https://matrix.to/#/#continuwuity-offtopic:continuwuity.org) +### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org) For off-topic community conversations about any subject. While this room allows for a wide range of topics, the same CoC applies. 
Keep discussions respectful From 0c302f31371aedbd313228004f1971b9e0a5d64c Mon Sep 17 00:00:00 2001 From: nex Date: Tue, 22 Apr 2025 01:33:09 +0000 Subject: [PATCH 0895/1248] Don't re-build images for docs changes Ironically, this will trigger a rebuild anyway --- .forgejo/workflows/release-image.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index adf70594..2cb6a329 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -11,6 +11,7 @@ on: - 'renovate.json' - 'debian/**' - 'docker/**' + - 'docs/**' # Allows you to run this workflow manually from the Actions tab workflow_dispatch: From 7beff25d3d3d59cba3a8b634a92e9a562267b1fc Mon Sep 17 00:00:00 2001 From: Nyx Tutt Date: Mon, 21 Apr 2025 20:45:05 -0500 Subject: [PATCH 0896/1248] Update welcome message --- src/service/admin/grant.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 6780b7ae..2d90ea52 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -126,7 +126,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result { if self.services.server.config.admin_room_notices { let welcome_message = String::from( - "## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. 
There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`", + "## Thank you for trying out Continuwuity!\n\nContinuwuity is a hard fork of conduwuit, which is also a hard fork of Conduit, currently in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. Continuwuity is quite stable and very usable as a daily driver and for a low-medium sized homeserver. 
There is still a lot more work to be done
+> Please check that your issues are not due to problems with your Docker setup. ## Continuwuity and Matrix issues From 81f8151acabc3f0f824c7c04b47e31ed61422a8b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 16:15:31 +0100 Subject: [PATCH 0898/1248] docs: Add matrix rooms to README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9b8f142b..bf4f5613 100644 --- a/README.md +++ b/README.md @@ -106,10 +106,10 @@ When incorporating code from other forks: #### Contact - +Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [space](https://matrix.to/#/#space:continuwuity.org) to chat with us about the project! [continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity -F + From 1d42b88f501fb2c5115f2f4d569cd9b07171fcb6 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 16:26:38 +0100 Subject: [PATCH 0899/1248] docs: Update Docker DNS troubleshooting section --- docs/troubleshooting.md | 46 ++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 81e90636..d84dbc7a 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,6 +1,6 @@ # Troubleshooting Continuwuity -> ## Docker users ⚠️ +> **Docker users ⚠️** > > Docker can be difficult to use and debug. It's common for Docker > misconfigurations to cause issues, particularly with networking and permissions. 
@@ -8,9 +8,10 @@ ## Continuwuity and Matrix issues -#### Lost access to admin room +### Lost access to admin room You can reinvite yourself to the admin room through the following methods: + - Use the `--execute "users make_user_admin "` Continuwuity binary argument once to invite yourslf to the admin room on startup - Use the Continuwuity console/CLI to run the `users make_user_admin` command @@ -19,22 +20,29 @@ log into the server account (`@conduit`) from a web client ## General potential issues -#### Potential DNS issues when using Docker +### Potential DNS issues when using Docker -Docker has issues with its default DNS setup that may cause DNS to not be -properly functional when running Continuwuity, resulting in federation issues. The -symptoms of this have shown in excessively long room joins (30+ minutes) from -very long DNS timeouts, log entries of "mismatching responding nameservers", +Docker's DNS setup for containers in a non-default network intercepts queries to +enable resolving of container hostnames to IP addresses. However, due to +performance issues with Docker's built-in resolver, this can cause DNS queries +to take a long time to resolve, resulting in federation issues. + +This is particularly common with Docker Compose, as custom networks are easily +created and configured. + +Symptoms of this include excessively long room joins (30+ minutes) from very +long DNS timeouts, log entries of "mismatching responding nameservers", and/or partial or non-functional inbound/outbound federation. -This is **not** a Continuwuity issue, and is purely a Docker issue. It is not -sustainable for heavy DNS activity which is normal for Matrix federation. The -workarounds for this are: -- Use DNS over TCP via the config option `query_over_tcp_only = true` -- Don't use Docker's default DNS setup and instead allow the container to use -and communicate with your host's DNS servers (host's `/etc/resolv.conf`) +This is not a bug in continuwuity. 
Docker's default DNS resolver is not suitable +for heavy DNS activity, which is normal for federated protocols like Matrix. -#### DNS No connections available error message +Workarounds: + +- Use DNS over TCP via the config option `query_over_tcp_only = true` +- Bypass Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers. Typically, this can be done by mounting the host's `/etc/resolv.conf`. + +### DNS No connections available error message If you receive spurious amounts of error logs saying "DNS No connections available", this is due to your DNS server (servers from `/etc/resolv.conf`) @@ -84,7 +92,7 @@ reliability at a slight performance cost due to TCP overhead. ## RocksDB / database issues -#### Database corruption +### Database corruption If your database is corrupted *and* is failing to start (e.g. checksum mismatch), it may be recoverable but careful steps must be taken, and there is @@ -135,14 +143,14 @@ Note that users should not really be debugging things. If you find yourself debugging and find the issue, please let us know and/or how we can fix it. Various debug commands can be found in `!admin debug`. -#### Debug/Trace log level +### Debug/Trace log level Continuwuity builds without debug or trace log levels at compile time by default for substantial performance gains in CPU usage and improved compile times. If you need to access debug/trace log levels, you will need to build without the `release_max_log_level` feature or use our provided static debug binaries. -#### Changing log level dynamically +### Changing log level dynamically Continuwuity supports changing the tracing log environment filter on-the-fly using the admin command `!admin debug change-log-level `. This accepts @@ -159,7 +167,7 @@ load, simply pass the `--reset` flag. `!admin debug change-log-level --reset` -#### Pinging servers +### Pinging servers Continuwuity can ping other servers using `!admin debug ping `. 
This takes a server name and goes through the server discovery process and queries @@ -170,7 +178,7 @@ server performance on either side as that endpoint is completely unauthenticated and simply fetches a string on a static JSON endpoint. It is very low cost both bandwidth and computationally. -#### Allocator memory stats +### Allocator memory stats When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you can see Continuwuity's high-level allocator stats by using From 0307238bf890a3a0249cd7d91c0ae3728664fe4b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 22 Apr 2025 14:29:02 +0100 Subject: [PATCH 0900/1248] docs: Work around DNS issues in example compose files --- docs/deploying/docker-compose.for-traefik.yml | 1 + docs/deploying/docker-compose.with-caddy.yml | 1 + docs/deploying/docker-compose.with-traefik.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 694bd112..57b124c7 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -8,6 +8,7 @@ services: restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. #- ./conduwuit.toml:/etc/conduwuit.toml networks: - proxy diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 8ff8076a..ac4fb1ff 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -26,6 +26,7 @@ services: restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. 
#- ./conduwuit.toml:/etc/conduwuit.toml environment: CONDUWUIT_SERVER_NAME: example.com # EDIT THIS diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 842bf945..86ad9cb6 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -8,6 +8,7 @@ services: restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. #- ./conduwuit.toml:/etc/conduwuit.toml networks: - proxy From 4fbecca2d31f5cbc8992a6ab854910e6afddeea6 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 17:39:26 +0100 Subject: [PATCH 0901/1248] Add well-known/matrix/support --- docs/static/support | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 docs/static/support diff --git a/docs/static/support b/docs/static/support new file mode 100644 index 00000000..3fee0187 --- /dev/null +++ b/docs/static/support @@ -0,0 +1,24 @@ +{ + "contacts": [ + { + "email_address": "security@continuwuity.org", + "role": "m.role.security" + }, + { + "matrix_id": "@tom:continuwuity.org", + "email_address": "tom@continuwuity.org", + "role": "m.role.admin" + }, + { + "matrix_id": "@jade:continuwuity.org", + "email_address": "jade@continuwuity.org", + "role": "m.role.admin" + }, + { + "matrix_id": "@nex:continuwuity.org", + "email_address": "nex@continuwuity.org", + "role": "m.role.admin" + } + ], + "support_page": "https://continuwuity.org/introduction#contact" +} \ No newline at end of file From 8f21403796078d586746829eba1701b9810fd462 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 17:39:36 +0100 Subject: [PATCH 0902/1248] Use any runner for docs --- .forgejo/workflows/documentation.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index c84c566b..55f25058 100644 --- 
a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -16,7 +16,7 @@ concurrency: jobs: docs: name: Build and Deploy Documentation - runs-on: not-nexy + runs-on: ubuntu-latest steps: - name: Sync repository @@ -41,6 +41,7 @@ jobs: # Copy the Matrix .well-known files cp ./docs/static/server ./public/.well-known/matrix/server cp ./docs/static/client ./public/.well-known/matrix/client + cp ./docs/static/client ./public/.well-known/matrix/support cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json # Copy the custom headers file From 3eb4ee7af1b8980009bcbe0f6c0585a9d235ffb6 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 19:30:20 +0100 Subject: [PATCH 0903/1248] Change tom's email address --- docs/static/support | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/static/support b/docs/static/support index 3fee0187..6b7a9860 100644 --- a/docs/static/support +++ b/docs/static/support @@ -6,7 +6,7 @@ }, { "matrix_id": "@tom:continuwuity.org", - "email_address": "tom@continuwuity.org", + "email_address": "tom@tcpip.uk", "role": "m.role.admin" }, { From f791dc69185b853139ed7f6705ad6d095b398e33 Mon Sep 17 00:00:00 2001 From: Nyx Tutt Date: Tue, 22 Apr 2025 07:56:42 -0500 Subject: [PATCH 0904/1248] docs: Rename in more places --- docs/deploying/docker-compose.for-traefik.yml | 6 ++--- docs/deploying/docker-compose.override.yml | 6 ++--- docs/deploying/docker-compose.with-caddy.yml | 4 ++-- .../deploying/docker-compose.with-traefik.yml | 10 ++++----- docs/deploying/docker-compose.yml | 6 ++--- docs/deploying/kubernetes.md | 8 +++---- docs/development/hot_reload.md | 22 +++++++++---------- src/admin/server/mod.rs | 2 +- 8 files changed, 32 insertions(+), 32 deletions(-) diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 57b124c7..04142e0c 
100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -1,4 +1,4 @@ -# conduwuit - Behind Traefik Reverse Proxy +# Continuwuity - Behind Traefik Reverse Proxy services: homeserver: @@ -36,14 +36,14 @@ services: server=your.server.name.example:443 } #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it + ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it nofile: soft: 1048567 hard: 1048567 ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml index a343eeee..ec82fac3 100644 --- a/docs/deploying/docker-compose.override.yml +++ b/docs/deploying/docker-compose.override.yml @@ -1,4 +1,4 @@ -# conduwuit - Traefik Reverse Proxy Labels +# Continuwuity - Traefik Reverse Proxy Labels services: homeserver: @@ -6,7 +6,7 @@ services: - "traefik.enable=true" - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - "traefik.http.routers.to-conduwuit.rule=Host(`.`)" # Change to the address on which conduwuit is hosted + - "traefik.http.routers.to-conduwuit.rule=Host(`.`)" # Change to the address on which Continuwuity is hosted - "traefik.http.routers.to-conduwuit.tls=true" - "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt" - 
"traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker" @@ -16,7 +16,7 @@ services: - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" - # If you want to have your account on , but host conduwuit on a subdomain, + # If you want to have your account on , but host Continuwuity on a subdomain, # you can let it only handle the well known file on that domain instead #- "traefik.http.routers.to-matrix-wellknown.rule=Host(``) && PathPrefix(`/.well-known/matrix`)" #- "traefik.http.routers.to-matrix-wellknown.tls=true" diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index ac4fb1ff..9ee98428 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -1,6 +1,6 @@ services: caddy: - # This compose file uses caddy-docker-proxy as the reverse proxy for conduwuit! + # This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity! # For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy image: lucaslorentz/caddy-docker-proxy:ci-alpine ports: @@ -20,7 +20,7 @@ services: caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}} homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use a registry image, + ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, ### then you are ready to go. 
image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 86ad9cb6..9083b796 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -1,8 +1,8 @@ -# conduwuit - Behind Traefik Reverse Proxy +# Continuwuity - Behind Traefik Reverse Proxy services: homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image, + ### If you already built the Continuwuity image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped @@ -22,7 +22,7 @@ services: CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit #CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above - ### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example example config too + ### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example config too # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUWUIT_LOG: info # default is: "warn,state_res=warn" # CONDUWUIT_ALLOW_ENCRYPTION: 'true' @@ -44,14 +44,14 @@ services: server=your.server.name.example:443 } #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it + ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it nofile: soft:
1048567 hard: 1048567 ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index ca56d0b0..1a3ab811 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -1,8 +1,8 @@ -# conduwuit +# Continuwuity services: homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use a registry image, + ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, ### then you are ready to go. image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped @@ -28,7 +28,7 @@ services: # ### Uncomment if you want to use your own Element-Web App. 
### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md index d7721722..aceb2d52 100644 --- a/docs/deploying/kubernetes.md +++ b/docs/deploying/kubernetes.md @@ -1,8 +1,8 @@ -# conduwuit for Kubernetes +# Continuwuity for Kubernetes -conduwuit doesn't support horizontal scalability or distributed loading +Continuwuity doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run -conduwuit on Kubernetes: +Continuwuity on Kubernetes: Should changes need to be made, please reach out to the maintainer in our -Matrix room as this is not maintained/controlled by the conduwuit maintainers. +Matrix room as this is not maintained/controlled by the Continuwuity maintainers. diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md index 65fd4adf..ecfb6396 100644 --- a/docs/development/hot_reload.md +++ b/docs/development/hot_reload.md @@ -5,7 +5,7 @@ guaranteed to work at this time. ### Summary -When developing in debug-builds with the nightly toolchain, conduwuit is modular +When developing in debug-builds with the nightly toolchain, Continuwuity is modular using dynamic libraries and various parts of the application are hot-reloadable while the server is running: http api handlers, admin commands, services, database, etc. These are all split up into individual workspace crates as seen @@ -42,7 +42,7 @@ library, macOS, and likely other host architectures are not supported (if other architectures work, feel free to let us know and/or make a PR updating this). 
This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you happen to have linker issues it's recommended to try using `mold` or `gold` -linkers, and please let us know in the [conduwuit Matrix room][7] the linker +linkers, and please let us know in the [Continuwuity Matrix room][7] the linker error and what linker solved this issue so we can figure out a solution. Ideally there should be minimal friction to using this, and in the future a build script (`build.rs`) may be suitable to making this easier to use if the capabilities @@ -52,13 +52,13 @@ allow us. As of 19 May 2024, the instructions for using this are: -0. Have patience. Don't hesitate to join the [conduwuit Matrix room][7] to +0. Have patience. Don't hesitate to join the [Continuwuity Matrix room][7] to receive help using this. As indicated by the various rustflags used and some of the interesting issues linked at the bottom, this is definitely not something the Rust ecosystem or toolchain is used to doing. 1. Install the nightly toolchain using rustup. You may need to use `rustup - override set nightly` in your local conduwuit directory, or use `cargo + override set nightly` in your local Continuwuity directory, or use `cargo +nightly` for all actions. 2. Uncomment `cargo-features` at the top level / root Cargo.toml @@ -85,14 +85,14 @@ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.rustup/toolchains/nightly-x86_64-unknown Cargo should only rebuild what was changed / what's necessary, so it should not be rebuilding all the crates. -9. In your conduwuit server terminal, hit/send `CTRL+C` signal. This will tell - conduwuit to find which libraries need to be reloaded, and reloads them as +9. In your Continuwuity server terminal, hit/send `CTRL+C` signal. This will tell + Continuwuity to find which libraries need to be reloaded, and reloads them as necessary. 10. If there were no errors, it will tell you it successfully reloaded `#` modules, and your changes should now be visible. 
Repeat 7 - 9 as needed. -To shutdown conduwuit in this setup, hit/send `CTRL+\`. Normal builds still +To shutdown Continuwuity in this setup, hit/send `CTRL+\`. Normal builds still shutdown with `CTRL+C` as usual. Steps 1 - 5 are the initial first-time steps for using this. To remove the hot @@ -101,7 +101,7 @@ reload setup, revert/comment all the Cargo.toml changes. As mentioned in the requirements section, if you happen to have some linker issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the `rustflags` definitions in the top level Cargo.toml, and please let us know in -the [conduwuit Matrix room][7] the problem. mold can be installed typically +the [Continuwuity Matrix room][7] the problem. mold can be installed typically through your distro, and gold is provided by the binutils package. It's possible a helper script can be made to do all of this, or most preferably @@ -136,7 +136,7 @@ acyclic graph. The primary rule is simple and illustrated in the figure below: **no crate is allowed to call a function or use a variable from a crate below it.** -![conduwuit's dynamic library setup diagram - created by Jason +![Continuwuity's dynamic library setup diagram - created by Jason Volk](assets/libraries.png) When a symbol is referenced between crates they become bound: **crates cannot be @@ -147,7 +147,7 @@ by using an `RTLD_LOCAL` binding for just one link between the main executable and the first crate, freeing the executable from all modules as no global binding ever occurs between them. -![conduwuit's reload and load order diagram - created by Jason +![Continuwuity's reload and load order diagram - created by Jason Volk](assets/reload_order.png) Proper resource management is essential for reliable reloading to occur. This is @@ -196,5 +196,5 @@ The initial implementation PR is available [here][1]. 
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049 [5]: https://github.com/rust-lang/cargo/issues/12746 [6]: https://crates.io/crates/hot-lib-reloader/ -[7]: https://matrix.to/#/#conduwuit:puppygock.gay +[7]: https://matrix.to/#/#continuwuity:continuwuity.org [8]: https://crates.io/crates/libloading diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 60615365..6b99e5de 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -36,7 +36,7 @@ pub(super) enum ServerCommand { /// - Print database memory usage statistics MemoryUsage, - /// - Clears all of Conduwuit's caches + /// - Clears all of Continuwuity's caches ClearCaches, /// - Performs an online backup of the database (only available for RocksDB From 1d840950b3a1c721c6c98ac3bb5c843f7fee7b9e Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 23 Apr 2025 22:20:08 +0100 Subject: [PATCH 0905/1248] docs: Mention Helm chart is for conduwuit --- docs/deploying/kubernetes.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md index aceb2d52..0cbfbbc0 100644 --- a/docs/deploying/kubernetes.md +++ b/docs/deploying/kubernetes.md @@ -2,7 +2,8 @@ Continuwuity doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run -Continuwuity on Kubernetes: +conduwuit on Kubernetes: -Should changes need to be made, please reach out to the maintainer in our -Matrix room as this is not maintained/controlled by the Continuwuity maintainers. +This should be compatible with continuwuity, but you will need to change the image reference. + +Should changes need to be made, please reach out to the maintainer as this is not maintained/controlled by the Continuwuity maintainers. 
From ee11afb460f03b4f40f4b533f5e3a908d598cd87 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 01:51:52 +0100 Subject: [PATCH 0906/1248] Inject reason into federated leave request membership --- src/api/client/membership.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index b1b85b81..2847d668 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1855,7 +1855,10 @@ pub async fn leave_room( // Ask a remote server if we don't have this room and are not knocking on it if dont_have_room.and(not_knocked).await { - if let Err(e) = remote_leave_room(services, user_id, room_id).boxed().await { + if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone()) + .boxed() + .await + { warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); // Don't tell the client about this error } @@ -1940,6 +1943,7 @@ async fn remote_leave_room( services: &Services, user_id: &UserId, room_id: &RoomId, + reason: Option, ) -> Result<()> { let mut make_leave_response_and_server = Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); @@ -2056,6 +2060,12 @@ async fn remote_leave_room( .expect("Timestamp is valid js_int value"), ), ); + // Inject the reason key into the event content dict if it exists + if let Some(reason) = reason { + if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") { + content.insert("reason".to_owned(), CanonicalJsonValue::String(reason)); + } + } // room v3 and above removed the "event_id" field from remote PDU format match room_version_id { From bfd7ab5a22129d01b3e87c290414c0148ad635bf Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 17:21:59 +0100 Subject: [PATCH 0907/1248] Bump ruwuma to 652cc48 --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock 
b/Cargo.lock index afaa5622..216114af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3652,7 +3652,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "assign", "js_int", @@ -3672,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "as_variant", "assign", @@ -3707,7 +3707,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3739,7 +3739,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3764,7 +3764,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "bytes", "headers", @@ -3786,7 +3786,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3795,7 +3795,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "js_int", "ruma-common", @@ -3805,7 +3805,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3820,7 +3820,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "js_int", "ruma-common", @@ -3832,7 +3832,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 1517cfc1..c0f857c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "fa3c868e5a1c049dc9472310dc4955289a96bb35" +rev = "652cc4864203ab7ca60cf9c47b931c0385304cc7" features = [ "compat", "rand", From 4c8dfc4c2c53be1dfd2d8c79182240dd0f4c9cec Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Wed, 23 Apr 2025 22:47:45 +0100 Subject: [PATCH 0908/1248] Suggested community guidelines changes --- docs/SUMMARY.md | 2 +- docs/community.md | 139 ++++++++++++++++++++++++++++++++++++++++++ docs/conduwuit_coc.md | 93 ---------------------------- 3 files changed, 140 insertions(+), 94 deletions(-) create mode 100644 docs/community.md delete mode 100644 docs/conduwuit_coc.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 
ad0f8135..473c9e74 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -19,4 +19,4 @@ - [Contributing](contributing.md) - [Testing](development/testing.md) - [Hot Reloading ("Live" Development)](development/hot_reload.md) -- [conduwuit Community Code of Conduct](conduwuit_coc.md) +- [Community (and Guidelines)](community.md) diff --git a/docs/community.md b/docs/community.md new file mode 100644 index 00000000..a6852c0f --- /dev/null +++ b/docs/community.md @@ -0,0 +1,139 @@ +# Continuwuity Community Guidelines + +Welcome to the Continuwuity commuwunity! We're excited to have you here. Continuwuity is a +continuation of the conduwuit homeserver, which in turn is a hard-fork of the Conduit homeserver, +aimed at making Matrix more accessible and inclusive for everyone. + +This space is dedicated to fostering a positive, supportive, and welcoming environment for everyone. +These guidelines apply to all Continuwuity spaces, including our Matrix rooms and any other +community channels that reference them. We've written these guidelines to help us all create an +environment where everyone feels safe and respected. + +For code and contribution guidelines, please refer to the +[Contributor's Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md). +Below are additional guidelines specific to the Continuwuity community. + +## Our Values and Expected Behaviors + +We strive to create a community based on mutual respect, collaboration, and inclusivity. We expect +all members to: + +1. **Be Respectful and Inclusive**: Treat everyone with respect. We're committed to a community + where everyone feels safe, regardless of background, identity, or experience. Discrimination, + harassment, or hate speech won't be tolerated. Remember that each person experiences the world + differently; share your own perspective and be open to learning about others'. + +2. 
**Be Positive and Constructive**: Engage in discussions constructively and support each other. + If you feel angry or frustrated, take a break before participating. Approach disagreements with + the goal of understanding, not winning. Focus on the issue, not the person. + +3. **Communicate Clearly and Kindly**: Our community includes neurodivergent individuals and those + who may not appreciate sarcasm or subtlety. Communicate clearly and kindly. Avoid ambiguity and + ensure your messages can be easily understood by all. Avoid placing the burden of education on + marginalized groups; please make an effort to look into your questions before asking others for + detailed explanations. + +4. **Be Open to Improving Inclusivity**: Actively participate in making our community more inclusive. + Report behaviour that contradicts these guidelines (see Reporting and Enforcement below) and be + open to constructive feedback aimed at improving our community. Understand that discussing + negative experiences can be emotionally taxing; focus on the message, not the tone. + +5. **Commit to Our Values**: Building an inclusive community requires ongoing effort from everyone. + Recognise that addressing bias and discrimination is a continuous process that needs commitment + and action from all members. + +## Unacceptable Behaviors + +To ensure everyone feels safe and welcome, the following behaviors are considered unacceptable +within the Continuwuity community: + +* **Harassment and Discrimination**: Avoid offensive comments related to background, family status, + gender, gender identity or expression, marital status, sex, sexual orientation, native language, + age, ability, race and/or ethnicity, caste, national origin, socioeconomic status, religion, + geographic location, or any other dimension of diversity. Don't deliberately misgender someone or + question the legitimacy of their gender identity. 
+ +* **Violence and Threats**: Do not engage in any form of violence or threats, including inciting + violence towards anyone or encouraging self-harm. Posting or threatening to post someone else's + personally identifying information ("doxxing") is also forbidden. + +* **Personal Attacks**: Disagreements happen, but they should never turn into personal attacks. + Don't insult, demean, or belittle others. + +* **Unwelcome Attention or Contact**: Avoid unwelcome sexual attention, inappropriate physical + contact (or simulation thereof), sexualized comments, jokes, or imagery. + +* **Disruption**: Do not engage in sustained disruption of discussions, events, or other + community activities. + +* **Bad Faith Actions**: Do not intentionally make false reports or otherwise abuse the reporting + process. + +This is not an exhaustive list. Any behaviour that makes others feel unsafe or unwelcome may be +subject to enforcement action. + +## Matrix Community + +These Community Guidelines apply to the entire +[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including: + +### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) + +This room is for support and discussions about Continuwuity. Ask questions, share insights, and help +each other out while adhering to these guidelines. + +We ask that this room remain focused on the Continuwuity software specifically: the team are +typically happy to engage in conversations about related subjects in the off-topic room. + +### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org) + +For off-topic community conversations about any subject. While this room allows for a wide range of +topics, the same guidelines apply. Please keep discussions respectful and inclusive, and avoid +divisive or stressful subjects like specific country/world politics unless handled with exceptional +care and respect for diverse viewpoints. 
+ +General topics, such as world events, are welcome as long as they follow the guidelines. If a member +of the team asks for the conversation to end, please respect their decision. + +### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org) + +This room is dedicated to discussing active development of Continuwuity, including ongoing issues or +code development. Collaboration here must follow these guidelines, and please consider raising +[an issue](https://forgejo.ellis.link/continuwuation/continuwuity/issues) on the repository to help +track progress. + +## Reporting and Enforcement + +We take these Community Guidelines seriously to protect our community members. If you witness or +experience unacceptable behaviour, or have any other concerns, please report it. + +**How to Report:** + +* **Alert Moderators in the Room:** If you feel comfortable doing so, you can address the issue + publicly in the relevant room by mentioning the moderation bot, `@rock:continuwuity.org`, which + will immediately alert all available moderators. +* **Direct Message:** If you're not comfortable raising the issue publicly, please send a direct + message (DM) to one of the room moderators. + +Reports will be handled with discretion. We will investigate promptly and thoroughly. + +**Enforcement Actions:** + +Anyone asked to stop unacceptable behaviour is expected to comply immediately. Failure to do so, or +engaging in prohibited behaviour, may result in enforcement action. Moderators may take actions they +deem appropriate, including but not limited to: + +1. **Warning**: A direct message or public warning identifying the violation and requesting + corrective action. +2. **Temporary Mute**: Temporary restriction from participating in discussions for a specified + period. +3. **Kick or Ban**: Removal from a room (kick) or the entire community space (ban). Egregious or + repeated violations may result in an immediate ban. 
Bans are typically permanent and reviewed + only in exceptional circumstances. + +Retaliation against those who report concerns in good faith will not be tolerated and will be +subject to the same enforcement actions. + +Together, let's build and maintain a community where everyone feels valued, safe, and respected. + +— The Continuwuity Moderation Team diff --git a/docs/conduwuit_coc.md b/docs/conduwuit_coc.md deleted file mode 100644 index 9a084150..00000000 --- a/docs/conduwuit_coc.md +++ /dev/null @@ -1,93 +0,0 @@ -# Continuwuity Community Code of Conduct - -Welcome to the Continuwuity community! We’re excited to have you here. Continuwuity is -a hard-fork of the Conduit homeserver, aimed at making Matrix more accessible -and inclusive for everyone. - -This space is dedicated to fostering a positive, supportive, and inclusive -environment for everyone. This Code of Conduct applies to all Continuwuity spaces, -including any further community rooms that reference this CoC. Here are our -guidelines to help maintain the welcoming atmosphere that sets Continuwuity apart. - -For the general foundational rules, please refer to the [Contributor's -Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md). -Below are additional guidelines specific to the Continuwuity community. - -## Our Values and Guidelines - -1. **Respect and Inclusivity**: We are committed to maintaining a community - where everyone feels safe and respected. Discrimination, harassment, or hate -speech of any kind will not be tolerated. Recognise that each community member -experiences the world differently based on their past experiences, background, -and identity. Share your own experiences and be open to learning about others' -diverse perspectives. - -2. **Positivity and Constructiveness**: Engage in constructive discussions and - support each other. 
If you feel angry, negative, or aggressive, take a break -until you can participate in a positive and constructive manner. Process intense -feelings with a friend or in a private setting before engaging in community -conversations to help maintain a supportive and focused environment. - -3. **Clarity and Understanding**: Our community includes neurodivergent - individuals and those who may not appreciate sarcasm or subtlety. Communicate -clearly and kindly, avoiding sarcasm and ensuring your messages are easily -understood by all. Additionally, avoid putting the burden of education on -marginalized groups by doing your own research before asking for explanations. - -4. **Be Open to Inclusivity**: Actively engage in conversations about making our - community more inclusive. Report discriminatory behavior to the moderators -and be open to constructive feedback that aims to improve our community. -Understand that discussing discrimination and negative experiences can be -emotionally taxing, so focus on the message rather than critiquing the tone -used. - -5. **Commit to Inclusivity**: Building an inclusive community requires time, - energy, and resources. Recognise that addressing discrimination and bias is -an ongoing process that necessitates commitment and action from all community -members. - -## Matrix Community - -This Code of Conduct applies to the entire [Continuwuity Matrix -Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, -including: - -### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) - -This room is for support and discussions about Continuwuity. Ask questions, share -insights, and help each other out. - -### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org) - -For off-topic community conversations about any subject. While this room allows -for a wide range of topics, the same CoC applies. 
Keep discussions respectful -and inclusive, and avoid divisive subjects like country/world politics. General -topics, such as world events, are welcome as long as they follow the CoC. - -### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org) - -This room is dedicated to discussing active development of Continuwuity. Posting -requires an elevated power level, which can be requested in one of the other -rooms. Use this space to collaborate and innovate. - -## Enforcement - -We have a zero-tolerance policy for violations of this Code of Conduct. If -someone’s behavior makes you uncomfortable, please report it to the moderators. -Actions we may take include: - -1. **Warning**: A warning given directly in the room or via a private message - from the moderators, identifying the violation and requesting corrective -action. -2. **Temporary Mute**: Temporary restriction from participating in discussions - for a specified period to allow for reflection and cooling off. -3. **Kick or Ban**: Egregious behavior may result in an immediate kick or ban to - protect other community members. Bans are considered permanent and will only -be reversed in exceptional circumstances after proven good behavior. - -Please highlight issues directly in rooms when possible, but if you don't feel -comfortable doing that, then please send a DM to one of the moderators directly. - -Together, let’s build a community where everyone feels valued and respected. 
- -— The Continuwuity Moderation Team From f83238df783efd38f3da43d565710708b30ba52b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 23 Apr 2025 17:48:26 +0100 Subject: [PATCH 0909/1248] refactor: Use config service --- src/service/moderation.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/service/moderation.rs b/src/service/moderation.rs index d571de88..4a32404c 100644 --- a/src/service/moderation.rs +++ b/src/service/moderation.rs @@ -3,18 +3,24 @@ use std::sync::Arc; use conduwuit::{Result, Server, implement}; use ruma::ServerName; +use crate::{Dep, config}; + pub struct Service { services: Services, } struct Services { pub server: Arc, + pub config: Dep, } impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - services: Services { server: args.server.clone() }, + services: Services { + server: args.server.clone(), + config: args.depend::("config"), + }, })) } @@ -25,14 +31,13 @@ impl crate::Service for Service { #[must_use] pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { // We must never block federating with ourselves - if server_name == self.services.server.config.server_name { + if server_name == self.services.config.server_name { return false; } // Check if server is explicitly allowed if self .services - .server .config .allowed_remote_server_names .is_match(server_name.host()) @@ -42,7 +47,6 @@ pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { // Check if server is explicitly forbidden self.services - .server .config .forbidden_remote_server_names .is_match(server_name.host()) @@ -56,7 +60,6 @@ pub fn is_remote_server_room_directory_forbidden(&self, server_name: &ServerName self.is_remote_server_forbidden(server_name) || self .services - .server .config .forbidden_remote_room_directory_server_names .is_match(server_name.host()) @@ -70,7 +73,6 @@ pub fn is_remote_server_media_downloads_forbidden(&self, 
server_name: &ServerNam self.is_remote_server_forbidden(server_name) || self .services - .server .config .prevent_media_downloads_from .is_match(server_name.host()) From 45872ede7a4b5a2335295e5ba52dd1bd393d21ec Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 23 Apr 2025 17:48:33 +0100 Subject: [PATCH 0910/1248] chore: Fix formatting --- conduwuit-example.toml | 6 +++--- src/api/client/room/create.rs | 29 ++++++++++++++++++----------- src/core/config/mod.rs | 6 +++--- 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index b6bfd092..c87f21ef 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -967,9 +967,9 @@ #rocksdb_compaction_ioprio_idle = true # Enables RocksDB compaction. You should never ever have to set this -# option to false. If you for some reason find yourself needing to use this -# option as part of troubleshooting or a bug, please reach out to us in -# the conduwuit Matrix room with information and details. +# option to false. If you for some reason find yourself needing to use +# this option as part of troubleshooting or a bug, please reach out to us +# in the conduwuit Matrix room with information and details. 
# # Disabling compaction will lead to a significantly bloated and # explosively large database, gradually poor performance, unnecessarily diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index f5f61784..be3fd23b 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -614,24 +614,31 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Date: Wed, 23 Apr 2025 23:54:16 +0100 Subject: [PATCH 0911/1248] docs: Fix configuration examples and defaults --- conduwuit-example.toml | 6 +++--- src/core/config/mod.rs | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index c87f21ef..b0b59344 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -116,7 +116,7 @@ # `https://continuwuity.org/.well-known/continuwuity/announcements` for any new # announcements or major updates. This is not an update check endpoint. # -#allow_announcements_check = +#allow_announcements_check = true # Set this to any float value to multiply conduwuit's in-memory LRU caches # with such as "auth_chain_cache_capacity". @@ -1207,7 +1207,7 @@ # You can set this to ["*"] to block all servers by default, and then # use `allowed_remote_server_names` to allow only specific servers. # -# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] +# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_server_names = [] @@ -1216,7 +1216,7 @@ # # This option has no effect if `forbidden_remote_server_names` is empty. 
# -# example: ["goodserver\.tld$", "goodphrase"] +# example: ["goodserver\\.tld$", "goodphrase"] # #allowed_remote_server_names = [] diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 6252f177..e3b2a531 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -163,6 +163,8 @@ pub struct Config { /// If enabled, conduwuit will send a simple GET request periodically to /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new /// announcements or major updates. This is not an update check endpoint. + /// + /// default: true #[serde(alias = "allow_check_for_updates", default = "true_fn")] pub allow_announcements_check: bool, @@ -1384,7 +1386,7 @@ pub struct Config { /// You can set this to ["*"] to block all servers by default, and then /// use `allowed_remote_server_names` to allow only specific servers. /// - /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] #[serde(default, with = "serde_regex")] @@ -1395,7 +1397,7 @@ pub struct Config { /// /// This option has no effect if `forbidden_remote_server_names` is empty. 
/// - /// example: ["goodserver\.tld$", "goodphrase"] + /// example: ["goodserver\\.tld$", "goodphrase"] /// /// default: [] #[serde(default, with = "serde_regex")] From cdf105a24eb7398d6e09d0840ff2c11c43c05ce9 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Fri, 25 Apr 2025 02:18:00 +0100 Subject: [PATCH 0912/1248] Don't serialize the x-key before storing it Co-authored-by: dasha --- src/service/users/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 1eb289fc..701561a8 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -577,7 +577,7 @@ impl Service { self.db .userid_usersigningkeyid - .put(user_id, user_signing_key_key); + .raw_put(user_id, user_signing_key_key); } if notify { From c203c1fead9a040de28b952eafdfdeab9cec77bc Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 24 Apr 2025 22:49:47 +0100 Subject: [PATCH 0913/1248] chore: Enable blurhashing by default --- src/main/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index e2fed5d5..0c5e2b6f 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -36,6 +36,7 @@ assets = [ [features] default = [ + "blurhashing", "brotli_compression", "element_hacks", "gzip_compression", From dcbacb5b78ac679ddb17465c9e60042cff4ab3e7 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 24 Apr 2025 00:40:36 +0100 Subject: [PATCH 0914/1248] feat: Allow controlling client message filtering --- conduwuit-example.toml | 44 ++++++++++++++++++++++++--------- src/api/client/message.rs | 6 +++-- src/core/config/mod.rs | 52 ++++++++++++++++++++++++++++----------- src/service/moderation.rs | 20 ++++++++++++--- 4 files changed, 90 insertions(+), 32 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index b0b59344..3d92ab15 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1182,23 +1182,13 @@ # #prune_missing_media = false -# Vector list of 
regex patterns of server names that conduwuit will refuse -# to download remote media from. -# -# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] -# -#prevent_media_downloads_from = [] - # List of forbidden server names via regex patterns that we will block # incoming AND outgoing federation with, and block client room joins / # remote user invites. # -# Additionally, it will hide messages from these servers for all users -# on this server. -# # Note that your messages can still make it to forbidden servers through -# backfilling. Events we receive from forbidden servers via backfill will -# be stored in the database, but will not be sent to the client. +# backfilling. Events we receive from forbidden servers via backfill +# from servers we *do* federate with will be stored in the database. # # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and @@ -1220,6 +1210,13 @@ # #allowed_remote_server_names = [] +# Vector list of regex patterns of server names that conduwuit will refuse +# to download remote media from. +# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] +# +#prevent_media_downloads_from = [] + # List of forbidden server names via regex patterns that we will block all # outgoing federated room directory requests for. Useful for preventing # our users from wandering into bad servers or spaces. @@ -1228,6 +1225,29 @@ # #forbidden_remote_room_directory_server_names = [] +# Vector list of regex patterns of server names that conduwuit will not +# send messages to the client from. +# +# Note that there is no way for clients to receive messages once a server +# has become unignored without doing a full sync. This is a protocol +# limitation with the current sync protocols. This means this is somewhat +# of a nuclear option. 
+# +# example: ["reallybadserver\.tld$", "reallybadphrase", +# "69dollarfortnitecards"] +# +#ignore_messages_from_server_names = [] + +# Send messages from users that the user has ignored to the client. +# +# There is no way for clients to receive messages sent while a user was +# ignored without doing a full sync. This is a protocol limitation with +# the current sync protocols. Disabling this option will move +# responsibility of ignoring messages to the client, which can avoid this +# limitation. +# +#send_messages_from_ignored_users_to_client = false + # Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you # do not want conduwuit to send outbound requests to. Defaults to # RFC1918, unroutable, loopback, multicast, and testnet addresses for diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 08887e18..bedfdc7a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -275,10 +275,12 @@ pub(crate) async fn is_ignored_pdu( let ignored_server = services .moderation - .is_remote_server_forbidden(pdu.sender().server_name()); + .is_remote_server_ignored(pdu.sender().server_name()); if ignored_type - && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) + && (ignored_server + || (!services.config.send_messages_from_ignored_users_to_client + && services.users.user_is_ignored(&pdu.sender, user_id).await)) { return true; } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e3b2a531..5374c2c2 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -163,7 +163,7 @@ pub struct Config { /// If enabled, conduwuit will send a simple GET request periodically to /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new /// announcements or major updates. This is not an update check endpoint. 
- /// + /// /// default: true #[serde(alias = "allow_check_for_updates", default = "true_fn")] pub allow_announcements_check: bool, @@ -1359,25 +1359,13 @@ pub struct Config { #[serde(default)] pub prune_missing_media: bool, - /// Vector list of regex patterns of server names that conduwuit will refuse - /// to download remote media from. - /// - /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub prevent_media_downloads_from: RegexSet, - /// List of forbidden server names via regex patterns that we will block /// incoming AND outgoing federation with, and block client room joins / /// remote user invites. /// - /// Additionally, it will hide messages from these servers for all users - /// on this server. - /// /// Note that your messages can still make it to forbidden servers through - /// backfilling. Events we receive from forbidden servers via backfill will - /// be stored in the database, but will not be sent to the client. + /// backfilling. Events we receive from forbidden servers via backfill + /// from servers we *do* federate with will be stored in the database. /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and @@ -1403,6 +1391,15 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub allowed_remote_server_names: RegexSet, + /// Vector list of regex patterns of server names that conduwuit will refuse + /// to download remote media from. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub prevent_media_downloads_from: RegexSet, + /// List of forbidden server names via regex patterns that we will block all /// outgoing federated room directory requests for. Useful for preventing /// our users from wandering into bad servers or spaces. 
@@ -1413,6 +1410,31 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub forbidden_remote_room_directory_server_names: RegexSet, + /// Vector list of regex patterns of server names that conduwuit will not + /// send messages to the client from. + /// + /// Note that there is no way for clients to receive messages once a server + /// has become unignored without doing a full sync. This is a protocol + /// limitation with the current sync protocols. This means this is somewhat + /// of a nuclear option. + /// + /// example: ["reallybadserver\.tld$", "reallybadphrase", + /// "69dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub ignore_messages_from_server_names: RegexSet, + + /// Send messages from users that the user has ignored to the client. + /// + /// There is no way for clients to receive messages sent while a user was + /// ignored without doing a full sync. This is a protocol limitation with + /// the current sync protocols. Disabling this option will move + /// responsibility of ignoring messages to the client, which can avoid this + /// limitation. + #[serde(default)] + pub send_messages_from_ignored_users_to_client: bool, + /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you /// do not want conduwuit to send outbound requests to. 
Defaults to /// RFC1918, unroutable, loopback, multicast, and testnet addresses for diff --git a/src/service/moderation.rs b/src/service/moderation.rs index 4a32404c..c3e55a1d 100644 --- a/src/service/moderation.rs +++ b/src/service/moderation.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{Result, Server, implement}; +use conduwuit::{Result, implement}; use ruma::ServerName; use crate::{Dep, config}; @@ -10,7 +10,7 @@ pub struct Service { } struct Services { - pub server: Arc, + // pub server: Arc, pub config: Dep, } @@ -18,7 +18,7 @@ impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { services: Services { - server: args.server.clone(), + // server: args.server.clone(), config: args.depend::("config"), }, })) @@ -27,6 +27,20 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +#[implement(Service)] +#[must_use] +pub fn is_remote_server_ignored(&self, server_name: &ServerName) -> bool { + // We must never block federating with ourselves + if server_name == self.services.config.server_name { + return false; + } + + self.services + .config + .ignore_messages_from_server_names + .is_match(server_name.host()) +} + #[implement(Service)] #[must_use] pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { From 77c4f9ff2f617b92f8afd3ac837fe4adfd7147ce Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 25 Apr 2025 23:01:05 +0100 Subject: [PATCH 0915/1248] fix: Do not panic on invalid membership event content --- src/core/matrix/state_res/event_auth.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 8c9339ec..c69db50e 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -38,7 +38,7 @@ struct GetMembership { membership: MembershipState, } -#[derive(Deserialize)] 
+#[derive(Deserialize, Debug)] struct RoomMemberContentFields { membership: Option>, join_authorised_via_users_server: Option>, @@ -149,9 +149,9 @@ where Incoming: Event + Send + Sync, { debug!( - "auth_check beginning for {} ({})", - incoming_event.event_id(), - incoming_event.event_type() + event_id = format!("{}", incoming_event.event_id()), + event_type = format!("{}", incoming_event.event_type()), + "auth_check beginning" ); // [synapse] check that all the events are in the same room as `incoming_event` @@ -383,10 +383,15 @@ where let sender_membership_event_content: RoomMemberContentFields = from_json_str(sender_member_event.content().get())?; - let membership_state = sender_membership_event_content - .membership - .expect("we should test before that this field exists") - .deserialize()?; + let Some(membership_state) = sender_membership_event_content.membership else { + warn!( + sender_membership_event_content = format!("{sender_membership_event_content:?}"), + event_id = format!("{}", incoming_event.event_id()), + "Sender membership event content missing membership field" + ); + return Err(Error::InvalidPdu("Missing membership field".to_owned())); + }; + let membership_state = membership_state.deserialize()?; if !matches!(membership_state, MembershipState::Join) { warn!("sender's membership is not join"); From 1a5ab33852b1ef301d6a3ce4c3154d430ef24a03 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 25 Apr 2025 23:51:23 +0100 Subject: [PATCH 0916/1248] chore: Error on missing ID in messages --- src/api/client/message.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index bedfdc7a..16b1796a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,3 +1,5 @@ +use core::panic; + use axum::extract::State; use conduwuit::{ Err, Result, at, @@ -132,8 +134,6 @@ pub(crate) async fn get_message_events_route( .take(limit) .collect() .await; - // let appservice_id = 
body.appservice_info.map(|appservice| - // appservice.registration.id); let lazy_loading_context = lazy_loading::Context { user_id: sender_user, @@ -143,7 +143,7 @@ pub(crate) async fn get_message_events_route( if let Some(registration) = body.appservice_info.as_ref() { <&DeviceId>::from(registration.registration.id.as_str()) } else { - <&DeviceId>::from("") + panic!("No device_id provided and no appservice registration found, this should be unreachable"); }, }, room_id, From c698d65a92fb6d3378e9d493b5356ecf067ae286 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 24 Apr 2025 17:15:03 +0100 Subject: [PATCH 0917/1248] Make Cloudflare Pages optional in CI --- .forgejo/workflows/documentation.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index 55f25058..1bda64f8 100644 --- a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -57,17 +57,17 @@ jobs: run: npm install --save-dev wrangler@latest - name: Deploy to Cloudflare Pages (Production) - if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + if: (github.event_name == 'push' && github.ref == 'refs/heads/main') && vars.CLOUDFLARE_PROJECT_NAME != '' uses: https://github.com/cloudflare/wrangler-action@v3 with: accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch=main --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}" + command: pages deploy ./public --branch=main --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }} - name: Deploy to Cloudflare Pages (Preview) - if: ${{ github.event_name != 'push' || github.ref != 'refs/heads/main' }} + if: (github.event_name != 'push' || github.ref != 'refs/heads/main') && vars.CLOUDFLARE_PROJECT_NAME != '' uses: https://github.com/cloudflare/wrangler-action@v3 with: accountId: ${{ 
secrets.CLOUDFLARE_ACCOUNT_ID }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}" + command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }} From 73c991edd03d29cbac2cd4d0be07254b2c87df82 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 24 Apr 2025 17:24:28 +0100 Subject: [PATCH 0918/1248] Ignore all markdown for auto image builds --- .forgejo/workflows/release-image.yml | 68 +++++++++++++--------------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 2cb6a329..141bfef9 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -1,24 +1,25 @@ name: Release Docker Image -concurrency: +concurrency: group: "release-image-${{ github.ref }}" on: pull_request: push: paths-ignore: - - '.gitlab-ci.yml' - - '.gitignore' - - 'renovate.json' - - 'debian/**' - - 'docker/**' - - 'docs/**' + - "*.md" + - "**/*.md" + - ".gitlab-ci.yml" + - ".gitignore" + - "renovate.json" + - "debian/**" + - "docker/**" + - "docs/**" # Allows you to run this workflow manually from the Actions tab workflow_dispatch: env: - BUILTIN_REGISTRY: forgejo.ellis.link - BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}" - + BUILTIN_REGISTRY: forgejo.ellis.link + BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}" jobs: define-variables: @@ -37,7 +38,7 @@ jobs: script: | const githubRepo = '${{ github.repository }}'.toLowerCase() const 
repoId = githubRepo.split('/')[1] - + core.setOutput('github_repository', githubRepo) const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo let images = [] @@ -48,7 +49,7 @@ jobs: core.setOutput('images_list', images.join(",")) const platforms = ['linux/amd64', 'linux/arm64'] core.setOutput('build_matrix', JSON.stringify({ - platform: platforms, + platform: platforms, include: platforms.map(platform => { return { platform, slug: platform.replace('/', '-') @@ -65,22 +66,15 @@ jobs: attestations: write id-token: write strategy: - matrix: { - "include": [ - { - "platform": "linux/amd64", - "slug": "linux-amd64" - }, - { - "platform": "linux/arm64", - "slug": "linux-arm64" - } - ], - "platform": [ - "linux/amd64", - "linux/arm64" - ] - } + matrix: + { + "include": + [ + { "platform": "linux/amd64", "slug": "linux-amd64" }, + { "platform": "linux/arm64", "slug": "linux-arm64" }, + ], + "platform": ["linux/amd64", "linux/arm64"], + } steps: - name: Echo strategy run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' @@ -105,9 +99,9 @@ jobs: - name: Login to builtin registry uses: docker/login-action@v3 with: - registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} - password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. 
- name: Extract metadata (labels, annotations) for Docker @@ -165,7 +159,7 @@ jobs: run: | mkdir -p /tmp/digests digest="${{ steps.build.outputs.digest }}" - touch "/tmp/digests/${digest#sha256:}" + touch "/tmp/digests/${digest#sha256:}" - name: Upload digest uses: forgejo/upload-artifact@v4 @@ -174,7 +168,7 @@ jobs: path: /tmp/digests/* if-no-files-found: error retention-days: 1 - + merge: runs-on: dind container: ghcr.io/catthehacker/ubuntu:act-latest @@ -190,9 +184,9 @@ jobs: - name: Login to builtin registry uses: docker/login-action@v3 with: - registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} - password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -211,7 +205,7 @@ jobs: images: ${{needs.define-variables.outputs.images}} # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 env: - DOCKER_METADATA_ANNOTATIONS_LEVELS: index + DOCKER_METADATA_ANNOTATIONS_LEVELS: index - name: Create manifest list and push working-directory: /tmp/digests From eb886b6760ceea19412050df99703c4825ae56eb Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 24 Apr 2025 21:24:21 +0100 Subject: [PATCH 0919/1248] Element Web client build --- .forgejo/workflows/element.yml | 127 +++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 .forgejo/workflows/element.yml diff --git a/.forgejo/workflows/element.yml b/.forgejo/workflows/element.yml new file mode 100644 index 00000000..db771197 --- /dev/null +++ b/.forgejo/workflows/element.yml @@ -0,0 +1,127 @@ +name: Deploy Element Web + +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + +concurrency: + group: "element-${{ github.ref }}" + 
cancel-in-progress: true + +jobs: + build-and-deploy: + name: Build and Deploy Element Web + runs-on: ubuntu-latest + + steps: + - name: Setup Node.js + uses: https://code.forgejo.org/actions/setup-node@v4 + with: + node-version: "20" + + - name: Clone, setup, and build Element Web + run: | + echo "Cloning Element Web..." + git clone https://github.com/maunium/element-web + cd element-web + git checkout develop + git pull + + echo "Cloning matrix-js-sdk..." + git clone https://github.com/matrix-org/matrix-js-sdk.git + + echo "Installing Yarn..." + npm install -g yarn + + echo "Installing dependencies..." + yarn install + + echo "Preparing build environment..." + mkdir -p .home + + echo "Cleaning up specific node_modules paths..." + rm -rf node_modules/@types/eslint-scope/ matrix-*-sdk/node_modules/@types/eslint-scope || echo "Cleanup paths not found, continuing." + + echo "Getting matrix-js-sdk commit hash..." + cd matrix-js-sdk + jsver=$(git rev-parse HEAD) + jsver=${jsver:0:12} + cd .. + echo "matrix-js-sdk version hash: $jsver" + + echo "Getting element-web commit hash..." + ver=$(git rev-parse HEAD) + ver=${ver:0:12} + echo "element-web version hash: $ver" + + chmod +x ./build-sh + + export VERSION="$ver-js-$jsver" + echo "Building Element Web version: $VERSION" + ./build-sh + + echo "Checking for build output..." 
+ ls -la webapp/ + + - name: Create config.json + run: | + cat < ./element-web/webapp/config.json + { + "default_server_name": "continuwuity.org", + "default_server_config": { + "m.homeserver": { + "base_url": "https://matrix.continuwuity.org" + } + }, + "default_country_code": "GB", + "default_theme": "dark", + "mobile_guide_toast": false, + "show_labs_settings": true, + "room_directory": [ + "continuwuity.org", + "matrixrooms.info" + ], + "settings_defaults": { + "UIFeature.urlPreviews": true, + "UIFeature.feedback": false, + "UIFeature.voip": false, + "UIFeature.shareQrCode": false, + "UIFeature.shareSocial": false, + "UIFeature.locationSharing": false, + "enableSyntaxHighlightLanguageDetection": true + }, + "features": { + "feature_pinning": true, + "feature_custom_themes": true + } + } + EOF + echo "Created ./element-web/webapp/config.json" + cat ./element-web/webapp/config.json + + - name: Upload Artifact + uses: https://code.forgejo.org/actions/upload-artifact@v3 + with: + name: element-web + path: ./element-web/webapp/ + retention-days: 14 + + - name: Install Wrangler + run: npm install --save-dev wrangler@latest + + - name: Deploy to Cloudflare Pages (Production) + if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" + + - name: Deploy to Cloudflare Pages (Preview) + if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ 
vars.CLOUDFLARE_PROJECT_NAME }}-element" From 60caa448b0e2119ee00ad67104da8e19a18982c4 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 24 Apr 2025 21:27:41 +0100 Subject: [PATCH 0920/1248] Tidy up publishing restriction check --- .forgejo/workflows/documentation.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index 1bda64f8..7d95a317 100644 --- a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -57,17 +57,17 @@ jobs: run: npm install --save-dev wrangler@latest - name: Deploy to Cloudflare Pages (Production) - if: (github.event_name == 'push' && github.ref == 'refs/heads/main') && vars.CLOUDFLARE_PROJECT_NAME != '' + if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' uses: https://github.com/cloudflare/wrangler-action@v3 with: accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch=main --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }} + command: pages deploy ./public --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}" - name: Deploy to Cloudflare Pages (Preview) - if: (github.event_name != 'push' || github.ref != 'refs/heads/main') && vars.CLOUDFLARE_PROJECT_NAME != '' + if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' uses: https://github.com/cloudflare/wrangler-action@v3 with: accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }} + command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}" From b2620e6922a4acf3e2b86ac220f7c4974fa268ca Mon Sep 17 00:00:00 2001 From: 
Kokomo Date: Sun, 27 Apr 2025 15:43:15 +0000 Subject: [PATCH 0921/1248] Remove email and add reference to matrix space --- CODE_OF_CONDUCT.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index e77154e7..65ee41e0 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,3 @@ - # Contributor Covenant Code of Conduct ## Our Pledge @@ -60,8 +59,7 @@ representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement over email at - or over Matrix at @strawberry:puppygock.gay. +reported to the community leaders responsible for enforcement over Matrix at #continuwuity:continuwuity.org. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the From a9a478f0778a355e82a5b54e7cb292369e39a792 Mon Sep 17 00:00:00 2001 From: Kokomo Date: Sun, 27 Apr 2025 15:47:40 +0000 Subject: [PATCH 0922/1248] Add back space oops --- CODE_OF_CONDUCT.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 65ee41e0..3ac0a83d 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,4 @@ + # Contributor Covenant Code of Conduct ## Our Pledge From 90f1a193e38f41c0108f6009e7a027bb7aa6753b Mon Sep 17 00:00:00 2001 From: Kokomo Date: Sun, 27 Apr 2025 16:06:34 +0000 Subject: [PATCH 0923/1248] Add maintainer emails --- CODE_OF_CONDUCT.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 3ac0a83d..476e68fb 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,3 @@ - # Contributor Covenant Code of Conduct ## Our Pledge @@ -60,7 +59,7 @@ representative at an online or offline event. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement over Matrix at #continuwuity:continuwuity.org. +reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or email at , and respectively. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the From 6b0288dd4c4649e1f3e2772a73e5b204f645d560 Mon Sep 17 00:00:00 2001 From: Kokomo Date: Sun, 27 Apr 2025 18:42:02 +0000 Subject: [PATCH 0924/1248] Update Contributing.md file (#807) Cleaned up wording and adjusted the links Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/807 Reviewed-by: nex Reviewed-by: Jade Ellis Co-authored-by: Kokomo Co-committed-by: Kokomo --- CONTRIBUTING.md | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fb540011..ecff7173 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ This page is for about contributing to conduwuit. The [development](./development.md) page may be of interest for you as well. If you would like to work on an [issue][issues] that is not assigned, preferably -ask in the Matrix room first at [#conduwuit:puppygock.gay][conduwuit-matrix], +ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix], and comment on it. ### Linting and Formatting @@ -23,9 +23,9 @@ suggestion, allow the lint and mention that in a comment. ### Running CI tests locally -conduwuit's CI for tests, linting, formatting, audit, etc use +continuwuity's CI for tests, linting, formatting, audit, etc use [`engage`][engage]. engage can be installed from nixpkgs or `cargo install -engage`. conduwuit's Nix flake devshell has the nixpkgs engage with `direnv`. +engage`. 
continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`. Use `engage --help` for more usage details. To test, format, lint, etc that CI would do, install engage, allow the `.envrc` @@ -111,33 +111,28 @@ applies here. ### Creating pull requests -Please try to keep contributions to the GitHub. While the mirrors of conduwuit -allow for pull/merge requests, there is no guarantee I will see them in a timely +Please try to keep contributions to the Forgejo Instance. While the mirrors of continuwuity +allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts. -This prevents me from having to ping once in a while to double check the status +This prevents us from having to ping once in a while to double check the status of it, especially when the CI completed successfully and everything so it *looks* done. -If you open a pull request on one of the mirrors, it is your responsibility to -inform me about its existence. In the future I may try to solve this with more -repo bots in the conduwuit Matrix room. There is no mailing list or email-patch -support on the sr.ht mirror, but if you'd like to email me a git patch you can -do so at `strawberry@puppygock.gay`. Direct all PRs/MRs to the `main` branch. By sending a pull request or patch, you are agreeing that your changes are allowed to be licenced under the Apache-2.0 licence and all of your conduct is -in line with the Contributor's Covenant, and conduwuit's Code of Conduct. +in line with the Contributor's Covenant, and continuwuity's Code of Conduct. Contribution by users who violate either of these code of conducts will not have their contributions accepted. This includes users who have been banned from -conduwuit Matrix rooms for Code of Conduct violations. +continuwuity Matrix rooms for Code of Conduct violations. 
-[issues]: https://github.com/girlbossceo/conduwuit/issues -[conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay +[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues +[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org [complement]: https://github.com/matrix-org/complement/ -[engage.toml]: https://github.com/girlbossceo/conduwuit/blob/main/engage.toml +[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml [engage]: https://charles.page.computer.surgery/engage/ [sytest]: https://github.com/matrix-org/sytest/ [cargo-deb]: https://github.com/kornelski/cargo-deb @@ -146,4 +141,4 @@ conduwuit Matrix rooms for Code of Conduct violations. [cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit [direnv]: https://direnv.net/ [mdbook]: https://rust-lang.github.io/mdBook/ -[documentation.yml]: https://github.com/girlbossceo/conduwuit/blob/main/.github/workflows/documentation.yml +[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml From edd5fc6c7ebe7c2b01d139f5bf34c97b5ac7d653 Mon Sep 17 00:00:00 2001 From: Glandos Date: Sun, 27 Apr 2025 18:52:20 +0000 Subject: [PATCH 0925/1248] Actualiser debian/conduwuit.service --- debian/conduwuit.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/conduwuit.service b/debian/conduwuit.service index a079499e..3d2fbc9b 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -3,7 +3,7 @@ Description=conduwuit Matrix homeserver Wants=network-online.target After=network-online.target Alias=matrix-conduwuit.service -Documentation=https://conduwuit.puppyirl.gay/ +Documentation=https://continuwuity.org/ [Service] DynamicUser=yes From 4158c1cf623a83b96d6a2d3cabb9f6aa1d618b4b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 28 Apr 2025 20:45:08 +0100 Subject: [PATCH 0926/1248] fix: Hack around software treating 
empty join rule incorrectly --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 216114af..2d8a2d0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3652,7 +3652,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "assign", "js_int", @@ -3672,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "assign", @@ -3707,7 +3707,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "base64 0.22.1", @@ -3739,7 
+3739,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3764,7 +3764,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "bytes", "headers", @@ -3786,7 +3786,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3795,7 +3795,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3805,7 +3805,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3820,7 +3820,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3832,7 +3832,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index c0f857c6..1ce5c1db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "652cc4864203ab7ca60cf9c47b931c0385304cc7" +rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" features = [ "compat", "rand", From e1655edd83893946cace9a44efb979a7357736b6 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 25 Apr 2025 02:47:48 +0100 Subject: [PATCH 0927/1248] feat: HTML default page --- .dockerignore | 2 +- .forgejo/workflows/release-image.yml | 3 + Cargo.lock | 64 ++++++++++++++++ Cargo.toml | 5 ++ docker/Dockerfile | 106 +++++++++++++++++++++++++-- nix/pkgs/main/default.nix | 2 + 
src/router/Cargo.toml | 1 + src/router/router.rs | 6 +- src/web/Cargo.toml | 32 ++++++++ src/web/css/index.css | 68 +++++++++++++++++ src/web/mod.rs | 61 +++++++++++++++ src/web/templates/_layout.html.j2 | 32 ++++++++ src/web/templates/error.html.j2 | 20 +++++ src/web/templates/index.html.j2 | 16 ++++ 14 files changed, 408 insertions(+), 10 deletions(-) create mode 100644 src/web/Cargo.toml create mode 100644 src/web/css/index.css create mode 100644 src/web/mod.rs create mode 100644 src/web/templates/_layout.html.j2 create mode 100644 src/web/templates/error.html.j2 create mode 100644 src/web/templates/index.html.j2 diff --git a/.dockerignore b/.dockerignore index 453634df..8ca2e3f8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -11,7 +11,7 @@ docker/ *.iml # Git folder -.git +# .git .gitea .gitlab .github diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 141bfef9..3eb84223 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -144,6 +144,9 @@ jobs: file: "docker/Dockerfile" build-args: | CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }} + COMMIT_SHA=${{ github.sha }}) + REMOTE_URL=${{github.event.repository.html_url }} + REMOTE_COMMIT_URL=${{github.event.head_commit.url }} platforms: ${{ matrix.platform }} labels: ${{ steps.meta.outputs.labels }} annotations: ${{ steps.meta.outputs.annotations }} diff --git a/Cargo.lock b/Cargo.lock index 2d8a2d0f..1b6c5d26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -109,6 +109,48 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbc3a507a82b17ba0d98f6ce8fd6954ea0c8152e98009d36a40d8dcc8ce078a" +[[package]] +name = "askama" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f75363874b771be265f4ffe307ca705ef6f3baa19011c149da8674a87f1b75c4" +dependencies = [ + "askama_derive", + "itoa", + "percent-encoding", + "serde", + "serde_json", +] + 
+[[package]] +name = "askama_derive" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "129397200fe83088e8a68407a8e2b1f826cf0086b21ccdb866a722c8bcd3a94f" +dependencies = [ + "askama_parser", + "basic-toml", + "memchr", + "proc-macro2", + "quote", + "rustc-hash 2.1.1", + "serde", + "serde_derive", + "syn", +] + +[[package]] +name = "askama_parser" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ab5630b3d5eaf232620167977f95eb51f3432fc76852328774afbd242d4358" +dependencies = [ + "memchr", + "serde", + "serde_derive", + "winnow", +] + [[package]] name = "assign" version = "1.1.1" @@ -415,6 +457,15 @@ version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +[[package]] +name = "basic-toml" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba62675e8242a4c4e806d12f11d136e626e6c8361d6b829310732241652a178a" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.69.5" @@ -904,6 +955,7 @@ dependencies = [ "conduwuit_api", "conduwuit_core", "conduwuit_service", + "conduwuit_web", "const-str", "futures", "http", @@ -961,6 +1013,18 @@ dependencies = [ "webpage", ] +[[package]] +name = "conduwuit_web" +version = "0.5.0-rc.5" +dependencies = [ + "askama", + "axum", + "futures", + "rand 0.8.5", + "thiserror 2.0.12", + "tracing", +] + [[package]] name = "console-api" version = "0.8.1" diff --git a/Cargo.toml b/Cargo.toml index 1ce5c1db..44db4c4b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -626,6 +626,11 @@ package = "conduwuit_macros" path = "src/macros" default-features = false +[workspace.dependencies.conduwuit-web] +package = "conduwuit_web" +path = "src/web" +default-features = false + ############################################################################### # # Release profiles diff --git 
a/docker/Dockerfile b/docker/Dockerfile index 536af632..56f2d5fa 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -111,14 +111,112 @@ RUN mkdir /out FROM toolchain AS builder + +# Get source +COPY . . + # Conduwuit version info ARG COMMIT_SHA= +ARG SHORT_COMMIT_SHA= +ARG REMOTE_URL= ARG CONDUWUIT_VERSION_EXTRA= +ENV COMMIT_SHA=$COMMIT_SHA +ENV SHORT_COMMIT_SHA=$SHORT_COMMIT_SHA +ENV REMOTE_URL=$REMOTE_URL ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA -RUN <> /etc/environment + +# Calculate version info from git if not provided via ARGs +# and write all relevant vars to /etc/environment +RUN <<'EOF' +set -e # Exit on error + +# Use temp variables to store calculated values +calculated_commit_sha="" +calculated_remote_url="" +calculated_version_extra="" + +# --- COMMIT_SHA --- +# Calculate COMMIT_SHA if ENV var (from ARG) is empty +if [ -z "${COMMIT_SHA}" ]; then + # Try to get short commit hash from git + calculated_commit_sha=$(git rev-parse HEAD 2>/dev/null || echo "") + if [ -n "${calculated_commit_sha}" ]; then + echo "COMMIT_SHA='${calculated_commit_sha}'" >> /etc/environment + fi +else + # Ensure ARG-provided value is in /etc/environment + echo "COMMIT_SHA='${COMMIT_SHA}'" >> /etc/environment fi + + +if [ -z "${SHORT_COMMIT_SHA}" ]; then + # Try to get short commit hash from git + calculated_short_commit_sha=$(git rev-parse --short HEAD 2>/dev/null || echo "") + if [ -n "${calculated_short_commit_sha}" ]; then + echo "SHORT_COMMIT_SHA='${calculated_short_commit_sha}'" >> /etc/environment + fi +else + # Ensure ARG-provided value is in /etc/environment + echo "SHORT_COMMIT_SHA='${SHORT_COMMIT_SHA}'" >> /etc/environment +fi + +# --- REMOTE_URL --- +# Calculate REMOTE_URL if ENV var (from ARG) is empty +if [ -z "${REMOTE_URL}" ]; then + # Try to get remote origin URL from git + remote_url_raw=$(git config --get remote.origin.url 2>/dev/null || echo "") + if [ -n "${remote_url_raw}" ]; then + # Transform git URL (SSH or HTTPS) to web URL + if [[ 
$remote_url_raw == "https://"* ]]; then + # Already HTTPS, just remove .git suffix + calculated_remote_url=$(echo "$remote_url_raw" | sed 's/\.git$//') + else + # Convert SSH URL to HTTPS URL + calculated_remote_url=$(echo "$remote_url_raw" | sed 's/\.git$//' | sed 's/:/\//' | sed 's/^git@/https:\/\//') + fi + + # Write calculated web URL if transformation was successful + if [ -n "${calculated_remote_url}" ]; then + echo "REMOTE_URL='${calculated_remote_url}'" >> /etc/environment + fi + fi +else + # Ensure ARG-provided value is in /etc/environment (assume it's a valid web URL) + echo "REMOTE_URL='${REMOTE_URL}'" >> /etc/environment + # Use provided value for REMOTE_COMMIT_URL calculation below + calculated_remote_url="${REMOTE_URL}" +fi + +# --- Determine effective values for subsequent calculations --- +# Use ENV var value if set (from ARG), otherwise use calculated value +effective_commit_sha="${COMMIT_SHA:-$calculated_commit_sha}" +effective_short_commit_sha="${SHORT_COMMIT_SHA:-$calculated_short_commit_sha}" +effective_remote_url="${REMOTE_URL:-$calculated_remote_url}" + +# --- REMOTE_COMMIT_URL --- +# Calculate and write REMOTE_COMMIT_URL if both components are available +if [ -z "${REMOTE_COMMIT_URL}" ] && [ -n "${effective_remote_url}" ] && [ -n "${effective_commit_sha}" ]; then + echo "REMOTE_COMMIT_URL='${effective_remote_url}/commit/${effective_commit_sha}'" >> /etc/environment +else + # Ensure ARG-provided value is in /etc/environment + echo "REMOTE_COMMIT_URL='${REMOTE_COMMIT_URL}'" >> /etc/environment +fi + +# --- CONDUWUIT_VERSION_EXTRA --- +# Calculate CONDUWUIT_VERSION_EXTRA if ENV var (from ARG) is empty +if [ -z "${CONDUWUIT_VERSION_EXTRA}" ]; then + # Use the effective short commit sha, fallback to "unknown revision" + calculated_version_extra="${effective_short_commit_sha:-unknown revision}" + # Handle case where commit sha calculation failed and ARG wasn't set + if [ -z "${calculated_version_extra}" ]; then + calculated_version_extra="unknown 
revision" + fi + echo "CONDUWUIT_VERSION_EXTRA='${calculated_version_extra}'" >> /etc/environment +else + # Ensure ARG-provided value is in /etc/environment + echo "CONDUWUIT_VERSION_EXTRA='${CONDUWUIT_VERSION_EXTRA}'" >> /etc/environment +fi + EOF ARG TARGETPLATFORM @@ -127,8 +225,6 @@ ARG TARGETPLATFORM RUN cat /etc/environment RUN xx-cargo --print-target-triple -# Get source -COPY . . # Build the binary RUN --mount=type=cache,target=/usr/local/cargo/registry \ diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 9c8038a7..3a43af5a 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -130,6 +130,8 @@ buildDepsOnlyEnv = }); buildPackageEnv = { + COMMIT_SHA = inputs.self.rev or inputs.self.dirtyRev or ""; + SHORT_COMMIT_SHA = inputs.self.shortRev or inputs.self.dirtyShortRev or ""; CONDUWUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev or ""; } // buildDepsOnlyEnv // { # Only needed in static stdenv because these are transitive dependencies of rocksdb diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index e4ddcb9b..9fcb8d6a 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -103,6 +103,7 @@ conduwuit-admin.workspace = true conduwuit-api.workspace = true conduwuit-core.workspace = true conduwuit-service.workspace = true +conduwuit-web.workspace = true const-str.workspace = true futures.workspace = true http.workspace = true diff --git a/src/router/router.rs b/src/router/router.rs index 0f95b924..2b9ce2ec 100644 --- a/src/router/router.rs +++ b/src/router/router.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use axum::{Router, response::IntoResponse, routing::get}; +use axum::{Router, response::IntoResponse}; use conduwuit::Error; use conduwuit_api::router::{state, state::Guard}; use conduwuit_service::Services; @@ -11,7 +11,7 @@ pub(crate) fn build(services: &Arc) -> (Router, Guard) { let router = Router::::new(); let (state, guard) = state::create(services.clone()); let router = 
conduwuit_api::router::build(router, &services.server) - .route("/", get(it_works)) + .merge(conduwuit_web::build::().with_state(())) .fallback(not_found) .with_state(state); @@ -21,5 +21,3 @@ pub(crate) fn build(services: &Arc) -> (Router, Guard) { async fn not_found(_uri: Uri) -> impl IntoResponse { Error::Request(ErrorKind::Unrecognized, "Not Found".into(), StatusCode::NOT_FOUND) } - -async fn it_works() -> &'static str { "hewwo from conduwuit woof!" } diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml new file mode 100644 index 00000000..07dde0f7 --- /dev/null +++ b/src/web/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "conduwuit_web" +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + +[lib] +path = "mod.rs" +crate-type = [ + "rlib", +# "dylib", +] + +[features] + + +[dependencies] +askama = "0.14.0" + +axum.workspace = true +futures.workspace = true +tracing.workspace = true +rand.workspace = true +thiserror.workspace = true + +[lints] +workspace = true diff --git a/src/web/css/index.css b/src/web/css/index.css new file mode 100644 index 00000000..86cb6d8d --- /dev/null +++ b/src/web/css/index.css @@ -0,0 +1,68 @@ +:root { + color-scheme: light; + --font-stack: sans-serif; + + --background-color: #fff; + --text-color: #000; + + --bg: oklch(0.76 0.0854 317.27); + --panel-bg: oklch(0.91 0.042 317.27); + + --name-lightness: 0.45; + + @media (prefers-color-scheme: dark) { + color-scheme: dark; + --text-color: #fff; + --bg: oklch(0.15 0.042 317.27); + --panel-bg: oklch(0.24 0.03 317.27); + + --name-lightness: 0.8; + } + + --c1: oklch(0.44 0.177 353.06); + --c2: oklch(0.59 0.158 150.88); + + --normal-font-size: 1rem; + --small-font-size: 0.8rem; +} + +body { + color: var(--text-color); + font-family: var(--font-stack); + margin: 0; + padding: 0; + display: grid; + place-items: center; + 
min-height: 100vh; +} + +html { + background-color: var(--bg); + background-image: linear-gradient( + 70deg, + oklch(from var(--bg) l + 0.2 c h), + oklch(from var(--bg) l - 0.2 c h) + ); + font-size: 16px; +} + +.panel { + width: min(clamp(24rem, 12rem + 40vw, 48rem), 100vw); + border-radius: 15px; + background-color: var(--panel-bg); + padding-inline: 1.5rem; + padding-block: 1rem; + box-shadow: 0 0.25em 0.375em hsla(0, 0%, 0%, 0.1); +} + +.project-name { + text-decoration: none; + background: linear-gradient( + 130deg, + oklch(from var(--c1) var(--name-lightness) c h), + oklch(from var(--c2) var(--name-lightness) c h) + ); + background-clip: text; + color: transparent; + filter: brightness(1.2); +} diff --git a/src/web/mod.rs b/src/web/mod.rs new file mode 100644 index 00000000..ddf13be4 --- /dev/null +++ b/src/web/mod.rs @@ -0,0 +1,61 @@ +use askama::Template; +use axum::{ + Router, + http::{StatusCode, header}, + response::{Html, IntoResponse, Response}, + routing::get, +}; + +pub fn build() -> Router<()> { Router::new().route("/", get(index_handler)) } + +async fn index_handler() -> Result { + #[derive(Debug, Template)] + #[template(path = "index.html.j2")] + struct Tmpl<'a> { + nonce: &'a str, + } + let nonce = rand::random::().to_string(); + + let template = Tmpl { nonce: &nonce }; + Ok(( + [(header::CONTENT_SECURITY_POLICY, format!("default-src 'none' 'nonce-{nonce}';"))], + Html(template.render()?), + )) +} + +#[derive(Debug, thiserror::Error)] +enum WebError { + #[error("Failed to render template: {0}")] + Render(#[from] askama::Error), +} + +impl IntoResponse for WebError { + fn into_response(self) -> Response { + #[derive(Debug, Template)] + #[template(path = "error.html.j2")] + struct Tmpl<'a> { + nonce: &'a str, + err: WebError, + } + + let nonce = rand::random::().to_string(); + + let status = match &self { + | Self::Render(_) => StatusCode::INTERNAL_SERVER_ERROR, + }; + let tmpl = Tmpl { nonce: &nonce, err: self }; + if let Ok(body) = tmpl.render() 
{ + ( + status, + [( + header::CONTENT_SECURITY_POLICY, + format!("default-src 'none' 'nonce-{nonce}';"), + )], + Html(body), + ) + .into_response() + } else { + (status, "Something went wrong").into_response() + } + } +} diff --git a/src/web/templates/_layout.html.j2 b/src/web/templates/_layout.html.j2 new file mode 100644 index 00000000..a38745e1 --- /dev/null +++ b/src/web/templates/_layout.html.j2 @@ -0,0 +1,32 @@ + + + + + + {% block title %}Continuwuity{% endblock %} + + + + + + +
    {%~ block content %}{% endblock ~%}
    + {%~ block footer ~%} +
    +

    Powered by Continuwuity + {%~ if let Some(version_info) = option_env!("CONDUWUIT_VERSION_EXTRA").or(option_env!("SHORT_COMMIT_SHA")) ~%} + {%~ if let Some(url) = option_env!("REMOTE_COMMIT_URL").or(option_env!("REMOTE_URL")) ~%} + ({{ version_info }}) + {%~ else ~%} + ({{ version_info }}) + {%~ endif ~%} + {%~ endif ~%}

    +
    + {%~ endblock ~%} + + + diff --git a/src/web/templates/error.html.j2 b/src/web/templates/error.html.j2 new file mode 100644 index 00000000..e320d0ed --- /dev/null +++ b/src/web/templates/error.html.j2 @@ -0,0 +1,20 @@ +{% extends "_layout.html.j2" %} + +{%- block title -%} +Server Error +{%- endblock -%} + +{%- block content -%} +

    + {%- match err -%} + {% else -%} 500: Internal Server Error + {%- endmatch -%} +

    + +{%- match err -%} + {% when WebError::Render(err) -%} +
    {{ err }}
    + {% else -%}

    An error occurred

    +{%- endmatch -%} + +{%- endblock -%} diff --git a/src/web/templates/index.html.j2 b/src/web/templates/index.html.j2 new file mode 100644 index 00000000..51393822 --- /dev/null +++ b/src/web/templates/index.html.j2 @@ -0,0 +1,16 @@ +{% extends "_layout.html.j2" %} +{%- block content -%} +
    +
    +

    Welcome to Continuwuity!

    +

    Continuwuity is successfully installed and working.

    +

    To get started, you can:

    + +
    + +{%- endblock content -%} From a98da7d9422500de592824eee0f441bc94803065 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 1 May 2025 00:38:35 +0100 Subject: [PATCH 0928/1248] refactor: Move git version info gather in into a build script --- .forgejo/workflows/release-image.yml | 8 +- Cargo.lock | 44 ++++++++++ Cargo.toml | 6 ++ docker/Dockerfile | 118 ++++----------------------- nix/pkgs/main/default.nix | 5 +- src/api/client/message.rs | 5 +- src/build_metadata/Cargo.toml | 34 ++++++++ src/build_metadata/build.rs | 92 +++++++++++++++++++++ src/build_metadata/mod.rs | 23 ++++++ src/core/Cargo.toml | 1 + src/core/info/version.rs | 11 +-- src/web/Cargo.toml | 2 + src/web/mod.rs | 1 + src/web/templates/_layout.html.j2 | 4 +- 14 files changed, 231 insertions(+), 123 deletions(-) create mode 100644 src/build_metadata/Cargo.toml create mode 100644 src/build_metadata/build.rs create mode 100644 src/build_metadata/mod.rs diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 3eb84223..de009ad5 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -143,10 +143,10 @@ jobs: context: . 
file: "docker/Dockerfile" build-args: | - CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }} - COMMIT_SHA=${{ github.sha }}) - REMOTE_URL=${{github.event.repository.html_url }} - REMOTE_COMMIT_URL=${{github.event.head_commit.url }} + GIT_COMMIT_HASH=${{ github.sha }}) + GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}) + GIT_REMOTE_URL=${{github.event.repository.html_url }} + GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }} platforms: ${{ matrix.platform }} labels: ${{ steps.meta.outputs.labels }} annotations: ${{ steps.meta.outputs.annotations }} diff --git a/Cargo.lock b/Cargo.lock index 1b6c5d26..5f82384e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -584,6 +584,9 @@ name = "built" version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" +dependencies = [ + "cargo-lock", +] [[package]] name = "bumpalo" @@ -631,6 +634,19 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "cargo-lock" +version = "10.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06acb4f71407ba205a07cb453211e0e6a67b21904e47f6ba1f9589e38f2e454" +dependencies = [ + "petgraph", + "semver", + "serde", + "toml", + "url", +] + [[package]] name = "cargo_toml" version = "0.21.0" @@ -856,6 +872,13 @@ dependencies = [ "tracing", ] +[[package]] +name = "conduwuit_build_metadata" +version = "0.5.0-rc.5" +dependencies = [ + "built", +] + [[package]] name = "conduwuit_core" version = "0.5.0-rc.5" @@ -870,6 +893,7 @@ dependencies = [ "checked_ops", "chrono", "clap", + "conduwuit_build_metadata", "conduwuit_macros", "const-str", "core_affinity", @@ -1019,6 +1043,7 @@ version = "0.5.0-rc.5" dependencies = [ "askama", "axum", + "conduwuit_build_metadata", "futures", "rand 0.8.5", "thiserror 2.0.12", @@ -1515,6 +1540,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "flate2" version = "1.1.1" @@ -3132,6 +3163,16 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.8.0", +] + [[package]] name = "phf" version = "0.11.3" @@ -4127,6 +4168,9 @@ name = "semver" version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] [[package]] name = "sentry" diff --git a/Cargo.toml b/Cargo.toml index 44db4c4b..43cd3f4f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -631,6 +631,12 @@ package = "conduwuit_web" path = "src/web" default-features = false + +[workspace.dependencies.conduwuit-build-metadata] +package = "conduwuit_build_metadata" +path = "src/build_metadata" +default-features = false + ############################################################################### # # Release profiles diff --git a/docker/Dockerfile b/docker/Dockerfile index 56f2d5fa..3029282f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -115,116 +115,26 @@ FROM toolchain AS builder # Get source COPY . . 
-# Conduwuit version info -ARG COMMIT_SHA= -ARG SHORT_COMMIT_SHA= -ARG REMOTE_URL= -ARG CONDUWUIT_VERSION_EXTRA= -ENV COMMIT_SHA=$COMMIT_SHA -ENV SHORT_COMMIT_SHA=$SHORT_COMMIT_SHA -ENV REMOTE_URL=$REMOTE_URL -ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA - -# Calculate version info from git if not provided via ARGs -# and write all relevant vars to /etc/environment -RUN <<'EOF' -set -e # Exit on error - -# Use temp variables to store calculated values -calculated_commit_sha="" -calculated_remote_url="" -calculated_version_extra="" - -# --- COMMIT_SHA --- -# Calculate COMMIT_SHA if ENV var (from ARG) is empty -if [ -z "${COMMIT_SHA}" ]; then - # Try to get short commit hash from git - calculated_commit_sha=$(git rev-parse HEAD 2>/dev/null || echo "") - if [ -n "${calculated_commit_sha}" ]; then - echo "COMMIT_SHA='${calculated_commit_sha}'" >> /etc/environment - fi -else - # Ensure ARG-provided value is in /etc/environment - echo "COMMIT_SHA='${COMMIT_SHA}'" >> /etc/environment -fi - - -if [ -z "${SHORT_COMMIT_SHA}" ]; then - # Try to get short commit hash from git - calculated_short_commit_sha=$(git rev-parse --short HEAD 2>/dev/null || echo "") - if [ -n "${calculated_short_commit_sha}" ]; then - echo "SHORT_COMMIT_SHA='${calculated_short_commit_sha}'" >> /etc/environment - fi -else - # Ensure ARG-provided value is in /etc/environment - echo "SHORT_COMMIT_SHA='${SHORT_COMMIT_SHA}'" >> /etc/environment -fi - -# --- REMOTE_URL --- -# Calculate REMOTE_URL if ENV var (from ARG) is empty -if [ -z "${REMOTE_URL}" ]; then - # Try to get remote origin URL from git - remote_url_raw=$(git config --get remote.origin.url 2>/dev/null || echo "") - if [ -n "${remote_url_raw}" ]; then - # Transform git URL (SSH or HTTPS) to web URL - if [[ $remote_url_raw == "https://"* ]]; then - # Already HTTPS, just remove .git suffix - calculated_remote_url=$(echo "$remote_url_raw" | sed 's/\.git$//') - else - # Convert SSH URL to HTTPS URL - calculated_remote_url=$(echo 
"$remote_url_raw" | sed 's/\.git$//' | sed 's/:/\//' | sed 's/^git@/https:\/\//') - fi - - # Write calculated web URL if transformation was successful - if [ -n "${calculated_remote_url}" ]; then - echo "REMOTE_URL='${calculated_remote_url}'" >> /etc/environment - fi - fi -else - # Ensure ARG-provided value is in /etc/environment (assume it's a valid web URL) - echo "REMOTE_URL='${REMOTE_URL}'" >> /etc/environment - # Use provided value for REMOTE_COMMIT_URL calculation below - calculated_remote_url="${REMOTE_URL}" -fi - -# --- Determine effective values for subsequent calculations --- -# Use ENV var value if set (from ARG), otherwise use calculated value -effective_commit_sha="${COMMIT_SHA:-$calculated_commit_sha}" -effective_short_commit_sha="${SHORT_COMMIT_SHA:-$calculated_short_commit_sha}" -effective_remote_url="${REMOTE_URL:-$calculated_remote_url}" - -# --- REMOTE_COMMIT_URL --- -# Calculate and write REMOTE_COMMIT_URL if both components are available -if [ -z "${REMOTE_COMMIT_URL}" ] && [ -n "${effective_remote_url}" ] && [ -n "${effective_commit_sha}" ]; then - echo "REMOTE_COMMIT_URL='${effective_remote_url}/commit/${effective_commit_sha}'" >> /etc/environment -else - # Ensure ARG-provided value is in /etc/environment - echo "REMOTE_COMMIT_URL='${REMOTE_COMMIT_URL}'" >> /etc/environment -fi - -# --- CONDUWUIT_VERSION_EXTRA --- -# Calculate CONDUWUIT_VERSION_EXTRA if ENV var (from ARG) is empty -if [ -z "${CONDUWUIT_VERSION_EXTRA}" ]; then - # Use the effective short commit sha, fallback to "unknown revision" - calculated_version_extra="${effective_short_commit_sha:-unknown revision}" - # Handle case where commit sha calculation failed and ARG wasn't set - if [ -z "${calculated_version_extra}" ]; then - calculated_version_extra="unknown revision" - fi - echo "CONDUWUIT_VERSION_EXTRA='${calculated_version_extra}'" >> /etc/environment -else - # Ensure ARG-provided value is in /etc/environment - echo "CONDUWUIT_VERSION_EXTRA='${CONDUWUIT_VERSION_EXTRA}'" >> 
/etc/environment -fi - -EOF - ARG TARGETPLATFORM # Verify environment configuration RUN cat /etc/environment RUN xx-cargo --print-target-triple +# Conduwuit version info +ARG GIT_COMMIT_HASH= +ARG GIT_COMMIT_HASH_SHORT= +ARG GIT_REMOTE_URL= +ARG GIT_REMOTE_COMMIT_URL= +ARG CONDUWUIT_VERSION_EXTRA= +ARG CONTINUWUITY_VERSION_EXTRA= +ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH +ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT +ENV GIT_REMOTE_URL=$GIT_REMOTE_URL +ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL +ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA +ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA + # Build the binary RUN --mount=type=cache,target=/usr/local/cargo/registry \ diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 3a43af5a..f2fffec0 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -130,9 +130,8 @@ buildDepsOnlyEnv = }); buildPackageEnv = { - COMMIT_SHA = inputs.self.rev or inputs.self.dirtyRev or ""; - SHORT_COMMIT_SHA = inputs.self.shortRev or inputs.self.dirtyShortRev or ""; - CONDUWUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev or ""; + GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or ""; + GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or ""; } // buildDepsOnlyEnv // { # Only needed in static stdenv because these are transitive dependencies of rocksdb CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 16b1796a..e442850b 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -143,7 +143,10 @@ pub(crate) async fn get_message_events_route( if let Some(registration) = body.appservice_info.as_ref() { <&DeviceId>::from(registration.registration.id.as_str()) } else { - panic!("No device_id provided and no appservice registration found, this should be unreachable"); + panic!( + "No device_id provided and no appservice registration 
found, this \ + should be unreachable" + ); }, }, room_id, diff --git a/src/build_metadata/Cargo.toml b/src/build_metadata/Cargo.toml new file mode 100644 index 00000000..3a98c6bf --- /dev/null +++ b/src/build_metadata/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "conduwuit_build_metadata" +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + + +build = "build.rs" +# [[bin]] +# path = "main.rs" +# name = "conduwuit_build_metadata" + +[lib] +path = "mod.rs" +crate-type = [ + "rlib", +# "dylib", +] + +[features] + + +[dependencies] + +[build-dependencies] +built = {version = "0.7", features = ["cargo-lock", "dependency-tree"]} + +[lints] +workspace = true diff --git a/src/build_metadata/build.rs b/src/build_metadata/build.rs new file mode 100644 index 00000000..2fec16a7 --- /dev/null +++ b/src/build_metadata/build.rs @@ -0,0 +1,92 @@ +use std::process::Command; + +fn run_git_command(args: &[&str]) -> Option { + Command::new("git") + .args(args) + .output() + .ok() + .filter(|output| output.status.success()) + .and_then(|output| String::from_utf8(output.stdout).ok()) + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()) +} +fn get_env(env_var: &str) -> Option { + match std::env::var(env_var) { + | Ok(val) if !val.is_empty() => Some(val), + | _ => None, + } +} +fn main() { + // built gets the default crate from the workspace. Not sure if this is intended + // behavior, but it's what we want. 
+ built::write_built_file().expect("Failed to acquire build-time information"); + + // --- Git Information --- + let mut commit_hash = None; + let mut commit_hash_short = None; + let mut remote_url_web = None; + + // Get full commit hash + if let Some(hash) = + get_env("GIT_COMMIT_HASH").or_else(|| run_git_command(&["rev-parse", "HEAD"])) + { + println!("cargo:rustc-env=GIT_COMMIT_HASH={hash}"); + commit_hash = Some(hash); + } + + // Get short commit hash + if let Some(short_hash) = get_env("GIT_COMMIT_HASH_SHORT") + .or_else(|| run_git_command(&["rev-parse", "--short", "HEAD"])) + { + println!("cargo:rustc-env=GIT_COMMIT_HASH_SHORT={short_hash}"); + commit_hash_short = Some(short_hash); + } + + // Get remote URL and convert to web URL + if let Some(remote_url_raw) = get_env("GIT_REMOTE_URL") + .or_else(|| run_git_command(&["config", "--get", "remote.origin.url"])) + { + println!("cargo:rustc-env=GIT_REMOTE_URL={remote_url_raw}"); + let web_url = if remote_url_raw.starts_with("https://") { + remote_url_raw.trim_end_matches(".git").to_owned() + } else if remote_url_raw.starts_with("git@") { + remote_url_raw + .trim_end_matches(".git") + .replacen(':', "/", 1) + .replacen("git@", "https://", 1) + } else if remote_url_raw.starts_with("ssh://") { + remote_url_raw + .trim_end_matches(".git") + .replacen("git@", "", 1) + .replacen("ssh:", "https:", 1) + } else { + // Assume it's already a web URL or unknown format + remote_url_raw + }; + println!("cargo:rustc-env=GIT_REMOTE_WEB_URL={web_url}"); + remote_url_web = Some(web_url); + } + + // Construct remote commit URL + if let Some(remote_commit_url) = get_env("GIT_REMOTE_COMMIT_URL") { + println!("cargo:rustc-env=GIT_REMOTE_COMMIT_URL={remote_commit_url}"); + } else if let (Some(base_url), Some(hash)) = + (&remote_url_web, commit_hash.as_ref().or(commit_hash_short.as_ref())) + { + let commit_page = format!("{base_url}/commit/{hash}"); + println!("cargo:rustc-env=GIT_REMOTE_COMMIT_URL={commit_page}"); + } + + // --- Rerun 
Triggers --- + // Rerun if the git HEAD changes + println!("cargo:rerun-if-changed=.git/HEAD"); + // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch) + if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"]) { + println!("cargo:rerun-if-changed=.git/{ref_path}"); + } + + println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH"); + println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH_SHORT"); + println!("cargo:rerun-if-env-changed=GIT_REMOTE_URL"); + println!("cargo:rerun-if-env-changed=GIT_REMOTE_COMMIT_URL"); +} diff --git a/src/build_metadata/mod.rs b/src/build_metadata/mod.rs new file mode 100644 index 00000000..cf3364c1 --- /dev/null +++ b/src/build_metadata/mod.rs @@ -0,0 +1,23 @@ +pub mod built { + include!(concat!(env!("OUT_DIR"), "/built.rs")); +} + +pub static GIT_COMMIT_HASH: Option<&str> = option_env!("GIT_COMMIT_HASH"); + +pub static GIT_COMMIT_HASH_SHORT: Option<&str> = option_env!("GIT_COMMIT_HASH_SHORT"); + +// this would be a lot better if Option::or was const. 
+pub static VERSION_EXTRA: Option<&str> = + if let v @ Some(_) = option_env!("CONTINUWUITY_VERSION_EXTRA") { + v + } else if let v @ Some(_) = option_env!("CONDUWUIT_VERSION_EXTRA") { + v + } else if let v @ Some(_) = option_env!("CONDUIT_VERSION_EXTRA") { + v + } else { + GIT_COMMIT_HASH_SHORT + }; +pub static GIT_REMOTE_WEB_URL: Option<&str> = option_env!("GIT_REMOTE_WEB_URL"); +pub static GIT_REMOTE_COMMIT_URL: Option<&str> = option_env!("GIT_REMOTE_COMMIT_URL"); + +// TODO: Mark dirty builds within the version string diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index f42b049b..0c33c590 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -67,6 +67,7 @@ checked_ops.workspace = true chrono.workspace = true clap.workspace = true conduwuit-macros.workspace = true +conduwuit-build-metadata.workspace = true const-str.workspace = true core_affinity.workspace = true ctor.workspace = true diff --git a/src/core/info/version.rs b/src/core/info/version.rs index 6abb6e13..523c40a2 100644 --- a/src/core/info/version.rs +++ b/src/core/info/version.rs @@ -26,13 +26,6 @@ pub fn user_agent() -> &'static str { USER_AGENT.get_or_init(init_user_agent) } fn init_user_agent() -> String { format!("{}/{}", name(), version()) } fn init_version() -> String { - option_env!("CONDUWUIT_VERSION_EXTRA") - .or(option_env!("CONDUIT_VERSION_EXTRA")) - .map_or(SEMANTIC.to_owned(), |extra| { - if extra.is_empty() { - SEMANTIC.to_owned() - } else { - format!("{SEMANTIC} ({extra})") - } - }) + conduwuit_build_metadata::VERSION_EXTRA + .map_or(SEMANTIC.to_owned(), |extra| format!("{SEMANTIC} ({extra})")) } diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml index 07dde0f7..8e03a338 100644 --- a/src/web/Cargo.toml +++ b/src/web/Cargo.toml @@ -20,6 +20,8 @@ crate-type = [ [dependencies] +conduwuit-build-metadata.workspace = true + askama = "0.14.0" axum.workspace = true diff --git a/src/web/mod.rs b/src/web/mod.rs index ddf13be4..520a9a34 100644 --- a/src/web/mod.rs +++ 
b/src/web/mod.rs @@ -5,6 +5,7 @@ use axum::{ response::{Html, IntoResponse, Response}, routing::get, }; +use conduwuit_build_metadata::{GIT_REMOTE_COMMIT_URL, GIT_REMOTE_WEB_URL, VERSION_EXTRA}; pub fn build() -> Router<()> { Router::new().route("/", get(index_handler)) } diff --git a/src/web/templates/_layout.html.j2 b/src/web/templates/_layout.html.j2 index a38745e1..fd0a5b29 100644 --- a/src/web/templates/_layout.html.j2 +++ b/src/web/templates/_layout.html.j2 @@ -18,8 +18,8 @@ {%~ block footer ~%}