From 73cf7cd578f9def5ee9851580ca413a8731fcf92 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 20 Apr 2024 17:59:54 -0400 Subject: [PATCH 01/45] refactor more of admin code, add unfinished fsck command Signed-off-by: strawberry --- src/service/admin/{ => debug}/debug.rs | 79 +------------------------ src/service/admin/debug/mod.rs | 80 ++++++++++++++++++++++++++ src/service/admin/fsck.rs | 18 ++++++ src/service/admin/mod.rs | 9 +-- src/service/admin/query/mod.rs | 52 ++++++++++++++++- src/service/admin/query/query.rs | 48 ---------------- src/service/admin/user/mod.rs | 63 ++++++++++++++++++++ src/service/admin/{ => user}/user.rs | 61 +------------------- 8 files changed, 219 insertions(+), 191 deletions(-) rename src/service/admin/{ => debug}/debug.rs (82%) create mode 100644 src/service/admin/debug/mod.rs create mode 100644 src/service/admin/fsck.rs delete mode 100644 src/service/admin/query/query.rs create mode 100644 src/service/admin/user/mod.rs rename src/service/admin/{ => user}/user.rs (86%) diff --git a/src/service/admin/debug.rs b/src/service/admin/debug/debug.rs similarity index 82% rename from src/service/admin/debug.rs rename to src/service/admin/debug/debug.rs index c27f5900..0f77b2fd 100644 --- a/src/service/admin/debug.rs +++ b/src/service/admin/debug/debug.rs @@ -1,91 +1,16 @@ use std::{collections::BTreeMap, sync::Arc, time::Instant}; -use clap::Subcommand; use ruma::{ api::client::error::ErrorKind, events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, - RoomId, RoomVersionId, ServerName, + RoomId, RoomVersionId, }; use tokio::sync::RwLock; use tracing::{debug, error, info, warn}; use tracing_subscriber::EnvFilter; +use super::DebugCommand; use crate::{api::server_server::parse_incoming_pdu, services, utils::HtmlEscape, Error, PduEvent, Result}; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum DebugCommand { - /// - Get the auth_chain of a PDU - GetAuthChain { - /// An event ID (the $ character followed by the base64 reference hash) - event_id: Box, - }, - - /// - Parse and print a PDU from a JSON - /// - /// The PDU event is only checked for validity and is not added to the - /// database. - /// - /// This command needs a JSON blob provided in a Markdown code block below - /// the command. - ParsePdu, - - /// - Retrieve and print a PDU by ID from the conduwuit database - GetPdu { - /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, - }, - - /// - Attempts to retrieve a PDU from a remote server. Inserts it into our - /// database/timeline if found and we do not have this PDU already - /// (following normal event auth rules, handles it as an incoming PDU). - GetRemotePdu { - /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, - - /// Argument for us to attempt to fetch the event from the - /// specified remote server. - server: Box, - }, - - /// - Gets all the room state events for the specified room. - /// - /// This is functionally equivalent to `GET - /// /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does - /// *not* check if the sender user is allowed to see state events. This is - /// done because it's implied that server admins here have database access - /// and can see/get room info themselves anyways if they were malicious - /// admins. - /// - /// Of course the check is still done on the actual client API. 
- GetRoomState { - /// Room ID - room_id: Box, - }, - - /// - Sends a federation request to the remote server's - /// `/_matrix/federation/v1/version` endpoint and measures the latency it - /// took for the server to respond - Ping { - server: Box, - }, - - /// - Forces device lists for all local and remote users to be updated (as - /// having new keys available) - ForceDeviceListUpdates, - - /// - Change tracing log level/filter on the fly - /// - /// This accepts the same format as the `log` config option. - ChangeLogLevel { - /// Log level/filter - filter: Option, - - /// Resets the log level/filter to the one in your config - #[arg(short, long)] - reset: bool, - }, -} - pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result { Ok(match command { DebugCommand::GetAuthChain { diff --git a/src/service/admin/debug/mod.rs b/src/service/admin/debug/mod.rs new file mode 100644 index 00000000..43823175 --- /dev/null +++ b/src/service/admin/debug/mod.rs @@ -0,0 +1,80 @@ +use clap::Subcommand; +use ruma::{EventId, RoomId, ServerName}; + +#[allow(clippy::module_inception)] +pub(crate) mod debug; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum DebugCommand { + /// - Get the auth_chain of a PDU + GetAuthChain { + /// An event ID (the $ character followed by the base64 reference hash) + event_id: Box, + }, + + /// - Parse and print a PDU from a JSON + /// + /// The PDU event is only checked for validity and is not added to the + /// database. + /// + /// This command needs a JSON blob provided in a Markdown code block below + /// the command. + ParsePdu, + + /// - Retrieve and print a PDU by ID from the conduwuit database + GetPdu { + /// An event ID (a $ followed by the base64 reference hash) + event_id: Box, + }, + + /// - Attempts to retrieve a PDU from a remote server. Inserts it into our + /// database/timeline if found and we do not have this PDU already + /// (following normal event auth rules, handles it as an incoming PDU). + GetRemotePdu { + /// An event ID (a $ followed by the base64 reference hash) + event_id: Box, + + /// Argument for us to attempt to fetch the event from the + /// specified remote server. + server: Box, + }, + + /// - Gets all the room state events for the specified room. + /// + /// This is functionally equivalent to `GET + /// /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does + /// *not* check if the sender user is allowed to see state events. This is + /// done because it's implied that server admins here have database access + /// and can see/get room info themselves anyways if they were malicious + /// admins. + /// + /// Of course the check is still done on the actual client API. + GetRoomState { + /// Room ID + room_id: Box, + }, + + /// - Sends a federation request to the remote server's + /// `/_matrix/federation/v1/version` endpoint and measures the latency it + /// took for the server to respond + Ping { + server: Box, + }, + + /// - Forces device lists for all local and remote users to be updated (as + /// having new keys available) + ForceDeviceListUpdates, + + /// - Change tracing log level/filter on the fly + /// + /// This accepts the same format as the `log` config option. 
+ ChangeLogLevel { + /// Log level/filter + filter: Option, + + /// Resets the log level/filter to the one in your config + #[arg(short, long)] + reset: bool, + }, +} diff --git a/src/service/admin/fsck.rs b/src/service/admin/fsck.rs new file mode 100644 index 00000000..054976d4 --- /dev/null +++ b/src/service/admin/fsck.rs @@ -0,0 +1,18 @@ +use clap::Subcommand; +use ruma::events::room::message::RoomMessageEventContent; + +use crate::{services, Result}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum FsckCommand { + Register, +} + +pub(crate) async fn fsck(command: FsckCommand, body: Vec<&str>) -> Result { + match command { + FsckCommand::Register => { + todo!() + }, + } +} diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 244df85f..da5a77c4 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -30,7 +30,7 @@ use super::pdu::PduBuilder; use crate::{ service::admin::{ appservice::AppserviceCommand, debug::DebugCommand, federation::FederationCommand, media::MediaCommand, - query::query::QueryCommand, room::RoomCommand, server::ServerCommand, user::UserCommand, + query::QueryCommand, room::RoomCommand, server::ServerCommand, user::UserCommand, }, services, Error, Result, }; @@ -38,6 +38,7 @@ use crate::{ pub(crate) mod appservice; pub(crate) mod debug; pub(crate) mod federation; +pub(crate) mod fsck; pub(crate) mod media; pub(crate) mod query; pub(crate) mod room; @@ -279,12 +280,12 @@ impl Service { let reply_message_content = match command { AdminCommand::Appservices(command) => appservice::process(command, body).await?, AdminCommand::Media(command) => media::process(command, body).await?, - AdminCommand::Users(command) => user::process(command, body).await?, + AdminCommand::Users(command) => user::user::process(command, body).await?, AdminCommand::Rooms(command) => room::process(command, body).await?, AdminCommand::Federation(command) => federation::process(command, body).await?, AdminCommand::Server(command) => server::process(command, body).await?, - AdminCommand::Debug(command) => debug::process(command, body).await?, - AdminCommand::Query(command) => query::query::process(command, body).await?, + AdminCommand::Debug(command) => debug::debug::process(command, body).await?, + AdminCommand::Query(command) => query::process(command, body).await?, }; Ok(reply_message_content) diff --git a/src/service/admin/query/mod.rs b/src/service/admin/query/mod.rs index 998bf756..fc10c1c9 100644 --- a/src/service/admin/query/mod.rs +++ b/src/service/admin/query/mod.rs @@ -1,8 +1,54 @@ -#[allow(clippy::module_inception)] -pub(crate) mod query; - pub(crate) mod account_data; pub(crate) mod appservice; pub(crate) mod globals; pub(crate) mod presence; pub(crate) mod room_alias; + +use clap::Subcommand; +use ruma::events::room::message::RoomMessageEventContent; + +use self::{ + account_data::{account_data, AccountData}, + appservice::{appservice, Appservice}, + globals::{globals, Globals}, + presence::{presence, Presence}, + room_alias::{room_alias, RoomAlias}, +}; +use crate::Result; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +/// Query tables from database +pub(crate) enum QueryCommand { + /// - account_data.rs iterators and getters + #[command(subcommand)] + AccountData(AccountData), + + /// - appservice.rs iterators and getters + #[command(subcommand)] + Appservice(Appservice), + + /// - presence.rs iterators and getters + #[command(subcommand)] + Presence(Presence), + + /// - rooms/alias.rs iterators and getters + 
#[command(subcommand)] + RoomAlias(RoomAlias), + + /// - globals.rs iterators and getters + #[command(subcommand)] + Globals(Globals), +} + +/// Processes admin query commands +#[allow(non_snake_case)] +pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result { + match command { + QueryCommand::AccountData(AccountData) => account_data(AccountData).await, + QueryCommand::Appservice(Appservice) => appservice(Appservice).await, + QueryCommand::Presence(Presence) => presence(Presence).await, + QueryCommand::RoomAlias(RoomAlias) => room_alias(RoomAlias).await, + QueryCommand::Globals(Globals) => globals(Globals).await, + } +} diff --git a/src/service/admin/query/query.rs b/src/service/admin/query/query.rs deleted file mode 100644 index 8a979aee..00000000 --- a/src/service/admin/query/query.rs +++ /dev/null @@ -1,48 +0,0 @@ -use clap::Subcommand; -use ruma::events::room::message::RoomMessageEventContent; - -use super::{ - account_data::{account_data, AccountData}, - appservice::{appservice, Appservice}, - globals::{globals, Globals}, - presence::{presence, Presence}, - room_alias::{room_alias, RoomAlias}, -}; -use crate::Result; - -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -/// Query tables from database -pub(crate) enum QueryCommand { - /// - account_data.rs iterators and getters - #[command(subcommand)] - AccountData(AccountData), - - /// - appservice.rs iterators and getters - #[command(subcommand)] - Appservice(Appservice), - - /// - presence.rs iterators and getters - #[command(subcommand)] - Presence(Presence), - - /// - rooms/alias.rs iterators and getters - #[command(subcommand)] - RoomAlias(RoomAlias), - - /// - globals.rs iterators and getters - #[command(subcommand)] - Globals(Globals), -} - -/// Processes admin query commands -#[allow(non_snake_case)] -pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result { - match command { - QueryCommand::AccountData(AccountData) => account_data(AccountData).await, - QueryCommand::Appservice(Appservice) => appservice(Appservice).await, - QueryCommand::Presence(Presence) => presence(Presence).await, - QueryCommand::RoomAlias(RoomAlias) => room_alias(RoomAlias).await, - QueryCommand::Globals(Globals) => globals(Globals).await, - } -} diff --git a/src/service/admin/user/mod.rs b/src/service/admin/user/mod.rs new file mode 100644 index 00000000..7ac30043 --- /dev/null +++ b/src/service/admin/user/mod.rs @@ -0,0 +1,63 @@ +#[allow(clippy::module_inception)] +pub(crate) mod user; + +use clap::Subcommand; +use ruma::UserId; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum UserCommand { + /// - Create a new user + Create { + /// Username of the new user + username: String, + /// Password of the new user, if unspecified one is generated + password: Option, + }, + + /// - Reset user password + ResetPassword { + /// Username of the user for whom the password should be reset + username: String, + }, + + /// - Deactivate a user + /// + /// User will not be removed from all rooms by default. + /// Use --leave-rooms to force the user to leave all rooms + Deactivate { + #[arg(short, long)] + leave_rooms: bool, + user_id: Box, + }, + + /// - Deactivate a list of users + /// + /// Recommended to use in conjunction with list-local-users. + /// + /// Users will not be removed from joined rooms by default. + /// Can be overridden with --leave-rooms flag. + /// Removing a mass amount of users from a room may cause a significant + /// amount of leave events. 
The time to leave rooms may depend significantly + /// on joined rooms and servers. + /// + /// This command needs a newline separated list of users provided in a + /// Markdown code block below the command. + DeactivateAll { + #[arg(short, long)] + /// Remove users from their joined rooms + leave_rooms: bool, + #[arg(short, long)] + /// Also deactivate admin accounts + force: bool, + }, + + /// - List local users in the database + List, + + /// - Lists all the rooms (local and remote) that the specified user is + /// joined in + ListJoinedRooms { + user_id: Box, + }, +} diff --git a/src/service/admin/user.rs b/src/service/admin/user/user.rs similarity index 86% rename from src/service/admin/user.rs rename to src/service/admin/user/user.rs index 11441e86..73273590 100644 --- a/src/service/admin/user.rs +++ b/src/service/admin/user/user.rs @@ -1,74 +1,17 @@ use std::{fmt::Write as _, sync::Arc}; -use clap::Subcommand; use itertools::Itertools; use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, UserId}; use tracing::{error, info, warn}; +use super::UserCommand; + use crate::{ api::client_server::{join_room_by_id_helper, leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, service::admin::{escape_html, get_room_info}, services, utils, Result, }; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum UserCommand { - /// - Create a new user - Create { - /// Username of the new user - username: String, - /// Password of the new user, if unspecified one is generated - password: Option, - }, - - /// - Reset user password - ResetPassword { - /// Username of the user for whom the password should be reset - username: String, - }, - - /// - Deactivate a user - /// - /// User will not be removed from all rooms by default. - /// Use --leave-rooms to force the user to leave all rooms - Deactivate { - #[arg(short, long)] - leave_rooms: bool, - user_id: Box, - }, - - /// - Deactivate a list of users - /// - /// Recommended to use in conjunction with list-local-users. - /// - /// Users will not be removed from joined rooms by default. - /// Can be overridden with --leave-rooms flag. - /// Removing a mass amount of users from a room may cause a significant - /// amount of leave events. The time to leave rooms may depend significantly - /// on joined rooms and servers. - /// - /// This command needs a newline separated list of users provided in a - /// Markdown code block below the command. 
- DeactivateAll { - #[arg(short, long)] - /// Remove users from their joined rooms - leave_rooms: bool, - #[arg(short, long)] - /// Also deactivate admin accounts - force: bool, - }, - - /// - List local users in the database - List, - - /// - Lists all the rooms (local and remote) that the specified user is - /// joined in - ListJoinedRooms { - user_id: Box, - }, -} - pub(crate) async fn process(command: UserCommand, body: Vec<&str>) -> Result { match command { UserCommand::List => match services().users.list_local_users() { From cb784a63f62ca08c63d3f07ba639fdead612b309 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 20 Apr 2024 19:13:18 -0400 Subject: [PATCH 02/45] refactor a ton of the admin room code (50% done) Signed-off-by: strawberry --- src/service/admin/appservice.rs | 100 ----- .../admin/appservice/appservice_command.rs | 66 ++++ src/service/admin/appservice/mod.rs | 52 +++ src/service/admin/debug/debug.rs | 357 ------------------ src/service/admin/debug/debug_commands.rs | 339 +++++++++++++++++ src/service/admin/debug/mod.rs | 38 +- src/service/admin/fsck.rs | 5 +- src/service/admin/mod.rs | 4 +- src/service/admin/query/account_data.rs | 2 +- src/service/admin/query/appservice.rs | 2 +- src/service/admin/query/globals.rs | 2 +- src/service/admin/query/mod.rs | 14 +- src/service/admin/query/presence.rs | 2 +- src/service/admin/query/room_alias.rs | 2 +- src/service/admin/server.rs | 106 ------ src/service/admin/server/mod.rs | 58 +++ src/service/admin/server/server_commands.rs | 71 ++++ src/service/admin/user/mod.rs | 34 +- src/service/admin/user/user.rs | 349 ----------------- src/service/admin/user/user_commands.rs | 334 ++++++++++++++++ 20 files changed, 1002 insertions(+), 935 deletions(-) delete mode 100644 src/service/admin/appservice.rs create mode 100644 src/service/admin/appservice/appservice_command.rs create mode 100644 src/service/admin/appservice/mod.rs delete mode 100644 src/service/admin/debug/debug.rs create mode 100644 src/service/admin/debug/debug_commands.rs delete mode 100644 src/service/admin/server.rs create mode 100644 src/service/admin/server/mod.rs create mode 100644 src/service/admin/server/server_commands.rs delete mode 100644 src/service/admin/user/user.rs create mode 100644 src/service/admin/user/user_commands.rs diff --git a/src/service/admin/appservice.rs b/src/service/admin/appservice.rs deleted file mode 100644 index ff0611e0..00000000 --- a/src/service/admin/appservice.rs +++ /dev/null @@ -1,100 +0,0 @@ -use clap::Subcommand; -use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; - -use crate::{service::admin::escape_html, services, Result}; - -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum AppserviceCommand { - /// - Register an appservice using its registration YAML - /// - /// This command needs a YAML generated by an appservice (such as a bridge), - /// which must be provided in a Markdown code block below the command. - /// - /// Registering a new bridge using the ID of an existing bridge will replace - /// the old one. - Register, - - /// - Unregister an appservice using its ID - /// - /// You can find the ID using the `list-appservices` command. - Unregister { - /// The appservice to unregister - appservice_identifier: String, - }, - - /// - Show an appservice's config using its ID - /// - /// You can find the ID using the `list-appservices` command. 
- Show { - /// The appservice to show - appservice_identifier: String, - }, - - /// - List all the currently registered appservices - List, -} - -pub(crate) async fn process(command: AppserviceCommand, body: Vec<&str>) -> Result { - match command { - AppserviceCommand::Register => { - if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { - let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config); - match parsed_config { - Ok(yaml) => match services().appservice.register_appservice(yaml).await { - Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {id}." - ))), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {e}" - ))), - }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {e}" - ))), - } - } else { - Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )) - } - }, - AppserviceCommand::Unregister { - appservice_identifier, - } => match services() - .appservice - .unregister_appservice(&appservice_identifier) - .await - { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {e}" - ))), - }, - AppserviceCommand::Show { - appservice_identifier, - } => match services() - .appservice - .get_registration(&appservice_identifier) - .await - { - Some(config) => { - let config_str = serde_yaml::to_string(&config).expect("config should've been validated on register"); - let output = format!("Config for {}:\n\n```yaml\n{}\n```", appservice_identifier, config_str,); - let output_html = format!( - "Config for {}:\n\n
<pre><code class=\"language-yaml\">{}</code></pre>
", - escape_html(&appservice_identifier), - escape_html(&config_str), - ); - Ok(RoomMessageEventContent::text_html(output, output_html)) - }, - None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), - }, - AppserviceCommand::List => { - let appservices = services().appservice.iter_ids().await; - let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", ")); - Ok(RoomMessageEventContent::text_plain(output)) - }, - } -} diff --git a/src/service/admin/appservice/appservice_command.rs b/src/service/admin/appservice/appservice_command.rs new file mode 100644 index 00000000..e2c47a50 --- /dev/null +++ b/src/service/admin/appservice/appservice_command.rs @@ -0,0 +1,66 @@ +use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; + +use crate::{service::admin::escape_html, services, Result}; + +pub(super) async fn register(body: Vec<&str>) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let appservice_config = body[1..body.len() - 1].join("\n"); + let parsed_config = serde_yaml::from_str::(&appservice_config); + match parsed_config { + Ok(yaml) => match services().appservice.register_appservice(yaml).await { + Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( + "Appservice registered with ID: {id}." + ))), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Failed to register appservice: {e}" + ))), + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Could not parse appservice config: {e}" + ))), + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + } +} + +pub(super) async fn unregister(_body: Vec<&str>, appservice_identifier: String) -> Result { + match services() + .appservice + .unregister_appservice(&appservice_identifier) + .await + { + Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Failed to unregister appservice: {e}" + ))), + } +} + +pub(super) async fn show(_body: Vec<&str>, appservice_identifier: String) -> Result { + match services() + .appservice + .get_registration(&appservice_identifier) + .await + { + Some(config) => { + let config_str = serde_yaml::to_string(&config).expect("config should've been validated on register"); + let output = format!("Config for {}:\n\n```yaml\n{}\n```", appservice_identifier, config_str,); + let output_html = format!( + "Config for {}:\n\n
<pre><code class=\"language-yaml\">{}</code></pre>
", + escape_html(&appservice_identifier), + escape_html(&config_str), + ); + Ok(RoomMessageEventContent::text_html(output, output_html)) + }, + None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), + } +} + +pub(super) async fn list(_body: Vec<&str>) -> Result { + let appservices = services().appservice.iter_ids().await; + let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", ")); + Ok(RoomMessageEventContent::text_plain(output)) +} diff --git a/src/service/admin/appservice/mod.rs b/src/service/admin/appservice/mod.rs new file mode 100644 index 00000000..b0d225aa --- /dev/null +++ b/src/service/admin/appservice/mod.rs @@ -0,0 +1,52 @@ +use clap::Subcommand; +use ruma::events::room::message::RoomMessageEventContent; + +use self::appservice_command::{list, register, show, unregister}; +use crate::Result; + +pub(crate) mod appservice_command; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum AppserviceCommand { + /// - Register an appservice using its registration YAML + /// + /// This command needs a YAML generated by an appservice (such as a bridge), + /// which must be provided in a Markdown code block below the command. + /// + /// Registering a new bridge using the ID of an existing bridge will replace + /// the old one. + Register, + + /// - Unregister an appservice using its ID + /// + /// You can find the ID using the `list-appservices` command. + Unregister { + /// The appservice to unregister + appservice_identifier: String, + }, + + /// - Show an appservice's config using its ID + /// + /// You can find the ID using the `list-appservices` command. + Show { + /// The appservice to show + appservice_identifier: String, + }, + + /// - List all the currently registered appservices + List, +} + +pub(crate) async fn process(command: AppserviceCommand, body: Vec<&str>) -> Result { + Ok(match command { + AppserviceCommand::Register => register(body).await?, + AppserviceCommand::Unregister { + appservice_identifier, + } => unregister(body, appservice_identifier).await?, + AppserviceCommand::Show { + appservice_identifier, + } => show(body, appservice_identifier).await?, + AppserviceCommand::List => list(body).await?, + }) +} diff --git a/src/service/admin/debug/debug.rs b/src/service/admin/debug/debug.rs deleted file mode 100644 index 0f77b2fd..00000000 --- a/src/service/admin/debug/debug.rs +++ /dev/null @@ -1,357 +0,0 @@ -use std::{collections::BTreeMap, sync::Arc, time::Instant}; - -use ruma::{ - api::client::error::ErrorKind, events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, - RoomId, RoomVersionId, -}; -use tokio::sync::RwLock; -use tracing::{debug, error, info, warn}; -use tracing_subscriber::EnvFilter; - -use super::DebugCommand; -use crate::{api::server_server::parse_incoming_pdu, services, utils::HtmlEscape, Error, PduEvent, Result}; - -pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result { - Ok(match command { - DebugCommand::GetAuthChain { - event_id, - } => { - let event_id = Arc::::from(event_id); - if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? 
{ - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let start = Instant::now(); - let count = services() - .rooms - .auth_chain - .event_ids_iter(room_id, vec![event_id]) - .await? - .count(); - let elapsed = start.elapsed(); - RoomMessageEventContent::text_plain(format!("Loaded auth chain with length {count} in {elapsed:?}")) - } else { - RoomMessageEventContent::text_plain("Event not found.") - } - }, - DebugCommand::ParsePdu => { - if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { - Ok(hash) => { - let event_id = EventId::parse(format!("${hash}")); - - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - Ok(pdu) => { - RoomMessageEventContent::text_plain(format!("EventId: {event_id:?}\n{pdu:#?}")) - }, - Err(e) => RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\nCould not parse event: {e}" - )), - } - }, - Err(e) => RoomMessageEventContent::text_plain(format!("Could not parse PDU JSON: {e:?}")), - }, - Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json in command body: {e}")), - } - } else { - RoomMessageEventContent::text_plain("Expected code block in command body.") - } - }, - DebugCommand::GetPdu { - event_id, - } => { - let mut outlier = false; - let mut pdu_json = services() - .rooms - .timeline - .get_non_outlier_pdu_json(&event_id)?; - if pdu_json.is_none() { - outlier = true; - pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; - } - match pdu_json { - Some(json) => { - let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - return Ok(RoomMessageEventContent::text_html( - format!( - "{}\n```json\n{}\n```", - if outlier { - "Outlier PDU found in our database" - } else { - "PDU found in our database" - }, - json_text - ), - format!( - "
<p><strong>{}</strong></p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", - if outlier { - "Outlier PDU found in our database" - } else { - "PDU found in our database" - }, - HtmlEscape(&json_text) - ), - )); - }, - None => { - return Ok(RoomMessageEventContent::text_plain("PDU not found locally.")); - }, - } - }, - DebugCommand::GetRemotePdu { - event_id, - server, - } => { - if !services().globals.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); - } - - if server == services().globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local \ - PDUs.", - )); - } - - // TODO: use Futures as some requests may take a while so we dont block the - // admin room - match services() - .sending - .send_federation_request( - &server, - ruma::api::federation::event::get_event::v1::Request { - event_id: event_id.clone().into(), - }, - ) - .await - { - Ok(response) => { - let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { - warn!( - "Requested event ID {event_id} from server but failed to convert from RawValue to \ - CanonicalJsonObject (malformed event/response?): {e}" - ); - Error::BadRequest(ErrorKind::Unknown, "Received response from server but failed to parse PDU") - })?; - - debug!("Attempting to parse PDU: {:?}", &response.pdu); - let parsed_pdu = { - let parsed_result = parse_incoming_pdu(&response.pdu); - let (event_id, value, room_id) = match parsed_result { - Ok(t) => t, - Err(e) => { - warn!("Failed to parse PDU: {e}"); - info!("Full PDU: {:?}", &response.pdu); - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse PDU remote server {server} sent us: {e}" - ))); - }, - }; - - vec![(event_id, value, room_id)] - }; - - let pub_key_map = RwLock::new(BTreeMap::new()); - - debug!("Attempting to fetch homeserver signing keys for {server}"); - services() - .rooms - .event_handler - .fetch_required_signing_keys( - parsed_pdu.iter().map(|(_event_id, event, _room_id)| event), - &pub_key_map, - ) - .await - .unwrap_or_else(|e| { - warn!("Could not fetch all signatures for PDUs from {server}: {e:?}"); - }); - - info!("Attempting to handle event ID {event_id} as backfilled PDU"); - services() - .rooms - .timeline - .backfill_pdu(&server, response.pdu, &pub_key_map) - .await?; - - let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - - return Ok(RoomMessageEventContent::text_html( - format!( - "{}\n```json\n{}\n```", - "Got PDU from specified server and handled as backfilled PDU successfully. Event body:", - json_text - ), - format!( - "
<p><strong>{}</strong></p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", - "Got PDU from specified server and handled as backfilled PDU successfully. Event body:", - HtmlEscape(&json_text) - ), - )); - }, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Remote server did not have PDU or failed sending request to remote server: {e}" - ))); - }, - } - }, - DebugCommand::GetRoomState { - room_id, - } => { - let room_state = services() - .rooms - .state_accessor - .room_state_full(&room_id) - .await? - .values() - .map(|pdu| pdu.to_state_event()) - .collect::>(); - - if room_state.is_empty() { - return Ok(RoomMessageEventContent::text_plain( - "Unable to find room state in our database (vector is empty)", - )); - } - - let json_text = serde_json::to_string_pretty(&room_state).map_err(|e| { - error!("Failed converting room state vector in our database to pretty JSON: {e}"); - Error::bad_database( - "Failed to convert room state events to pretty JSON, possible invalid room state events in our \ - database", - ) - })?; - - return Ok(RoomMessageEventContent::text_html( - format!("{}\n```json\n{}\n```", "Found full room state", json_text), - format!( - "
<p><strong>{}</strong></p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", - "Found full room state", - HtmlEscape(&json_text) - ), - )); - }, - DebugCommand::Ping { - server, - } => { - if server == services().globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves.", - )); - } - - let timer = tokio::time::Instant::now(); - - match services() - .sending - .send_federation_request(&server, ruma::api::federation::discovery::get_server_version::v1::Request {}) - .await - { - Ok(response) => { - let ping_time = timer.elapsed(); - - let json_text_res = serde_json::to_string_pretty(&response.server); - - if let Ok(json) = json_text_res { - return Ok(RoomMessageEventContent::text_html( - format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```"), - format!( - "
<p><strong>Got response which took {ping_time:?} time:</strong></p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", - HtmlEscape(&json) - ), - )); - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "Got non-JSON response which took {ping_time:?} time:\n{0:?}", - response - ))); - }, - Err(e) => { - error!("Failed sending federation request to specified server from ping debug command: {e}"); - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed sending federation request to specified server:\n\n{e}", - ))); - }, - } - }, - DebugCommand::ForceDeviceListUpdates => { - // Force E2EE device list updates for all users - for user_id in services().users.iter().filter_map(Result::ok) { - services().users.mark_device_key_update(&user_id)?; - } - RoomMessageEventContent::text_plain("Marked all devices for all users as having new keys to update") - }, - DebugCommand::ChangeLogLevel { - filter, - reset, - } => { - if reset { - let old_filter_layer = match EnvFilter::try_new(&services().globals.config.log) { - Ok(s) => s, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Log level from config appears to be invalid now: {e}" - ))); - }, - }; - - match services() - .globals - .tracing_reload_handle - .modify(|filter| *filter = old_filter_layer) - { - Ok(()) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Successfully changed log level back to config value {}", - services().globals.config.log - ))); - }, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); - }, - } - } - - if let Some(filter) = filter { - let new_filter_layer = match EnvFilter::try_new(filter) { - Ok(s) => s, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Invalid log level filter specified: {e}" - ))); - }, - }; - - match services() - .globals - .tracing_reload_handle - .modify(|filter| *filter = new_filter_layer) - { - Ok(()) => { - return Ok(RoomMessageEventContent::text_plain("Successfully changed log level")); - }, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); - }, - } - } - - return Ok(RoomMessageEventContent::text_plain("No log level was specified.")); - }, - }) -} diff --git a/src/service/admin/debug/debug_commands.rs b/src/service/admin/debug/debug_commands.rs new file mode 100644 index 00000000..870016f3 --- /dev/null +++ b/src/service/admin/debug/debug_commands.rs @@ -0,0 +1,339 @@ +use std::{collections::BTreeMap, sync::Arc, time::Instant}; + +use ruma::{ + api::client::error::ErrorKind, events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, + RoomId, RoomVersionId, ServerName, +}; +use tokio::sync::RwLock; +use tracing::{debug, error, info, warn}; +use tracing_subscriber::EnvFilter; + +use crate::{api::server_server::parse_incoming_pdu, services, utils::HtmlEscape, Error, PduEvent, Result}; + +pub(super) async fn get_auth_chain(_body: Vec<&str>, event_id: Box) -> Result { + let event_id = Arc::::from(event_id); + if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let start = Instant::now(); + let count = services() + .rooms + .auth_chain + .event_ids_iter(room_id, vec![event_id]) + .await? 
+ .count(); + let elapsed = start.elapsed(); + Ok(RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {count} in {elapsed:?}" + ))) + } else { + Ok(RoomMessageEventContent::text_plain("Event not found.")) + } +} + +pub(super) async fn parse_pdu(body: Vec<&str>) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + Ok(hash) => { + let event_id = EventId::parse(format!("${hash}")); + + match serde_json::from_value::(serde_json::to_value(value).expect("value is json")) { + Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!("EventId: {event_id:?}\n{pdu:#?}"))), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "EventId: {event_id:?}\nCould not parse event: {e}" + ))), + } + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Could not parse PDU JSON: {e:?}"))), + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Invalid json in command body: {e}" + ))), + } + } else { + Ok(RoomMessageEventContent::text_plain("Expected code block in command body.")) + } +} + +pub(super) async fn get_pdu(_body: Vec<&str>, event_id: Box) -> Result { + let mut outlier = false; + let mut pdu_json = services() + .rooms + .timeline + .get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + Ok(RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + if outlier { + "Outlier PDU found in our database" + } else { + "PDU found in our database" + }, + json_text + ), + format!( + "
<p><strong>{}</strong></p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", + if outlier { + "Outlier PDU found in our database" + } else { + "PDU found in our database" + }, + HtmlEscape(&json_text) + ), + )) + }, + None => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), + } +} + +pub(super) async fn get_remote_pdu( + _body: Vec<&str>, event_id: Box, server: Box, +) -> Result { + if !services().globals.config.allow_federation { + return Ok(RoomMessageEventContent::text_plain( + "Federation is disabled on this homeserver.", + )); + } + + if server == services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain( + "Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local PDUs.", + )); + } + + match services() + .sending + .send_federation_request( + &server, + ruma::api::federation::event::get_event::v1::Request { + event_id: event_id.clone().into(), + }, + ) + .await + { + Ok(response) => { + let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { + warn!( + "Requested event ID {event_id} from server but failed to convert from RawValue to \ + CanonicalJsonObject (malformed event/response?): {e}" + ); + Error::BadRequest(ErrorKind::Unknown, "Received response from server but failed to parse PDU") + })?; + + debug!("Attempting to parse PDU: {:?}", &response.pdu); + let parsed_pdu = { + let parsed_result = parse_incoming_pdu(&response.pdu); + let (event_id, value, room_id) = match parsed_result { + Ok(t) => t, + Err(e) => { + warn!("Failed to parse PDU: {e}"); + info!("Full PDU: {:?}", &response.pdu); + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to parse PDU remote server {server} sent us: {e}" + ))); + }, + }; + + vec![(event_id, value, room_id)] + }; + + let pub_key_map = RwLock::new(BTreeMap::new()); + + debug!("Attempting to fetch homeserver signing keys for {server}"); + services() + .rooms + .event_handler + .fetch_required_signing_keys(parsed_pdu.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map) + .await + .unwrap_or_else(|e| { + warn!("Could not fetch all signatures for PDUs from {server}: {e:?}"); + }); + + info!("Attempting to handle event ID {event_id} as backfilled PDU"); + services() + .rooms + .timeline + .backfill_pdu(&server, response.pdu, &pub_key_map) + .await?; + + let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + + Ok(RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + "Got PDU from specified server and handled as backfilled PDU successfully. Event body:", json_text + ), + format!( + "
<p><strong>{}</strong></p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", + "Got PDU from specified server and handled as backfilled PDU successfully. Event body:", + HtmlEscape(&json_text) + ), + )) + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Remote server did not have PDU or failed sending request to remote server: {e}" + ))), + } +} + +pub(super) async fn get_room_state(_body: Vec<&str>, room_id: Box) -> Result { + let room_state = services() + .rooms + .state_accessor + .room_state_full(&room_id) + .await? + .values() + .map(|pdu| pdu.to_state_event()) + .collect::>(); + + if room_state.is_empty() { + return Ok(RoomMessageEventContent::text_plain( + "Unable to find room state in our database (vector is empty)", + )); + } + + let json_text = serde_json::to_string_pretty(&room_state).map_err(|e| { + error!("Failed converting room state vector in our database to pretty JSON: {e}"); + Error::bad_database( + "Failed to convert room state events to pretty JSON, possible invalid room state events in our database", + ) + })?; + + Ok(RoomMessageEventContent::text_html( + format!("{}\n```json\n{}\n```", "Found full room state", json_text), + format!( + "
<p><strong>{}</strong></p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", + "Found full room state", + HtmlEscape(&json_text) + ), + )) +} + +pub(super) async fn ping(_body: Vec<&str>, server: Box) -> Result { + if server == services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain( + "Not allowed to send federation requests to ourselves.", + )); + } + + let timer = tokio::time::Instant::now(); + + match services() + .sending + .send_federation_request(&server, ruma::api::federation::discovery::get_server_version::v1::Request {}) + .await + { + Ok(response) => { + let ping_time = timer.elapsed(); + + let json_text_res = serde_json::to_string_pretty(&response.server); + + if let Ok(json) = json_text_res { + return Ok(RoomMessageEventContent::text_html( + format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```"), + format!( + "
<p><strong>Got response which took {ping_time:?} time:</strong></p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", + HtmlEscape(&json) + ), + )); + } + + Ok(RoomMessageEventContent::text_plain(format!( + "Got non-JSON response which took {ping_time:?} time:\n{0:?}", + response + ))) + }, + Err(e) => { + error!("Failed sending federation request to specified server from ping debug command: {e}"); + Ok(RoomMessageEventContent::text_plain(format!( + "Failed sending federation request to specified server:\n\n{e}", + ))) + }, + } +} + +pub(super) async fn force_device_list_updates(_body: Vec<&str>) -> Result { + // Force E2EE device list updates for all users + for user_id in services().users.iter().filter_map(Result::ok) { + services().users.mark_device_key_update(&user_id)?; + } + Ok(RoomMessageEventContent::text_plain( + "Marked all devices for all users as having new keys to update", + )) +} + +pub(super) async fn change_log_level( + _body: Vec<&str>, filter: Option, reset: bool, +) -> Result { + if reset { + let old_filter_layer = match EnvFilter::try_new(&services().globals.config.log) { + Ok(s) => s, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Log level from config appears to be invalid now: {e}" + ))); + }, + }; + + match services() + .globals + .tracing_reload_handle + .modify(|filter| *filter = old_filter_layer) + { + Ok(()) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Successfully changed log level back to config value {}", + services().globals.config.log + ))); + }, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to modify and reload the global tracing log level: {e}" + ))); + }, + } + } + + if let Some(filter) = filter { + let new_filter_layer = match EnvFilter::try_new(filter) { + Ok(s) => s, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Invalid log level filter specified: {e}" + ))); + }, + }; + + match services() + .globals + .tracing_reload_handle + .modify(|filter| *filter = new_filter_layer) + { + Ok(()) => { + return Ok(RoomMessageEventContent::text_plain("Successfully changed log level")); + }, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to modify and reload the global tracing log level: {e}" + ))); + }, + } + } + + Ok(RoomMessageEventContent::text_plain("No log level was specified.")) +} diff --git a/src/service/admin/debug/mod.rs b/src/service/admin/debug/mod.rs index 43823175..80e1c74c 100644 --- a/src/service/admin/debug/mod.rs +++ b/src/service/admin/debug/mod.rs @@ -1,8 +1,13 @@ use clap::Subcommand; -use ruma::{EventId, RoomId, ServerName}; +use ruma::{events::room::message::RoomMessageEventContent, EventId, RoomId, ServerName}; -#[allow(clippy::module_inception)] -pub(crate) mod debug; +use self::debug_commands::{ + change_log_level, force_device_list_updates, get_auth_chain, get_pdu, get_remote_pdu, get_room_state, parse_pdu, + ping, +}; +use crate::Result; + +pub(crate) mod debug_commands; #[cfg_attr(test, derive(Debug))] #[derive(Subcommand)] @@ -78,3 +83,30 @@ pub(crate) enum DebugCommand { reset: bool, }, } + +pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result { + Ok(match command { + DebugCommand::GetAuthChain { + event_id, + } => get_auth_chain(body, event_id).await?, + DebugCommand::ParsePdu => parse_pdu(body).await?, + DebugCommand::GetPdu { + event_id, + } => get_pdu(body, event_id).await?, + DebugCommand::GetRemotePdu { + event_id, + server, + } => get_remote_pdu(body, event_id, server).await?, + DebugCommand::GetRoomState { + room_id, + } => get_room_state(body, room_id).await?, + 
DebugCommand::Ping { + server, + } => ping(body, server).await?, + DebugCommand::ForceDeviceListUpdates => force_device_list_updates(body).await?, + DebugCommand::ChangeLogLevel { + filter, + reset, + } => change_log_level(body, filter, reset).await?, + }) +} diff --git a/src/service/admin/fsck.rs b/src/service/admin/fsck.rs index 054976d4..9e9b64a1 100644 --- a/src/service/admin/fsck.rs +++ b/src/service/admin/fsck.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use ruma::events::room::message::RoomMessageEventContent; -use crate::{services, Result}; +use crate::Result; #[cfg_attr(test, derive(Debug))] #[derive(Subcommand)] @@ -9,7 +9,8 @@ pub(crate) enum FsckCommand { Register, } -pub(crate) async fn fsck(command: FsckCommand, body: Vec<&str>) -> Result { +#[allow(dead_code)] +pub(crate) async fn fsck(command: FsckCommand, _body: Vec<&str>) -> Result { match command { FsckCommand::Register => { todo!() diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index da5a77c4..1971461d 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -280,11 +280,11 @@ impl Service { let reply_message_content = match command { AdminCommand::Appservices(command) => appservice::process(command, body).await?, AdminCommand::Media(command) => media::process(command, body).await?, - AdminCommand::Users(command) => user::user::process(command, body).await?, + AdminCommand::Users(command) => user::process(command, body).await?, AdminCommand::Rooms(command) => room::process(command, body).await?, AdminCommand::Federation(command) => federation::process(command, body).await?, AdminCommand::Server(command) => server::process(command, body).await?, - AdminCommand::Debug(command) => debug::debug::process(command, body).await?, + AdminCommand::Debug(command) => debug::process(command, body).await?, AdminCommand::Query(command) => query::process(command, body).await?, }; diff --git a/src/service/admin/query/account_data.rs b/src/service/admin/query/account_data.rs index 7e03d794..f335489e 100644 --- a/src/service/admin/query/account_data.rs +++ b/src/service/admin/query/account_data.rs @@ -32,7 +32,7 @@ pub(crate) enum AccountData { } /// All the getters and iterators from src/database/key_value/account_data.rs -pub(crate) async fn account_data(subcommand: AccountData) -> Result { +pub(super) async fn account_data(subcommand: AccountData) -> Result { match subcommand { AccountData::ChangesSince { user_id, diff --git a/src/service/admin/query/appservice.rs b/src/service/admin/query/appservice.rs index 4dfada18..c576f7db 100644 --- a/src/service/admin/query/appservice.rs +++ b/src/service/admin/query/appservice.rs @@ -15,7 +15,7 @@ pub(crate) enum Appservice { } /// All the getters and iterators from src/database/key_value/appservice.rs -pub(crate) async fn appservice(subcommand: Appservice) -> Result { +pub(super) async fn appservice(subcommand: Appservice) -> Result { match subcommand { Appservice::GetRegistration { appservice_id, diff --git a/src/service/admin/query/globals.rs b/src/service/admin/query/globals.rs index 2e2a38fc..25c3e337 100644 --- a/src/service/admin/query/globals.rs +++ b/src/service/admin/query/globals.rs @@ -23,7 +23,7 @@ pub(crate) enum Globals { } /// All the getters and iterators from src/database/key_value/globals.rs -pub(crate) async fn globals(subcommand: Globals) -> Result { +pub(super) async fn globals(subcommand: Globals) -> Result { match subcommand { Globals::DatabaseVersion => { let timer = tokio::time::Instant::now(); diff --git a/src/service/admin/query/mod.rs 
b/src/service/admin/query/mod.rs index fc10c1c9..8033e731 100644 --- a/src/service/admin/query/mod.rs +++ b/src/service/admin/query/mod.rs @@ -44,11 +44,11 @@ pub(crate) enum QueryCommand { /// Processes admin query commands #[allow(non_snake_case)] pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result { - match command { - QueryCommand::AccountData(AccountData) => account_data(AccountData).await, - QueryCommand::Appservice(Appservice) => appservice(Appservice).await, - QueryCommand::Presence(Presence) => presence(Presence).await, - QueryCommand::RoomAlias(RoomAlias) => room_alias(RoomAlias).await, - QueryCommand::Globals(Globals) => globals(Globals).await, - } + Ok(match command { + QueryCommand::AccountData(AccountData) => account_data(AccountData).await?, + QueryCommand::Appservice(Appservice) => appservice(Appservice).await?, + QueryCommand::Presence(Presence) => presence(Presence).await?, + QueryCommand::RoomAlias(RoomAlias) => room_alias(RoomAlias).await?, + QueryCommand::Globals(Globals) => globals(Globals).await?, + }) } diff --git a/src/service/admin/query/presence.rs b/src/service/admin/query/presence.rs index 952920c1..bb55b88f 100644 --- a/src/service/admin/query/presence.rs +++ b/src/service/admin/query/presence.rs @@ -22,7 +22,7 @@ pub(crate) enum Presence { } /// All the getters and iterators in key_value/presence.rs -pub(crate) async fn presence(subcommand: Presence) -> Result { +pub(super) async fn presence(subcommand: Presence) -> Result { match subcommand { Presence::GetPresence { user_id, diff --git a/src/service/admin/query/room_alias.rs b/src/service/admin/query/room_alias.rs index f5ca4965..e854f643 100644 --- a/src/service/admin/query/room_alias.rs +++ b/src/service/admin/query/room_alias.rs @@ -23,7 +23,7 @@ pub(crate) enum RoomAlias { } /// All the getters and iterators in src/database/key_value/rooms/alias.rs -pub(crate) async fn room_alias(subcommand: RoomAlias) -> Result { +pub(super) async fn room_alias(subcommand: RoomAlias) -> Result { match subcommand { RoomAlias::ResolveLocalAlias { alias, diff --git a/src/service/admin/server.rs b/src/service/admin/server.rs deleted file mode 100644 index 07519a1b..00000000 --- a/src/service/admin/server.rs +++ /dev/null @@ -1,106 +0,0 @@ -use clap::Subcommand; -use ruma::events::room::message::RoomMessageEventContent; - -use crate::{services, Result}; - -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum ServerCommand { - /// - Show configuration values - ShowConfig, - - /// - Print database memory usage statistics - MemoryUsage, - - /// - Clears all of Conduit's database caches with index smaller than the - /// amount - ClearDatabaseCaches { - amount: u32, - }, - - /// - Clears all of Conduit's service caches with index smaller than the - /// amount - ClearServiceCaches { - amount: u32, - }, - - /// - Performs an online backup of the database (only available for RocksDB - /// at the moment) - BackupDatabase, - - /// - List database backups - ListBackups, - - /// - List database files - ListDatabaseFiles, -} - -pub(crate) async fn process(command: ServerCommand, _body: Vec<&str>) -> Result { - match command { - ServerCommand::ShowConfig => { - // Construct and send the response - Ok(RoomMessageEventContent::text_plain(format!("{}", services().globals.config))) - }, - ServerCommand::MemoryUsage => { - let response1 = services().memory_usage().await; - let response2 = services().globals.db.memory_usage(); - - Ok(RoomMessageEventContent::text_plain(format!( - 
"Services:\n{response1}\n\nDatabase:\n{response2}" - ))) - }, - ServerCommand::ClearDatabaseCaches { - amount, - } => { - services().globals.db.clear_caches(amount); - - Ok(RoomMessageEventContent::text_plain("Done.")) - }, - ServerCommand::ClearServiceCaches { - amount, - } => { - services().clear_caches(amount).await; - - Ok(RoomMessageEventContent::text_plain("Done.")) - }, - ServerCommand::ListBackups => { - let result = services().globals.db.backup_list()?; - - if result.is_empty() { - Ok(RoomMessageEventContent::text_plain("No backups found.")) - } else { - Ok(RoomMessageEventContent::text_plain(result)) - } - }, - ServerCommand::BackupDatabase => { - if !cfg!(feature = "rocksdb") { - return Ok(RoomMessageEventContent::text_plain( - "Only RocksDB supports online backups in conduwuit.", - )); - } - - let mut result = tokio::task::spawn_blocking(move || match services().globals.db.backup() { - Ok(()) => String::new(), - Err(e) => (*e).to_string(), - }) - .await - .unwrap(); - - if result.is_empty() { - result = services().globals.db.backup_list()?; - } - - Ok(RoomMessageEventContent::text_plain(&result)) - }, - ServerCommand::ListDatabaseFiles => { - if !cfg!(feature = "rocksdb") { - return Ok(RoomMessageEventContent::text_plain( - "Only RocksDB supports listing files in conduwuit.", - )); - } - - let result = services().globals.db.file_list()?; - Ok(RoomMessageEventContent::notice_html(String::new(), result)) - }, - } -} diff --git a/src/service/admin/server/mod.rs b/src/service/admin/server/mod.rs new file mode 100644 index 00000000..b18b2c7a --- /dev/null +++ b/src/service/admin/server/mod.rs @@ -0,0 +1,58 @@ +pub(crate) mod server_commands; + +use clap::Subcommand; +use ruma::events::room::message::RoomMessageEventContent; + +use self::server_commands::{ + backup_database, clear_database_caches, clear_service_caches, list_backups, list_database_files, memory_usage, + show_config, +}; +use crate::Result; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum ServerCommand { + /// - Show configuration values + ShowConfig, + + /// - Print database memory usage statistics + MemoryUsage, + + /// - Clears all of Conduit's database caches with index smaller than the + /// amount + ClearDatabaseCaches { + amount: u32, + }, + + /// - Clears all of Conduit's service caches with index smaller than the + /// amount + ClearServiceCaches { + amount: u32, + }, + + /// - Performs an online backup of the database (only available for RocksDB + /// at the moment) + BackupDatabase, + + /// - List database backups + ListBackups, + + /// - List database files + ListDatabaseFiles, +} + +pub(crate) async fn process(command: ServerCommand, body: Vec<&str>) -> Result { + Ok(match command { + ServerCommand::ShowConfig => show_config(body).await?, + ServerCommand::MemoryUsage => memory_usage(body).await?, + ServerCommand::ClearDatabaseCaches { + amount, + } => clear_database_caches(body, amount).await?, + ServerCommand::ClearServiceCaches { + amount, + } => clear_service_caches(body, amount).await?, + ServerCommand::ListBackups => list_backups(body).await?, + ServerCommand::BackupDatabase => backup_database(body).await?, + ServerCommand::ListDatabaseFiles => list_database_files(body).await?, + }) +} diff --git a/src/service/admin/server/server_commands.rs b/src/service/admin/server/server_commands.rs new file mode 100644 index 00000000..f363d7ff --- /dev/null +++ b/src/service/admin/server/server_commands.rs @@ -0,0 +1,71 @@ +use ruma::events::room::message::RoomMessageEventContent; + 
+use crate::{services, Result}; + +pub(super) async fn show_config(_body: Vec<&str>) -> Result { + // Construct and send the response + Ok(RoomMessageEventContent::text_plain(format!("{}", services().globals.config))) +} + +pub(super) async fn memory_usage(_body: Vec<&str>) -> Result { + let response1 = services().memory_usage().await; + let response2 = services().globals.db.memory_usage(); + + Ok(RoomMessageEventContent::text_plain(format!( + "Services:\n{response1}\n\nDatabase:\n{response2}" + ))) +} + +pub(super) async fn clear_database_caches(_body: Vec<&str>, amount: u32) -> Result { + services().globals.db.clear_caches(amount); + + Ok(RoomMessageEventContent::text_plain("Done.")) +} + +pub(super) async fn clear_service_caches(_body: Vec<&str>, amount: u32) -> Result { + services().clear_caches(amount).await; + + Ok(RoomMessageEventContent::text_plain("Done.")) +} + +pub(super) async fn list_backups(_body: Vec<&str>) -> Result { + let result = services().globals.db.backup_list()?; + + if result.is_empty() { + Ok(RoomMessageEventContent::text_plain("No backups found.")) + } else { + Ok(RoomMessageEventContent::text_plain(result)) + } +} + +pub(super) async fn backup_database(_body: Vec<&str>) -> Result { + if !cfg!(feature = "rocksdb") { + return Ok(RoomMessageEventContent::text_plain( + "Only RocksDB supports online backups in conduwuit.", + )); + } + + let mut result = tokio::task::spawn_blocking(move || match services().globals.db.backup() { + Ok(()) => String::new(), + Err(e) => (*e).to_string(), + }) + .await + .unwrap(); + + if result.is_empty() { + result = services().globals.db.backup_list()?; + } + + Ok(RoomMessageEventContent::text_plain(&result)) +} + +pub(super) async fn list_database_files(_body: Vec<&str>) -> Result { + if !cfg!(feature = "rocksdb") { + return Ok(RoomMessageEventContent::text_plain( + "Only RocksDB supports listing files in conduwuit.", + )); + } + + let result = services().globals.db.file_list()?; + Ok(RoomMessageEventContent::notice_html(String::new(), result)) +} diff --git a/src/service/admin/user/mod.rs b/src/service/admin/user/mod.rs index 7ac30043..d14f7cfc 100644 --- a/src/service/admin/user/mod.rs +++ b/src/service/admin/user/mod.rs @@ -1,8 +1,10 @@ -#[allow(clippy::module_inception)] -pub(crate) mod user; +pub(crate) mod user_commands; use clap::Subcommand; -use ruma::UserId; +use ruma::{events::room::message::RoomMessageEventContent, UserId}; + +use self::user_commands::{create, deactivate, deactivate_all, list, list_joined_rooms, reset_password}; +use crate::Result; #[cfg_attr(test, derive(Debug))] #[derive(Subcommand)] @@ -18,7 +20,7 @@ pub(crate) enum UserCommand { /// - Reset user password ResetPassword { /// Username of the user for whom the password should be reset - username: String, + username: Box, }, /// - Deactivate a user @@ -61,3 +63,27 @@ pub(crate) enum UserCommand { user_id: Box, }, } + +pub(crate) async fn process(command: UserCommand, body: Vec<&str>) -> Result { + Ok(match command { + UserCommand::List => list(body).await?, + UserCommand::Create { + username, + password, + } => create(body, username, password).await?, + UserCommand::Deactivate { + leave_rooms, + user_id, + } => deactivate(body, leave_rooms, user_id).await?, + UserCommand::ResetPassword { + username, + } => reset_password(body, username).await?, + UserCommand::DeactivateAll { + leave_rooms, + force, + } => deactivate_all(body, leave_rooms, force).await?, + UserCommand::ListJoinedRooms { + user_id, + } => list_joined_rooms(body, user_id).await?, + }) +} diff 
--git a/src/service/admin/user/user.rs b/src/service/admin/user/user.rs deleted file mode 100644 index 73273590..00000000 --- a/src/service/admin/user/user.rs +++ /dev/null @@ -1,349 +0,0 @@ -use std::{fmt::Write as _, sync::Arc}; - -use itertools::Itertools; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, UserId}; -use tracing::{error, info, warn}; - -use super::UserCommand; - -use crate::{ - api::client_server::{join_room_by_id_helper, leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, - service::admin::{escape_html, get_room_info}, - services, utils, Result, -}; - -pub(crate) async fn process(command: UserCommand, body: Vec<&str>) -> Result { - match command { - UserCommand::List => match services().users.list_local_users() { - Ok(users) => { - let mut msg = format!("Found {} local user account(s):\n", users.len()); - msg += &users.join("\n"); - Ok(RoomMessageEventContent::text_plain(&msg)) - }, - Err(e) => Ok(RoomMessageEventContent::text_plain(e.to_string())), - }, - UserCommand::Create { - username, - password, - } => { - let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); - // Validate user id - let user_id = match UserId::parse_with_server_name( - username.as_str().to_lowercase(), - services().globals.server_name(), - ) { - Ok(id) => id, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {e}" - ))) - }, - }; - if user_id.is_historical() { - return Ok(RoomMessageEventContent::text_plain(format!( - "Userid {user_id} is not allowed due to historical" - ))); - } - if services().users.exists(&user_id)? { - return Ok(RoomMessageEventContent::text_plain(format!("Userid {user_id} already exists"))); - } - // Create user - services().users.create(&user_id, Some(password.as_str()))?; - - // Default to pretty displayname - let mut displayname = user_id.localpart().to_owned(); - - // If `new_user_displayname_suffix` is set, registration will push whatever - // content is set to the user's display name with a space before it - if !services().globals.new_user_displayname_suffix().is_empty() { - displayname.push_str(&(" ".to_owned() + services().globals.new_user_displayname_suffix())); - } - - services() - .users - .set_displayname(&user_id, Some(displayname)) - .await?; - - // Initial account data - services().account_data.update( - None, - &user_id, - ruma::events::GlobalAccountDataEventType::PushRules - .to_string() - .into(), - &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: ruma::push::Ruleset::server_default(&user_id), - }, - }) - .expect("to json value always works"), - )?; - - if !services().globals.config.auto_join_rooms.is_empty() { - for room in &services().globals.config.auto_join_rooms { - if !services() - .rooms - .state_cache - .server_in_room(services().globals.server_name(), room)? 
- { - warn!("Skipping room {room} to automatically join as we have never joined before."); - continue; - } - - if let Some(room_id_server_name) = room.server_name() { - match join_room_by_id_helper( - Some(&user_id), - room, - Some("Automatically joining this room upon registration".to_owned()), - &[room_id_server_name.to_owned(), services().globals.server_name().to_owned()], - None, - ) - .await - { - Ok(_) => { - info!("Automatically joined room {room} for user {user_id}"); - }, - Err(e) => { - // don't return this error so we don't fail registrations - error!("Failed to automatically join room {room} for user {user_id}: {e}"); - }, - }; - } - } - } - - // we dont add a device since we're not the user, just the creator - - // Inhibit login does not work for guests - Ok(RoomMessageEventContent::text_plain(format!( - "Created user with user_id: {user_id} and password: `{password}`" - ))) - }, - UserCommand::Deactivate { - leave_rooms, - user_id, - } => { - let user_id = Arc::::from(user_id); - - // check if user belongs to our server - if user_id.server_name() != services().globals.server_name() { - return Ok(RoomMessageEventContent::text_plain(format!( - "User {user_id} does not belong to our server." - ))); - } - - // don't deactivate the conduit service account - if user_id - == UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("conduit user exists") - { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to deactivate the Conduit service account.", - )); - } - - if services().users.exists(&user_id)? { - RoomMessageEventContent::text_plain(format!("Making {user_id} leave all rooms before deactivation...")); - - services().users.deactivate_account(&user_id)?; - - if leave_rooms { - leave_all_rooms(&user_id).await?; - } - - Ok(RoomMessageEventContent::text_plain(format!( - "User {user_id} has been deactivated" - ))) - } else { - Ok(RoomMessageEventContent::text_plain(format!( - "User {user_id} doesn't exist on this server" - ))) - } - }, - UserCommand::ResetPassword { - username, - } => { - let user_id = match UserId::parse_with_server_name( - username.as_str().to_lowercase(), - services().globals.server_name(), - ) { - Ok(id) => id, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {e}" - ))) - }, - }; - - // check if user belongs to our server - if user_id.server_name() != services().globals.server_name() { - return Ok(RoomMessageEventContent::text_plain(format!( - "User {user_id} does not belong to our server." - ))); - } - - // Check if the specified user is valid - if !services().users.exists(&user_id)? 
- || user_id - == UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("conduit user exists") - { - return Ok(RoomMessageEventContent::text_plain("The specified user does not exist!")); - } - - let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); - - match services() - .users - .set_password(&user_id, Some(new_password.as_str())) - { - Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {user_id}: `{new_password}`" - ))), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {user_id}: {e}" - ))), - } - }, - UserCommand::DeactivateAll { - leave_rooms, - force, - } => { - if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { - let usernames = body.clone().drain(1..body.len() - 1).collect::>(); - - let mut user_ids: Vec<&UserId> = Vec::new(); - - for &username in &usernames { - match <&UserId>::try_from(username) { - Ok(user_id) => user_ids.push(user_id), - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username: {e}" - ))) - }, - } - } - - let mut deactivation_count = 0; - let mut admins = Vec::new(); - - if !force { - user_ids.retain(|&user_id| match services().users.is_admin(user_id) { - Ok(is_admin) => { - if is_admin { - admins.push(user_id.localpart()); - false - } else { - true - } - }, - Err(_) => false, - }); - } - - for &user_id in &user_ids { - // check if user belongs to our server and skips over non-local users - if user_id.server_name() != services().globals.server_name() { - continue; - } - - // don't deactivate the conduit service account - if user_id - == UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("conduit user exists") - { - continue; - } - - // user does not exist on our server - if !services().users.exists(user_id)? { - continue; - } - - if services().users.deactivate_account(user_id).is_ok() { - deactivation_count += 1; - } - } - - if leave_rooms { - for &user_id in &user_ids { - _ = leave_all_rooms(user_id).await; - } - } - - if admins.is_empty() { - Ok(RoomMessageEventContent::text_plain(format!( - "Deactivated {deactivation_count} accounts." - ))) - } else { - Ok(RoomMessageEventContent::text_plain(format!( - "Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin \ - accounts", - deactivation_count, - admins.join(", ") - ))) - } - } else { - Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )) - } - }, - UserCommand::ListJoinedRooms { - user_id, - } => { - if user_id.server_name() != services().globals.server_name() { - return Ok(RoomMessageEventContent::text_plain("User does not belong to our server.")); - } - - if !services().users.exists(&user_id)? 
{ - return Ok(RoomMessageEventContent::text_plain("User does not exist on this server.")); - } - - let mut rooms: Vec<(OwnedRoomId, u64, String)> = services() - .rooms - .state_cache - .rooms_joined(&user_id) - .filter_map(Result::ok) - .map(|room_id| get_room_info(&room_id)) - .sorted_unstable() - .dedup() - .collect(); - - if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("User is not in any rooms.")); - } - - rooms.sort_by_key(|r| r.1); - rooms.reverse(); - - let output_plain = format!( - "Rooms {user_id} Joined:\n{}", - rooms - .iter() - .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) - .collect::<Vec<_>>() - .join("\n") - ); - let output_html = format!( - "<table><caption>Rooms {user_id} \ - Joined</caption>\n<tr><th>id</th>\t<th>members</th>\t<th>name</th></tr>\n{}</table>
", - rooms - .iter() - .fold(String::new(), |mut output, (id, members, name)| { - writeln!( - output, - "{}\t{}\t{}", - escape_html(id.as_ref()), - members, - escape_html(name) - ) - .unwrap(); - output - }) - ); - Ok(RoomMessageEventContent::text_html(output_plain, output_html)) - }, - } -} diff --git a/src/service/admin/user/user_commands.rs b/src/service/admin/user/user_commands.rs new file mode 100644 index 00000000..47a43233 --- /dev/null +++ b/src/service/admin/user/user_commands.rs @@ -0,0 +1,334 @@ +use std::{fmt::Write as _, sync::Arc}; + +use itertools::Itertools; +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, UserId}; +use tracing::{error, info, warn}; + +use crate::{ + api::client_server::{join_room_by_id_helper, leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, + service::admin::{escape_html, get_room_info}, + services, utils, Result, +}; + +pub(super) async fn list(_body: Vec<&str>) -> Result { + match services().users.list_local_users() { + Ok(users) => { + let mut msg = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + Ok(RoomMessageEventContent::text_plain(&msg)) + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(e.to_string())), + } +} + +pub(super) async fn create( + _body: Vec<&str>, username: String, password: Option, +) -> Result { + let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + // Validate user id + let user_id = + match UserId::parse_with_server_name(username.as_str().to_lowercase(), services().globals.server_name()) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {e}" + ))) + }, + }; + if user_id.is_historical() { + return Ok(RoomMessageEventContent::text_plain(format!( + "Userid {user_id} is not allowed due to historical" + ))); + } + if services().users.exists(&user_id)? { + return Ok(RoomMessageEventContent::text_plain(format!("Userid {user_id} already exists"))); + } + // Create user + services().users.create(&user_id, Some(password.as_str()))?; + + // Default to pretty displayname + let mut displayname = user_id.localpart().to_owned(); + + // If `new_user_displayname_suffix` is set, registration will push whatever + // content is set to the user's display name with a space before it + if !services().globals.new_user_displayname_suffix().is_empty() { + displayname.push_str(&(" ".to_owned() + services().globals.new_user_displayname_suffix())); + } + + services() + .users + .set_displayname(&user_id, Some(displayname)) + .await?; + + // Initial account data + services().account_data.update( + None, + &user_id, + ruma::events::GlobalAccountDataEventType::PushRules + .to_string() + .into(), + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::push::Ruleset::server_default(&user_id), + }, + }) + .expect("to json value always works"), + )?; + + if !services().globals.config.auto_join_rooms.is_empty() { + for room in &services().globals.config.auto_join_rooms { + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room)? 
+ { + warn!("Skipping room {room} to automatically join as we have never joined before."); + continue; + } + + if let Some(room_id_server_name) = room.server_name() { + match join_room_by_id_helper( + Some(&user_id), + room, + Some("Automatically joining this room upon registration".to_owned()), + &[room_id_server_name.to_owned(), services().globals.server_name().to_owned()], + None, + ) + .await + { + Ok(_) => { + info!("Automatically joined room {room} for user {user_id}"); + }, + Err(e) => { + // don't return this error so we don't fail registrations + error!("Failed to automatically join room {room} for user {user_id}: {e}"); + }, + }; + } + } + } + + // we dont add a device since we're not the user, just the creator + + // Inhibit login does not work for guests + Ok(RoomMessageEventContent::text_plain(format!( + "Created user with user_id: {user_id} and password: `{password}`" + ))) +} + +pub(super) async fn deactivate( + _body: Vec<&str>, leave_rooms: bool, user_id: Box, +) -> Result { + let user_id = Arc::::from(user_id); + + // check if user belongs to our server + if user_id.server_name() != services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain(format!( + "User {user_id} does not belong to our server." + ))); + } + + // don't deactivate the conduit service account + if user_id + == UserId::parse_with_server_name("conduit", services().globals.server_name()).expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain( + "Not allowed to deactivate the Conduit service account.", + )); + } + + if services().users.exists(&user_id)? { + RoomMessageEventContent::text_plain(format!("Making {user_id} leave all rooms before deactivation...")); + + services().users.deactivate_account(&user_id)?; + + if leave_rooms { + leave_all_rooms(&user_id).await?; + } + + Ok(RoomMessageEventContent::text_plain(format!( + "User {user_id} has been deactivated" + ))) + } else { + Ok(RoomMessageEventContent::text_plain(format!( + "User {user_id} doesn't exist on this server" + ))) + } +} + +pub(super) async fn reset_password(_body: Vec<&str>, username: Box) -> Result { + let user_id = + match UserId::parse_with_server_name(username.as_str().to_lowercase(), services().globals.server_name()) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {e}" + ))) + }, + }; + + // check if user belongs to our server + if user_id.server_name() != services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain(format!( + "User {user_id} does not belong to our server." + ))); + } + + // Check if the specified user is valid + if !services().users.exists(&user_id)? 
+ || user_id + == UserId::parse_with_server_name("conduit", services().globals.server_name()).expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain("The specified user does not exist!")); + } + + let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); + + match services() + .users + .set_password(&user_id, Some(new_password.as_str())) + { + Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( + "Successfully reset the password for user {user_id}: `{new_password}`" + ))), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Couldn't reset the password for user {user_id}: {e}" + ))), + } +} + +pub(super) async fn deactivate_all(body: Vec<&str>, leave_rooms: bool, force: bool) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let usernames = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut user_ids: Vec<&UserId> = Vec::new(); + + for &username in &usernames { + match <&UserId>::try_from(username) { + Ok(user_id) => user_ids.push(user_id), + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "{username} is not a valid username: {e}" + ))) + }, + } + } + + let mut deactivation_count = 0; + let mut admins = Vec::new(); + + if !force { + user_ids.retain(|&user_id| match services().users.is_admin(user_id) { + Ok(is_admin) => { + if is_admin { + admins.push(user_id.localpart()); + false + } else { + true + } + }, + Err(_) => false, + }); + } + + for &user_id in &user_ids { + // check if user belongs to our server and skips over non-local users + if user_id.server_name() != services().globals.server_name() { + continue; + } + + // don't deactivate the conduit service account + if user_id + == UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("conduit user exists") + { + continue; + } + + // user does not exist on our server + if !services().users.exists(user_id)? { + continue; + } + + if services().users.deactivate_account(user_id).is_ok() { + deactivation_count += 1; + } + } + + if leave_rooms { + for &user_id in &user_ids { + _ = leave_all_rooms(user_id).await; + } + } + + if admins.is_empty() { + Ok(RoomMessageEventContent::text_plain(format!( + "Deactivated {deactivation_count} accounts." + ))) + } else { + Ok(RoomMessageEventContent::text_plain(format!( + "Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", + deactivation_count, + admins.join(", ") + ))) + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + } +} + +pub(super) async fn list_joined_rooms(_body: Vec<&str>, user_id: Box) -> Result { + if user_id.server_name() != services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain("User does not belong to our server.")); + } + + if !services().users.exists(&user_id)? 
{ + return Ok(RoomMessageEventContent::text_plain("User does not exist on this server.")); + } + + let mut rooms: Vec<(OwnedRoomId, u64, String)> = services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(Result::ok) + .map(|room_id| get_room_info(&room_id)) + .sorted_unstable() + .dedup() + .collect(); + + if rooms.is_empty() { + return Ok(RoomMessageEventContent::text_plain("User is not in any rooms.")); + } + + rooms.sort_by_key(|r| r.1); + rooms.reverse(); + + let output_plain = format!( + "Rooms {user_id} Joined:\n{}", + rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::<Vec<_>>() + .join("\n") + ); + let output_html = format!( + "<table><caption>Rooms {user_id} \ + Joined</caption>\n<tr><th>id</th>\t<th>members</th>\t<th>name</th></tr>\n{}</table>
", + rooms + .iter() + .fold(String::new(), |mut output, (id, members, name)| { + writeln!( + output, + "{}\t{}\t{}", + escape_html(id.as_ref()), + members, + escape_html(name) + ) + .unwrap(); + output + }) + ); + Ok(RoomMessageEventContent::text_html(output_plain, output_html)) +} From 959aa7f6f883a519a48f9837e1cb06c1a702dc65 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Sat, 20 Apr 2024 23:11:53 +0100 Subject: [PATCH 03/45] Simplify publish to Dockerhub --- .github/workflows/ci.yml | 68 +++++++++------------------------------- 1 file changed, 14 insertions(+), 54 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 32290311..f38fde63 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -344,64 +344,24 @@ jobs: run: | docker load -i oci-image-${{ matrix.oci-target }}.tar.gz IMAGE_ID=$(docker images -q conduit:main) + TAG_SUFFIX=${{ matrix.oci-target }} + TAG_SUFFIX=${TAG_SUFFIX//-jemalloc/} + TAG_SUFFIX=${TAG_SUFFIX//unknown-linux-musl/} + TAG_SUFFIX=${TAG_SUFFIX//aarch64/arm64v8} + TAG_SUFFIX=${TAG_SUFFIX//x86_64/amd64} - # Tag and push the architecture specific images - if [[ "$TARGET_NAME" = *"x86_64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_AMD64 - docker push $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_AMD64 - else - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 - docker push $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 - fi - elif [[ "$TARGET_NAME" = *"aarch64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - docker push $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - else - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - docker push $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - fi - fi + # Tag and push the architecture-specific images + docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-$TAG_SUFFIX + docker push $IMAGE_NAME:$GITHUB_SHA-$TAG_SUFFIX - # Tag and push the architecture specific git ref - if [[ "$TARGET_NAME" = *"x86_64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_AMD64 - docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME - else - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 - docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME - fi - elif [[ "$TARGET_NAME" = *"aarch64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME - else - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME - fi - fi + # Create and push the architecture-specific git ref + docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-$TAG_SUFFIX + docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME # Tag "main" as latest (stable branch) architecture specific - if [[ "$GITHUB_REF_NAME" = "main" ]]; then - if [[ "$TARGET_NAME" = *"x86_64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_AMD64 - docker manifest push $IMAGE_NAME:latest - else 
- docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 - docker manifest push $IMAGE_NAME:latest - fi - elif [[ "$TARGET_NAME" = *"aarch64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:latest - else - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:latest - fi - fi + if [[ "$GITHUB_REF_NAME" == "main" ]]; then + docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-$TAG_SUFFIX + docker manifest push $IMAGE_NAME:latest fi - name: Publish to GitHub Container Registry From d5643cec8c1d1e1c503c4d61a696e2a56f611527 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Sat, 20 Apr 2024 23:54:39 +0100 Subject: [PATCH 04/45] Simplify to publish combined jemalloc image for all architectures --- .github/workflows/ci.yml | 208 +++++++++++++-------------------------- 1 file changed, 67 insertions(+), 141 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f38fde63..6a1a351c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -290,146 +290,72 @@ jobs: # don't compress again compression-level: 0 - - - name: Extract metadata for Dockerhub - env: - REGISTRY: registry.hub.docker.com - IMAGE_NAME: ${{ github.repository }} - id: meta-dockerhub - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - - - name: Extract metadata for GitHub Container Registry - env: - REGISTRY: ghcr.io - IMAGE_NAME: ${{ github.repository }} - id: meta-ghcr - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - - - - name: Login to Dockerhub - env: - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - DOCKER_USERNAME: ${{ vars.DOCKER_USERNAME }} - if: ${{ (github.event_name != 'pull_request') && (env.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} - uses: docker/login-action@v3 - with: - # username is not really a secret - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to GitHub Container Registry - if: github.event_name != 'pull_request' - uses: docker/login-action@v3 - env: - REGISTRY: ghcr.io - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - - name: Publish to Dockerhub - env: - IMAGE_SUFFIX_AMD64: amd64 - IMAGE_SUFFIX_ARM64V8: arm64v8 - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - DOCKER_USERNAME: ${{ vars.DOCKER_USERNAME }} - TARGET_NAME: ${{ matrix.oci-target }} - IMAGE_NAME: docker.io/${{ github.repository }} - if: ${{ (github.event_name != 'pull_request') && (env.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} - run: | - docker load -i oci-image-${{ matrix.oci-target }}.tar.gz - IMAGE_ID=$(docker images -q conduit:main) - TAG_SUFFIX=${{ matrix.oci-target }} - TAG_SUFFIX=${TAG_SUFFIX//-jemalloc/} - TAG_SUFFIX=${TAG_SUFFIX//unknown-linux-musl/} - TAG_SUFFIX=${TAG_SUFFIX//aarch64/arm64v8} - TAG_SUFFIX=${TAG_SUFFIX//x86_64/amd64} - - # Tag and push the architecture-specific images - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-$TAG_SUFFIX - docker push $IMAGE_NAME:$GITHUB_SHA-$TAG_SUFFIX - - # Create and push the architecture-specific git ref - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-$TAG_SUFFIX - docker manifest push 
$IMAGE_NAME:$GITHUB_REF_NAME - - # Tag "main" as latest (stable branch) architecture specific - if [[ "$GITHUB_REF_NAME" == "main" ]]; then - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-$TAG_SUFFIX - docker manifest push $IMAGE_NAME:latest - fi - - - name: Publish to GitHub Container Registry - env: - IMAGE_SUFFIX_AMD64: amd64 - IMAGE_SUFFIX_ARM64V8: arm64v8 - TARGET_NAME: ${{ matrix.oci-target }} - IMAGE_NAME: ghcr.io/${{ github.repository }} - if: github.event_name != 'pull_request' - run: | - docker load -i oci-image-${{ matrix.oci-target }}.tar.gz - IMAGE_ID=$(docker images -q conduit:main) - - # Tag and push the architecture specific images - if [[ "$TARGET_NAME" = *"x86_64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_AMD64 - docker push $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_AMD64 - else - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 - docker push $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 + create-and-push-manifest: + name: Create and Push Docker Manifest + runs-on: ubuntu-latest + needs: build-oci + + steps: + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Load OCI Images + run: | + docker load -i oci-image-x86_64-unknown-linux-musl-jemalloc.tar.gz + docker load -i oci-image-aarch64-unknown-linux-musl-jemalloc.tar.gz + + - name: Create and Push Manifest to Docker Hub + run: | + DOCKER_IMAGE_NAME="docker.io/${{ github.repository }}" + BRANCH_NAME="${{ github.ref_name }}" + SHA_TAG="${BRANCH_NAME}-${{ github.sha }}" + BRANCH_TAG=$BRANCH_NAME + + if [ "$BRANCH_NAME" == "main" ]; then + BRANCH_TAG="latest" fi - elif [[ "$TARGET_NAME" = *"aarch64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - docker push $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - else - docker tag $IMAGE_ID $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - docker push $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 + + # Create and push SHA specific manifest + docker manifest create $DOCKER_IMAGE_NAME:$SHA_TAG \ + --amend $DOCKER_IMAGE_NAME:${{ github.sha }}-x86_64-jemalloc \ + --amend $DOCKER_IMAGE_NAME:${{ github.sha }}-aarch64-jemalloc + docker manifest push $DOCKER_IMAGE_NAME:$SHA_TAG + + # Update and push branch or latest manifest + docker manifest create $DOCKER_IMAGE_NAME:$BRANCH_TAG \ + --amend $DOCKER_IMAGE_NAME:${{ github.sha }}-x86_64-jemalloc \ + --amend $DOCKER_IMAGE_NAME:${{ github.sha }}-aarch64-jemalloc + docker manifest push $DOCKER_IMAGE_NAME:$BRANCH_TAG + + - name: Create and Push Manifest to GitHub Container Registry + run: | + GHCR_IMAGE_NAME="ghcr.io/${{ github.repository }}" + BRANCH_NAME="${{ github.ref_name }}" + SHA_TAG="${BRANCH_NAME}-${{ github.sha }}" + BRANCH_TAG=$BRANCH_NAME + + if [ "$BRANCH_NAME" == "main" ]; then + BRANCH_TAG="latest" fi - fi - - # Tag and push the architecture specific git ref - if [[ "$TARGET_NAME" = *"x86_64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_AMD64 - docker manifest push 
$IMAGE_NAME:$GITHUB_REF_NAME - else - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 - docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME - fi - elif [[ "$TARGET_NAME" = *"aarch64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME - else - docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME - fi - fi - - # Tag "main" as latest (stable branch) architecture specific - if [[ "$GITHUB_REF_NAME" = "main" ]]; then - if [[ "$TARGET_NAME" = *"x86_64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_AMD64 - docker manifest push $IMAGE_NAME:latest - else - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 - docker manifest push $IMAGE_NAME:latest - fi - elif [[ "$TARGET_NAME" = *"aarch64"* ]]; then - if [[ "$TARGET_NAME" = *"jemalloc"* ]]; then - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-jemalloc-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:latest - else - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:latest - fi - fi - fi + + # Create and push SHA specific manifest + docker manifest create $GHCR_IMAGE_NAME:$SHA_TAG \ + --amend $GHCR_IMAGE_NAME:${{ github.sha }}-x86_64-jemalloc \ + --amend $GHCR_IMAGE_NAME:${{ github.sha }}-aarch64-jemalloc + docker manifest push $GHCR_IMAGE_NAME:$SHA_TAG + + # Update and push branch or latest manifest + docker manifest create $GHCR_IMAGE_NAME:$BRANCH_TAG \ + --amend $GHCR_IMAGE_NAME:${{ github.sha }}-x86_64-jemalloc \ + --amend $GHCR_IMAGE_NAME:${{ github.sha }}-aarch64-jemalloc + docker manifest push $GHCR_IMAGE_NAME:$BRANCH_TAG From d9f3d22e20d0808b5b571f65a3450a1ff8b78087 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 20 Apr 2024 19:55:14 -0400 Subject: [PATCH 05/45] finish general admin room cleanup Signed-off-by: strawberry --- src/service/admin/federation.rs | 172 -------------- .../admin/federation/federation_commands.rs | 126 ++++++++++ src/service/admin/federation/mod.rs | 68 ++++++ src/service/admin/media.rs | 216 ------------------ src/service/admin/media/media_commands.rs | 171 ++++++++++++++ src/service/admin/media/mod.rs | 49 ++++ src/service/admin/mod.rs | 3 - src/service/admin/query/account_data.rs | 32 +-- src/service/admin/query/appservice.rs | 13 +- src/service/admin/query/globals.rs | 23 +- src/service/admin/query/mod.rs | 114 ++++++++- src/service/admin/query/presence.rs | 22 +- src/service/admin/query/room_alias.rs | 23 +- src/service/admin/room.rs | 96 -------- src/service/admin/room/mod.rs | 160 +++++++++++++ .../room_alias_commands.rs} | 40 +--- src/service/admin/room/room_commands.rs | 59 +++++ .../room_directory_commands.rs} | 29 +-- .../room_moderation_commands.rs} | 60 +---- 19 files changed, 751 insertions(+), 725 deletions(-) delete mode 100644 src/service/admin/federation.rs create mode 100644 src/service/admin/federation/federation_commands.rs create mode 100644 src/service/admin/federation/mod.rs delete mode 100644 src/service/admin/media.rs create mode 100644 src/service/admin/media/media_commands.rs create mode 100644 
src/service/admin/media/mod.rs delete mode 100644 src/service/admin/room.rs create mode 100644 src/service/admin/room/mod.rs rename src/service/admin/{room_alias.rs => room/room_alias_commands.rs} (84%) create mode 100644 src/service/admin/room/room_commands.rs rename src/service/admin/{room_directory.rs => room/room_directory_commands.rs} (80%) rename src/service/admin/{room_moderation.rs => room/room_moderation_commands.rs} (90%) diff --git a/src/service/admin/federation.rs b/src/service/admin/federation.rs deleted file mode 100644 index c7a61103..00000000 --- a/src/service/admin/federation.rs +++ /dev/null @@ -1,172 +0,0 @@ -use std::{collections::BTreeMap, fmt::Write as _}; - -use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName}; -use tokio::sync::RwLock; - -use crate::{services, utils::HtmlEscape, Result}; - -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum FederationCommand { - /// - List all rooms we are currently handling an incoming pdu from - IncomingFederation, - - /// - Disables incoming federation handling for a room. - DisableRoom { - room_id: Box, - }, - - /// - Enables incoming federation handling for a room again. - EnableRoom { - room_id: Box, - }, - - /// - Verify json signatures - /// - /// This command needs a JSON blob provided in a Markdown code block below - /// the command. - SignJson, - - /// - Verify json signatures - /// - /// This command needs a JSON blob provided in a Markdown code block below - /// the command. - VerifyJson, - - /// - Fetch `/.well-known/matrix/support` from the specified server - /// - /// Despite the name, this is not a federation endpoint and does not go - /// through the federation / server resolution process as per-spec this is - /// supposed to be served at the server_name. - /// - /// Respecting homeservers put this file here for listing administration, - /// moderation, and security inquiries. This command provides a way to - /// easily fetch that information. 
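For reference, the support document fetched by `fetch-support-well-known` usually has roughly the following shape. This is a sketch assuming the MSC 1929 field names; the server, contact, and URL below are invented examples:

```rust
// Hypothetical /.well-known/matrix/support payload (MSC 1929 field names);
// all values here are invented for illustration.
fn example_support_document() -> serde_json::Value {
	serde_json::json!({
		"contacts": [{
			"matrix_id": "@admin:example.com",
			"email_address": "admin@example.com",
			"role": "m.role.admin"
		}],
		"support_page": "https://example.com/support"
	})
}
```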
- FetchSupportWellKnown { - server_name: Box, - }, -} - -pub(crate) async fn process(command: FederationCommand, body: Vec<&str>) -> Result { - match command { - FederationCommand::DisableRoom { - room_id, - } => { - services().rooms.metadata.disable_room(&room_id, true)?; - Ok(RoomMessageEventContent::text_plain("Room disabled.")) - }, - FederationCommand::EnableRoom { - room_id, - } => { - services().rooms.metadata.disable_room(&room_id, false)?; - Ok(RoomMessageEventContent::text_plain("Room enabled.")) - }, - FederationCommand::IncomingFederation => { - let map = services().globals.roomid_federationhandletime.read().await; - let mut msg = format!("Handling {} incoming pdus:\n", map.len()); - - for (r, (e, i)) in map.iter() { - let elapsed = i.elapsed(); - let _ = writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60); - } - Ok(RoomMessageEventContent::text_plain(&msg)) - }, - FederationCommand::SignJson => { - if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(mut value) => { - ruma::signatures::sign_json( - services().globals.server_name().as_str(), - services().globals.keypair(), - &mut value, - ) - .expect("our request json is what ruma expects"); - let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::text_plain(json_text)) - }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), - } - } else { - Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )) - } - }, - FederationCommand::VerifyJson => { - if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - let pub_key_map = RwLock::new(BTreeMap::new()); - - services() - .rooms - .event_handler - .fetch_required_signing_keys([&value], &pub_key_map) - .await?; - - let pub_key_map = pub_key_map.read().await; - match ruma::signatures::verify_json(&pub_key_map, &value) { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Signature verification failed: {e}" - ))), - } - }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), - } - } else { - Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. 
Add --help for details.", - )) - } - }, - FederationCommand::FetchSupportWellKnown { - server_name, - } => { - let response = services() - .globals - .client - .default - .get(format!("https://{server_name}/.well-known/matrix/support")) - .send() - .await?; - - let text = response.text().await?; - - if text.is_empty() { - return Ok(RoomMessageEventContent::text_plain("Response text/body is empty.")); - } - - if text.len() > 1500 { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is over 1500 characters, assuming no support well-known.", - )); - } - - let json: serde_json::Value = match serde_json::from_str(&text) { - Ok(json) => json, - Err(_) => { - return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON.")); - }, - }; - - let pretty_json: String = match serde_json::to_string_pretty(&json) { - Ok(json) => json, - Err(_) => { - return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON.")); - }, - }; - - Ok(RoomMessageEventContent::text_html( - format!("Got JSON response:\n\n```json\n{pretty_json}\n```"), - format!( - "
<details>\n<summary>Got JSON response:</summary>\n<pre>{}\n</pre>\n</details>
\n", - HtmlEscape(&pretty_json) - ), - )) - }, - } -} diff --git a/src/service/admin/federation/federation_commands.rs b/src/service/admin/federation/federation_commands.rs new file mode 100644 index 00000000..845c2f91 --- /dev/null +++ b/src/service/admin/federation/federation_commands.rs @@ -0,0 +1,126 @@ +use std::{collections::BTreeMap, fmt::Write as _}; + +use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName}; +use tokio::sync::RwLock; + +use crate::{services, utils::HtmlEscape, Result}; + +pub(super) async fn disable_room(_body: Vec<&str>, room_id: Box) -> Result { + services().rooms.metadata.disable_room(&room_id, true)?; + Ok(RoomMessageEventContent::text_plain("Room disabled.")) +} + +pub(super) async fn enable_room(_body: Vec<&str>, room_id: Box) -> Result { + services().rooms.metadata.disable_room(&room_id, false)?; + Ok(RoomMessageEventContent::text_plain("Room enabled.")) +} + +pub(super) async fn incoming_federeation(_body: Vec<&str>) -> Result { + let map = services().globals.roomid_federationhandletime.read().await; + let mut msg = format!("Handling {} incoming pdus:\n", map.len()); + + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + let _ = writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60); + } + Ok(RoomMessageEventContent::text_plain(&msg)) +} + +pub(super) async fn sign_json(body: Vec<&str>) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(mut value) => { + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut value, + ) + .expect("our request json is what ruma expects"); + let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json"); + Ok(RoomMessageEventContent::text_plain(json_text)) + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + } +} + +pub(super) async fn verify_json(body: Vec<&str>) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + let pub_key_map = RwLock::new(BTreeMap::new()); + + services() + .rooms + .event_handler + .fetch_required_signing_keys([&value], &pub_key_map) + .await?; + + let pub_key_map = pub_key_map.read().await; + match ruma::signatures::verify_json(&pub_key_map, &value) { + Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Signature verification failed: {e}" + ))), + } + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + )) + } +} + +pub(super) async fn fetch_support_well_known( + _body: Vec<&str>, server_name: Box, +) -> Result { + let response = services() + .globals + .client + .default + .get(format!("https://{server_name}/.well-known/matrix/support")) + .send() + .await?; + + let text = response.text().await?; + + if text.is_empty() { + return Ok(RoomMessageEventContent::text_plain("Response text/body is empty.")); + } + + if text.len() > 1500 { + return Ok(RoomMessageEventContent::text_plain( + "Response text/body is over 1500 characters, assuming no support well-known.", + )); + } + + let json: serde_json::Value = match serde_json::from_str(&text) { + Ok(json) => json, + Err(_) => { + return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON.")); + }, + }; + + let pretty_json: String = match serde_json::to_string_pretty(&json) { + Ok(json) => json, + Err(_) => { + return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON.")); + }, + }; + + Ok(RoomMessageEventContent::text_html( + format!("Got JSON response:\n\n```json\n{pretty_json}\n```"), + format!( + "
<details>\n<summary>Got JSON response:</summary>\n<pre>{}\n</pre>\n</details>
\n", + HtmlEscape(&pretty_json) + ), + )) +} diff --git a/src/service/admin/federation/mod.rs b/src/service/admin/federation/mod.rs new file mode 100644 index 00000000..74878e36 --- /dev/null +++ b/src/service/admin/federation/mod.rs @@ -0,0 +1,68 @@ +use clap::Subcommand; +use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName}; + +use self::federation_commands::{ + disable_room, enable_room, fetch_support_well_known, incoming_federeation, sign_json, verify_json, +}; +use crate::Result; + +pub(crate) mod federation_commands; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum FederationCommand { + /// - List all rooms we are currently handling an incoming pdu from + IncomingFederation, + + /// - Disables incoming federation handling for a room. + DisableRoom { + room_id: Box, + }, + + /// - Enables incoming federation handling for a room again. + EnableRoom { + room_id: Box, + }, + + /// - Verify json signatures + /// + /// This command needs a JSON blob provided in a Markdown code block below + /// the command. + SignJson, + + /// - Verify json signatures + /// + /// This command needs a JSON blob provided in a Markdown code block below + /// the command. + VerifyJson, + + /// - Fetch `/.well-known/matrix/support` from the specified server + /// + /// Despite the name, this is not a federation endpoint and does not go + /// through the federation / server resolution process as per-spec this is + /// supposed to be served at the server_name. + /// + /// Respecting homeservers put this file here for listing administration, + /// moderation, and security inquiries. This command provides a way to + /// easily fetch that information. + FetchSupportWellKnown { + server_name: Box, + }, +} + +pub(crate) async fn process(command: FederationCommand, body: Vec<&str>) -> Result { + Ok(match command { + FederationCommand::DisableRoom { + room_id, + } => disable_room(body, room_id).await?, + FederationCommand::EnableRoom { + room_id, + } => enable_room(body, room_id).await?, + FederationCommand::IncomingFederation => incoming_federeation(body).await?, + FederationCommand::SignJson => sign_json(body).await?, + FederationCommand::VerifyJson => verify_json(body).await?, + FederationCommand::FetchSupportWellKnown { + server_name, + } => fetch_support_well_known(body, server_name).await?, + }) +} diff --git a/src/service/admin/media.rs b/src/service/admin/media.rs deleted file mode 100644 index ee86401e..00000000 --- a/src/service/admin/media.rs +++ /dev/null @@ -1,216 +0,0 @@ -use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, EventId}; -use tracing::{debug, info}; - -use crate::{service::admin::MxcUri, services, Result}; - -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum MediaCommand { - /// - Deletes a single media file from our database and on the filesystem - /// via a single MXC URL - Delete { - /// The MXC URL to delete - #[arg(long)] - mxc: Option>, - - /// - The message event ID which contains the media and thumbnail MXC - /// URLs - #[arg(long)] - event_id: Option>, - }, - - /// - Deletes a codeblock list of MXC URLs from our database and on the - /// filesystem - DeleteList, - - /// - Deletes all remote media in the last X amount of time using filesystem - /// metadata first created at date. - DeletePastRemoteMedia { - /// - The duration (at or after), e.g. 
"5m" to delete all media in the - /// past 5 minutes - duration: String, - }, -} - -pub(crate) async fn process(command: MediaCommand, body: Vec<&str>) -> Result { - match command { - MediaCommand::Delete { - mxc, - event_id, - } => { - if event_id.is_some() && mxc.is_some() { - return Ok(RoomMessageEventContent::text_plain( - "Please specify either an MXC or an event ID, not both.", - )); - } - - if let Some(mxc) = mxc { - if !mxc.to_string().starts_with("mxc://") { - return Ok(RoomMessageEventContent::text_plain("MXC provided is not valid.")); - } - - debug!("Got MXC URL: {}", mxc); - services().media.delete(mxc.to_string()).await?; - - return Ok(RoomMessageEventContent::text_plain( - "Deleted the MXC from our database and on our filesystem.", - )); - } else if let Some(event_id) = event_id { - debug!("Got event ID to delete media from: {}", event_id); - - let mut mxc_urls = vec![]; - let mut mxc_deletion_count = 0; - - // parsing the PDU for any MXC URLs begins here - if let Some(event_json) = services().rooms.timeline.get_pdu_json(&event_id)? { - if let Some(content_key) = event_json.get("content") { - debug!("Event ID has \"content\"."); - let content_obj = content_key.as_object(); - - if let Some(content) = content_obj { - // 1. attempts to parse the "url" key - debug!("Attempting to go into \"url\" key for main media file"); - if let Some(url) = content.get("url") { - debug!("Got a URL in the event ID {event_id}: {url}"); - - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {} to list of MXCs to delete", url); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); - } else { - info!( - "Found a URL in the event ID {event_id} but did not start with mxc://, \ - ignoring" - ); - } - } - - // 2. attempts to parse the "info" key - debug!("Attempting to go into \"info\" key for thumbnails"); - if let Some(info_key) = content.get("info") { - debug!("Event ID has \"info\"."); - let info_obj = info_key.as_object(); - - if let Some(info) = info_obj { - if let Some(thumbnail_url) = info.get("thumbnail_url") { - debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - - if thumbnail_url.to_string().starts_with("\"mxc://") { - debug!("Pushing thumbnail URL {} to list of MXCs to delete", thumbnail_url); - let final_thumbnail_url = thumbnail_url.to_string().replace('"', ""); - mxc_urls.push(final_thumbnail_url); - } else { - info!( - "Found a thumbnail URL in the event ID {event_id} but did not start \ - with mxc://, ignoring" - ); - } - } else { - info!("No \"thumbnail_url\" key in \"info\" key, assuming no thumbnails."); - } - } - } - - // 3. 
attempts to parse the "file" key - debug!("Attempting to go into \"file\" key"); - if let Some(file_key) = content.get("file") { - debug!("Event ID has \"file\"."); - let file_obj = file_key.as_object(); - - if let Some(file) = file_obj { - if let Some(url) = file.get("url") { - debug!("Found url in file key: {url}"); - - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {} to list of MXCs to delete", url); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); - } else { - info!( - "Found a URL in the event ID {event_id} but did not start with \ - mxc://, ignoring" - ); - } - } else { - info!("No \"url\" key in \"file\" key."); - } - } - } - } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key or failed parsing the event ID JSON.", - )); - } - } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key, this is not a message or an event type that \ - contains media.", - )); - } - } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not exist or is not known to us.", - )); - } - - if mxc_urls.is_empty() { - // we shouldn't get here (should have errored earlier) but just in case for - // whatever reason we do... - info!("Parsed event ID {event_id} but did not contain any MXC URLs."); - return Ok(RoomMessageEventContent::text_plain("Parsed event ID but found no MXC URLs.")); - } - - for mxc_url in mxc_urls { - services().media.delete(mxc_url).await?; - mxc_deletion_count += 1; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from event ID \ - {event_id}." - ))); - } - - Ok(RoomMessageEventContent::text_plain( - "Please specify either an MXC using --mxc or an event ID using --event-id of the message containing \ - an image. See --help for details.", - )) - }, - MediaCommand::DeleteList => { - if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { - let mxc_list = body.clone().drain(1..body.len() - 1).collect::>(); - - let mut mxc_deletion_count = 0; - - for mxc in mxc_list { - debug!("Deleting MXC {} in bulk", mxc); - services().media.delete(mxc.to_owned()).await?; - mxc_deletion_count += 1; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk MXC deletion, deleted {} total MXCs from our database and the filesystem.", - mxc_deletion_count - ))); - } - - Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. 
Add --help for details.", - )) - }, - MediaCommand::DeletePastRemoteMedia { - duration, - } => { - let deleted_count = services() - .media - .delete_all_remote_media_at_after_time(duration) - .await?; - - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {} total files.", - deleted_count - ))) - }, - } -} diff --git a/src/service/admin/media/media_commands.rs b/src/service/admin/media/media_commands.rs new file mode 100644 index 00000000..d7745088 --- /dev/null +++ b/src/service/admin/media/media_commands.rs @@ -0,0 +1,171 @@ +use ruma::{events::room::message::RoomMessageEventContent, EventId}; +use tracing::{debug, info}; + +use crate::{service::admin::MxcUri, services, Result}; + +pub(super) async fn delete( + _body: Vec<&str>, mxc: Option>, event_id: Option>, +) -> Result { + if event_id.is_some() && mxc.is_some() { + return Ok(RoomMessageEventContent::text_plain( + "Please specify either an MXC or an event ID, not both.", + )); + } + + if let Some(mxc) = mxc { + debug!("Got MXC URL: {mxc}"); + services().media.delete(mxc.to_string()).await?; + + return Ok(RoomMessageEventContent::text_plain( + "Deleted the MXC from our database and on our filesystem.", + )); + } else if let Some(event_id) = event_id { + debug!("Got event ID to delete media from: {event_id}"); + + let mut mxc_urls = vec![]; + let mut mxc_deletion_count = 0; + + // parsing the PDU for any MXC URLs begins here + if let Some(event_json) = services().rooms.timeline.get_pdu_json(&event_id)? { + if let Some(content_key) = event_json.get("content") { + debug!("Event ID has \"content\"."); + let content_obj = content_key.as_object(); + + if let Some(content) = content_obj { + // 1. attempts to parse the "url" key + debug!("Attempting to go into \"url\" key for main media file"); + if let Some(url) = content.get("url") { + debug!("Got a URL in the event ID {event_id}: {url}"); + + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); + } else { + info!("Found a URL in the event ID {event_id} but did not start with mxc://, ignoring"); + } + } + + // 2. attempts to parse the "info" key + debug!("Attempting to go into \"info\" key for thumbnails"); + if let Some(info_key) = content.get("info") { + debug!("Event ID has \"info\"."); + let info_obj = info_key.as_object(); + + if let Some(info) = info_obj { + if let Some(thumbnail_url) = info.get("thumbnail_url") { + debug!("Found a thumbnail_url in info key: {thumbnail_url}"); + + if thumbnail_url.to_string().starts_with("\"mxc://") { + debug!("Pushing thumbnail URL {thumbnail_url} to list of MXCs to delete"); + let final_thumbnail_url = thumbnail_url.to_string().replace('"', ""); + mxc_urls.push(final_thumbnail_url); + } else { + info!( + "Found a thumbnail URL in the event ID {event_id} but did not start with \ + mxc://, ignoring" + ); + } + } else { + info!("No \"thumbnail_url\" key in \"info\" key, assuming no thumbnails."); + } + } + } + + // 3. 
attempts to parse the "file" key + debug!("Attempting to go into \"file\" key"); + if let Some(file_key) = content.get("file") { + debug!("Event ID has \"file\"."); + let file_obj = file_key.as_object(); + + if let Some(file) = file_obj { + if let Some(url) = file.get("url") { + debug!("Found url in file key: {url}"); + + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); + } else { + info!( + "Found a URL in the event ID {event_id} but did not start with mxc://, \ + ignoring" + ); + } + } else { + info!("No \"url\" key in \"file\" key."); + } + } + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not have a \"content\" key or failed parsing the event ID JSON.", + )); + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not have a \"content\" key, this is not a message or an event type that contains \ + media.", + )); + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not exist or is not known to us.", + )); + } + + if mxc_urls.is_empty() { + // we shouldn't get here (should have errored earlier) but just in case for + // whatever reason we do... + info!("Parsed event ID {event_id} but did not contain any MXC URLs."); + return Ok(RoomMessageEventContent::text_plain("Parsed event ID but found no MXC URLs.")); + } + + for mxc_url in mxc_urls { + services().media.delete(mxc_url).await?; + mxc_deletion_count += 1; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from event ID {event_id}." + ))); + } + + Ok(RoomMessageEventContent::text_plain( + "Please specify either an MXC using --mxc or an event ID using --event-id of the message containing an image. \ + See --help for details.", + )) +} + +pub(super) async fn delete_list(body: Vec<&str>) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let mxc_list = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut mxc_deletion_count = 0; + + for mxc in mxc_list { + debug!("Deleting MXC {mxc} in bulk"); + services().media.delete(mxc.to_owned()).await?; + mxc_deletion_count += 1; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database and the filesystem.", + ))); + } + + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + )) +} + +pub(super) async fn delete_past_remote_media(_body: Vec<&str>, duration: String) -> Result { + let deleted_count = services() + .media + .delete_all_remote_media_at_after_time(duration) + .await?; + + Ok(RoomMessageEventContent::text_plain(format!( + "Deleted {deleted_count} total files.", + ))) +} diff --git a/src/service/admin/media/mod.rs b/src/service/admin/media/mod.rs new file mode 100644 index 00000000..d091f94a --- /dev/null +++ b/src/service/admin/media/mod.rs @@ -0,0 +1,49 @@ +use clap::Subcommand; +use ruma::{events::room::message::RoomMessageEventContent, EventId}; + +use self::media_commands::{delete, delete_list, delete_past_remote_media}; +use crate::{service::admin::MxcUri, Result}; + +pub(crate) mod media_commands; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum MediaCommand { + /// - Deletes a single media file from our database and on the filesystem + /// via a single MXC URL + Delete { + /// The MXC URL to delete + #[arg(long)] + mxc: Option>, + + /// - The message event ID which contains the media and thumbnail MXC + /// URLs + #[arg(long)] + event_id: Option>, + }, + + /// - Deletes a codeblock list of MXC URLs from our database and on the + /// filesystem + DeleteList, + + /// - Deletes all remote media in the last X amount of time using filesystem + /// metadata first created at date. + DeletePastRemoteMedia { + /// - The duration (at or after), e.g. "5m" to delete all media in the + /// past 5 minutes + duration: String, + }, +} + +pub(crate) async fn process(command: MediaCommand, body: Vec<&str>) -> Result { + Ok(match command { + MediaCommand::Delete { + mxc, + event_id, + } => delete(body, mxc, event_id).await?, + MediaCommand::DeleteList => delete_list(body).await?, + MediaCommand::DeletePastRemoteMedia { + duration, + } => delete_past_remote_media(body, duration).await?, + }) +} diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 1971461d..0f361a4c 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -42,9 +42,6 @@ pub(crate) mod fsck; pub(crate) mod media; pub(crate) mod query; pub(crate) mod room; -pub(crate) mod room_alias; -pub(crate) mod room_directory; -pub(crate) mod room_moderation; pub(crate) mod server; pub(crate) mod user; diff --git a/src/service/admin/query/account_data.rs b/src/service/admin/query/account_data.rs index f335489e..15d45633 100644 --- a/src/service/admin/query/account_data.rs +++ b/src/service/admin/query/account_data.rs @@ -1,36 +1,8 @@ -use clap::Subcommand; -use ruma::{ - events::{room::message::RoomMessageEventContent, RoomAccountDataEventType}, - RoomId, UserId, -}; +use ruma::events::room::message::RoomMessageEventContent; +use super::AccountData; use crate::{services, Result}; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -/// All the getters and iterators from src/database/key_value/account_data.rs -pub(crate) enum AccountData { - /// - Returns all changes to the account data that happened after `since`. - ChangesSince { - /// Full user ID - user_id: Box, - /// UNIX timestamp since (u64) - since: u64, - /// Optional room ID of the account data - room_id: Option>, - }, - - /// - Searches the account data for a specific kind. 
- Get { - /// Full user ID - user_id: Box, - /// Account data event type - kind: RoomAccountDataEventType, - /// Optional room ID of the account data - room_id: Option>, - }, -} - /// All the getters and iterators from src/database/key_value/account_data.rs pub(super) async fn account_data(subcommand: AccountData) -> Result { match subcommand { diff --git a/src/service/admin/query/appservice.rs b/src/service/admin/query/appservice.rs index c576f7db..bfb63c95 100644 --- a/src/service/admin/query/appservice.rs +++ b/src/service/admin/query/appservice.rs @@ -1,19 +1,8 @@ -use clap::Subcommand; use ruma::events::room::message::RoomMessageEventContent; +use super::Appservice; use crate::{services, Result}; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -/// All the getters and iterators from src/database/key_value/appservice.rs -pub(crate) enum Appservice { - /// - Gets the appservice registration info/details from the ID as a string - GetRegistration { - /// Appservice registration ID - appservice_id: Box, - }, -} - /// All the getters and iterators from src/database/key_value/appservice.rs pub(super) async fn appservice(subcommand: Appservice) -> Result { match subcommand { diff --git a/src/service/admin/query/globals.rs b/src/service/admin/query/globals.rs index 25c3e337..ff962cb5 100644 --- a/src/service/admin/query/globals.rs +++ b/src/service/admin/query/globals.rs @@ -1,27 +1,8 @@ -use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, ServerName}; +use ruma::events::room::message::RoomMessageEventContent; +use super::Globals; use crate::{services, Result}; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -/// All the getters and iterators from src/database/key_value/globals.rs -pub(crate) enum Globals { - DatabaseVersion, - - CurrentCount, - - LastCheckForUpdatesId, - - LoadKeypair, - - /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found - /// for the server. - SigningKeysFor { - origin: Box, - }, -} - /// All the getters and iterators from src/database/key_value/globals.rs pub(super) async fn globals(subcommand: Globals) -> Result { match subcommand { diff --git a/src/service/admin/query/mod.rs b/src/service/admin/query/mod.rs index 8033e731..60867fb2 100644 --- a/src/service/admin/query/mod.rs +++ b/src/service/admin/query/mod.rs @@ -5,14 +5,13 @@ pub(crate) mod presence; pub(crate) mod room_alias; use clap::Subcommand; -use ruma::events::room::message::RoomMessageEventContent; +use ruma::{ + events::{room::message::RoomMessageEventContent, RoomAccountDataEventType}, + RoomAliasId, RoomId, ServerName, UserId, +}; use self::{ - account_data::{account_data, AccountData}, - appservice::{appservice, Appservice}, - globals::{globals, Globals}, - presence::{presence, Presence}, - room_alias::{room_alias, RoomAlias}, + account_data::account_data, appservice::appservice, globals::globals, presence::presence, room_alias::room_alias, }; use crate::Result; @@ -41,14 +40,105 @@ pub(crate) enum QueryCommand { Globals(Globals), } +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +/// All the getters and iterators from src/database/key_value/account_data.rs +pub(crate) enum AccountData { + /// - Returns all changes to the account data that happened after `since`. + ChangesSince { + /// Full user ID + user_id: Box, + /// UNIX timestamp since (u64) + since: u64, + /// Optional room ID of the account data + room_id: Option>, + }, + + /// - Searches the account data for a specific kind. 
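As an aside on the `Get` variant that follows: once clap routes the subcommand, the handler can reuse the timer-and-format pattern the other query handlers in this patch set follow. The sketch below is illustrative only; the `services().account_data.get(...)` call and the plain-text formatting are assumptions based on the surrounding code, not lines from this patch.

```rust
// Hypothetical handler arm for `query account-data get` (sketch, not from
// this patch); mirrors the timer/result/format pattern of the other query
// subcommands. The account_data service signature is assumed.
AccountData::Get {
	user_id,
	kind,
	room_id,
} => {
	let timer = tokio::time::Instant::now();
	let results = services()
		.account_data
		.get(room_id.as_deref(), &user_id, kind)?;
	let query_time = timer.elapsed();

	Ok(RoomMessageEventContent::text_plain(format!(
		"Query completed in {query_time:?}:\n\n{results:?}"
	)))
},
```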
+ Get { + /// Full user ID + user_id: Box, + /// Account data event type + kind: RoomAccountDataEventType, + /// Optional room ID of the account data + room_id: Option>, + }, +} + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +/// All the getters and iterators from src/database/key_value/appservice.rs +pub(crate) enum Appservice { + /// - Gets the appservice registration info/details from the ID as a string + GetRegistration { + /// Appservice registration ID + appservice_id: Box, + }, +} + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +/// All the getters and iterators from src/database/key_value/presence.rs +pub(crate) enum Presence { + /// - Returns the latest presence event for the given user. + GetPresence { + /// Full user ID + user_id: Box, + }, + + /// - Iterator of the most recent presence updates that happened after the + /// event with id `since`. + PresenceSince { + /// UNIX timestamp since (u64) + since: u64, + }, +} + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +/// All the getters and iterators from src/database/key_value/rooms/alias.rs +pub(crate) enum RoomAlias { + ResolveLocalAlias { + /// Full room alias + alias: Box, + }, + + /// - Iterator of all our local room aliases for the room ID + LocalAliasesForRoom { + /// Full room ID + room_id: Box, + }, + + /// - Iterator of all our local aliases in our database with their room IDs + AllLocalAliases, +} + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +/// All the getters and iterators from src/database/key_value/globals.rs +pub(crate) enum Globals { + DatabaseVersion, + + CurrentCount, + + LastCheckForUpdatesId, + + LoadKeypair, + + /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found + /// for the server. + SigningKeysFor { + origin: Box, + }, +} + /// Processes admin query commands -#[allow(non_snake_case)] pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result { Ok(match command { - QueryCommand::AccountData(AccountData) => account_data(AccountData).await?, - QueryCommand::Appservice(Appservice) => appservice(Appservice).await?, - QueryCommand::Presence(Presence) => presence(Presence).await?, - QueryCommand::RoomAlias(RoomAlias) => room_alias(RoomAlias).await?, - QueryCommand::Globals(Globals) => globals(Globals).await?, + QueryCommand::AccountData(command) => account_data(command).await?, + QueryCommand::Appservice(command) => appservice(command).await?, + QueryCommand::Presence(command) => presence(command).await?, + QueryCommand::RoomAlias(command) => room_alias(command).await?, + QueryCommand::Globals(command) => globals(command).await?, }) } diff --git a/src/service/admin/query/presence.rs b/src/service/admin/query/presence.rs index bb55b88f..0e32bbd7 100644 --- a/src/service/admin/query/presence.rs +++ b/src/service/admin/query/presence.rs @@ -1,26 +1,8 @@ -use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, UserId}; +use ruma::events::room::message::RoomMessageEventContent; +use super::Presence; use crate::{services, Result}; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -/// All the getters and iterators from src/database/key_value/presence.rs -pub(crate) enum Presence { - /// - Returns the latest presence event for the given user. - GetPresence { - /// Full user ID - user_id: Box, - }, - - /// - Iterator of the most recent presence updates that happened after the - /// event with id `since`. 
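For the `PresenceSince` variant below, a handler can take the same shape. A minimal sketch, assuming the presence service exposes a `presence_since` iterator; the method name and item type are assumptions, not taken from this diff:

```rust
// Sketch of a `query presence presence-since` handler arm (assumed API).
Presence::PresenceSince {
	since,
} => {
	let timer = tokio::time::Instant::now();
	// assumed: an iterator over presence updates newer than `since`
	let results: Vec<_> = services().presence.presence_since(since).collect();
	let query_time = timer.elapsed();

	Ok(RoomMessageEventContent::text_plain(format!(
		"Query completed in {query_time:?}:\n\n{results:?}"
	)))
},
```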
- PresenceSince { - /// UNIX timestamp since (u64) - since: u64, - }, -} - /// All the getters and iterators in key_value/presence.rs pub(super) async fn presence(subcommand: Presence) -> Result { match subcommand { diff --git a/src/service/admin/query/room_alias.rs b/src/service/admin/query/room_alias.rs index e854f643..e5238f38 100644 --- a/src/service/admin/query/room_alias.rs +++ b/src/service/admin/query/room_alias.rs @@ -1,27 +1,8 @@ -use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId}; +use ruma::events::room::message::RoomMessageEventContent; +use super::RoomAlias; use crate::{services, Result}; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -/// All the getters and iterators from src/database/key_value/rooms/alias.rs -pub(crate) enum RoomAlias { - ResolveLocalAlias { - /// Full room alias - alias: Box, - }, - - /// - Iterator of all our local room aliases for the room ID - LocalAliasesForRoom { - /// Full room ID - room_id: Box, - }, - - /// - Iterator of all our local aliases in our database with their room IDs - AllLocalAliases, -} - /// All the getters and iterators in src/database/key_value/rooms/alias.rs pub(super) async fn room_alias(subcommand: RoomAlias) -> Result { match subcommand { diff --git a/src/service/admin/room.rs b/src/service/admin/room.rs deleted file mode 100644 index 721191b1..00000000 --- a/src/service/admin/room.rs +++ /dev/null @@ -1,96 +0,0 @@ -use std::fmt::Write as _; - -use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; - -use crate::{ - service::admin::{ - escape_html, get_room_info, room_alias, room_alias::RoomAliasCommand, room_directory, - room_directory::RoomDirectoryCommand, room_moderation, room_moderation::RoomModerationCommand, PAGE_SIZE, - }, - services, Result, -}; - -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum RoomCommand { - /// - List all rooms the server knows about - List { - page: Option, - }, - - #[command(subcommand)] - /// - Manage moderation of remote or local rooms - Moderation(RoomModerationCommand), - - #[command(subcommand)] - /// - Manage rooms' aliases - Alias(RoomAliasCommand), - - #[command(subcommand)] - /// - Manage the room directory - Directory(RoomDirectoryCommand), -} - -pub(crate) async fn process(command: RoomCommand, body: Vec<&str>) -> Result { - match command { - RoomCommand::Alias(command) => room_alias::process(command, body).await, - - RoomCommand::Directory(command) => room_directory::process(command, body).await, - - RoomCommand::Moderation(command) => room_moderation::process(command, body).await, - - RoomCommand::List { - page, - } => { - // TODO: i know there's a way to do this with clap, but i can't seem to find it - let page = page.unwrap_or(1); - let mut rooms = services() - .rooms - .metadata - .iter_ids() - .filter_map(Result::ok) - .map(|id: OwnedRoomId| get_room_info(&id)) - .collect::>(); - rooms.sort_by_key(|r| r.1); - rooms.reverse(); - - let rooms = rooms - .into_iter() - .skip(page.saturating_sub(1) * PAGE_SIZE) - .take(PAGE_SIZE) - .collect::>(); - - if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; - - let output_plain = format!( - "Rooms:\n{}", - rooms - .iter() - .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) - .collect::>() - .join("\n") - ); - let output_html = format!( - "\n\t\t\n{}
<table><caption>Room list - page \
- {page}</caption>\n<tr><th>id</th>\t<th>members</th>\t<th>name</th></tr>\n{}</table>
", - rooms - .iter() - .fold(String::new(), |mut output, (id, members, name)| { - writeln!( - output, - "{}\t{}\t{}", - escape_html(id.as_ref()), - members, - escape_html(name) - ) - .unwrap(); - output - }) - ); - Ok(RoomMessageEventContent::text_html(output_plain, output_html)) - }, - } -} diff --git a/src/service/admin/room/mod.rs b/src/service/admin/room/mod.rs new file mode 100644 index 00000000..b4b7b279 --- /dev/null +++ b/src/service/admin/room/mod.rs @@ -0,0 +1,160 @@ +use clap::Subcommand; +use ruma::{events::room::message::RoomMessageEventContent, RoomId, RoomOrAliasId}; + +use self::room_commands::list; +use crate::Result; + +pub(crate) mod room_alias_commands; +pub(crate) mod room_commands; +pub(crate) mod room_directory_commands; +pub(crate) mod room_moderation_commands; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum RoomCommand { + /// - List all rooms the server knows about + List { + page: Option, + }, + + #[command(subcommand)] + /// - Manage moderation of remote or local rooms + Moderation(RoomModerationCommand), + + #[command(subcommand)] + /// - Manage rooms' aliases + Alias(RoomAliasCommand), + + #[command(subcommand)] + /// - Manage the room directory + Directory(RoomDirectoryCommand), +} + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum RoomAliasCommand { + /// - Make an alias point to a room. + Set { + #[arg(short, long)] + /// Set the alias even if a room is already using it + force: bool, + + /// The room id to set the alias on + room_id: Box, + + /// The alias localpart to use (`alias`, not `#alias:servername.tld`) + room_alias_localpart: String, + }, + + /// - Remove an alias + Remove { + /// The alias localpart to remove (`alias`, not `#alias:servername.tld`) + room_alias_localpart: String, + }, + + /// - Show which room is using an alias + Which { + /// The alias localpart to look up (`alias`, not + /// `#alias:servername.tld`) + room_alias_localpart: String, + }, + + /// - List aliases currently being used + List { + /// If set, only list the aliases for this room + room_id: Option>, + }, +} + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum RoomDirectoryCommand { + /// - Publish a room to the room directory + Publish { + /// The room id of the room to publish + room_id: Box, + }, + + /// - Unpublish a room to the room directory + Unpublish { + /// The room id of the room to unpublish + room_id: Box, + }, + + /// - List rooms that are published + List { + page: Option, + }, +} + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum RoomModerationCommand { + /// - Bans a room from local users joining and evicts all our local users + /// from the room. Also blocks any invites (local and remote) for the + /// banned room. + /// + /// Server admins (users in the conduwuit admin room) will not be evicted + /// and server admins can still join the room. 
To evict admins too, use + /// --force (also ignores errors) To disable incoming federation of the + /// room, use --disable-federation + BanRoom { + #[arg(short, long)] + /// Evicts admins out of the room and ignores any potential errors when + /// making our local users leave the room + force: bool, + + #[arg(long)] + /// Disables incoming federation of the room after banning and evicting + /// users + disable_federation: bool, + + /// The room in the format of `!roomid:example.com` or a room alias in + /// the format of `#roomalias:example.com` + room: Box, + }, + + /// - Bans a list of rooms (room IDs and room aliases) from a newline + /// delimited codeblock similar to `user deactivate-all` + BanListOfRooms { + #[arg(short, long)] + /// Evicts admins out of the room and ignores any potential errors when + /// making our local users leave the room + force: bool, + + #[arg(long)] + /// Disables incoming federation of the room after banning and evicting + /// users + disable_federation: bool, + }, + + /// - Unbans a room to allow local users to join again + /// + /// To re-enable incoming federation of the room, use --enable-federation + UnbanRoom { + #[arg(long)] + /// Enables incoming federation of the room after unbanning + enable_federation: bool, + + /// The room in the format of `!roomid:example.com` or a room alias in + /// the format of `#roomalias:example.com` + room: Box, + }, + + /// - List of all rooms we have banned + ListBannedRooms, +} + +pub(crate) async fn process(command: RoomCommand, body: Vec<&str>) -> Result { + Ok(match command { + RoomCommand::Alias(command) => room_alias_commands::process(command, body).await?, + + RoomCommand::Directory(command) => room_directory_commands::process(command, body).await?, + + RoomCommand::Moderation(command) => room_moderation_commands::process(command, body).await?, + + RoomCommand::List { + page, + } => list(body, page).await?, + }) +} diff --git a/src/service/admin/room_alias.rs b/src/service/admin/room/room_alias_commands.rs similarity index 84% rename from src/service/admin/room_alias.rs rename to src/service/admin/room/room_alias_commands.rs index f1621344..516df071 100644 --- a/src/service/admin/room_alias.rs +++ b/src/service/admin/room/room_alias_commands.rs @@ -1,46 +1,10 @@ use std::fmt::Write as _; -use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId}; +use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId}; +use super::RoomAliasCommand; use crate::{service::admin::escape_html, services, Result}; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum RoomAliasCommand { - /// - Make an alias point to a room. 
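Before the variants (continued below), it may help to see what `Set` reduces to at the service layer. A minimal sketch, assuming the alias service's `resolve_local_alias`/`set_alias` methods; the localpart-to-alias construction and the error handling are illustrative, not verbatim from this patch:

```rust
// Sketch of the core of `room alias set` (assumed service API; real code
// reports errors to the admin room instead of panicking).
let room_alias = RoomAliasId::parse(format!(
	"#{room_alias_localpart}:{}",
	services().globals.server_name()
))
.expect("sketch: assume the admin supplied a valid localpart");

match services().rooms.alias.resolve_local_alias(&room_alias) {
	// refuse to overwrite an existing mapping unless --force was given
	Ok(Some(existing_room)) if !force => {
		// report `existing_room` back to the admin and bail
	},
	_ => services()
		.rooms
		.alias
		.set_alias(&room_alias, &room_id)
		.expect("sketch: real code surfaces this error"),
}
```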
- Set { - #[arg(short, long)] - /// Set the alias even if a room is already using it - force: bool, - - /// The room id to set the alias on - room_id: Box, - - /// The alias localpart to use (`alias`, not `#alias:servername.tld`) - room_alias_localpart: String, - }, - - /// - Remove an alias - Remove { - /// The alias localpart to remove (`alias`, not `#alias:servername.tld`) - room_alias_localpart: String, - }, - - /// - Show which room is using an alias - Which { - /// The alias localpart to look up (`alias`, not - /// `#alias:servername.tld`) - room_alias_localpart: String, - }, - - /// - List aliases currently being used - List { - /// If set, only list the aliases for this room - room_id: Option>, - }, -} - pub(crate) async fn process(command: RoomAliasCommand, _body: Vec<&str>) -> Result { match command { RoomAliasCommand::Set { diff --git a/src/service/admin/room/room_commands.rs b/src/service/admin/room/room_commands.rs new file mode 100644 index 00000000..f4964adf --- /dev/null +++ b/src/service/admin/room/room_commands.rs @@ -0,0 +1,59 @@ +use std::fmt::Write as _; + +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; + +use crate::{ + service::admin::{escape_html, get_room_info, PAGE_SIZE}, + services, Result, +}; + +pub(super) async fn list(_body: Vec<&str>, page: Option) -> Result { + // TODO: i know there's a way to do this with clap, but i can't seem to find it + let page = page.unwrap_or(1); + let mut rooms = services() + .rooms + .metadata + .iter_ids() + .filter_map(Result::ok) + .map(|id: OwnedRoomId| get_room_info(&id)) + .collect::>(); + rooms.sort_by_key(|r| r.1); + rooms.reverse(); + + let rooms = rooms + .into_iter() + .skip(page.saturating_sub(1) * PAGE_SIZE) + .take(PAGE_SIZE) + .collect::>(); + + if rooms.is_empty() { + return Ok(RoomMessageEventContent::text_plain("No more rooms.")); + }; + + let output_plain = format!( + "Rooms:\n{}", + rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::>() + .join("\n") + ); + let output_html = format!( + "\n\t\t\n{}
<table><caption>Room list - page \
+ {page}</caption>\n<tr><th>id</th>\t<th>members</th>\t<th>name</th></tr>\n{}</table>
", + rooms + .iter() + .fold(String::new(), |mut output, (id, members, name)| { + writeln!( + output, + "{}\t{}\t{}", + escape_html(id.as_ref()), + members, + escape_html(name) + ) + .unwrap(); + output + }) + ); + Ok(RoomMessageEventContent::text_html(output_plain, output_html)) +} diff --git a/src/service/admin/room_directory.rs b/src/service/admin/room/room_directory_commands.rs similarity index 80% rename from src/service/admin/room_directory.rs rename to src/service/admin/room/room_directory_commands.rs index 86dc03d6..bfdc7a60 100644 --- a/src/service/admin/room_directory.rs +++ b/src/service/admin/room/room_directory_commands.rs @@ -1,47 +1,26 @@ use std::fmt::Write as _; -use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId}; +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; +use super::RoomDirectoryCommand; use crate::{ service::admin::{escape_html, get_room_info, PAGE_SIZE}, services, Result, }; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum RoomDirectoryCommand { - /// - Publish a room to the room directory - Publish { - /// The room id of the room to publish - room_id: Box, - }, - - /// - Unpublish a room to the room directory - Unpublish { - /// The room id of the room to unpublish - room_id: Box, - }, - - /// - List rooms that are published - List { - page: Option, - }, -} - pub(crate) async fn process(command: RoomDirectoryCommand, _body: Vec<&str>) -> Result { match command { RoomDirectoryCommand::Publish { room_id, } => match services().rooms.directory.set_public(&room_id) { Ok(()) => Ok(RoomMessageEventContent::text_plain("Room published")), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {}", err))), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {err}"))), }, RoomDirectoryCommand::Unpublish { room_id, } => match services().rooms.directory.set_not_public(&room_id) { Ok(()) => Ok(RoomMessageEventContent::text_plain("Room unpublished")), - Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {}", err))), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {err}"))), }, RoomDirectoryCommand::List { page, diff --git a/src/service/admin/room_moderation.rs b/src/service/admin/room/room_moderation_commands.rs similarity index 90% rename from src/service/admin/room_moderation.rs rename to src/service/admin/room/room_moderation_commands.rs index 18a42a37..c02d8d16 100644 --- a/src/service/admin/room_moderation.rs +++ b/src/service/admin/room/room_moderation_commands.rs @@ -1,75 +1,17 @@ use std::fmt::Write as _; -use clap::Subcommand; use ruma::{ events::room::message::RoomMessageEventContent, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, }; use tracing::{debug, error, info, warn}; +use super::RoomModerationCommand; use crate::{ api::client_server::{get_alias_helper, leave_room}, service::admin::{escape_html, Service}, services, Result, }; -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum RoomModerationCommand { - /// - Bans a room from local users joining and evicts all our local users - /// from the room. Also blocks any invites (local and remote) for the - /// banned room. - /// - /// Server admins (users in the conduwuit admin room) will not be evicted - /// and server admins can still join the room. 
To evict admins too, use - /// --force (also ignores errors) To disable incoming federation of the - /// room, use --disable-federation - BanRoom { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - - /// The room in the format of `!roomid:example.com` or a room alias in - /// the format of `#roomalias:example.com` - room: Box, - }, - - /// - Bans a list of rooms (room IDs and room aliases) from a newline - /// delimited codeblock similar to `user deactivate-all` - BanListOfRooms { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - }, - - /// - Unbans a room to allow local users to join again - /// - /// To re-enable incoming federation of the room, use --enable-federation - UnbanRoom { - #[arg(long)] - /// Enables incoming federation of the room after unbanning - enable_federation: bool, - - /// The room in the format of `!roomid:example.com` or a room alias in - /// the format of `#roomalias:example.com` - room: Box, - }, - - /// - List of all rooms we have banned - ListBannedRooms, -} - pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) -> Result { match command { RoomModerationCommand::BanRoom { From 7cf20afcbc9e9d8651cfddf45003c40aa209b6b2 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 20 Apr 2024 23:00:55 -0400 Subject: [PATCH 06/45] ci: fix dockerhub login Signed-off-by: strawberry --- .github/workflows/ci.yml | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6a1a351c..ee6adaeb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -294,66 +294,71 @@ jobs: name: Create and Push Docker Manifest runs-on: ubuntu-latest needs: build-oci - + steps: - name: Login to Docker Hub uses: docker/login-action@v3 + env: + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + DOCKER_USERNAME: ${{ vars.DOCKER_USERNAME }} + if: ${{ (github.event_name != 'pull_request') && (env.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} with: - username: ${{ secrets.DOCKER_USERNAME }} + # username is not really a secret + username: ${{ vars.DOCKER_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - + - name: Login to GitHub Container Registry uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - + - name: Load OCI Images run: | docker load -i oci-image-x86_64-unknown-linux-musl-jemalloc.tar.gz docker load -i oci-image-aarch64-unknown-linux-musl-jemalloc.tar.gz - + - name: Create and Push Manifest to Docker Hub run: | DOCKER_IMAGE_NAME="docker.io/${{ github.repository }}" BRANCH_NAME="${{ github.ref_name }}" SHA_TAG="${BRANCH_NAME}-${{ github.sha }}" BRANCH_TAG=$BRANCH_NAME - + if [ "$BRANCH_NAME" == "main" ]; then BRANCH_TAG="latest" fi - + # Create and push SHA specific manifest docker manifest create $DOCKER_IMAGE_NAME:$SHA_TAG \ --amend $DOCKER_IMAGE_NAME:${{ github.sha }}-x86_64-jemalloc \ --amend $DOCKER_IMAGE_NAME:${{ github.sha }}-aarch64-jemalloc docker manifest push $DOCKER_IMAGE_NAME:$SHA_TAG - + # Update and 
push branch or latest manifest docker manifest create $DOCKER_IMAGE_NAME:$BRANCH_TAG \ --amend $DOCKER_IMAGE_NAME:${{ github.sha }}-x86_64-jemalloc \ --amend $DOCKER_IMAGE_NAME:${{ github.sha }}-aarch64-jemalloc docker manifest push $DOCKER_IMAGE_NAME:$BRANCH_TAG - + - name: Create and Push Manifest to GitHub Container Registry run: | GHCR_IMAGE_NAME="ghcr.io/${{ github.repository }}" BRANCH_NAME="${{ github.ref_name }}" SHA_TAG="${BRANCH_NAME}-${{ github.sha }}" BRANCH_TAG=$BRANCH_NAME - + if [ "$BRANCH_NAME" == "main" ]; then BRANCH_TAG="latest" fi - + # Create and push SHA specific manifest docker manifest create $GHCR_IMAGE_NAME:$SHA_TAG \ --amend $GHCR_IMAGE_NAME:${{ github.sha }}-x86_64-jemalloc \ --amend $GHCR_IMAGE_NAME:${{ github.sha }}-aarch64-jemalloc docker manifest push $GHCR_IMAGE_NAME:$SHA_TAG - + # Update and push branch or latest manifest docker manifest create $GHCR_IMAGE_NAME:$BRANCH_TAG \ --amend $GHCR_IMAGE_NAME:${{ github.sha }}-x86_64-jemalloc \ From 5d16f5392b5fdb487389cf48e8237d4ff52415ca Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 20 Apr 2024 23:01:41 -0400 Subject: [PATCH 07/45] fix config check running too late, add tower panic catcher(?) Signed-off-by: strawberry --- Cargo.lock | 1 + Cargo.toml | 2 +- src/config/check.rs | 2 +- src/config/mod.rs | 8 ++++---- src/main.rs | 6 +++++- 5 files changed, 12 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d08abaf..da06c5ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3437,6 +3437,7 @@ dependencies = [ "bitflags 2.5.0", "bytes", "futures-core", + "futures-util", "http", "http-body", "http-body-util", diff --git a/Cargo.toml b/Cargo.toml index 9aadf621..f818d56f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,7 +105,7 @@ features = ["util"] [dependencies.tower-http] version = "0.5.2" -features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] +features = ["add-extension", "cors", "sensitive-headers", "trace", "util", "catch-panic"] [dependencies.hyper] version = "1.3.1" diff --git a/src/config/check.rs b/src/config/check.rs index f221cbd5..f289851f 100644 --- a/src/config/check.rs +++ b/src/config/check.rs @@ -5,7 +5,7 @@ use tracing::{debug, error, info, warn}; use crate::{utils::error::Error, Config}; -pub fn check(config: &Config) -> Result<(), Error> { +pub(super) fn check(config: &Config) -> Result<(), Error> { config.warn_deprecated(); config.warn_unknown_key(); diff --git a/src/config/mod.rs b/src/config/mod.rs index f6ec335a..599b8cd8 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -22,10 +22,10 @@ use serde::{de::IgnoredAny, Deserialize}; use tracing::{debug, error, warn}; use url::Url; -use self::proxy::ProxyConfig; +use self::{check::check, proxy::ProxyConfig}; use crate::utils::error::Error; -mod check; +pub(crate) mod check; mod proxy; #[derive(Deserialize, Clone, Debug)] @@ -371,8 +371,6 @@ impl Config { Ok(config) => config, }; - check::check(&config)?; - // don't start if we're listening on both UNIX sockets and TCP at same time if config.is_dual_listening(&raw_config) { return Err(Error::bad_config("dual listening on UNIX and TCP sockets not allowed.")); @@ -452,6 +450,8 @@ impl Config { .collect::>(), } } + + pub fn check(&self) -> Result<(), Error> { check(self) } } impl fmt::Display for Config { diff --git a/src/main.rs b/src/main.rs index a827ea1d..6481e017 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,6 +30,7 @@ use tokio::{ }; use tower::ServiceBuilder; use tower_http::{ + catch_panic::CatchPanicLayer, cors::{self, CorsLayer}, 
trace::{DefaultOnFailure, TraceLayer}, ServiceBuilderExt as _, @@ -76,7 +77,7 @@ async fn async_main(server: &Server) -> Result<(), Error> { if let Err(error) = run(server).await { error!("Critical error running server: {error}"); return Err(Error::Error(format!("{error}"))); - }; + } if let Err(error) = stop(server).await { error!("Critical error stopping server: {error}"); @@ -274,6 +275,7 @@ async fn build(server: &Server) -> io::Result Result { tracing_reload_handle = init_tracing_sub(&config); }; + config.check()?; + info!( server_name = ?config.server_name, database_path = ?config.database_path, From e816d3ffc0e9f426eb01a84f35870404408079c7 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Sun, 21 Apr 2024 12:45:47 -0400 Subject: [PATCH 08/45] ci: extract OCI images before loading and before login Signed-off-by: strawberry --- .github/workflows/ci.yml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ee6adaeb..fb5d8899 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -296,6 +296,13 @@ jobs: needs: build-oci steps: + - name: Extract and load OCI Images + run: | + unzip oci-image-x86_64-unknown-linux-musl-jemalloc.zip + docker load -i oci-image-x86_64-unknown-linux-musl-jemalloc.tar.gz + unzip oci-image-aarch64-unknown-linux-musl-jemalloc.zip + docker load -i oci-image-aarch64-unknown-linux-musl-jemalloc.tar.gz + - name: Login to Docker Hub uses: docker/login-action@v3 env: @@ -314,11 +321,6 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Load OCI Images - run: | - docker load -i oci-image-x86_64-unknown-linux-musl-jemalloc.tar.gz - docker load -i oci-image-aarch64-unknown-linux-musl-jemalloc.tar.gz - - name: Create and Push Manifest to Docker Hub run: | DOCKER_IMAGE_NAME="docker.io/${{ github.repository }}" From a1eb7d79aa79fcb1cd0904864cfe4770e42e6d23 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 13:02:56 -0400 Subject: [PATCH 09/45] simplify room v11 top level redacts key Signed-off-by: strawberry --- src/service/pdu.rs | 63 +++++++++++++++++++++++++------ src/service/rooms/timeline/mod.rs | 15 -------- 2 files changed, 52 insertions(+), 26 deletions(-) diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 6dc965ff..d8f020e6 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -3,9 +3,10 @@ use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; use ruma::{ canonical_json::redact_content_in_place, events::{ - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, - AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, - AnyTimelineEvent, StateEvent, TimelineEventType, + room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent}, + space::child::HierarchySpaceChildEvent, + AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, + AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType, }, serde::Raw, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, @@ -98,10 +99,47 @@ impl PduEvent { Ok(()) } + /// Copies the `redacts` property of the event to the `content` dict and + /// vice-versa. 
+ /// + /// This follows the specification's + /// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): + /// + /// > For backwards-compatibility with older clients, servers should add a + /// > redacts + /// > property to the top level of m.room.redaction events in when serving + /// > such events + /// > over the Client-Server API. + /// + /// > For improved compatibility with newer clients, servers should add a + /// > redacts property + /// > to the content of m.room.redaction events in older room versions when + /// > serving + /// > such events over the Client-Server API. + #[must_use] + pub fn copy_redacts(&self) -> (Option>, Box) { + if self.kind == TimelineEventType::RoomRedaction { + if let Ok(mut content) = serde_json::from_str::(self.content.get()) { + if let Some(redacts) = content.redacts { + return (Some(redacts.into()), self.content.clone()); + } else if let Some(redacts) = self.redacts.clone() { + content.redacts = Some(redacts.into()); + return ( + self.redacts.clone(), + to_raw_value(&content).expect("Must be valid, we only added redacts field"), + ); + } + } + } + + (self.redacts.clone(), self.content.clone()) + } + #[tracing::instrument(skip(self))] pub fn to_sync_room_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); let mut json = json!({ - "content": self.content, + "content": content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, @@ -114,7 +152,7 @@ impl PduEvent { if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } - if let Some(redacts) = &self.redacts { + if let Some(redacts) = &redacts { json["redacts"] = json!(redacts); } @@ -124,8 +162,9 @@ impl PduEvent { /// This only works for events that are also AnyRoomEvents. 
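Before moving on to the serialization helpers: to make the two wire formats that `copy_redacts` bridges concrete, here is an illustrative pair of redaction events. The event ID and reason are placeholders; the shapes themselves follow the spec (top-level `redacts` before room version 11, `content.redacts` from v11 on):

```rust
// Illustrative only: the two m.room.redaction shapes copy_redacts()
// translates between; values are placeholders.
let pre_v11 = serde_json::json!({
	"type": "m.room.redaction",
	"redacts": "$badevent:example.com",     // top level, room versions 1 through 10
	"content": { "reason": "spam" }
});
let v11 = serde_json::json!({
	"type": "m.room.redaction",
	"content": {
		"redacts": "$badevent:example.com", // moved into content in v11
		"reason": "spam"
	}
});
```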
#[tracing::instrument(skip(self))] pub fn to_any_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); let mut json = json!({ - "content": self.content, + "content": content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, @@ -139,7 +178,7 @@ impl PduEvent { if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } - if let Some(redacts) = &self.redacts { + if let Some(redacts) = &redacts { json["redacts"] = json!(redacts); } @@ -148,8 +187,9 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_room_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); let mut json = json!({ - "content": self.content, + "content": content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, @@ -163,7 +203,7 @@ impl PduEvent { if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } - if let Some(redacts) = &self.redacts { + if let Some(redacts) = &redacts { json["redacts"] = json!(redacts); } @@ -172,8 +212,9 @@ impl PduEvent { #[tracing::instrument(skip(self))] pub fn to_message_like_event(&self) -> Raw { + let (redacts, content) = self.copy_redacts(); let mut json = json!({ - "content": self.content, + "content": content, "type": self.kind, "event_id": self.event_id, "sender": self.sender, @@ -187,7 +228,7 @@ impl PduEvent { if let Some(state_key) = &self.state_key { json["state_key"] = json!(state_key); } - if let Some(redacts) = &self.redacts { + if let Some(redacts) = &redacts { json["redacts"] = json!(redacts); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 3639c56b..94b993f8 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -307,21 +307,6 @@ impl Service { let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&count2.to_be_bytes()); - // https://spec.matrix.org/v1.9/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property - // For backwards-compatibility with older clients, - // servers should add a redacts property to the top level of m.room.redaction - // events in when serving such events over the Client-Server API. - if pdu.kind == TimelineEventType::RoomRedaction - && services().rooms.state.get_room_version(&pdu.room_id)? 
== RoomVersionId::V11 - { - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?; - - if let Some(redact_id) = &content.redacts { - pdu_json.insert("redacts".to_owned(), CanonicalJsonValue::String(redact_id.to_string())); - } - } - // Insert pdu self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?; From 53e7df820ce456565165629b47801c416b083ee7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 15:00:54 -0400 Subject: [PATCH 10/45] add sending.rs to admin db query command Signed-off-by: strawberry --- src/service/admin/query/mod.rs | 14 ++++++++++++++ src/service/admin/query/sending.rs | 25 +++++++++++++++++++++++++ src/service/sending/mod.rs | 2 +- 3 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 src/service/admin/query/sending.rs diff --git a/src/service/admin/query/mod.rs b/src/service/admin/query/mod.rs index 60867fb2..9606095f 100644 --- a/src/service/admin/query/mod.rs +++ b/src/service/admin/query/mod.rs @@ -3,6 +3,7 @@ pub(crate) mod appservice; pub(crate) mod globals; pub(crate) mod presence; pub(crate) mod room_alias; +pub(crate) mod sending; use clap::Subcommand; use ruma::{ @@ -12,6 +13,7 @@ use ruma::{ use self::{ account_data::account_data, appservice::appservice, globals::globals, presence::presence, room_alias::room_alias, + sending::sending, }; use crate::Result; @@ -38,6 +40,10 @@ pub(crate) enum QueryCommand { /// - globals.rs iterators and getters #[command(subcommand)] Globals(Globals), + + /// - globals.rs iterators and getters + #[command(subcommand)] + Sending(Sending), } #[cfg_attr(test, derive(Debug))] @@ -132,6 +138,13 @@ pub(crate) enum Globals { }, } +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +/// All the getters and iterators from src/database/key_value/sending.rs +pub(crate) enum Sending { + ActiveRequests, +} + /// Processes admin query commands pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result { Ok(match command { @@ -140,5 +153,6 @@ pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result presence(command).await?, QueryCommand::RoomAlias(command) => room_alias(command).await?, QueryCommand::Globals(command) => globals(command).await?, + QueryCommand::Sending(command) => sending(command).await?, }) } diff --git a/src/service/admin/query/sending.rs b/src/service/admin/query/sending.rs new file mode 100644 index 00000000..d64546f8 --- /dev/null +++ b/src/service/admin/query/sending.rs @@ -0,0 +1,25 @@ +use ruma::events::room::message::RoomMessageEventContent; + +use super::Sending; +use crate::{services, Result}; + +/// All the getters and iterators in key_value/sending.rs +pub(super) async fn sending(subcommand: Sending) -> Result { + match subcommand { + Sending::ActiveRequests => { + let timer = tokio::time::Instant::now(); + let results = services().sending.db.active_requests(); + let query_time = timer.elapsed(); + + let active_requests: Result> = results.collect(); + + Ok(RoomMessageEventContent::text_html( + format!("Query completed in {query_time:?}:\n\n```\n{:?}```", active_requests), + format!( + "
<p>Query completed in {query_time:?}:</p>\n<pre>{:?}\n</pre>
", + active_requests + ), + )) + }, + } +} diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 8c813970..86676d58 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -38,7 +38,7 @@ pub use send::FedDest; const SELECT_EDU_LIMIT: usize = 16; pub struct Service { - db: &'static dyn Data, + pub db: &'static dyn Data, /// The state for a given state hash. pub(super) maximum_requests: Arc, From cbe0efe3f4064be849937d1299a11866195cea44 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 15:12:22 -0400 Subject: [PATCH 11/45] move sign_json and verify_json admin commands to debug these are purely debug-related commands Signed-off-by: strawberry --- src/service/admin/debug/debug_commands.rs | 53 ++++++++++++++++++ src/service/admin/debug/mod.rs | 16 +++++- .../admin/federation/federation_commands.rs | 56 +------------------ src/service/admin/federation/mod.rs | 18 +----- 4 files changed, 70 insertions(+), 73 deletions(-) diff --git a/src/service/admin/debug/debug_commands.rs b/src/service/admin/debug/debug_commands.rs index 870016f3..b882b517 100644 --- a/src/service/admin/debug/debug_commands.rs +++ b/src/service/admin/debug/debug_commands.rs @@ -337,3 +337,56 @@ pub(super) async fn change_log_level( Ok(RoomMessageEventContent::text_plain("No log level was specified.")) } + +pub(super) async fn sign_json(body: Vec<&str>) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(mut value) => { + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut value, + ) + .expect("our request json is what ruma expects"); + let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json"); + Ok(RoomMessageEventContent::text_plain(json_text)) + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + } +} + +pub(super) async fn verify_json(body: Vec<&str>) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + let pub_key_map = RwLock::new(BTreeMap::new()); + + services() + .rooms + .event_handler + .fetch_required_signing_keys([&value], &pub_key_map) + .await?; + + let pub_key_map = pub_key_map.read().await; + match ruma::signatures::verify_json(&pub_key_map, &value) { + Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Signature verification failed: {e}" + ))), + } + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + )) + } +} diff --git a/src/service/admin/debug/mod.rs b/src/service/admin/debug/mod.rs index 80e1c74c..f25d7511 100644 --- a/src/service/admin/debug/mod.rs +++ b/src/service/admin/debug/mod.rs @@ -3,7 +3,7 @@ use ruma::{events::room::message::RoomMessageEventContent, EventId, RoomId, Serv use self::debug_commands::{ change_log_level, force_device_list_updates, get_auth_chain, get_pdu, get_remote_pdu, get_room_state, parse_pdu, - ping, + ping, sign_json, verify_json, }; use crate::Result; @@ -82,6 +82,18 @@ pub(crate) enum DebugCommand { #[arg(short, long)] reset: bool, }, + + /// - Verify json signatures + /// + /// This command needs a JSON blob provided in a Markdown code block below + /// the command. + SignJson, + + /// - Verify json signatures + /// + /// This command needs a JSON blob provided in a Markdown code block below + /// the command. + VerifyJson, } pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result { @@ -108,5 +120,7 @@ pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result change_log_level(body, filter, reset).await?, + DebugCommand::SignJson => sign_json(body).await?, + DebugCommand::VerifyJson => verify_json(body).await?, }) } diff --git a/src/service/admin/federation/federation_commands.rs b/src/service/admin/federation/federation_commands.rs index 845c2f91..56c9f510 100644 --- a/src/service/admin/federation/federation_commands.rs +++ b/src/service/admin/federation/federation_commands.rs @@ -1,7 +1,6 @@ -use std::{collections::BTreeMap, fmt::Write as _}; +use std::fmt::Write as _; use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName}; -use tokio::sync::RwLock; use crate::{services, utils::HtmlEscape, Result}; @@ -26,59 +25,6 @@ pub(super) async fn incoming_federeation(_body: Vec<&str>) -> Result) -> Result { - if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(mut value) => { - ruma::signatures::sign_json( - services().globals.server_name().as_str(), - services().globals.keypair(), - &mut value, - ) - .expect("our request json is what ruma expects"); - let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::text_plain(json_text)) - }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), - } - } else { - Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )) - } -} - -pub(super) async fn verify_json(body: Vec<&str>) -> Result { - if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - let pub_key_map = RwLock::new(BTreeMap::new()); - - services() - .rooms - .event_handler - .fetch_required_signing_keys([&value], &pub_key_map) - .await?; - - let pub_key_map = pub_key_map.read().await; - match ruma::signatures::verify_json(&pub_key_map, &value) { - Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), - Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Signature verification failed: {e}" - ))), - } - }, - Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), - } - } else { - Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. 
Add --help for details.", - )) - } -} - pub(super) async fn fetch_support_well_known( _body: Vec<&str>, server_name: Box, ) -> Result { diff --git a/src/service/admin/federation/mod.rs b/src/service/admin/federation/mod.rs index 74878e36..1f8280b7 100644 --- a/src/service/admin/federation/mod.rs +++ b/src/service/admin/federation/mod.rs @@ -1,9 +1,7 @@ use clap::Subcommand; use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName}; -use self::federation_commands::{ - disable_room, enable_room, fetch_support_well_known, incoming_federeation, sign_json, verify_json, -}; +use self::federation_commands::{disable_room, enable_room, fetch_support_well_known, incoming_federeation}; use crate::Result; pub(crate) mod federation_commands; @@ -24,18 +22,6 @@ pub(crate) enum FederationCommand { room_id: Box, }, - /// - Verify json signatures - /// - /// This command needs a JSON blob provided in a Markdown code block below - /// the command. - SignJson, - - /// - Verify json signatures - /// - /// This command needs a JSON blob provided in a Markdown code block below - /// the command. - VerifyJson, - /// - Fetch `/.well-known/matrix/support` from the specified server /// /// Despite the name, this is not a federation endpoint and does not go @@ -59,8 +45,6 @@ pub(crate) async fn process(command: FederationCommand, body: Vec<&str>) -> Resu room_id, } => enable_room(body, room_id).await?, FederationCommand::IncomingFederation => incoming_federeation(body).await?, - FederationCommand::SignJson => sign_json(body).await?, - FederationCommand::VerifyJson => verify_json(body).await?, FederationCommand::FetchSupportWellKnown { server_name, } => fetch_support_well_known(body, server_name).await?, From 68702875a3dad2f3ce7d839cb9d4bd6c66a98558 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 12:41:49 -0700 Subject: [PATCH 12/45] unpin crane because the bug was fixed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'crane': 'github:ipetkov/crane/2c653e4478476a52c6aa3ac0495e4dea7449ea0e?narHash=sha256-XoXRS%2B5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc%3D' (2024-02-11) → 'github:ipetkov/crane/55f4939ac59ff8f89c6a4029730a2d49ea09105f?narHash=sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU%3D' (2024-04-21) --- flake.lock | 8 ++++---- flake.nix | 7 +------ 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/flake.lock b/flake.lock index 07b37e26..99fff55b 100644 --- a/flake.lock +++ b/flake.lock @@ -51,17 +51,17 @@ ] }, "locked": { - "lastModified": 1707685877, - "narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=", + "lastModified": 1713721181, + "narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=", "owner": "ipetkov", "repo": "crane", - "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e", + "rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f", "type": "github" }, "original": { "owner": "ipetkov", + "ref": "master", "repo": "crane", - "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e", "type": "github" } }, diff --git a/flake.nix b/flake.nix index 61fafef7..29ceb3a6 100644 --- a/flake.nix +++ b/flake.nix @@ -13,12 +13,7 @@ inputs.nixpkgs.follows = "nixpkgs"; }; crane = { - # Pin latest crane that's not affected by the following bugs: - # - # * - # * - # * - url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e"; + url = "github:ipetkov/crane?ref=master"; inputs.nixpkgs.follows = "nixpkgs"; }; attic.url = "github:zhaofengli/attic?ref=main"; From 
f6ed5278c135751969dc76a4e212811cc2402d6d Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 16:30:02 -0400 Subject: [PATCH 13/45] add `get-remote-pdu-list` debug admin command Signed-off-by: strawberry --- src/service/admin/debug/debug_commands.rs | 26 +++++++++++++++++++++++ src/service/admin/debug/mod.rs | 20 +++++++++++++++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/src/service/admin/debug/debug_commands.rs b/src/service/admin/debug/debug_commands.rs index b882b517..d4a82c2a 100644 --- a/src/service/admin/debug/debug_commands.rs +++ b/src/service/admin/debug/debug_commands.rs @@ -100,6 +100,32 @@ pub(super) async fn get_pdu(_body: Vec<&str>, event_id: Box) -> Result< } } +pub(super) async fn get_remote_pdu_list( + body: Vec<&str>, server: Box, force: bool, +) -> Result { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let list = body + .clone() + .drain(1..body.len() - 1) + .filter_map(|pdu| EventId::parse(pdu).ok()) + .collect::>(); + + for pdu in list { + if force { + _ = get_remote_pdu(Vec::new(), Box::from(pdu), server.clone()).await; + } else { + get_remote_pdu(Vec::new(), Box::from(pdu), server.clone()).await?; + } + } + + return Ok(RoomMessageEventContent::text_plain("Fetched list of remote PDUs.")); + } + + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) +} + pub(super) async fn get_remote_pdu( _body: Vec<&str>, event_id: Box, server: Box, ) -> Result { diff --git a/src/service/admin/debug/mod.rs b/src/service/admin/debug/mod.rs index f25d7511..8b617218 100644 --- a/src/service/admin/debug/mod.rs +++ b/src/service/admin/debug/mod.rs @@ -2,8 +2,8 @@ use clap::Subcommand; use ruma::{events::room::message::RoomMessageEventContent, EventId, RoomId, ServerName}; use self::debug_commands::{ - change_log_level, force_device_list_updates, get_auth_chain, get_pdu, get_remote_pdu, get_room_state, parse_pdu, - ping, sign_json, verify_json, + change_log_level, force_device_list_updates, get_auth_chain, get_pdu, get_remote_pdu, get_remote_pdu_list, + get_room_state, parse_pdu, ping, sign_json, verify_json, }; use crate::Result; @@ -45,6 +45,18 @@ pub(crate) enum DebugCommand { server: Box, }, + /// Same as `get-remote-pdu` but accepts a codeblock newline delimited list + /// of PDUs and a single server to fetch from + GetRemotePduList { + /// Argument for us to attempt to fetch all the events from the + /// specified remote server. + server: Box, + + /// If set, ignores errors, else stops at the first error/failure. + #[arg(short, long)] + force: bool, + }, + /// - Gets all the room state events for the specified room. 
/// /// This is functionally equivalent to `GET @@ -122,5 +134,9 @@ pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result change_log_level(body, filter, reset).await?, DebugCommand::SignJson => sign_json(body).await?, DebugCommand::VerifyJson => verify_json(body).await?, + DebugCommand::GetRemotePduList { + server, + force, + } => get_remote_pdu_list(body, server, force).await?, }) } From 8e5bde1684e96f5b491d14f4b5749428192ff663 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 18:02:08 -0400 Subject: [PATCH 14/45] return matrix JSON response for panic catcher with details if debug build or `trace` used Signed-off-by: strawberry --- src/main.rs | 44 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6481e017..6e889175 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,7 +3,7 @@ use std::fs::Permissions; // not unix specific, just only for UNIX sockets stuff #[cfg(unix)] use std::os::unix::fs::PermissionsExt as _; /* not unix specific, just only for UNIX sockets stuff and *nix * container checks */ -use std::{io, net::SocketAddr, sync::atomic, time::Duration}; +use std::{any::Any, io, net::SocketAddr, sync::atomic, time::Duration}; use axum::{ extract::{DefaultBodyLimit, MatchedPath}, @@ -35,7 +35,7 @@ use tower_http::{ trace::{DefaultOnFailure, TraceLayer}, ServiceBuilderExt as _, }; -use tracing::{debug, error, info, warn, Level}; +use tracing::{debug, error, info, level_filters::LevelFilter, warn, Level}; use tracing_subscriber::{prelude::*, reload, EnvFilter, Registry}; mod routes; @@ -275,7 +275,6 @@ async fn build(server: &Server) -> io::Result io::Result Result<(), nix::errno::Errno> { Ok(()) } + +#[allow(clippy::needless_pass_by_value)] +fn catch_panic_layer(err: Box) -> http::Response> { + let details = if cfg!(debug_assertions) || LevelFilter::current() == LevelFilter::TRACE { + if let Some(s) = err.downcast_ref::() { + s.clone() + } else if let Some(s) = err.downcast_ref::<&str>() { + s.to_string() + } else { + "Unknown internal server error occurred.".to_owned() + } + } else { + "Internal server error occurred.".to_owned() + }; + + let body = if cfg!(debug_assertions) || LevelFilter::current() == LevelFilter::TRACE { + serde_json::json!({ + "errcode": "M_UNKNOWN", + "error": "M_UNKNOWN: Internal server error occurred", + "details": details, + }) + .to_string() + } else { + serde_json::json!({ + "errcode": "M_UNKNOWN", + "error": "M_UNKNOWN: Internal server error occurred", + }) + .to_string() + }; + + http::Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .header(header::CONTENT_TYPE, "application/json") + .body(http_body_util::Full::from(body)) + .expect("Failed to create response for our panic catcher?") +} From d1403f9cd784c0d14e2377f8e4713a64ff42ff29 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 18:23:37 -0400 Subject: [PATCH 15/45] always print the details in panic catcher Signed-off-by: strawberry --- src/main.rs | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6e889175..0fb161b8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -35,7 +35,7 @@ use tower_http::{ trace::{DefaultOnFailure, TraceLayer}, ServiceBuilderExt as _, }; -use tracing::{debug, error, info, level_filters::LevelFilter, warn, Level}; +use tracing::{debug, error, info, warn, Level}; use tracing_subscriber::{prelude::*, reload, EnvFilter, Registry}; mod routes; @@ 
-593,32 +593,20 @@ fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { #[allow(clippy::needless_pass_by_value)] fn catch_panic_layer(err: Box) -> http::Response> { - let details = if cfg!(debug_assertions) || LevelFilter::current() == LevelFilter::TRACE { - if let Some(s) = err.downcast_ref::() { - s.clone() - } else if let Some(s) = err.downcast_ref::<&str>() { - s.to_string() - } else { - "Unknown internal server error occurred.".to_owned() - } + let details = if let Some(s) = err.downcast_ref::() { + s.clone() + } else if let Some(s) = err.downcast_ref::<&str>() { + s.to_string() } else { - "Internal server error occurred.".to_owned() + "Unknown internal server error occurred.".to_owned() }; - let body = if cfg!(debug_assertions) || LevelFilter::current() == LevelFilter::TRACE { - serde_json::json!({ - "errcode": "M_UNKNOWN", - "error": "M_UNKNOWN: Internal server error occurred", - "details": details, - }) - .to_string() - } else { - serde_json::json!({ - "errcode": "M_UNKNOWN", - "error": "M_UNKNOWN: Internal server error occurred", - }) - .to_string() - }; + let body = serde_json::json!({ + "errcode": "M_UNKNOWN", + "error": "M_UNKNOWN: Internal server error occurred", + "details": details, + }) + .to_string(); http::Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) From f55618a05f603d52383bd3ddea34d7e37f6175bd Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 18:26:32 -0400 Subject: [PATCH 16/45] add federation allowed checks on get remote pdu list Signed-off-by: strawberry --- src/service/admin/debug/debug_commands.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/service/admin/debug/debug_commands.rs b/src/service/admin/debug/debug_commands.rs index d4a82c2a..aae4b948 100644 --- a/src/service/admin/debug/debug_commands.rs +++ b/src/service/admin/debug/debug_commands.rs @@ -103,6 +103,18 @@ pub(super) async fn get_pdu(_body: Vec<&str>, event_id: Box) -> Result< pub(super) async fn get_remote_pdu_list( body: Vec<&str>, server: Box, force: bool, ) -> Result { + if !services().globals.config.allow_federation { + return Ok(RoomMessageEventContent::text_plain( + "Federation is disabled on this homeserver.", + )); + } + + if server == services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain( + "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for fetching local PDUs.", + )); + } + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { let list = body .clone() From dd1616e2ee0250befcf7201a040d67c4b8067d26 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 18:32:07 -0400 Subject: [PATCH 17/45] add get_latest_edu_count admin query cmd Signed-off-by: strawberry --- src/service/admin/query/mod.rs | 4 ++++ src/service/admin/query/sending.rs | 15 +++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/src/service/admin/query/mod.rs b/src/service/admin/query/mod.rs index 9606095f..f1473a46 100644 --- a/src/service/admin/query/mod.rs +++ b/src/service/admin/query/mod.rs @@ -143,6 +143,10 @@ pub(crate) enum Globals { /// All the getters and iterators from src/database/key_value/sending.rs pub(crate) enum Sending { ActiveRequests, + + GetLatestEduCount { + server_name: Box, + }, } /// Processes admin query commands diff --git a/src/service/admin/query/sending.rs b/src/service/admin/query/sending.rs index d64546f8..e980a6ee 100644 --- a/src/service/admin/query/sending.rs +++ b/src/service/admin/query/sending.rs @@ -21,5 +21,20 @@ pub(super) async fn sending(subcommand: Sending) -> Result { + let timer = tokio::time::Instant::now(); + let results = services().sending.db.get_latest_educount(&server_name); + let query_time = timer.elapsed(); + + Ok(RoomMessageEventContent::text_html( + format!("Query completed in {query_time:?}:\n\n```\n{:?}```", results), + format!( + "
<p>Query completed in {query_time:?}:</p>\n<pre><code>{:?}\n</code></pre>
", + results + ), + )) + }, } } From 74d301dbb931fd1d17bcbf3f9a7c27ef4724abb1 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 18:38:43 -0400 Subject: [PATCH 18/45] allow user admin commands to take the username only Signed-off-by: strawberry --- src/service/admin/user/mod.rs | 8 +++--- src/service/admin/user/user_commands.rs | 34 +++++++++++++++++++++---- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/src/service/admin/user/mod.rs b/src/service/admin/user/mod.rs index d14f7cfc..771b866b 100644 --- a/src/service/admin/user/mod.rs +++ b/src/service/admin/user/mod.rs @@ -1,7 +1,7 @@ pub(crate) mod user_commands; use clap::Subcommand; -use ruma::{events::room::message::RoomMessageEventContent, UserId}; +use ruma::events::room::message::RoomMessageEventContent; use self::user_commands::{create, deactivate, deactivate_all, list, list_joined_rooms, reset_password}; use crate::Result; @@ -20,7 +20,7 @@ pub(crate) enum UserCommand { /// - Reset user password ResetPassword { /// Username of the user for whom the password should be reset - username: Box, + username: String, }, /// - Deactivate a user @@ -30,7 +30,7 @@ pub(crate) enum UserCommand { Deactivate { #[arg(short, long)] leave_rooms: bool, - user_id: Box, + user_id: String, }, /// - Deactivate a list of users @@ -60,7 +60,7 @@ pub(crate) enum UserCommand { /// - Lists all the rooms (local and remote) that the specified user is /// joined in ListJoinedRooms { - user_id: Box, + user_id: String, }, } diff --git a/src/service/admin/user/user_commands.rs b/src/service/admin/user/user_commands.rs index 47a43233..52cc6792 100644 --- a/src/service/admin/user/user_commands.rs +++ b/src/service/admin/user/user_commands.rs @@ -25,6 +25,7 @@ pub(super) async fn create( _body: Vec<&str>, username: String, password: Option, ) -> Result { let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + // Validate user id let user_id = match UserId::parse_with_server_name(username.as_str().to_lowercase(), services().globals.server_name()) { @@ -35,11 +36,13 @@ pub(super) async fn create( ))) }, }; + if user_id.is_historical() { return Ok(RoomMessageEventContent::text_plain(format!( "Userid {user_id} is not allowed due to historical" ))); } + if services().users.exists(&user_id)? 
{ return Ok(RoomMessageEventContent::text_plain(format!("Userid {user_id} already exists"))); } @@ -117,9 +120,18 @@ pub(super) async fn create( } pub(super) async fn deactivate( - _body: Vec<&str>, leave_rooms: bool, user_id: Box, + _body: Vec<&str>, leave_rooms: bool, user_id: String, ) -> Result { - let user_id = Arc::::from(user_id); + // Validate user id + let user_id = + match UserId::parse_with_server_name(user_id.as_str().to_lowercase(), services().globals.server_name()) { + Ok(id) => Arc::::from(id), + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {e}" + ))) + }, + }; // check if user belongs to our server if user_id.server_name() != services().globals.server_name() { @@ -156,10 +168,11 @@ pub(super) async fn deactivate( } } -pub(super) async fn reset_password(_body: Vec<&str>, username: Box) -> Result { +pub(super) async fn reset_password(_body: Vec<&str>, username: String) -> Result { + // Validate user id let user_id = match UserId::parse_with_server_name(username.as_str().to_lowercase(), services().globals.server_name()) { - Ok(id) => id, + Ok(id) => Arc::::from(id), Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "The supplied username is not a valid username: {e}" @@ -279,7 +292,18 @@ pub(super) async fn deactivate_all(body: Vec<&str>, leave_rooms: bool, force: bo } } -pub(super) async fn list_joined_rooms(_body: Vec<&str>, user_id: Box) -> Result { +pub(super) async fn list_joined_rooms(_body: Vec<&str>, user_id: String) -> Result { + // Validate user id + let user_id = + match UserId::parse_with_server_name(user_id.as_str().to_lowercase(), services().globals.server_name()) { + Ok(id) => Arc::::from(id), + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {e}" + ))) + }, + }; + if user_id.server_name() != services().globals.server_name() { return Ok(RoomMessageEventContent::text_plain("User does not belong to our server.")); } From 486307863199a2b07a87e385abdb11610f468920 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 19:37:52 -0400 Subject: [PATCH 19/45] add users query command, initial fsck admin command Signed-off-by: strawberry --- src/service/admin/fsck.rs | 19 ------------------ src/service/admin/fsck/fsck_commands.rs | 26 +++++++++++++++++++++++++ src/service/admin/fsck/mod.rs | 19 ++++++++++++++++++ src/service/admin/mod.rs | 6 ++++++ src/service/admin/query/mod.rs | 17 ++++++++++++++-- src/service/admin/query/users.rs | 25 ++++++++++++++++++++++++ 6 files changed, 91 insertions(+), 21 deletions(-) delete mode 100644 src/service/admin/fsck.rs create mode 100644 src/service/admin/fsck/fsck_commands.rs create mode 100644 src/service/admin/fsck/mod.rs create mode 100644 src/service/admin/query/users.rs diff --git a/src/service/admin/fsck.rs b/src/service/admin/fsck.rs deleted file mode 100644 index 9e9b64a1..00000000 --- a/src/service/admin/fsck.rs +++ /dev/null @@ -1,19 +0,0 @@ -use clap::Subcommand; -use ruma::events::room::message::RoomMessageEventContent; - -use crate::Result; - -#[cfg_attr(test, derive(Debug))] -#[derive(Subcommand)] -pub(crate) enum FsckCommand { - Register, -} - -#[allow(dead_code)] -pub(crate) async fn fsck(command: FsckCommand, _body: Vec<&str>) -> Result { - match command { - FsckCommand::Register => { - todo!() - }, - } -} diff --git a/src/service/admin/fsck/fsck_commands.rs b/src/service/admin/fsck/fsck_commands.rs new file mode 100644 index 00000000..46a04c3c --- 
/dev/null +++ b/src/service/admin/fsck/fsck_commands.rs @@ -0,0 +1,26 @@ +use ruma::events::room::message::RoomMessageEventContent; + +use crate::{services, Result}; + +/// Uses the iterator in `src/database/key_value/users.rs` to iterator over +/// every user in our database (remote and local). Reports total count, any +/// errors if there were any, etc +pub(super) async fn check_all_users(_body: Vec<&str>) -> Result { + let timer = tokio::time::Instant::now(); + let results = services().users.db.iter(); + let query_time = timer.elapsed(); + + let users = results.collect::>(); + + let total = users.len(); + let err_count = users.iter().filter(|user| user.is_err()).count(); + let ok_count = users.iter().filter(|user| user.is_ok()).count(); + + let message = format!( + "Database query completed in {query_time:?}:\n\n```\nTotal entries: {:?}\nFailure/Invalid user count: \ + {:?}\nSuccess/Valid user count: {:?}```", + total, err_count, ok_count + ); + + Ok(RoomMessageEventContent::notice_html(message, String::new())) +} diff --git a/src/service/admin/fsck/mod.rs b/src/service/admin/fsck/mod.rs new file mode 100644 index 00000000..e618fdc5 --- /dev/null +++ b/src/service/admin/fsck/mod.rs @@ -0,0 +1,19 @@ +use clap::Subcommand; +use ruma::events::room::message::RoomMessageEventContent; + +use self::fsck_commands::check_all_users; +use crate::Result; + +pub(crate) mod fsck_commands; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum FsckCommand { + CheckAllUsers, +} + +pub(crate) async fn process(command: FsckCommand, body: Vec<&str>) -> Result { + Ok(match command { + FsckCommand::CheckAllUsers => check_all_users(body).await?, + }) +} diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 0f361a4c..f1a63652 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -26,6 +26,7 @@ use serde_json::value::to_raw_value; use tokio::sync::Mutex; use tracing::{error, warn}; +use self::fsck::FsckCommand; use super::pdu::PduBuilder; use crate::{ service::admin::{ @@ -82,6 +83,10 @@ enum AdminCommand { #[command(subcommand)] /// - Query all the database getters and iterators Query(QueryCommand), + + #[command(subcommand)] + /// - Query all the database getters and iterators + Fsck(FsckCommand), } #[derive(Debug)] @@ -283,6 +288,7 @@ impl Service { AdminCommand::Server(command) => server::process(command, body).await?, AdminCommand::Debug(command) => debug::process(command, body).await?, AdminCommand::Query(command) => query::process(command, body).await?, + AdminCommand::Fsck(command) => fsck::process(command, body).await?, }; Ok(reply_message_content) diff --git a/src/service/admin/query/mod.rs b/src/service/admin/query/mod.rs index f1473a46..a1d7c53a 100644 --- a/src/service/admin/query/mod.rs +++ b/src/service/admin/query/mod.rs @@ -4,6 +4,7 @@ pub(crate) mod globals; pub(crate) mod presence; pub(crate) mod room_alias; pub(crate) mod sending; +pub(crate) mod users; use clap::Subcommand; use ruma::{ @@ -13,7 +14,7 @@ use ruma::{ use self::{ account_data::account_data, appservice::appservice, globals::globals, presence::presence, room_alias::room_alias, - sending::sending, + sending::sending, users::users, }; use crate::Result; @@ -41,9 +42,13 @@ pub(crate) enum QueryCommand { #[command(subcommand)] Globals(Globals), - /// - globals.rs iterators and getters + /// - sending.rs iterators and getters #[command(subcommand)] Sending(Sending), + + /// - users.rs iterators and getters + #[command(subcommand)] + Users(Users), } #[cfg_attr(test, 
derive(Debug))] @@ -149,6 +154,13 @@ pub(crate) enum Sending { }, } +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +/// All the getters and iterators from src/database/key_value/users.rs +pub(crate) enum Users { + Iter, +} + /// Processes admin query commands pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result { Ok(match command { @@ -158,5 +170,6 @@ pub(crate) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result room_alias(command).await?, QueryCommand::Globals(command) => globals(command).await?, QueryCommand::Sending(command) => sending(command).await?, + QueryCommand::Users(command) => users(command).await?, }) } diff --git a/src/service/admin/query/users.rs b/src/service/admin/query/users.rs new file mode 100644 index 00000000..818ff6d6 --- /dev/null +++ b/src/service/admin/query/users.rs @@ -0,0 +1,25 @@ +use ruma::events::room::message::RoomMessageEventContent; + +use super::Users; +use crate::{services, Result}; + +/// All the getters and iterators in key_value/users.rs +pub(super) async fn users(subcommand: Users) -> Result { + match subcommand { + Users::Iter => { + let timer = tokio::time::Instant::now(); + let results = services().users.db.iter(); + let query_time = timer.elapsed(); + + let users = results.collect::>(); + + Ok(RoomMessageEventContent::text_html( + format!("Query completed in {query_time:?}:\n\n```\n{:?}```", users), + format!( + "
<p>Query completed in {query_time:?}:</p>\n<pre><code>{:?}\n</code></pre>
", + users + ), + )) + }, + } +} From 101e7c7ae5c81f32faa5db354f372ab1b89746bc Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 19 Apr 2024 20:48:47 -0700 Subject: [PATCH 20/45] add debug log level macros. Signed-off-by: Jason Volk --- src/utils/debug.rs | 41 +++++++++++++++++++++++++++++++++++++++++ src/utils/mod.rs | 1 + 2 files changed, 42 insertions(+) create mode 100644 src/utils/debug.rs diff --git a/src/utils/debug.rs b/src/utils/debug.rs new file mode 100644 index 00000000..6c1093a0 --- /dev/null +++ b/src/utils/debug.rs @@ -0,0 +1,41 @@ +/// Log message at the ERROR level in debug-mode (when debug-assertions are +/// enabled). In release mode it becomes DEBUG level, and possibly subject to +/// elision. +#[macro_export] +macro_rules! debug_error { + ( $($x:tt)+ ) => { + if cfg!(debug_assertions) { + error!( $($x)+ ); + } else { + debug!( $($x)+ ); + } + } +} + +/// Log message at the WARN level in debug-mode (when debug-assertions are +/// enabled). In release mode it becomes DEBUG level, and possibly subject to +/// elision. +#[macro_export] +macro_rules! debug_warn { + ( $($x:tt)+ ) => { + if cfg!(debug_assertions) { + warn!( $($x)+ ); + } else { + debug!( $($x)+ ); + } + } +} + +/// Log message at the INFO level in debug-mode (when debug-assertions are +/// enabled). In release mode it becomes DEBUG level, and possibly subject to +/// elision. +#[macro_export] +macro_rules! debug_info { + ( $($x:tt)+ ) => { + if cfg!(debug_assertions) { + info!( $($x)+ ); + } else { + debug!( $($x)+ ); + } + } +} diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 225c2200..248ff144 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod debug; pub(crate) mod error; use std::{ From 7efd1c6ba66700a155c89245e26790b43dd5e5d9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 16 Apr 2024 20:54:16 -0700 Subject: [PATCH 21/45] cleanup/split/dedup sending/send callstack Signed-off-by: Jason Volk --- src/service/sending/send.rs | 530 +++++++++++++++++++----------------- 1 file changed, 275 insertions(+), 255 deletions(-) diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 5090db99..f5cd0649 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -43,9 +43,16 @@ pub enum FedDest { Named(String, String), } +struct ActualDestination { + destination: FedDest, + host: String, + string: String, + cached: bool, +} + #[tracing::instrument(skip_all, name = "send")] pub(crate) async fn send_request( - client: &reqwest::Client, destination: &ServerName, request: T, + client: &reqwest::Client, destination: &ServerName, req: T, ) -> Result where T: OutgoingRequest + Debug, @@ -54,286 +61,150 @@ where return Err(Error::bad_config("Federation is disabled.")); } - if destination == services().globals.server_name() { - return Err(Error::bad_config("Won't send federation request to ourselves")); - } - - if destination.is_ip_literal() || IPAddress::is_valid(destination.host()) { - debug!( - "Destination {} is an IP literal, checking against IP range denylist.", - destination - ); - let ip = IPAddress::parse(destination.host()).map_err(|e| { - warn!("Failed to parse IP literal from string: {}", e); - Error::BadServerResponse("Invalid IP address") + trace!("Preparing to send request"); + validate_destination(destination)?; + let actual = get_actual_destination(destination).await; + let mut http_request = req + .try_into_http_request::>(&actual.string, SendAccessToken::IfRequired(""), &[MatrixVersion::V1_5]) + .map_err(|e| { + warn!("Failed 
to find destination {}: {}", actual.string, e); + Error::BadServerResponse("Invalid destination") })?; - let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); - let mut cidr_ranges: Vec = Vec::new(); + sign_request::(destination, &mut http_request); + let request = reqwest::Request::try_from(http_request)?; + let method = request.method().clone(); + let url = request.url().clone(); + validate_url(&url)?; - for cidr in cidr_ranges_s { - cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); - } + debug!( + method = ?method, + url = ?url, + "Sending request", + ); + match client.execute(request).await { + Ok(response) => handle_response::(destination, actual, &method, &url, response).await, + Err(e) => handle_error::(destination, &actual, &method, &url, e), + } +} - debug!("List of pushed CIDR ranges: {:?}", cidr_ranges); +async fn handle_response( + destination: &ServerName, actual: ActualDestination, method: &reqwest::Method, url: &reqwest::Url, + mut response: reqwest::Response, +) -> Result +where + T: OutgoingRequest + Debug, +{ + trace!("Received response from {} for {} with {}", actual.string, url, response.url()); + validate_response(&response)?; - for cidr in cidr_ranges { - if cidr.includes(&ip) { - return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); - } - } + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); - debug!("IP literal {} is allowed.", destination); + trace!("Waiting for response body"); + let body = response.bytes().await.unwrap_or_else(|e| { + debug!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout + + let http_response = http_response_builder + .body(body) + .expect("reqwest body is valid http body"); + + debug!("Got {status:?} for {method} {url}"); + if !status.is_success() { + return Err(Error::FederationError( + destination.to_owned(), + RumaError::from_http_response(http_response), + )); } - trace!("Preparing to send request to {destination}"); + let response = T::IncomingResponse::try_from_http_response(http_response); + if response.is_ok() && !actual.cached { + services() + .globals + .actual_destinations() + .write() + .await + .insert(OwnedServerName::from(destination), (actual.destination, actual.host)); + } - let mut write_destination_to_cache = false; + match response { + Err(_e) => Err(Error::BadServerResponse("Server returned bad 200 response.")), + Ok(response) => Ok(response), + } +} +fn handle_error( + _destination: &ServerName, actual: &ActualDestination, method: &reqwest::Method, url: &reqwest::Url, + e: reqwest::Error, +) -> Result +where + T: OutgoingRequest + Debug, +{ + // we do not need to log that servers in a room are dead, this is normal in + // public rooms and just spams the logs. 
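+    // A usage sketch of the `debug_error!` helper from src/utils/debug.rs,
+    // which a later commit in this series substitutes for these `debug!`
+    // calls. It expands to plain `error!(..)`/`debug!(..)` invocations, so
+    // both macros must be in scope at the call site:
+    //
+    //   debug_error!("timeout {}: {}", actual.host, e);
+    //
+    // This logs at the ERROR level when debug-assertions are enabled and at
+    // DEBUG in release builds, where it may be subject to elision.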
+ if e.is_timeout() { + debug!("Timed out sending request to {}: {}", actual.string, e,); + } else if e.is_connect() { + debug!("Failed to connect to {}: {}", actual.string, e); + } else if e.is_redirect() { + debug!( + method = ?method, + url = ?url, + final_url = ?e.url(), + "Redirect loop sending request to {}: {}", + actual.string, + e, + ); + } else { + debug!("Could not send request to {}: {}", actual.string, e); + } + + Err(e.into()) +} + +#[tracing::instrument(skip_all, name = "resolve")] +async fn get_actual_destination(server_name: &ServerName) -> ActualDestination { + let cached; let cached_result = services() .globals .actual_destinations() .read() .await - .get(destination) + .get(server_name) .cloned(); - let (actual_destination, host) = if let Some(result) = cached_result { + let (destination, host) = if let Some(result) = cached_result { + cached = true; result } else { - write_destination_to_cache = true; - - let result = resolve_actual_destination(destination).await; - - (result.0, result.1.into_uri_string()) + cached = false; + resolve_actual_destination(server_name).await }; - let actual_destination_str = actual_destination.clone().into_https_string(); - - let mut http_request = request - .try_into_http_request::>( - &actual_destination_str, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_5], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", actual_destination_str, e); - Error::BadServerResponse("Invalid destination") - })?; - - let mut request_map = serde_json::Map::new(); - - if !http_request.body().is_empty() { - request_map.insert( - "content".to_owned(), - serde_json::from_slice(http_request.body()).expect("body is valid json, we just created it"), - ); - }; - - request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); - request_map.insert( - "uri".to_owned(), - http_request - .uri() - .path_and_query() - .expect("all requests have a path") - .to_string() - .into(), - ); - request_map.insert("origin".to_owned(), services().globals.server_name().as_str().into()); - request_map.insert("destination".to_owned(), destination.as_str().into()); - - let mut request_json = serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); - - ruma::signatures::sign_json( - services().globals.server_name().as_str(), - services().globals.keypair(), - &mut request_json, - ) - .expect("our request json is what ruma expects"); - - let request_json: serde_json::Map = - serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); - - let signatures = request_json["signatures"] - .as_object() - .unwrap() - .values() - .map(|v| { - v.as_object() - .unwrap() - .iter() - .map(|(k, v)| (k, v.as_str().unwrap())) - }); - - for signature_server in signatures { - for s in signature_server { - http_request.headers_mut().insert( - AUTHORIZATION, - HeaderValue::from_str(&format!( - "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - services().globals.server_name(), - s.0, - s.1 - )) - .unwrap(), - ); - } + let string = destination.clone().into_https_string(); + ActualDestination { + destination, + host, + string, + cached, } - - let reqwest_request = reqwest::Request::try_from(http_request)?; - let method = reqwest_request.method().clone(); - let url = reqwest_request.url().clone(); - - if let Some(url_host) = url.host_str() { - trace!("Checking request URL for IP"); - if let Ok(ip) = IPAddress::parse(url_host) { - let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); - let mut cidr_ranges: Vec = 
Vec::new(); - - for cidr in cidr_ranges_s { - cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); - } - - for cidr in cidr_ranges { - if cidr.includes(&ip) { - return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); - } - } - } - } - - debug!("Sending request {} {}", method, url); - let response = client.execute(reqwest_request).await; - trace!("Received resonse {} {}", method, url); - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - - trace!("Checking response destination's IP"); - if let Some(remote_addr) = response.remote_addr() { - if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { - let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); - let mut cidr_ranges: Vec = Vec::new(); - - for cidr in cidr_ranges_s { - cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); - } - - for cidr in cidr_ranges { - if cidr.includes(&ip) { - return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); - } - } - } - } - - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - trace!("Waiting for response body"); - let body = response.bytes().await.unwrap_or_else(|e| { - debug!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if !status.is_success() { - debug!( - "Got {status:?} for {method} {url}: {}", - String::from_utf8_lossy(&body) - .lines() - .collect::>() - .join(" ") - ); - } - - let http_response = http_response_builder - .body(body) - .expect("reqwest body is valid http body"); - - if status.is_success() { - debug!("Got {status:?} for {method} {url}"); - let response = T::IncomingResponse::try_from_http_response(http_response); - if response.is_ok() && write_destination_to_cache { - services() - .globals - .actual_destinations() - .write() - .await - .insert(OwnedServerName::from(destination), (actual_destination, host)); - } - - response.map_err(|e| { - debug!("Invalid 200 response for {} {}", url, e); - Error::BadServerResponse("Server returned bad 200 response.") - }) - } else { - Err(Error::FederationError( - destination.to_owned(), - RumaError::from_http_response(http_response), - )) - } - }, - Err(e) => { - // we do not need to log that servers in a room are dead, this is normal in - // public rooms and just spams the logs. 
- if e.is_timeout() { - debug!( - "Timed out sending request to {} at {}: {}", - destination, actual_destination_str, e - ); - } else if e.is_connect() { - debug!("Failed to connect to {} at {}: {}", destination, actual_destination_str, e); - } else if e.is_redirect() { - debug!( - "Redirect loop sending request to {} at {}: {}\nFinal URL: {:?}", - destination, - actual_destination_str, - e, - e.url() - ); - } else { - debug!("Could not send request to {} at {}: {}", destination, actual_destination_str, e); - } - - Err(e.into()) - }, - } -} - -fn get_ip_with_port(destination_str: &str) -> Option { - if let Ok(destination) = destination_str.parse::() { - Some(FedDest::Literal(destination)) - } else if let Ok(ip_addr) = destination_str.parse::() { - Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) - } else { - None - } -} - -fn add_port_to_hostname(destination_str: &str) -> FedDest { - let (host, port) = match destination_str.find(':') { - None => (destination_str, ":8448"), - Some(pos) => destination_str.split_at(pos), - }; - FedDest::Named(host.to_owned(), port.to_owned()) } /// Returns: `actual_destination`, host header /// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of /// specification -#[tracing::instrument(skip_all, name = "resolve")] -async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { +async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, String) { trace!("Finding actual destination for {destination}"); let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); @@ -429,7 +300,7 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, Fe }; debug!("Actual destination: {actual_destination:?} hostname: {hostname:?}"); - (actual_destination, hostname) + (actual_destination, hostname.into_uri_string()) } async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) { @@ -441,7 +312,6 @@ async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u1 { Ok(override_ip) => { trace!("Caching result of {:?} overriding {:?}", hostname, overname); - services() .globals .resolver @@ -533,6 +403,156 @@ async fn request_well_known(destination: &str) -> Option { Some(body.get("m.server")?.as_str()?.to_owned()) } +fn sign_request(destination: &ServerName, http_request: &mut http::Request>) +where + T: OutgoingRequest + Debug, +{ + let mut req_map = serde_json::Map::new(); + if !http_request.body().is_empty() { + req_map.insert( + "content".to_owned(), + serde_json::from_slice(http_request.body()).expect("body is valid json, we just created it"), + ); + }; + + req_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); + req_map.insert( + "uri".to_owned(), + http_request + .uri() + .path_and_query() + .expect("all requests have a path") + .to_string() + .into(), + ); + req_map.insert("origin".to_owned(), services().globals.server_name().as_str().into()); + req_map.insert("destination".to_owned(), destination.as_str().into()); + + let mut req_json = serde_json::from_value(req_map.into()).expect("valid JSON is valid BTreeMap"); + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut req_json, + ) + .expect("our request json is what ruma expects"); + + let req_json: serde_json::Map = + serde_json::from_slice(&serde_json::to_vec(&req_json).unwrap()).unwrap(); + + let signatures = 
req_json["signatures"] + .as_object() + .expect("signatures object") + .values() + .map(|v| { + v.as_object() + .expect("server signatures object") + .iter() + .map(|(k, v)| (k, v.as_str().expect("server signature string"))) + }); + + for signature_server in signatures { + for s in signature_server { + http_request.headers_mut().insert( + AUTHORIZATION, + HeaderValue::from_str(&format!( + "X-Matrix origin={},key=\"{}\",sig=\"{}\"", + services().globals.server_name(), + s.0, + s.1 + )) + .expect("formatted X-Matrix header"), + ); + } + } +} + +fn validate_response(response: &reqwest::Response) -> Result<()> { + if let Some(remote_addr) = response.remote_addr() { + if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { + trace!("Checking response destination's IP"); + validate_ip(&ip)?; + } + } + + Ok(()) +} + +fn validate_url(url: &reqwest::Url) -> Result<()> { + if let Some(url_host) = url.host_str() { + if let Ok(ip) = IPAddress::parse(url_host) { + trace!("Checking request URL IP {ip:?}"); + validate_ip(&ip)?; + } + } + + Ok(()) +} + +fn validate_destination(destination: &ServerName) -> Result<()> { + if destination == services().globals.server_name() { + return Err(Error::bad_config("Won't send federation request to ourselves")); + } + + if destination.is_ip_literal() || IPAddress::is_valid(destination.host()) { + validate_destination_ip_literal(destination)?; + } + + trace!("Destination ServerName is valid"); + Ok(()) +} + +fn validate_destination_ip_literal(destination: &ServerName) -> Result<()> { + debug_assert!( + destination.is_ip_literal() || !IPAddress::is_valid(destination.host()), + "Destination is not an IP literal." + ); + debug!("Destination is an IP literal, checking against IP range denylist.",); + + let ip = IPAddress::parse(destination.host()).map_err(|e| { + warn!("Failed to parse IP literal from string: {}", e); + Error::BadServerResponse("Invalid IP address") + })?; + + validate_ip(&ip)?; + + Ok(()) +} + +fn validate_ip(ip: &IPAddress) -> Result<()> { + let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); + let mut cidr_ranges: Vec = Vec::new(); + for cidr in cidr_ranges_s { + cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); + } + + trace!("List of pushed CIDR ranges: {:?}", cidr_ranges); + for cidr in cidr_ranges { + if cidr.includes(ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); + } + } + + Ok(()) +} + +fn get_ip_with_port(destination_str: &str) -> Option { + if let Ok(destination) = destination_str.parse::() { + Some(FedDest::Literal(destination)) + } else if let Ok(ip_addr) = destination_str.parse::() { + Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) + } else { + None + } +} + +fn add_port_to_hostname(destination_str: &str) -> FedDest { + let (host, port) = match destination_str.find(':') { + None => (destination_str, ":8448"), + Some(pos) => destination_str.split_at(pos), + }; + FedDest::Named(host.to_owned(), port.to_owned()) +} + impl FedDest { fn into_https_string(self) -> String { match self { From 08fe67337b192252b9775f7d1b92d4874d778266 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 18 Apr 2024 00:52:29 -0700 Subject: [PATCH 22/45] refactor sending send/resolver/well-known error propagation Signed-off-by: Jason Volk --- Cargo.lock | 1 + Cargo.toml | 4 + src/service/sending/send.rs | 205 +++++++++++++++++++----------------- 3 files changed, 116 insertions(+), 94 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da06c5ad..e0841e44 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -536,6 +536,7 @@ dependencies = [ "reqwest", "ring", "ruma", + "ruma-identifiers-validation", "rusqlite", "rust-rocksdb", "sd-notify", diff --git a/Cargo.toml b/Cargo.toml index f818d56f..84976146 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -275,6 +275,10 @@ features = [ "unstable-extensible-events", ] +[dependencies.ruma-identifiers-validation] +git = "https://github.com/girlbossceo/ruma" +branch = "conduwuit-changes" + [dependencies.hickory-resolver] version = "0.24.1" default-features = false diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index f5cd0649..726ceffe 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -4,7 +4,6 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use futures_util::TryFutureExt; use hickory_resolver::{error::ResolveError, lookup::SrvLookup}; use http::{header::AUTHORIZATION, HeaderValue}; use ipaddress::IPAddress; @@ -15,9 +14,9 @@ use ruma::{ }, OwnedServerName, ServerName, }; -use tracing::{debug, trace, warn}; +use tracing::{debug, error, trace, warn}; -use crate::{services, Error, Result}; +use crate::{debug_error, debug_warn, services, Error, Result}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). @@ -63,11 +62,11 @@ where trace!("Preparing to send request"); validate_destination(destination)?; - let actual = get_actual_destination(destination).await; + let actual = get_actual_destination(destination).await?; let mut http_request = req .try_into_http_request::>(&actual.string, SendAccessToken::IfRequired(""), &[MatrixVersion::V1_5]) .map_err(|e| { - warn!("Failed to find destination {}: {}", actual.string, e); + debug_warn!("Failed to find destination {}: {}", actual.string, e); Error::BadServerResponse("Invalid destination") })?; @@ -96,8 +95,6 @@ where T: OutgoingRequest + Debug, { trace!("Received response from {} for {} with {}", actual.string, url, response.url()); - validate_response(&response)?; - let status = response.status(); let mut http_response_builder = http::Response::builder() .status(status) @@ -111,7 +108,7 @@ where trace!("Waiting for response body"); let body = response.bytes().await.unwrap_or_else(|e| { - debug!("server error {}", e); + debug_error!("server error {}", e); Vec::new().into() }); // TODO: handle timeout @@ -153,27 +150,27 @@ where // we do not need to log that servers in a room are dead, this is normal in // public rooms and just spams the logs. 
if e.is_timeout() { - debug!("Timed out sending request to {}: {}", actual.string, e,); + debug_error!("timeout {}: {}", actual.host, e); } else if e.is_connect() { - debug!("Failed to connect to {}: {}", actual.string, e); + debug_error!("connect {}: {}", actual.host, e); } else if e.is_redirect() { - debug!( + debug_error!( method = ?method, url = ?url, final_url = ?e.url(), - "Redirect loop sending request to {}: {}", - actual.string, + "Redirect loop {}: {}", + actual.host, e, ); } else { - debug!("Could not send request to {}: {}", actual.string, e); + debug_error!("{}: {}", actual.host, e); } Err(e.into()) } #[tracing::instrument(skip_all, name = "resolve")] -async fn get_actual_destination(server_name: &ServerName) -> ActualDestination { +async fn get_actual_destination(server_name: &ServerName) -> Result { let cached; let cached_result = services() .globals @@ -188,23 +185,23 @@ async fn get_actual_destination(server_name: &ServerName) -> ActualDestination { result } else { cached = false; - resolve_actual_destination(server_name).await + resolve_actual_destination(server_name).await? }; let string = destination.clone().into_https_string(); - ActualDestination { + Ok(ActualDestination { destination, host, string, cached, - } + }) } /// Returns: `actual_destination`, host header /// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of /// specification -async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, String) { +async fn resolve_actual_destination(destination: &'_ ServerName) -> Result<(FedDest, String)> { trace!("Finding actual destination for {destination}"); let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); @@ -218,12 +215,12 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, St debug!("2: Hostname with included port"); let (host, port) = destination_str.split_at(pos); - query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await; + query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await?; FedDest::Named(host.to_owned(), port.to_owned()) } else { trace!("Requesting well known for {destination}"); - if let Some(delegated_hostname) = request_well_known(destination.as_str()).await { + if let Some(delegated_hostname) = request_well_known(destination.as_str()).await? { debug!("3: A .well-known file is available"); hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); match get_ip_with_port(&delegated_hostname) { @@ -233,12 +230,12 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, St debug!("3.2: Hostname with port in .well-known file"); let (host, port) = delegated_hostname.split_at(pos); - query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await; + query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await?; FedDest::Named(host.to_owned(), port.to_owned()) } else { trace!("Delegated hostname has no port in this branch"); - if let Some(hostname_override) = query_srv_record(&delegated_hostname).await { + if let Some(hostname_override) = query_srv_record(&delegated_hostname).await? 
{ debug!("3.3: SRV lookup successful"); let force_port = hostname_override.port(); @@ -247,7 +244,7 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, St &hostname_override.hostname(), force_port.unwrap_or(8448), ) - .await; + .await?; if let Some(port) = force_port { FedDest::Named(delegated_hostname, format!(":{port}")) @@ -256,7 +253,7 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, St } } else { debug!("3.4: No SRV records, just use the hostname from .well-known"); - query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448).await; + query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448).await?; add_port_to_hostname(&delegated_hostname) } } @@ -264,12 +261,12 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, St } } else { trace!("4: No .well-known or an error occured"); - if let Some(hostname_override) = query_srv_record(&destination_str).await { + if let Some(hostname_override) = query_srv_record(&destination_str).await? { debug!("4: No .well-known; SRV record found"); let force_port = hostname_override.port(); query_and_cache_override(&hostname, &hostname_override.hostname(), force_port.unwrap_or(8448)) - .await; + .await?; if let Some(port) = force_port { FedDest::Named(hostname.clone(), format!(":{port}")) @@ -278,7 +275,7 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, St } } else { debug!("4: No .well-known; 5: No SRV record found"); - query_and_cache_override(&destination_str, &destination_str, 8448).await; + query_and_cache_override(&destination_str, &destination_str, 8448).await?; add_port_to_hostname(&destination_str) } } @@ -300,10 +297,65 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, St }; debug!("Actual destination: {actual_destination:?} hostname: {hostname:?}"); - (actual_destination, hostname.into_uri_string()) + Ok((actual_destination, hostname.into_uri_string())) } -async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) { +async fn request_well_known(destination: &str) -> Result> { + if !services() + .globals + .resolver + .overrides + .read() + .unwrap() + .contains_key(destination) + { + query_and_cache_override(destination, destination, 8448).await?; + } + + let response = services() + .globals + .client + .well_known + .get(&format!("https://{destination}/.well-known/matrix/server")) + .send() + .await; + + trace!("Well known response: {:?}", response); + if let Err(e) = &response { + debug!("Well known error: {e:?}"); + return Ok(None); + } + + let response = response?; + if !response.status().is_success() { + debug!("Well known response not 2XX"); + return Ok(None); + } + + let text = response.text().await?; + trace!("Well known response text: {:?}", text); + if text.len() >= 12288 { + debug!("Well known response contains junk"); + return Ok(None); + } + + let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); + + let m_server = body + .get("m.server") + .unwrap_or(&serde_json::Value::Null) + .as_str() + .unwrap_or_default(); + + if ruma_identifiers_validation::server_name::validate(m_server).is_err() { + debug!("Well known response content missing or invalid"); + return Ok(None); + } + + Ok(Some(m_server.to_owned())) +} + +async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> { match services() .globals .dns_resolver() @@ -319,14 +371,17 @@ async fn 
query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u1 .write() .unwrap() .insert(overname.to_owned(), (override_ip.iter().collect(), port)); + + Ok(()) }, Err(e) => { debug!("Got {:?} for {:?} to override {:?}", e.kind(), hostname, overname); + handle_resolve_error(&e) }, } } -async fn query_srv_record(hostname: &'_ str) -> Option { +async fn query_srv_record(hostname: &'_ str) -> Result> { fn handle_successful_srv(srv: &SrvLookup) -> Option { srv.iter().next().map(|result| { FedDest::Named( @@ -346,61 +401,34 @@ async fn query_srv_record(hostname: &'_ str) -> Option { .await } - let first_hostname = format!("_matrix-fed._tcp.{hostname}."); - let second_hostname = format!("_matrix._tcp.{hostname}."); + let hostnames = [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; - lookup_srv(&first_hostname) - .or_else(|_| { - trace!("Querying deprecated _matrix SRV record for host {:?}", hostname); - lookup_srv(&second_hostname) - }) - .and_then(|srv_lookup| async move { Ok(handle_successful_srv(&srv_lookup)) }) - .await - .ok() - .flatten() + for hostname in hostnames { + match lookup_srv(&hostname).await { + Ok(result) => return Ok(handle_successful_srv(&result)), + Err(e) => handle_resolve_error(&e)?, + } + } + + Ok(None) } -async fn request_well_known(destination: &str) -> Option { - if !services() - .globals - .resolver - .overrides - .read() - .unwrap() - .contains_key(destination) - { - query_and_cache_override(destination, destination, 8448).await; +#[allow(clippy::single_match_else)] +fn handle_resolve_error(e: &ResolveError) -> Result<()> { + use hickory_resolver::error::ResolveErrorKind; + + match *e.kind() { + ResolveErrorKind::Io { + .. + } => { + debug_error!("DNS IO error: {e}"); + Err(Error::Error(e.to_string())) + }, + _ => { + debug!("DNS protocol error: {e}"); + Ok(()) + }, } - - let response = services() - .globals - .client - .well_known - .get(&format!("https://{destination}/.well-known/matrix/server")) - .send() - .await; - - trace!("Well known response: {:?}", response); - if let Err(e) = &response { - debug!("Well known error: {e:?}"); - return None; - } - - let text = response.ok()?.text().await; - trace!("Well known response text: {:?}", text); - - if text.as_ref().ok()?.len() > 10000 { - debug!( - "Well known response for destination '{destination}' exceeded past 10000 characters, assuming no \ - well-known." 
- ); - return None; - } - - let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?; - trace!("serde_json body of well known text: {}", body); - - Some(body.get("m.server")?.as_str()?.to_owned()) } fn sign_request(destination: &ServerName, http_request: &mut http::Request>) @@ -466,17 +494,6 @@ where } } -fn validate_response(response: &reqwest::Response) -> Result<()> { - if let Some(remote_addr) = response.remote_addr() { - if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { - trace!("Checking response destination's IP"); - validate_ip(&ip)?; - } - } - - Ok(()) -} - fn validate_url(url: &reqwest::Url) -> Result<()> { if let Some(url_host) = url.host_str() { if let Ok(ip) = IPAddress::parse(url_host) { @@ -509,7 +526,7 @@ fn validate_destination_ip_literal(destination: &ServerName) -> Result<()> { debug!("Destination is an IP literal, checking against IP range denylist.",); let ip = IPAddress::parse(destination.host()).map_err(|e| { - warn!("Failed to parse IP literal from string: {}", e); + debug_warn!("Failed to parse IP literal from string: {}", e); Error::BadServerResponse("Invalid IP address") })?; From caea5d8752a788e4e2c56305f086acc29e089dba Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 16 Apr 2024 03:39:31 -0700 Subject: [PATCH 23/45] show info log in release mode Signed-off-by: Jason Volk --- conduwuit-example.toml | 4 ++-- src/config/mod.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 65a012a9..933dbeb6 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -288,8 +288,8 @@ allow_profile_lookup_federation_requests = true # For release builds, the tracing crate is configured to only implement levels higher than error to avoid unnecessary overhead in the compiled binary from trace macros. # For debug builds, this restriction is not applied. # -# Defaults to "warn" -#log = "warn" +# Defaults to "info" +#log = "info" # controls whether encrypted rooms and events are allowed (default true) #allow_encryption = false diff --git a/src/config/mod.rs b/src/config/mod.rs index 599b8cd8..ae1df429 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -902,7 +902,7 @@ fn default_log() -> String { if cfg!(debug_assertions) { "debug".to_owned() } else { - "warn,ruma_state_res=warn".to_owned() + "info".to_owned() } } From 9733c1c07279b2176df737b3e7828b2541857e5e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 20 Apr 2024 14:10:57 -0700 Subject: [PATCH 24/45] integrate reqwest read_timeout options. Signed-off-by: Jason Volk --- conduwuit-example.toml | 40 +++++++++++++++++++++++++---------- src/config/mod.rs | 19 +++++++++++------ src/service/globals/client.rs | 12 ++++++++--- 3 files changed, 50 insertions(+), 21 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 933dbeb6..71d7b261 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -512,23 +512,24 @@ allow_profile_lookup_federation_requests = true ## ## Generally these defaults are the best, but if you find a reason to need to change these they are here. -# Default/base connection timeout +# Default/base connection timeout. # This is used only by URL previews and update/news endpoint checks # # Defaults to 10 seconds #request_conn_timeout = 10 -# Default/base request timeout -# This is used only by URL previews and update/news endpoint checks +# Default/base request timeout. The time waiting to receive more data from another server. 
+# This is used only by URL previews, update/news, and misc endpoint checks # # Defaults to 35 seconds #request_timeout = 35 -# Default/base max idle connections per host +# Default/base request total timeout. The time limit for a whole request. This is set very high to not +# cancel healthy requests while serving as a backstop. # This is used only by URL previews and update/news endpoint checks # -# Defaults to 1 as generally the same open connection can be re-used -#request_idle_per_host = 1 +# Defaults to 320 seconds +#request_total_timeout = 320 # Default/base idle connection pool timeout # This is used only by URL previews and update/news endpoint checks @@ -536,6 +537,12 @@ allow_profile_lookup_federation_requests = true # Defaults to 5 seconds #request_idle_timeout = 5 +# Default/base max idle connections per host +# This is used only by URL previews and update/news endpoint checks +# +# Defaults to 1 as generally the same open connection can be re-used +#request_idle_per_host = 1 + # Federation well-known resolution connection timeout # # Defaults to 6 seconds @@ -546,21 +553,32 @@ allow_profile_lookup_federation_requests = true # Defaults to 10 seconds #well_known_timeout = 10 -# Federation client/server request timeout +# Federation client request timeout # You most definitely want this to be high to account for extremely large room joins, slow homeservers, your own resources etc. # # Defaults to 300 seconds #federation_timeout = 300 -# Federation client/sender max idle connections per host +# Federation client idle connection pool timeout +# +# Defaults to 25 seconds +#federation_idle_timeout = 25 + +# Federation client max idle connections per host # # Defaults to 1 as generally the same open connection can be re-used #federation_idle_per_host = 1 -# Federation client/sender idle connection pool timeout +# Federation sender request timeout +# The time it takes for the remote server to process sent transactions can take a while. 
# -# Defaults to 25 seconds -#federation_idle_timeout = 25 +# Defaults to 180 seconds +#sender_timeout = 180 + +# Federation sender idle connection pool timeout +# +# Defaults to 180 seconds +#sender_idle_timeout = 180 # Appservice URL request connection timeout # diff --git a/src/config/mod.rs b/src/config/mod.rs index ae1df429..402f0439 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -112,20 +112,22 @@ pub struct Config { pub request_conn_timeout: u64, #[serde(default = "default_request_timeout")] pub request_timeout: u64, - #[serde(default = "default_request_idle_per_host")] - pub request_idle_per_host: u16, + #[serde(default = "default_request_total_timeout")] + pub request_total_timeout: u64, #[serde(default = "default_request_idle_timeout")] pub request_idle_timeout: u64, + #[serde(default = "default_request_idle_per_host")] + pub request_idle_per_host: u16, #[serde(default = "default_well_known_conn_timeout")] pub well_known_conn_timeout: u64, #[serde(default = "default_well_known_timeout")] pub well_known_timeout: u64, #[serde(default = "default_federation_timeout")] pub federation_timeout: u64, - #[serde(default = "default_federation_idle_per_host")] - pub federation_idle_per_host: u16, #[serde(default = "default_federation_idle_timeout")] pub federation_idle_timeout: u64, + #[serde(default = "default_federation_idle_per_host")] + pub federation_idle_per_host: u16, #[serde(default = "default_sender_timeout")] pub sender_timeout: u64, #[serde(default = "default_sender_idle_timeout")] @@ -502,6 +504,7 @@ impl fmt::Display for Config { ("Maximum concurrent requests", &self.max_concurrent_requests.to_string()), ("Request connect timeout", &self.request_conn_timeout.to_string()), ("Request timeout", &self.request_timeout.to_string()), + ("Request total timeout", &self.request_total_timeout.to_string()), ("Idle connections per host", &self.request_idle_per_host.to_string()), ("Request pool idle timeout", &self.request_idle_timeout.to_string()), ("Well_known connect timeout", &self.well_known_conn_timeout.to_string()), @@ -869,20 +872,22 @@ fn default_request_conn_timeout() -> u64 { 10 } fn default_request_timeout() -> u64 { 35 } -fn default_request_idle_per_host() -> u16 { 1 } +fn default_request_total_timeout() -> u64 { 320 } fn default_request_idle_timeout() -> u64 { 5 } +fn default_request_idle_per_host() -> u16 { 1 } + fn default_well_known_conn_timeout() -> u64 { 6 } fn default_well_known_timeout() -> u64 { 10 } fn default_federation_timeout() -> u64 { 300 } -fn default_federation_idle_per_host() -> u16 { 1 } - fn default_federation_idle_timeout() -> u64 { 25 } +fn default_federation_idle_per_host() -> u16 { 1 } + fn default_sender_timeout() -> u64 { 180 } fn default_sender_idle_timeout() -> u64 { 180 } diff --git a/src/service/globals/client.rs b/src/service/globals/client.rs index 3335cd16..138aa164 100644 --- a/src/service/globals/client.rs +++ b/src/service/globals/client.rs @@ -34,6 +34,7 @@ impl Client { .unwrap() .dns_resolver(resolver.hooked.clone()) .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) + .read_timeout(Duration::from_secs(config.well_known_timeout)) .timeout(Duration::from_secs(config.well_known_timeout)) .pool_max_idle_per_host(0) .redirect(redirect::Policy::limited(4)) @@ -43,6 +44,7 @@ impl Client { federation: Self::base(config) .unwrap() .dns_resolver(resolver.hooked.clone()) + .read_timeout(Duration::from_secs(config.federation_timeout)) .timeout(Duration::from_secs(config.federation_timeout)) 
.pool_max_idle_per_host(config.federation_idle_per_host.into()) .pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout)) @@ -53,6 +55,7 @@ impl Client { sender: Self::base(config) .unwrap() .dns_resolver(resolver.hooked.clone()) + .read_timeout(Duration::from_secs(config.sender_timeout)) .timeout(Duration::from_secs(config.sender_timeout)) .pool_max_idle_per_host(1) .pool_idle_timeout(Duration::from_secs(config.sender_idle_timeout)) @@ -64,6 +67,7 @@ impl Client { .unwrap() .dns_resolver(resolver.clone()) .connect_timeout(Duration::from_secs(5)) + .read_timeout(Duration::from_secs(config.appservice_timeout)) .timeout(Duration::from_secs(config.appservice_timeout)) .pool_max_idle_per_host(1) .pool_idle_timeout(Duration::from_secs(config.appservice_idle_timeout)) @@ -90,12 +94,14 @@ impl Client { let mut builder = reqwest::Client::builder() .hickory_dns(true) - .timeout(Duration::from_secs(config.request_timeout)) .connect_timeout(Duration::from_secs(config.request_conn_timeout)) - .pool_max_idle_per_host(config.request_idle_per_host.into()) + .read_timeout(Duration::from_secs(config.request_timeout)) + .timeout(Duration::from_secs(config.request_total_timeout)) .pool_idle_timeout(Duration::from_secs(config.request_idle_timeout)) + .pool_max_idle_per_host(config.request_idle_per_host.into()) .user_agent("Conduwuit".to_owned() + "/" + &version) - .redirect(redirect::Policy::limited(6)); + .redirect(redirect::Policy::limited(6)) + .connection_verbose(true); #[cfg(feature = "gzip_compression")] { From 48f463322a2b86c8db772e06581e915731a3ec71 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 20:05:19 -0400 Subject: [PATCH 25/45] bump all deps Signed-off-by: strawberry --- .gitlab-ci.yml | 4 ++-- Cargo.lock | 17 +++++++++-------- Cargo.toml | 15 +++++++++++---- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4e4a9490..d0bf0d9e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -129,9 +129,9 @@ artifacts: .push-oci-image: stage: publish - image: docker:26.0.1 + image: docker:26.0.2 services: - - docker:26.0.1-dind + - docker:26.0.2-dind variables: IMAGE_SUFFIX_AMD64: amd64 IMAGE_SUFFIX_ARM64V8: arm64v8 diff --git a/Cargo.lock b/Cargo.lock index e0841e44..115bf815 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -399,12 +399,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -2995,9 +2996,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -3164,18 +3165,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" 
-version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 84976146..fcfbebc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ rust-version = "1.75.0" rand = "0.8.5" # Used for conduit::Error type -thiserror = "1.0.58" +thiserror = "1.0.59" # Used to encode server public key base64 = "0.22.0" @@ -105,7 +105,14 @@ features = ["util"] [dependencies.tower-http] version = "0.5.2" -features = ["add-extension", "cors", "sensitive-headers", "trace", "util", "catch-panic"] +features = [ + "add-extension", + "cors", + "sensitive-headers", + "trace", + "util", + "catch-panic", +] [dependencies.hyper] version = "1.3.1" @@ -122,7 +129,7 @@ features = ["rustls-tls-native-roots", "socks", "hickory-dns"] # all the serde stuff # Used for pdu definition [dependencies.serde] -version = "1.0.197" +version = "1.0.198" features = ["rc"] # Used for appservice registration files [dependencies.serde_yaml] @@ -245,7 +252,7 @@ default-features = false # Used for reading the configuration from conduit.toml & environment variables [dependencies.figment] -version = "0.10.17" +version = "0.10.18" features = ["env", "toml"] # Used for matrix spec type definitions and helpers From 4727f5268c3409f067d8a81f4bbb5aa7486d230d Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 22:34:15 -0400 Subject: [PATCH 26/45] dont eat the `?server_name=` param for join room by ID or alias Signed-off-by: strawberry --- src/api/client_server/membership.rs | 60 ++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 10 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index b609fa34..1c289d66 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -115,8 +115,9 @@ pub async fn join_room_by_id_route(body: Ruma) -> /// /// - If the server knowns about this room: creates the join event and does auth /// rules locally -/// - If the server does not know about the room: asks other servers over -/// federation +/// - If the server does not know about the room: use the server name query +/// param if specified. if not specified, asks other servers over federation +/// via room alias server name and room ID server name pub async fn join_room_by_id_or_alias_route( body: Ruma, ) -> Result { @@ -152,7 +153,6 @@ pub async fn join_room_by_id_or_alias_route( } let mut servers = body.server_name.clone(); - servers.extend( services() .rooms @@ -181,6 +181,23 @@ pub async fn join_room_by_id_or_alias_route( (servers, room_id) }, Err(room_alias) => { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&room_alias.server_name().to_owned()) + && !services().users.is_admin(sender_user)? + { + warn!( + "User {sender_user} tried joining room alias {room_alias} which has a server name that is \ + globally forbidden. Rejecting.", + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This remote server is banned on this homeserver.", + )); + } + let response = get_alias_helper(room_alias.clone()).await?; if services().rooms.metadata.is_banned(&response.room_id)? && !services().users.is_admin(sender_user)? { @@ -198,9 +215,9 @@ pub async fn join_room_by_id_or_alias_route( && !services().users.is_admin(sender_user)? 
 			{
 				warn!(
-					"User {sender_user} tried joining room alias {} with room ID {} which has a server name that is \
-					 globally forbidden. Rejecting.",
-					&room_alias, &response.room_id
+					"User {sender_user} tried joining room alias {room_alias} with room ID {}, where the alias's \
+					 server name is globally forbidden. Rejecting.",
+					&response.room_id
 				);
 				return Err(Error::BadRequest(
 					ErrorKind::forbidden(),
@@ -217,9 +234,9 @@
 					&& !services().users.is_admin(sender_user)?
 				{
 					warn!(
-						"User {sender_user} tried joining room alias {} with room ID {} which has a server name that \
-						 is globally forbidden. Rejecting.",
-						&room_alias, &response.room_id
+						"User {sender_user} tried joining room alias {room_alias} with room ID {}, which has a server \
+						 name that is globally forbidden. Rejecting.",
+						&response.room_id
 					);
 					return Err(Error::BadRequest(
 						ErrorKind::forbidden(),
@@ -228,7 +245,30 @@
 				}
 			}
 
-			(response.servers, response.room_id)
+			let mut servers = body.server_name;
+			servers.extend(response.servers);
+			servers.extend(
+				services()
+					.rooms
+					.state_cache
+					.servers_invite_via(&response.room_id)?
+					.unwrap_or(
+						services()
+							.rooms
+							.state_cache
+							.invite_state(sender_user, &response.room_id)?
+							.unwrap_or_default()
+							.iter()
+							.filter_map(|event| serde_json::from_str(event.json().get()).ok())
+							.filter_map(|event: serde_json::Value| event.get("sender").cloned())
+							.filter_map(|sender| sender.as_str().map(ToOwned::to_owned))
+							.filter_map(|sender| UserId::parse(sender).ok())
+							.map(|user| user.server_name().to_owned())
+							.collect(),
+					),
+			);
+
+			(servers, response.room_id)
 		},
 	};

From 16ac9716791ff33b5cd98b819f45b7ae44243d02 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Sun, 21 Apr 2024 23:36:20 -0400
Subject: [PATCH 27/45] on room alias joins, attempt to find the room ID
 through *more* servers if available

Signed-off-by: strawberry
---
 src/api/client_server/alias.rs                | 130 ++++++++++++------
 src/api/client_server/membership.rs           |   2 +-
 .../admin/room/room_moderation_commands.rs    |   6 +-
 3 files changed, 91 insertions(+), 47 deletions(-)

diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs
index 3fafee72..1345ef0c 100644
--- a/src/api/client_server/alias.rs
+++ b/src/api/client_server/alias.rs
@@ -10,8 +10,9 @@ use ruma::{
 	},
 	OwnedRoomAliasId, OwnedServerName,
 };
+use tracing::{debug, info};
 
-use crate::{services, Error, Result, Ruma};
+use crate::{debug_info, services, Error, Result, Ruma};
 
 /// # `PUT /_matrix/client/v3/directory/room/{roomAlias}`
 ///
@@ -118,12 +119,19 @@ pub async fn delete_alias_route(body: Ruma) -> Result
 ///
 /// Resolve an alias locally or over federation.
pub async fn get_alias_route(body: Ruma) -> Result { - get_alias_helper(body.body.room_alias).await + get_alias_helper(body.body.room_alias, None).await } -pub(crate) async fn get_alias_helper(room_alias: OwnedRoomAliasId) -> Result { - if room_alias.server_name() != services().globals.server_name() { - let response = services() +pub(crate) async fn get_alias_helper( + room_alias: OwnedRoomAliasId, servers: Option>, +) -> Result { + if room_alias.server_name() != services().globals.server_name() + && (!servers + .as_ref() + .is_some_and(|servers| servers.contains(&services().globals.server_name().to_owned())) + || servers.as_ref().is_none()) + { + let mut response = services() .sending .send_federation_request( room_alias.server_name(), @@ -131,47 +139,83 @@ pub(crate) async fn get_alias_helper(room_alias: OwnedRoomAliasId) -> Result) -> federation" ); - match get_alias_helper(room_alias).await { + match get_alias_helper(room_alias, None).await { Ok(response) => { debug!("Got federation response fetching room ID for room {room}: {:?}", response); response.room_id @@ -233,7 +233,7 @@ pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) -> fetch room ID over federation" ); - match get_alias_helper(room_alias).await { + match get_alias_helper(room_alias, None).await { Ok(response) => { debug!( "Got federation response fetching room ID for room {room}: \ @@ -432,7 +432,7 @@ pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) -> federation" ); - match get_alias_helper(room_alias).await { + match get_alias_helper(room_alias, None).await { Ok(response) => { debug!("Got federation response fetching room ID for room {room}: {:?}", response); response.room_id From 3c718639c4bd6eacfd147f0291befdae0d0aa0ba Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 21 Apr 2024 23:55:09 -0400 Subject: [PATCH 28/45] break out the `via` field for hierarchy requests Signed-off-by: strawberry --- src/service/rooms/spaces/mod.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 5f31828f..8cdd2d22 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -625,16 +625,13 @@ impl Service { &self, sender_user: &UserId, room_id: &RoomId, limit: usize, skip: usize, max_depth: usize, suggested_only: bool, ) -> Result { + let via = match room_id.server_name() { + Some(server_name) => vec![server_name.to_owned()], + None => vec![], + }; + match self - .get_summary_and_children_client( - &room_id.to_owned(), - suggested_only, - sender_user, - &match room_id.server_name() { - Some(server_name) => vec![server_name.into()], - None => vec![], - }, - ) + .get_summary_and_children_client(&room_id.to_owned(), suggested_only, sender_user, &via) .await? 
 	{
 		Some(SummaryAccessibility::Accessible(summary)) => {

From f870656451e8272299712bcb7c923a9ff4653d29 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Mon, 22 Apr 2024 00:04:24 -0400
Subject: [PATCH 29/45] flip this

Signed-off-by: strawberry
---
 src/api/client_server/alias.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs
index 1345ef0c..9ceff6d0 100644
--- a/src/api/client_server/alias.rs
+++ b/src/api/client_server/alias.rs
@@ -161,11 +161,11 @@ pub(crate) async fn get_alias_helper(
 					)
 					.await;
 
-				if response.is_err() {
-					continue;
+				if response.is_ok() {
+					break;
 				}
 
-				break;
+				continue;
 			}
 		}
 	}

From 5a08e52282284974e56c339d2f7ba1e8c426e40e Mon Sep 17 00:00:00 2001
From: strawberry
Date: Mon, 22 Apr 2024 00:44:35 -0400
Subject: [PATCH 30/45] try finding more servers for federation hierarchy
 instead of room ID server name

just the room ID server name is terrible

Signed-off-by: strawberry
---
 src/service/rooms/spaces/mod.rs | 53 ++++++++++++++++++++++++++-------
 1 file changed, 42 insertions(+), 11 deletions(-)

diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs
index 8cdd2d22..714da443 100644
--- a/src/service/rooms/spaces/mod.rs
+++ b/src/service/rooms/spaces/mod.rs
@@ -30,9 +30,9 @@ use ruma::{
 	OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt, UserId,
 };
 use tokio::sync::Mutex;
-use tracing::{debug, error, warn};
+use tracing::{debug, error, info, warn};
 
-use crate::{services, Error, Result};
+use crate::{debug_info, services, Error, Result};
 
 pub struct CachedSpaceHierarchySummary {
 	summary: SpaceHierarchyParentSummary,
@@ -425,8 +425,36 @@ impl Service {
 	}
 
 	async fn get_summary_and_children_federation(
-		&self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &Vec<OwnedServerName>,
+		&self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &[OwnedServerName],
	) -> Result<Option<SummaryAccessibility>> {
+		// try to find more servers to fetch the hierarchy from if the only
+		// choice is the room ID's server name (usually dead)
+		//
+		// all spaces are normal rooms, so they should always have at least
+		// one admin, whose server has a far higher chance of still being
+		// alive
+		let power_levels: ruma::events::room::power_levels::RoomPowerLevelsEventContent = services()
+			.rooms
+			.state_accessor
+			.room_state_get(current_room, &StateEventType::RoomPowerLevels, "")?
+			.map(|ev| {
+				serde_json::from_str(ev.content.get())
+					.map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
+			})
+			.transpose()?
+			.unwrap_or_default();
+
+		// add the server names of the room's admins to the list of candidate
+		// servers
+		let mut via = via.to_owned();
+		via.extend(
+			power_levels
+				.users
+				.iter()
+				.filter(|(_, level)| **level > power_levels.users_default)
+				.map(|(user_id, _)| user_id.server_name())
+				.filter(|server| server != &services().globals.server_name())
+				.map(ToOwned::to_owned),
+		);
+
-		for server in via {
+		for server in &via {
 			debug!("Asking {server} for /hierarchy");
 			if let Ok(response) = services()
@@ -440,7 +468,7 @@
 				)
 				.await
 			{
-				debug!("Got response from {server} for /hierarchy\n{response:?}");
+				debug_info!("Got response from {server} for /hierarchy\n{response:?}");
 				let summary = response.room.clone();
 
 				self.roomid_spacehierarchy_cache.lock().await.insert(
@@ -511,7 +539,7 @@
 	}
 
 	async fn get_summary_and_children_client(
-		&self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &Vec<OwnedServerName>,
+		&self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &[OwnedServerName],
	) -> Result<Option<SummaryAccessibility>> {
 		if let Ok(Some(response)) = self
 			.get_summary_and_children_local(current_room, Identifier::UserId(user_id))
@@ -625,13 +653,16 @@
 		&self, sender_user: &UserId, room_id: &RoomId, limit: usize, skip: usize, max_depth: usize,
 		suggested_only: bool,
 	) -> Result {
-		let via = match room_id.server_name() {
-			Some(server_name) => vec![server_name.to_owned()],
-			None => vec![],
-		};
-
 		match self
-			.get_summary_and_children_client(&room_id.to_owned(), suggested_only, sender_user, &via)
+			.get_summary_and_children_client(
+				&room_id.to_owned(),
+				suggested_only,
+				sender_user,
+				&match room_id.server_name() {
+					Some(server_name) => vec![server_name.to_owned()],
+					None => vec![],
+				},
+			)
 			.await?
 		{
 			Some(SummaryAccessibility::Accessible(summary)) => {

From fd8bbe6c93aaaad6dc49c4e54ba82d395dbc89a0 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Mon, 22 Apr 2024 00:47:04 -0400
Subject: [PATCH 31/45] remove unnecessary continue

Signed-off-by: strawberry
---
 src/api/client_server/alias.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs
index 9ceff6d0..67d632c4 100644
--- a/src/api/client_server/alias.rs
+++ b/src/api/client_server/alias.rs
@@ -164,8 +164,6 @@ pub(crate) async fn get_alias_helper(
 			if response.is_ok() {
 				break;
 			}
-
-			continue;
 		}
 	}
 }

From 9107a8854af552885e322c11241a5f429e2cd94b Mon Sep 17 00:00:00 2001
From: strawberry
Date: Mon, 22 Apr 2024 01:25:28 -0400
Subject: [PATCH 32/45] some more room alias helper logging

Signed-off-by: strawberry
---
 src/api/client_server/alias.rs | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs
index 67d632c4..c71b2a82 100644
--- a/src/api/client_server/alias.rs
+++ b/src/api/client_server/alias.rs
@@ -10,9 +10,9 @@ use ruma::{
 	},
 	OwnedRoomAliasId, OwnedServerName,
 };
-use tracing::{debug, info};
+use tracing::{debug, info, warn};
 
-use crate::{debug_info, services, Error, Result, Ruma};
+use crate::{debug_info, debug_warn, services, Error, Result, Ruma};
 
 /// # `PUT /_matrix/client/v3/directory/room/{roomAlias}`
 ///
@@ -125,6 +125,7 @@ pub async fn get_alias_route(body: Ruma) -> Result
 pub(crate) async fn get_alias_helper(
 	room_alias: OwnedRoomAliasId, servers: Option<Vec<OwnedServerName>>,
 ) -> Result {
+	debug!("get_alias_helper servers: {servers:?}");
 	if room_alias.server_name() != services().globals.server_name()
 		&& (!servers
 			.as_ref()
 			.is_some_and(|servers| servers.contains(&services().globals.server_name().to_owned()))
 			|| servers.as_ref().is_none())
 	{
 		let mut response = services()
 			.sending
 			.send_federation_request(
 				room_alias.server_name(),
 				get_alias::v1::Request {
 					room_alias: room_alias.clone(),
 				},
 			)
 			.await;
+		debug_info!("room alias server_name get_alias_helper response: {response:?}");
{response:?}"); + if let Err(ref e) = response { debug_info!( "Server {} of the original room alias failed to assist in resolving room alias: {e}", @@ -148,8 +151,13 @@ pub(crate) async fn get_alias_helper( ); } - if let Some(servers) = servers { - if !servers.is_empty() { + if response.as_ref().is_ok_and(|resp| resp.servers.is_empty()) { + debug_warn!( + "Server {} responded with room aliases, but was empty? Response: {response:?}", + room_alias.server_name() + ); + + if let Some(servers) = servers { for server in servers { response = services() .sending @@ -160,9 +168,15 @@ pub(crate) async fn get_alias_helper( }, ) .await; + debug_info!("Got response from server {server} for room aliases: {response:?}"); - if response.is_ok() { - break; + if let Ok(ref response) = response { + if !response.servers.is_empty() { + break; + } + debug_warn!( + "Server {server} responded with room aliases, but was empty? Response: {response:?}" + ); } } } From 923b7a5264567003917adda27284e491973899b7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 22 Apr 2024 01:44:22 -0400 Subject: [PATCH 33/45] forgor is_err check too Signed-off-by: strawberry --- src/api/client_server/alias.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index c71b2a82..550cb656 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -151,12 +151,7 @@ pub(crate) async fn get_alias_helper( ); } - if response.as_ref().is_ok_and(|resp| resp.servers.is_empty()) { - debug_warn!( - "Server {} responded with room aliases, but was empty? Response: {response:?}", - room_alias.server_name() - ); - + if response.as_ref().is_ok_and(|resp| resp.servers.is_empty()) || response.as_ref().is_err() { if let Some(servers) = servers { for server in servers { response = services() From 17f493ec4d314c95d191f101b81ca6525bfd4976 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 21 Apr 2024 22:32:45 -0700 Subject: [PATCH 34/45] precompute cidr range denylist; move validator. 
Signed-off-by: Jason Volk --- src/service/globals/mod.rs | 22 +++++++++++++++++++++- src/service/sending/send.rs | 13 ++----------- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 874ba22e..1aea4e32 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -14,6 +14,7 @@ use argon2::Argon2; use base64::{engine::general_purpose, Engine as _}; pub use data::Data; use hickory_resolver::TokioAsyncResolver; +use ipaddress::IPAddress; use regex::RegexSet; use ruma::{ api::{ @@ -25,7 +26,7 @@ use ruma::{ RoomVersionId, ServerName, UserId, }; use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore}; -use tracing::{error, info}; +use tracing::{error, info, trace}; use tracing_subscriber::{EnvFilter, Registry}; use url::Url; @@ -46,6 +47,7 @@ pub struct Service<'a> { pub tracing_reload_handle: tracing_subscriber::reload::Handle, pub config: Config, + pub cidr_range_denylist: Vec, keypair: Arc, jwt_decoding_key: Option, pub resolver: Arc, @@ -138,10 +140,18 @@ impl Service<'_> { argon2::Params::new(19456, 2, 1, None).expect("valid parameters"), ); + let mut cidr_range_denylist = Vec::new(); + for cidr in config.ip_range_denylist.clone() { + let cidr = IPAddress::parse(cidr).expect("valid cidr range"); + trace!("Denied CIDR range: {:?}", cidr); + cidr_range_denylist.push(cidr); + } + let mut s = Self { tracing_reload_handle, db, config: config.clone(), + cidr_range_denylist, keypair: Arc::new(keypair), resolver: resolver.clone(), client: client::Client::new(config, &resolver), @@ -424,6 +434,16 @@ impl Service<'_> { pub fn unix_socket_path(&self) -> &Option { &self.config.unix_socket_path } + pub fn valid_cidr_range(&self, ip: &IPAddress) -> bool { + for cidr in &self.cidr_range_denylist { + if cidr.includes(ip) { + return false; + } + } + + true + } + pub fn shutdown(&self) { self.shutdown.store(true, atomic::Ordering::Relaxed); // On shutdown diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 726ceffe..d6bd36d2 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -536,17 +536,8 @@ fn validate_destination_ip_literal(destination: &ServerName) -> Result<()> { } fn validate_ip(ip: &IPAddress) -> Result<()> { - let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); - let mut cidr_ranges: Vec = Vec::new(); - for cidr in cidr_ranges_s { - cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); - } - - trace!("List of pushed CIDR ranges: {:?}", cidr_ranges); - for cidr in cidr_ranges { - if cidr.includes(ip) { - return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); - } + if !services().globals.valid_cidr_range(ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); } Ok(()) From 12dc99d2835aedefeb8eef914e2808615eb676b2 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 21 Apr 2024 22:41:47 -0700 Subject: [PATCH 35/45] various logging improvements. 
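
The `debug_error!`/`debug_warn!`/`debug_info!` macros are also consolidated
into a single `debug_event!` macro. A usage sketch (the log line is one this
patch itself emits; `debug_event!` is defined in src/utils/debug.rs below):

    // logs at WARN when debug-assertions are enabled, DEBUG otherwise
    debug_warn!("response contains junk");

    // the equivalent call through the shared macro
    debug_event!(tracing::Level::WARN, "response contains junk");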
Signed-off-by: Jason Volk
---
 src/api/client_server/alias.rs  |  2 +-
 src/service/rooms/spaces/mod.rs |  2 +-
 src/service/sending/mod.rs      |  2 +-
 src/service/sending/send.rs     | 40 +++++++++++++++++----------------
 src/utils/debug.rs              | 31 ++++++++++++++------------
 5 files changed, 40 insertions(+), 37 deletions(-)

diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs
index 550cb656..045d1174 100644
--- a/src/api/client_server/alias.rs
+++ b/src/api/client_server/alias.rs
@@ -10,7 +10,7 @@ use ruma::{
 	},
 	OwnedRoomAliasId, OwnedServerName,
 };
-use tracing::{debug, info, warn};
+use tracing::debug;
 
 use crate::{debug_info, debug_warn, services, Error, Result, Ruma};
 
diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs
index 714da443..25bf3c22 100644
--- a/src/service/rooms/spaces/mod.rs
+++ b/src/service/rooms/spaces/mod.rs
@@ -30,7 +30,7 @@ use ruma::{
 	OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt, UserId,
 };
 use tokio::sync::Mutex;
-use tracing::{debug, error, info, warn};
+use tracing::{debug, error, warn};
 
 use crate::{debug_info, services, Error, Result};
 
diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs
index 86676d58..32a7409a 100644
--- a/src/service/sending/mod.rs
+++ b/src/service/sending/mod.rs
@@ -272,7 +272,7 @@ impl Service {
 		});
 	}
 
-	#[tracing::instrument(skip(self), name = "sender")]
+	#[tracing::instrument(skip_all, name = "sender")]
 	async fn handler(&self) -> Result<()> {
 		let receiver = self.receiver.lock().await;
 
diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs
index d6bd36d2..9859cc32 100644
--- a/src/service/sending/send.rs
+++ b/src/service/sending/send.rs
@@ -14,9 +14,9 @@ use ruma::{
 	},
 	OwnedServerName, ServerName,
 };
-use tracing::{debug, error, trace, warn};
+use tracing::{debug, error, trace};
 
-use crate::{debug_error, debug_warn, services, Error, Result};
+use crate::{debug_error, debug_info, debug_warn, services, Error, Result};
 
 /// Wraps either a literal IP address plus port, or a hostname plus complement
 /// (colon-plus-port if it was specified).
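 ///
 /// An illustrative value (not from this patch): `FedDest::Named("matrix.org".to_owned(), ":8448".to_owned())`.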
@@ -300,6 +300,7 @@ async fn resolve_actual_destination(destination: &'_ ServerName) -> Result<(FedD
 	Ok((actual_destination, hostname.into_uri_string()))
 }
 
+#[tracing::instrument(skip_all, name = "well-known")]
 async fn request_well_known(destination: &str) -> Result<Option<String>> {
 	if !services()
 		.globals
@@ -320,22 +321,22 @@ async fn request_well_known(destination: &str) -> Result<Option<String>> {
 		.send()
 		.await;
 
-	trace!("Well known response: {:?}", response);
+	trace!("response: {:?}", response);
 	if let Err(e) = &response {
-		debug!("Well known error: {e:?}");
+		debug!("error: {e:?}");
 		return Ok(None);
 	}
 
 	let response = response?;
 	if !response.status().is_success() {
-		debug!("Well known response not 2XX");
+		debug!("response not 2XX");
 		return Ok(None);
 	}
 
 	let text = response.text().await?;
-	trace!("Well known response text: {:?}", text);
+	trace!("response text: {:?}", text);
 	if text.len() >= 12288 {
-		debug!("Well known response contains junk");
+		debug_warn!("response contains junk");
 		return Ok(None);
 	}
 
@@ -348,13 +349,15 @@ async fn request_well_known(destination: &str) -> Result<Option<String>> {
 		.unwrap_or_default();
 
 	if ruma_identifiers_validation::server_name::validate(m_server).is_err() {
-		debug!("Well known response content missing or invalid");
+		debug_error!("response content missing or invalid");
 		return Ok(None);
 	}
 
+	debug_info!("{:?} found at {:?}", destination, m_server);
 	Ok(Some(m_server.to_owned()))
 }
 
+#[tracing::instrument(skip_all, name = "ip")]
 async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> {
 	match services()
 		.globals
@@ -362,8 +365,11 @@ async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u1
 		.lookup_ip(hostname.to_owned())
 		.await
 	{
+		Err(e) => handle_resolve_error(&e),
 		Ok(override_ip) => {
-			trace!("Caching result of {:?} overriding {:?}", hostname, overname);
+			if hostname != overname {
+				debug_info!("{:?} overridden by {:?}", overname, hostname);
+			}
 			services()
 				.globals
 				.resolver
@@ -374,13 +380,10 @@ async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u1
 
 			Ok(())
 		},
-		Err(e) => {
-			debug!("Got {:?} for {:?} to override {:?}", e.kind(), hostname, overname);
-			handle_resolve_error(&e)
-		},
 	}
 }
 
+#[tracing::instrument(skip_all, name = "srv")]
 async fn query_srv_record(hostname: &'_ str) -> Result<Option<FedDest>> {
 	fn handle_successful_srv(srv: &SrvLookup) -> Option<FedDest> {
 		srv.iter().next().map(|result| {
@@ -421,11 +424,11 @@ fn handle_resolve_error(e: &ResolveError) -> Result<()> {
 		ResolveErrorKind::Io { .. } => {
-			debug_error!("DNS IO error: {e}");
+			error!("{e}");
 			Err(Error::Error(e.to_string()))
 		},
 		_ => {
-			debug!("DNS protocol error: {e}");
+			debug_error!("{e}");
 			Ok(())
 		},
 	}
@@ -514,19 +517,17 @@ fn validate_destination(destination: &ServerName) -> Result<()> {
 		validate_destination_ip_literal(destination)?;
 	}
 
-	trace!("Destination ServerName is valid");
 	Ok(())
 }
 
 fn validate_destination_ip_literal(destination: &ServerName) -> Result<()> {
+	trace!("Destination is an IP literal, checking against IP range denylist.",);
 	debug_assert!(
 		destination.is_ip_literal() || !IPAddress::is_valid(destination.host()),
 		"Destination is not an IP literal."
); - debug!("Destination is an IP literal, checking against IP range denylist.",); - let ip = IPAddress::parse(destination.host()).map_err(|e| { - debug_warn!("Failed to parse IP literal from string: {}", e); + debug_error!("Failed to parse IP literal from string: {}", e); Error::BadServerResponse("Invalid IP address") })?; @@ -558,6 +559,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { None => (destination_str, ":8448"), Some(pos) => destination_str.split_at(pos), }; + FedDest::Named(host.to_owned(), port.to_owned()) } diff --git a/src/utils/debug.rs b/src/utils/debug.rs index 6c1093a0..32a8cf3f 100644 --- a/src/utils/debug.rs +++ b/src/utils/debug.rs @@ -1,14 +1,23 @@ +/// Log event at given level in debug-mode (when debug-assertions are enabled). +/// In release mode it becomes DEBUG level, and possibly subject to elision. +#[macro_export] +macro_rules! debug_event { + ( $level:expr, $($x:tt)+ ) => { + if cfg!(debug_assertions) { + tracing::event!( $level, $($x)+ ); + } else { + tracing::debug!( $($x)+ ); + } + } +} + /// Log message at the ERROR level in debug-mode (when debug-assertions are /// enabled). In release mode it becomes DEBUG level, and possibly subject to /// elision. #[macro_export] macro_rules! debug_error { ( $($x:tt)+ ) => { - if cfg!(debug_assertions) { - error!( $($x)+ ); - } else { - debug!( $($x)+ ); - } + $crate::debug_event!(tracing::Level::ERROR, $($x)+ ); } } @@ -18,11 +27,7 @@ macro_rules! debug_error { #[macro_export] macro_rules! debug_warn { ( $($x:tt)+ ) => { - if cfg!(debug_assertions) { - warn!( $($x)+ ); - } else { - debug!( $($x)+ ); - } + $crate::debug_event!(tracing::Level::WARN, $($x)+ ); } } @@ -32,10 +37,6 @@ macro_rules! debug_warn { #[macro_export] macro_rules! debug_info { ( $($x:tt)+ ) => { - if cfg!(debug_assertions) { - info!( $($x)+ ); - } else { - debug!( $($x)+ ); - } + $crate::debug_event!(tracing::Level::INFO, $($x)+ ); } } From 8fde1e62395240cc3cca89c31d77ba9f012dad80 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 22 Apr 2024 01:52:48 -0400 Subject: [PATCH 36/45] use global `valid_cidr_range` everywhere else Signed-off-by: strawberry --- src/api/client_server/media.rs | 32 ++++---------------------------- src/service/pusher/mod.rs | 32 +++++++------------------------- 2 files changed, 11 insertions(+), 53 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 40e2b093..0e544aa6 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -692,20 +692,8 @@ async fn download_html(client: &reqwest::Client, url: &str) -> Result Result { if let Ok(ip) = IPAddress::parse(url) { - let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); - let mut cidr_ranges: Vec = Vec::new(); - - for cidr in cidr_ranges_s { - cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); - } - - for cidr in cidr_ranges { - if cidr.includes(&ip) { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Requesting from this address is forbidden", - )); - } + if !services().globals.valid_cidr_range(&ip) { + return Err(Error::BadServerResponse("Requesting from this address is forbidden")); } } @@ -714,20 +702,8 @@ async fn request_url_preview(url: &str) -> Result { if let Some(remote_addr) = response.remote_addr() { if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { - let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); - let mut cidr_ranges: Vec = Vec::new(); - - for cidr in cidr_ranges_s { - 
cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); - } - - for cidr in cidr_ranges { - if cidr.includes(&ip) { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Requesting from this address is forbidden", - )); - } + if !services().globals.valid_cidr_range(&ip) { + return Err(Error::BadServerResponse("Requesting from this address is forbidden")); } } } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 70d303ca..287148db 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -20,7 +20,7 @@ use ruma::{ serde::Raw, uint, RoomId, UInt, UserId, }; -use tracing::{debug, info, warn}; +use tracing::{info, trace, warn}; use crate::{services, Error, PduEvent, Result}; @@ -66,19 +66,10 @@ impl Service { let url = reqwest_request.url().clone(); if let Some(url_host) = url.host_str() { - debug!("Checking request URL for IP"); + trace!("Checking request URL for IP"); if let Ok(ip) = IPAddress::parse(url_host) { - let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); - let mut cidr_ranges: Vec = Vec::new(); - - for cidr in cidr_ranges_s { - cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); - } - - for cidr in cidr_ranges { - if cidr.includes(&ip) { - return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); - } + if !services().globals.valid_cidr_range(&ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); } } } @@ -94,20 +85,11 @@ impl Service { Ok(mut response) => { // reqwest::Response -> http::Response conversion - debug!("Checking response destination's IP"); + trace!("Checking response destination's IP"); if let Some(remote_addr) = response.remote_addr() { if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { - let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); - let mut cidr_ranges: Vec = Vec::new(); - - for cidr in cidr_ranges_s { - cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); - } - - for cidr in cidr_ranges { - if cidr.includes(&ip) { - return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); - } + if !services().globals.valid_cidr_range(&ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); } } } From 0bd80a53f847027e30cc5b8bd6fe822d2b49683f Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 17:15:02 -0700 Subject: [PATCH 37/45] flatten and sort all flake inputs --- flake.nix | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/flake.nix b/flake.nix index 29ceb3a6..9ac4e5f5 100644 --- a/flake.nix +++ b/flake.nix @@ -1,22 +1,12 @@ { inputs = { - nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; + attic.url = "github:zhaofengli/attic?ref=main"; + crane = { url = "github:ipetkov/crane?ref=master"; inputs.nixpkgs.follows = "nixpkgs"; }; + fenix = { url = "github:nix-community/fenix"; inputs.nixpkgs.follows = "nixpkgs"; }; + flake-compat = { url = "github:edolstra/flake-compat"; flake = false; }; flake-utils.url = "github:numtide/flake-utils"; nix-filter.url = "github:numtide/nix-filter"; - flake-compat = { - url = "github:edolstra/flake-compat"; - flake = false; - }; - - fenix = { - url = "github:nix-community/fenix"; - inputs.nixpkgs.follows = "nixpkgs"; - }; - crane = { - url = "github:ipetkov/crane?ref=master"; - inputs.nixpkgs.follows = "nixpkgs"; - }; - attic.url = "github:zhaofengli/attic?ref=main"; + nixpkgs.url = 
"github:NixOS/nixpkgs?ref=nixos-unstable"; }; outputs = From 61cd28f66a3c2fe1b5e9aeab3b223154edbc4203 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 17:17:37 -0700 Subject: [PATCH 38/45] get rocksdb via flake inputs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Added input 'rocksdb': 'github:facebook/rocksdb/bcf88d48ce8aa8b536aee4dd305533b3b83cf435?narHash=sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8%3D' (2024-04-16) --- flake.lock | 20 +++++++++++++++++++- flake.nix | 18 +++++------------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/flake.lock b/flake.lock index 99fff55b..bb767e47 100644 --- a/flake.lock +++ b/flake.lock @@ -214,6 +214,23 @@ "type": "github" } }, + "rocksdb": { + "flake": false, + "locked": { + "lastModified": 1713310517, + "narHash": "sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8=", + "owner": "facebook", + "repo": "rocksdb", + "rev": "bcf88d48ce8aa8b536aee4dd305533b3b83cf435", + "type": "github" + }, + "original": { + "owner": "facebook", + "ref": "v9.1.0", + "repo": "rocksdb", + "type": "github" + } + }, "root": { "inputs": { "attic": "attic", @@ -222,7 +239,8 @@ "flake-compat": "flake-compat_2", "flake-utils": "flake-utils_2", "nix-filter": "nix-filter", - "nixpkgs": "nixpkgs_2" + "nixpkgs": "nixpkgs_2", + "rocksdb": "rocksdb" } }, "rust-analyzer-src": { diff --git a/flake.nix b/flake.nix index 9ac4e5f5..a9ca5cee 100644 --- a/flake.nix +++ b/flake.nix @@ -7,6 +7,7 @@ flake-utils.url = "github:numtide/flake-utils"; nix-filter.url = "github:numtide/nix-filter"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; + rocksdb = { url = "github:facebook/rocksdb?ref=v9.1.0"; flake = false; }; }; outputs = @@ -23,19 +24,10 @@ pkgsHost = nixpkgs.legacyPackages.${system}; allocator = null; - rocksdb' = pkgs: - let - version = "9.1.0"; - in - (pkgs.rocksdb.overrideAttrs (old: { - inherit version; - src = pkgs.fetchFromGitHub { - owner = "facebook"; - repo = "rocksdb"; - rev = "bcf88d48ce8aa8b536aee4dd305533b3b83cf435"; - hash = "sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8"; - }; - })); + rocksdb' = pkgs: (pkgs.rocksdb.overrideAttrs (old: { + version = "9.1.0"; + src = inputs.rocksdb; + })); # Nix-accessible `Cargo.toml` cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); From 55dbca2aa91d1352060d7bac8eab89a42dd28f5a Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 18:44:39 -0700 Subject: [PATCH 39/45] get complement via flake inputs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Added input 'complement': 'github:matrix-org/complement/d73c81a091604b0fc5b6b0617dcac58c25763f57?narHash=sha256-hom/Lt0gZzLWqFhUJG0X2i88CAMIILInO5w0tPj6G3s%3D' (2024-04-18) --- flake.lock | 17 +++++++++++++++++ flake.nix | 10 ++-------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/flake.lock b/flake.lock index bb767e47..98ef9259 100644 --- a/flake.lock +++ b/flake.lock @@ -23,6 +23,22 @@ "type": "github" } }, + "complement": { + "flake": false, + "locked": { + "lastModified": 1713458251, + "narHash": "sha256-hom/Lt0gZzLWqFhUJG0X2i88CAMIILInO5w0tPj6G3s=", + "owner": "matrix-org", + "repo": "complement", + "rev": "d73c81a091604b0fc5b6b0617dcac58c25763f57", + "type": "github" + }, + "original": { + "owner": "matrix-org", + "repo": "complement", + "type": "github" + } + }, "crane": { "inputs": { "nixpkgs": [ @@ -234,6 +250,7 @@ "root": { "inputs": { "attic": 
"attic", + "complement": "complement", "crane": "crane_2", "fenix": "fenix", "flake-compat": "flake-compat_2", diff --git a/flake.nix b/flake.nix index a9ca5cee..67143ccd 100644 --- a/flake.nix +++ b/flake.nix @@ -1,6 +1,7 @@ { inputs = { attic.url = "github:zhaofengli/attic?ref=main"; + complement = { url = "github:matrix-org/complement"; flake = false; }; crane = { url = "github:ipetkov/crane?ref=master"; inputs.nixpkgs.follows = "nixpkgs"; }; fenix = { url = "github:nix-community/fenix"; inputs.nixpkgs.follows = "nixpkgs"; }; flake-compat = { url = "github:edolstra/flake-compat"; flake = false; }; @@ -197,19 +198,12 @@ }; createComplementRuntime = pkgs: image: let - complement = pkgs.fetchFromGitHub { - owner = "matrix-org"; - repo = "complement"; - rev = "d73c81a091604b0fc5b6b0617dcac58c25763f57"; - hash = "sha256-hom/Lt0gZzLWqFhUJG0X2i88CAMIILInO5w0tPj6G3s"; - }; - script = pkgs.writeShellScriptBin "run.sh" '' export PATH=${pkgs.lib.makeBinPath [ pkgs.olm pkgs.gcc ]} ${pkgs.lib.getExe pkgs.docker} load < ${image} set +o pipefail - /usr/bin/env -C "${complement}" COMPLEMENT_BASE_IMAGE="complement-conduit:dev" ${pkgs.lib.getExe pkgs.go} test -json ${complement}/tests | ${pkgs.toybox}/bin/tee $1 + /usr/bin/env -C "${inputs.complement}" COMPLEMENT_BASE_IMAGE="complement-conduit:dev" ${pkgs.lib.getExe pkgs.go} test -json ${inputs.complement}/tests | ${pkgs.toybox}/bin/tee $1 set -o pipefail # Post-process the results into an easy-to-compare format From d77aa94c6602840e12191fac0564459475fadfa2 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 15:16:55 -0700 Subject: [PATCH 40/45] remove dead code --- flake.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/flake.nix b/flake.nix index 67143ccd..ab9a297f 100644 --- a/flake.nix +++ b/flake.nix @@ -23,7 +23,6 @@ }: flake-utils.lib.eachDefaultSystem (system: let pkgsHost = nixpkgs.legacyPackages.${system}; - allocator = null; rocksdb' = pkgs: (pkgs.rocksdb.overrideAttrs (old: { version = "9.1.0"; From e678af76013062dbf5b36e04930e52a77738bab9 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 15:33:54 -0700 Subject: [PATCH 41/45] always go through `inputs` --- flake.nix | 50 ++++++++++++++++++++++---------------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/flake.nix b/flake.nix index ab9a297f..7eeaec7d 100644 --- a/flake.nix +++ b/flake.nix @@ -11,18 +11,10 @@ rocksdb = { url = "github:facebook/rocksdb?ref=v9.1.0"; flake = false; }; }; - outputs = - { self - , nixpkgs - , flake-utils - , nix-filter - - , fenix - , crane - , ... 
- }: flake-utils.lib.eachDefaultSystem (system: + outputs = inputs: + inputs.flake-utils.lib.eachDefaultSystem (system: let - pkgsHost = nixpkgs.legacyPackages.${system}; + pkgsHost = inputs.nixpkgs.legacyPackages.${system}; rocksdb' = pkgs: (pkgs.rocksdb.overrideAttrs (old: { version = "9.1.0"; @@ -33,7 +25,7 @@ cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); # The Rust toolchain to use - toolchain = fenix.packages.${system}.fromToolchainFile { + toolchain = inputs.fenix.packages.${system}.fromToolchainFile { file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` @@ -41,7 +33,7 @@ }; builder = pkgs: - ((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; + ((inputs.crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; nativeBuildInputs = pkgs: let darwin = if pkgs.stdenv.isDarwin then [ pkgs.libiconv ] else []; @@ -53,7 +45,7 @@ ] ++ darwin; env = pkgs: { - CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev; + CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include"; ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib"; } @@ -147,7 +139,7 @@ ); mkPackage = pkgs: allocator: cargoArgs: profile: builder pkgs { - src = nix-filter { + src = inputs.nix-filter { root = ./.; include = [ "src" @@ -179,7 +171,7 @@ name = package.pname; tag = "main"; # Debian makes builds reproducible through using the HEAD commit's date - created = "@${toString self.lastModified}"; + created = "@${toString inputs.self.lastModified}"; contents = [ pkgs.dockerTools.caCertificates ]; @@ -227,7 +219,7 @@ copyToRoot = pkgs.stdenv.mkDerivation { name = "complement_data"; - src = nix-filter { + src = inputs.nix-filter { root = ./.; include = [ "tests/complement/conduwuit-complement.toml" @@ -284,19 +276,21 @@ default = mkPackage pkgsHost null "" "release"; jemalloc = mkPackage pkgsHost "jemalloc" "" "release"; hmalloc = mkPackage pkgsHost "hmalloc" "" "release"; - oci-image = mkOciImage pkgsHost self.packages.${system}.default; - oci-image-jemalloc = mkOciImage pkgsHost self.packages.${system}.jemalloc; - oci-image-hmalloc = mkOciImage pkgsHost self.packages.${system}.hmalloc; + oci-image = mkOciImage pkgsHost inputs.self.packages.${system}.default; + oci-image-jemalloc = + mkOciImage pkgsHost inputs.self.packages.${system}.jemalloc; + oci-image-hmalloc = + mkOciImage pkgsHost inputs.self.packages.${system}.hmalloc; book = let - package = self.packages.${system}.default; + package = inputs.self.packages.${system}.default; in pkgsHost.stdenv.mkDerivation { pname = "${package.pname}-book"; version = package.version; - src = nix-filter { + src = inputs.nix-filter { root = ./.; include = [ "book.toml" @@ -317,7 +311,7 @@ ''; }; complement-image = createComplementImage pkgsHost; - complement-runtime = createComplementRuntime pkgsHost self.outputs.packages.${system}.complement-image; + complement-runtime = createComplementRuntime pkgsHost inputs.self.outputs.packages.${system}.complement-image; } // builtins.listToAttrs @@ -327,7 +321,7 @@ let binaryName = "static-${crossSystem}"; pkgsCrossStatic = - (import nixpkgs { + (import inputs.nixpkgs { inherit system; crossSystem = { config = crossSystem; @@ -358,7 +352,7 @@ name = "oci-image-${crossSystem}"; value = mkOciImage pkgsCrossStatic - self.packages.${system}.${binaryName}; + inputs.self.packages.${system}.${binaryName}; } # An output for an OCI image based on that binary with jemalloc @@ -366,7 +360,7 @@ name = "oci-image-${crossSystem}-jemalloc"; value = mkOciImage 
pkgsCrossStatic - self.packages.${system}."${binaryName}-jemalloc"; + inputs.self.packages.${system}."${binaryName}-jemalloc"; } # An output for an OCI image based on that binary with hardened_malloc @@ -374,7 +368,7 @@ name = "oci-image-${crossSystem}-hmalloc"; value = mkOciImage pkgsCrossStatic - self.packages.${system}."${binaryName}-hmalloc"; + inputs.self.packages.${system}."${binaryName}-hmalloc"; } ] ) @@ -399,7 +393,7 @@ # # This needs to come before `toolchain` in this list, otherwise # `$PATH` will have stable rustfmt instead. - fenix.packages.${system}.latest.rustfmt + inputs.fenix.packages.${system}.latest.rustfmt toolchain ] ++ (with pkgsHost; [ From 8a7e8271ad9cd57b365e7c2a26d21391ef9e722d Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 15:28:35 -0700 Subject: [PATCH 42/45] use `lib.makeScope` and files to organize packages Some of the improvements here include: * rocksdb can actually use jemalloc now instead of just pulling in a second rocksdb for no reason * "complement-runtime" factored back out into shell file * complement image no longer uses `mkDerivation` for `copyToRoot` because that's what `buildEnv` is for * complement image no longer sets `SERVER_NAME`, complement already does that * all packages were factored out into `callPackage`-able files for use with a custom `lib.makeScope pkgs.newScope` * new version of `mkPackage` has options that are easier to use and override such as `features` --- bin/complement | 39 + docs/SUMMARY.md | 2 + docs/development.md | 4 + docs/development/testing.md | 17 + flake.nix | 361 ++------- nix/pkgs/book/default.nix | 31 + nix/pkgs/complement/config.toml | 19 + nix/pkgs/complement/default.nix | 92 +++ {tests => nix/pkgs}/complement/v3.ext | 0 nix/pkgs/main/cross-compilation-env.nix | 100 +++ nix/pkgs/main/default.nix | 71 ++ nix/pkgs/oci-image/default.nix | 28 + tests/complement/README.md | 15 - tests/complement/conduwuit-complement.toml | 592 -------------- tests/complement/failed_tests.jsonl | 596 -------------- tests/complement/full_results.jsonl | 896 --------------------- tests/complement/passed_tests.jsonl | 284 ------- 17 files changed, 466 insertions(+), 2681 deletions(-) create mode 100755 bin/complement create mode 100644 docs/development.md create mode 100644 docs/development/testing.md create mode 100644 nix/pkgs/book/default.nix create mode 100644 nix/pkgs/complement/config.toml create mode 100644 nix/pkgs/complement/default.nix rename {tests => nix/pkgs}/complement/v3.ext (100%) create mode 100644 nix/pkgs/main/cross-compilation-env.nix create mode 100644 nix/pkgs/main/default.nix create mode 100644 nix/pkgs/oci-image/default.nix delete mode 100644 tests/complement/README.md delete mode 100644 tests/complement/conduwuit-complement.toml delete mode 100644 tests/complement/failed_tests.jsonl delete mode 100644 tests/complement/full_results.jsonl delete mode 100644 tests/complement/passed_tests.jsonl diff --git a/bin/complement b/bin/complement new file mode 100755 index 00000000..f2ce6971 --- /dev/null +++ b/bin/complement @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Path to Complement's source code +# +# The `COMPLEMENT_SRC` environment variable is set in the Nix dev shell, which +# points to a store path containing the Complement source code. It's likely you +# want to just pass that as the first argument to use it here. 
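+#
+# Example invocation from inside the dev shell (output paths are illustrative):
+#
+#   bin/complement "$COMPLEMENT_SRC" ./logs.jsonl ./results.jsonl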
+COMPLEMENT_SRC="$1" + +# A `.jsonl` file to write test logs to +LOG_FILE="$2" + +# A `.jsonl` file to write test results to +RESULTS_FILE="$3" + +OCI_IMAGE="complement-conduit:dev" + +pushd "$(git rev-parse --show-toplevel)" > /dev/null +nix build .#complement +docker load < result +popd > /dev/null + +# It's okay (likely, even) that `go test` exits nonzero +set +o pipefail +env \ + -C "$COMPLEMENT_SRC" \ + COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ + go test -timeout 1h -json ./tests | tee "$LOG_FILE" +set -o pipefail + +# Post-process the results into an easy-to-compare format +cat "$LOG_FILE" | jq -c ' + select( + (.Action == "pass" or .Action == "fail" or .Action == "skip") + and .Test != null + ) | {Action: .Action, Test: .Test} + ' | sort > "$RESULTS_FILE" diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 3be2386c..da125422 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -11,3 +11,5 @@ - [NixOS](deploying/nixos.md) - [TURN](turn.md) - [Appservices](appservices.md) +- [Development](development.md) + - [Testing](development/testing.md) diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 00000000..d6ee27eb --- /dev/null +++ b/docs/development.md @@ -0,0 +1,4 @@ +# Development + +Information about developing the project. If you are only interested in using +it, you can safely ignore this section. diff --git a/docs/development/testing.md b/docs/development/testing.md new file mode 100644 index 00000000..4272202c --- /dev/null +++ b/docs/development/testing.md @@ -0,0 +1,17 @@ +# Testing + +## Complement + +Have a look at [Complement's repository][complement] for an explanation of what +it is. + +To test against Complement, with Nix and direnv installed and set up, you can +either: + +* Run `complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl ./path/to/results.jsonl` + to build a Complement image, run the tests, and output the logs and results + to the specified paths +* Run `nix build .#complement` from the root of the repository to just build a + Complement image + +[complement]: https://github.com/matrix-org/complement diff --git a/flake.nix b/flake.nix index 7eeaec7d..e512236c 100644 --- a/flake.nix +++ b/flake.nix @@ -16,14 +16,6 @@ let pkgsHost = inputs.nixpkgs.legacyPackages.${system}; - rocksdb' = pkgs: (pkgs.rocksdb.overrideAttrs (old: { - version = "9.1.0"; - src = inputs.rocksdb; - })); - - # Nix-accessible `Cargo.toml` - cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); - # The Rust toolchain to use toolchain = inputs.fenix.packages.${system}.fromToolchainFile { file = ./rust-toolchain.toml; @@ -32,286 +24,45 @@ sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8="; }; - builder = pkgs: - ((inputs.crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; + scope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { + book = self.callPackage ./nix/pkgs/book {}; + complement = self.callPackage ./nix/pkgs/complement {}; + craneLib = ((inputs.crane.mkLib pkgs).overrideToolchain toolchain); + inherit inputs; + main = self.callPackage ./nix/pkgs/main {}; + oci-image = self.callPackage ./nix/pkgs/oci-image {}; + rocksdb = pkgs.rocksdb.overrideAttrs (old: { + src = inputs.rocksdb; + version = pkgs.lib.removePrefix + "v" + (builtins.fromJSON (builtins.readFile ./flake.lock)) + .nodes.rocksdb.original.ref; + }); + }); - nativeBuildInputs = pkgs: let - darwin = if pkgs.stdenv.isDarwin then [ pkgs.libiconv ] else []; - in [ - # bindgen needs the build platform's libclang. 
Apparently due to - # "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't - # quite do the right thing here. - pkgs.pkgsBuildHost.rustPlatform.bindgenHook - ] ++ darwin; - - env = pkgs: { - CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; - ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include"; - ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib"; - } - // pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic { - ROCKSDB_STATIC = ""; - } - // { - CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in - lib.concatStringsSep " " ([ ] - ++ lib.optionals - # This disables PIE for static builds, which isn't great in terms - # of security. Unfortunately, my hand is forced because nixpkgs' - # `libstdc++.a` is built without `-fPIE`, which precludes us from - # leaving PIE enabled. - stdenv.hostPlatform.isStatic - [ "-C" "relocation-model=static" ] - ++ lib.optionals - (stdenv.buildPlatform.config != stdenv.hostPlatform.config) - [ "-l" "c" ] - ++ lib.optionals - # This check has to match the one [here][0]. We only need to set - # these flags when using a different linker. Don't ask me why, - # though, because I don't know. All I know is it breaks otherwise. - # - # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40 - ( - # Nixpkgs doesn't check for x86_64 here but we do, because I - # observed a failure building statically for x86_64 without - # including it here. Linkers are weird. - (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64) - && stdenv.hostPlatform.isStatic - && !stdenv.isDarwin - && !stdenv.cc.bintools.isLLVM - ) - [ - "-l" - "stdc++" - "-L" - "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" - ] - ); - } - - # What follows is stolen from [here][0]. Its purpose is to properly - # configure compilers and linkers for various stages of the build, and - # even covers the case of build scripts that need native code compiled and - # run on the build platform (I think). 
- # - # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80 - // ( - let - inherit (pkgs.rust.lib) envVars; - in - pkgs.lib.optionalAttrs - (pkgs.stdenv.targetPlatform.rust.rustcTarget - != pkgs.stdenv.hostPlatform.rust.rustcTarget) - ( - let - inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = - envVars.linkerForTarget; - } - ) - // ( - let - inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForHost; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost; - CARGO_BUILD_TARGET = rustcTarget; - } - ) - // ( - let - inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild; - HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc"; - HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++"; - } - ) - ); - - mkPackage = pkgs: allocator: cargoArgs: profile: builder pkgs { - src = inputs.nix-filter { - root = ./.; - include = [ - "src" - "Cargo.toml" - "Cargo.lock" - ]; - }; - - rocksdb' = (if allocator == "jemalloc" then (pkgs.rocksdb.override { enableJemalloc = true; }) else (rocksdb' pkgs)); - - # This is redundant with CI - doCheck = false; - - env = env pkgs; - nativeBuildInputs = nativeBuildInputs pkgs; - - cargoExtraArgs = cargoArgs - + (if allocator == "jemalloc" then " --features jemalloc" else "") - + (if allocator == "hmalloc" then " --features hardened_malloc" else "") - ; - - meta.mainProgram = cargoToml.package.name; - - CARGO_PROFILE = profile; - }; - - mkOciImage = pkgs: package: - pkgs.dockerTools.buildLayeredImage { - name = package.pname; - tag = "main"; - # Debian makes builds reproducible through using the HEAD commit's date - created = "@${toString inputs.self.lastModified}"; - contents = [ - pkgs.dockerTools.caCertificates - ]; - config = { - # Use the `tini` init system so that signals (e.g. 
ctrl+c/SIGINT) - # are handled as expected - Entrypoint = if !pkgs.stdenv.isDarwin then [ - "${pkgs.lib.getExe' pkgs.tini "tini"}" - "--" - ] else []; - Cmd = [ - "${pkgs.lib.getExe package}" - ]; - }; - }; - - createComplementRuntime = pkgs: image: let - script = pkgs.writeShellScriptBin "run.sh" - '' - export PATH=${pkgs.lib.makeBinPath [ pkgs.olm pkgs.gcc ]} - ${pkgs.lib.getExe pkgs.docker} load < ${image} - set +o pipefail - /usr/bin/env -C "${inputs.complement}" COMPLEMENT_BASE_IMAGE="complement-conduit:dev" ${pkgs.lib.getExe pkgs.go} test -json ${inputs.complement}/tests | ${pkgs.toybox}/bin/tee $1 - set -o pipefail - - # Post-process the results into an easy-to-compare format - ${pkgs.coreutils}/bin/cat "$1" | ${pkgs.lib.getExe pkgs.jq} -c ' - select( - (.Action == "pass" or .Action == "fail" or .Action == "skip") - and .Test != null - ) | {Action: .Action, Test: .Test} - ' | ${pkgs.coreutils}/bin/sort > "$2" - ''; - - in script; - - createComplementImage = pkgs: let - - conduwuit = mkPackage pkgs "jemalloc" "--features=axum_dual_protocol" "dev"; - - in pkgs.dockerTools.buildImage { - name = "complement-conduit"; - tag = "dev"; - - copyToRoot = pkgs.stdenv.mkDerivation { - - name = "complement_data"; - src = inputs.nix-filter { - root = ./.; - include = [ - "tests/complement/conduwuit-complement.toml" - "tests/complement/v3.ext" - ]; - }; - phases = [ "unpackPhase" "installPhase" ]; - installPhase = '' - mkdir -p $out/conduwuit/data - cp $src/tests/complement/conduwuit-complement.toml $out/conduwuit/conduit.toml - cp $src/tests/complement/v3.ext $out/v3.ext - ''; - - }; - - config = { - - Cmd = [ - "${pkgs.bash}/bin/sh" - "-c" - '' - echo "Starting server as $SERVER_NAME" && - export CONDUIT_SERVER_NAME=$SERVER_NAME CONDUIT_WELL_KNOWN_SERVER="$SERVER_NAME:8448" CONDUIT_WELL_KNOWN_SERVER="$SERVER_NAME:8008" && - ${pkgs.lib.getExe pkgs.openssl} genrsa -out /conduwuit/private_key.key 2048 && - ${pkgs.lib.getExe pkgs.openssl} req -new -sha256 -key /conduwuit/private_key.key -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" -out /conduwuit/signing_request.csr && - echo "DNS.1 = $SERVER_NAME" >> /v3.ext && - echo "IP.1 = $(${pkgs.lib.getExe pkgs.gawk} 'END{print $1}' /etc/hosts)" >> /v3.ext && - ${pkgs.lib.getExe pkgs.openssl} x509 -req -extfile /v3.ext -in /conduwuit/signing_request.csr -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -CAcreateserial -out /conduwuit/certificate.crt -days 1 -sha256 && - ${pkgs.lib.getExe conduwuit} - '' - ]; - - Entrypoint = if !pkgs.stdenv.isDarwin then [ - "${pkgs.lib.getExe' pkgs.tini "tini"}" - "--" - ] else []; - - Env = [ - "SSL_CERT_FILE=/complement/ca/ca.crt" - "SERVER_NAME=localhost" - "CONDUIT_CONFIG=/conduwuit/conduit.toml" - ]; - - ExposedPorts = { - "8008/tcp" = {}; - "8448/tcp" = {}; - }; - - }; - }; + scopeHost = (scope pkgsHost); in { packages = { - default = mkPackage pkgsHost null "" "release"; - jemalloc = mkPackage pkgsHost "jemalloc" "" "release"; - hmalloc = mkPackage pkgsHost "hmalloc" "" "release"; - oci-image = mkOciImage pkgsHost inputs.self.packages.${system}.default; - oci-image-jemalloc = - mkOciImage pkgsHost inputs.self.packages.${system}.jemalloc; - oci-image-hmalloc = - mkOciImage pkgsHost inputs.self.packages.${system}.hmalloc; + default = scopeHost.main; + jemalloc = scopeHost.main.override { features = ["jemalloc"]; }; + hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; }; - book = - let - package = inputs.self.packages.${system}.default; - in - pkgsHost.stdenv.mkDerivation { - pname = 
"${package.pname}-book"; - version = package.version; - - src = inputs.nix-filter { - root = ./.; - include = [ - "book.toml" - "conduwuit-example.toml" - "README.md" - "debian/README.md" - "docs" - ]; - }; - - nativeBuildInputs = (with pkgsHost; [ - mdbook - ]); - - buildPhase = '' - mdbook build - mv public $out - ''; + oci-image = scopeHost.oci-image; + oci-image-jemalloc = scopeHost.oci-image.override { + main = scopeHost.main.override { + features = ["jemalloc"]; }; - complement-image = createComplementImage pkgsHost; - complement-runtime = createComplementRuntime pkgsHost inputs.self.outputs.packages.${system}.complement-image; + }; + oci-image-hmalloc = scopeHost.oci-image.override { + main = scopeHost.main.override { + features = ["hardened_malloc"]; + }; + }; + + book = scopeHost.book; + + complement = scopeHost.complement; } // builtins.listToAttrs @@ -327,48 +78,55 @@ config = crossSystem; }; }).pkgsStatic; + scopeCrossStatic = scope pkgsCrossStatic; in [ # An output for a statically-linked binary { name = binaryName; - value = mkPackage pkgsCrossStatic null "" "release"; + value = scopeCrossStatic.main; } # An output for a statically-linked binary with jemalloc { name = "${binaryName}-jemalloc"; - value = mkPackage pkgsCrossStatic "jemalloc" "" "release"; + value = scopeCrossStatic.main.override { + features = ["jemalloc"]; + }; } # An output for a statically-linked binary with hardened_malloc { name = "${binaryName}-hmalloc"; - value = mkPackage pkgsCrossStatic "hmalloc" "" "release"; + value = scopeCrossStatic.main.override { + features = ["hardened_malloc"]; + }; } # An output for an OCI image based on that binary { name = "oci-image-${crossSystem}"; - value = mkOciImage - pkgsCrossStatic - inputs.self.packages.${system}.${binaryName}; + value = scopeCrossStatic.oci-image; } # An output for an OCI image based on that binary with jemalloc { name = "oci-image-${crossSystem}-jemalloc"; - value = mkOciImage - pkgsCrossStatic - inputs.self.packages.${system}."${binaryName}-jemalloc"; + value = scopeCrossStatic.oci-image.override { + main = scopeCrossStatic.main.override { + features = ["jemalloc"]; + }; + }; } # An output for an OCI image based on that binary with hardened_malloc { name = "oci-image-${crossSystem}-hmalloc"; - value = mkOciImage - pkgsCrossStatic - inputs.self.packages.${system}."${binaryName}-hmalloc"; + value = scopeCrossStatic.oci-image.override { + main = scopeCrossStatic.main.override { + features = ["hardened_malloc"]; + }; + }; } ] ) @@ -380,15 +138,19 @@ ); devShells.default = pkgsHost.mkShell { - env = env pkgsHost // { + env = scopeHost.main.env // { # Rust Analyzer needs to be able to find the path to default crate # sources, and it can read this environment variable to do so. The # `rust-src` component is required in order for this to work. RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; + + # Convenient way to access a pinned version of Complement's source + # code. 
+ COMPLEMENT_SRC = inputs.complement.outPath; }; # Development tools - nativeBuildInputs = nativeBuildInputs pkgsHost ++ [ + packages = [ # Always use nightly rustfmt because most of its options are unstable # # This needs to come before `toolchain` in this list, otherwise @@ -396,7 +158,8 @@ inputs.fenix.packages.${system}.latest.rustfmt toolchain - ] ++ (with pkgsHost; [ + ] + ++ (with pkgsHost; [ engage # Needed for producing Debian packages @@ -414,7 +177,9 @@ # Useful for editing the book locally mdbook - ]); + ]) + ++ + scopeHost.main.nativeBuildInputs; }; }); } diff --git a/nix/pkgs/book/default.nix b/nix/pkgs/book/default.nix new file mode 100644 index 00000000..a3e9e28f --- /dev/null +++ b/nix/pkgs/book/default.nix @@ -0,0 +1,31 @@ +{ inputs + +# Dependencies +, main +, mdbook +, stdenv +}: + +stdenv.mkDerivation { + inherit (main) pname version; + + src = inputs.nix-filter { + root = inputs.self; + include = [ + "book.toml" + "conduwuit-example.toml" + "README.md" + "debian/README.md" + "docs" + ]; + }; + + nativeBuildInputs = [ + mdbook + ]; + + buildPhase = '' + mdbook build + mv public $out + ''; +} diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml new file mode 100644 index 00000000..db1f2d81 --- /dev/null +++ b/nix/pkgs/complement/config.toml @@ -0,0 +1,19 @@ +[global] +address = "0.0.0.0" +allow_device_name_federation = true +allow_guest_registration = true +allow_public_room_directory_over_federation = true +allow_public_room_directory_without_auth = true +allow_registration = true +allow_unstable_room_versions = true +database_backend = "rocksdb" +database_path = "/database" +log = "trace" +port = [8008, 8448] +trusted_servers = [] +yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true + +[global.tls] +certs = "/certificate.crt" +dual_protocol = true +key = "/private_key.key" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix new file mode 100644 index 00000000..0399f1e8 --- /dev/null +++ b/nix/pkgs/complement/default.nix @@ -0,0 +1,92 @@ +# Dependencies +{ bashInteractive +, buildEnv +, coreutils +, dockerTools +, gawk +, lib +, main +, openssl +, stdenv +, tini +, writeShellScriptBin +}: + +let + main' = main.override { + profile = "dev"; + features = ["axum_dual_protocol"]; + }; + + start = writeShellScriptBin "start" '' + set -euxo pipefail + + ${lib.getExe openssl} genrsa -out private_key.key 2048 + ${lib.getExe openssl} req \ + -new \ + -sha256 \ + -key private_key.key \ + -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" \ + -out signing_request.csr + cp ${./v3.ext} v3.ext + echo "DNS.1 = $SERVER_NAME" >> v3.ext + echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \ + >> v3.ext + ${lib.getExe openssl} x509 \ + -req \ + -extfile v3.ext \ + -in signing_request.csr \ + -CA /complement/ca/ca.crt \ + -CAkey /complement/ca/ca.key \ + -CAcreateserial \ + -out certificate.crt \ + -days 1 \ + -sha256 + + ${lib.getExe' coreutils "env"} \ + CONDUIT_SERVER_NAME="$SERVER_NAME" \ + CONDUIT_WELL_KNOWN_SERVER="$SERVER_NAME:8448" \ + CONDUIT_WELL_KNOWN_SERVER="$SERVER_NAME:8008" \ + ${lib.getExe main'} + ''; +in + +dockerTools.buildImage { + name = "complement-${main.pname}"; + tag = "dev"; + + copyToRoot = buildEnv { + name = "root"; + pathsToLink = [ + "/bin" + ]; + paths = [ + bashInteractive + coreutils + main' + start + ]; + }; + + config = { + Cmd = [ + "${lib.getExe start}" + ]; + + Entrypoint = if !stdenv.isDarwin + # Use the `tini` init system so that signals (e.g. 
ctrl+c/SIGINT)
+      # are handled as expected
+      then [ "${lib.getExe' tini "tini"}" "--" ]
+      else [];
+
+    Env = [
+      "SSL_CERT_FILE=/complement/ca/ca.crt"
+      "CONDUIT_CONFIG=${./config.toml}"
+    ];
+
+    ExposedPorts = {
+      "8008/tcp" = {};
+      "8448/tcp" = {};
+    };
+  };
+}
diff --git a/tests/complement/v3.ext b/nix/pkgs/complement/v3.ext
similarity index 100%
rename from tests/complement/v3.ext
rename to nix/pkgs/complement/v3.ext
diff --git a/nix/pkgs/main/cross-compilation-env.nix b/nix/pkgs/main/cross-compilation-env.nix
new file mode 100644
index 00000000..83fe6ed6
--- /dev/null
+++ b/nix/pkgs/main/cross-compilation-env.nix
@@ -0,0 +1,100 @@
+{ lib
+, pkgsBuildHost
+, rust
+, stdenv
+}:
+
+lib.optionalAttrs stdenv.hostPlatform.isStatic {
+  ROCKSDB_STATIC = "";
+}
+//
+{
+  CARGO_BUILD_RUSTFLAGS =
+    lib.concatStringsSep
+      " "
+      ([]
+      # This disables PIE for static builds, which isn't great in terms
+      # of security. Unfortunately, my hand is forced because nixpkgs'
+      # `libstdc++.a` is built without `-fPIE`, which precludes us from
+      # leaving PIE enabled.
+      ++ lib.optionals
+        stdenv.hostPlatform.isStatic
+        [ "-C" "relocation-model=static" ]
+      ++ lib.optionals
+        (stdenv.buildPlatform.config != stdenv.hostPlatform.config)
+        [ "-l" "c" ]
+      ++ lib.optionals
+        # This check has to match the one [here][0]. We only need to set
+        # these flags when using a different linker. Don't ask me why,
+        # though, because I don't know. All I know is it breaks otherwise.
+        #
+        # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
+        (
+          # Nixpkgs doesn't check for x86_64 here but we do, because I
+          # observed a failure building statically for x86_64 without
+          # including it here. Linkers are weird.
+          (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
+          && stdenv.hostPlatform.isStatic
+          && !stdenv.isDarwin
+          && !stdenv.cc.bintools.isLLVM
+        )
+        [
+          "-l"
+          "stdc++"
+          "-L"
+          "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
+        ]
+      );
+}
+
+# What follows is stolen from [here][0]. Its purpose is to properly
+# configure compilers and linkers for various stages of the build, and
+# even covers the case of build scripts that need native code compiled and
+# run on the build platform (I think).
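+#
+# (A hedged illustration, not something this file hardcodes: assuming a
+# static aarch64-unknown-linux-musl host, and nixpkgs' usual uppercase,
+# underscore-separated cargoEnvVarTarget naming, the merge below would
+# yield host-stage entries roughly like
+#
+#   "CC_AARCH64_UNKNOWN_LINUX_MUSL" = envVars.ccForHost;
+#   "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER" = envVars.linkerForHost;
+#   CARGO_BUILD_TARGET = "aarch64-unknown-linux-musl";
+#
+# so cargo and cc-rs pick the correct toolchain for each build stage.)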
+#
+# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
+//
+(
+  let
+    inherit (rust.lib) envVars;
+  in
+  lib.optionalAttrs
+    (stdenv.targetPlatform.rust.rustcTarget
+      != stdenv.hostPlatform.rust.rustcTarget)
+    (
+      let
+        inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
+      in
+      {
+        "CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
+        "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
+        "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
+          envVars.linkerForTarget;
+      }
+    )
+  //
+  (
+    let
+      inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
+    in
+    {
+      "CC_${cargoEnvVarTarget}" = envVars.ccForHost;
+      "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
+      "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
+      CARGO_BUILD_TARGET = rustcTarget;
+    }
+  )
+  //
+  (
+    let
+      inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
+    in
+    {
+      "CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
+      "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
+      "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
+      HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
+      HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
+    }
+  )
+)
diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix
new file mode 100644
index 00000000..cbb9c409
--- /dev/null
+++ b/nix/pkgs/main/default.nix
@@ -0,0 +1,71 @@
+{ inputs
+
+# Dependencies
+, craneLib
+, lib
+, libiconv
+, pkgsBuildHost
+, rocksdb
+, rust
+, stdenv
+
+# Options
+, features ? []
+, profile ? "release"
+}:
+
+craneLib.buildPackage rec {
+  src = inputs.nix-filter {
+    root = inputs.self;
+    include = [
+      "src"
+      "Cargo.toml"
+      "Cargo.lock"
+    ];
+  };
+
+  # This is redundant with CI
+  doCheck = false;
+
+  env =
+    let
+      rocksdb' = rocksdb.override {
+        enableJemalloc = builtins.elem "jemalloc" features;
+      };
+    in
+    {
+      CARGO_PROFILE = profile;
+      CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev;
+      ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
+      ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
+    }
+    //
+    (import ./cross-compilation-env.nix {
+      inherit
+        lib
+        pkgsBuildHost
+        rust
+        stdenv;
+    });
+
+  nativeBuildInputs = [
+    # bindgen needs the build platform's libclang. Apparently due to "splicing
+    # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
+    # right thing here.
+    pkgsBuildHost.rustPlatform.bindgenHook
+  ]
+  ++ lib.optionals stdenv.isDarwin [ libiconv ];
+
+  cargoExtraArgs = ""
+    + lib.optionalString
+      (features != [])
+      "--features " + (builtins.concatStringsSep "," features);
+
+  meta.mainProgram = (craneLib.crateNameFromCargoToml {
+    cargoToml = "${inputs.self}/Cargo.toml";
+  }).pname;
+
+  passthru = {
+    inherit env;
+  };
+}
diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix
new file mode 100644
index 00000000..ed2ec19a
--- /dev/null
+++ b/nix/pkgs/oci-image/default.nix
@@ -0,0 +1,28 @@
+{ inputs
+
+# Dependencies
+, dockerTools
+, lib
+, main
+, stdenv
+, tini
+}:
+
+dockerTools.buildLayeredImage {
+  name = main.pname;
+  tag = "main";
+  created = "@${toString inputs.self.lastModified}";
+  contents = [
+    dockerTools.caCertificates
+  ];
+  config = {
+    Entrypoint = if !stdenv.isDarwin
+      # Use the `tini` init system so that signals (e.g.
ctrl+c/SIGINT)
+      # are handled as expected
+      then [ "${lib.getExe' tini "tini"}" "--" ]
+      else [];
+    Cmd = [
+      "${lib.getExe main}"
+    ];
+  };
+}
diff --git a/tests/complement/README.md b/tests/complement/README.md
deleted file mode 100644
index 9170424b..00000000
--- a/tests/complement/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Complement
-
-## What's that?
-
-Have a look at [its repository](https://github.com/matrix-org/complement).
-
-## How do I use it with conduwuit?
-
-For reproducible results, Complement support in conduwuit uses Nix to run and generate an image.
-
-After installing Nix, you can run either:
-
-- `nix run #.complement-runtime -- ./path/to/logs.jsonl ./path/to/results.jsonl` to build a Complement image, run the tests, and output the logs and results to the specified paths.
-
-- `nix run #.complement-image` to just build a Complement image
diff --git a/tests/complement/conduwuit-complement.toml b/tests/complement/conduwuit-complement.toml
deleted file mode 100644
index 9f5f0ffb..00000000
--- a/tests/complement/conduwuit-complement.toml
+++ /dev/null
@@ -1,592 +0,0 @@
-# =============================================================================
-# This is the official complement config for conduwuit.
-# DO NOT USE IT IN ACTUAL SERVERS
-# =============================================================================
-
-[global]
-
-# The server_name is the pretty name of this server. It is used as a suffix for user
-# and room ids. Examples: matrix.org, conduit.rs
-
-# The Conduit server needs all /_matrix/ requests to be reachable at
-# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
-
-# If that's not possible for you, you can create /.well-known files to redirect
-# requests (delegation). See
-# https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient
-# and
-# https://spec.matrix.org/v1.9/server-server-api/#getwell-knownmatrixserver
-# for more information
-
-# We set this via env var
-# server_name = "your.server.name"
-
-# Servers listed here will be used to gather public keys of other servers (notary trusted key servers).
-#
-# The default behaviour for conduwuit is to attempt to query trusted key servers before querying the individual servers.
-# This is done for performance reasons, but if you would like to query individual servers before the notary servers
-# configured below, set to
-#
-# (Currently, conduwuit doesn't support batched key requests, so this list should only contain Synapse servers)
-# Defaults to `matrix.org`
-trusted_servers = []
-
-# Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
-# Conduwuit's Sentry reporting endpoint is o4506996327251968.ingest.us.sentry.io
-#
-# Defaults to false
-#sentry = false
-
-# Report your Conduwuit server_name in Sentry.io crash reports and metrics
-#
-# Defaults to false
-#sentry_send_server_name = false
-
-# Performance monitoring/tracing sample rate for Sentry.io
-#
-# Note that too high values may impact performance, and can be disabled by setting it to 0.0
-#
-# Defaults to 0.15
-#sentry_traces_sample_rate = 0.15
-
-
-### Database configuration
-
-# This is the only directory where conduwuit will save its data, including media
-database_path = "/conduwuit/data"
-
-# Database backend: Only rocksdb and sqlite are supported. Please note that sqlite
-# will perform significantly worse than rocksdb as it is not intended to be used the
-# way it is by conduwuit. sqlite only exists for historical reasons.
-database_backend = "rocksdb" - - -### Network - -# The port(s) conduwuit will be running on. You need to set up a reverse proxy such as -# Caddy or Nginx so all requests to /_matrix on port 443 and 8448 will be -# forwarded to the conduwuit instance running on this port -# Docker users: Don't change this, you'll need to map an external port to this. -# To listen on multiple ports, specify a vector e.g. [8080, 8448] -port = [8008, 8448] - -# default address (IPv4 or IPv6) conduwuit will listen on. Generally you want this to be -# localhost (127.0.0.1 / ::1). If you are using Docker or a container NAT networking setup, you -# likely need this to be 0.0.0.0. -address = "0.0.0.0" - -# How many requests conduwuit sends to other servers at the same time concurrently. Default is 500 -# Note that because conduwuit is very fast unlike other homeserver implementations, setting this too -# high could inadvertently result in ratelimits kicking in, or overloading lower-end homeservers out there. -# -# A valid use-case for enabling this is if you have a significant amount of overall federation activity -# such as many rooms joined/tracked, and many servers in the true destination cache caused by that. Upon -# rebooting conduwuit, depending on how fast your resources are, client and incoming federation requests -# may timeout or be "stalled" for a period of time due to hitting the max concurrent requests limit from -# refreshing federation/destination caches and such. -# -# If you have a lot of active users on your homeserver, you will definitely need to raise this. -# -# No this will not speed up room joins. -max_concurrent_requests = 2000 - -# Max request size for file uploads -max_request_size = 100_000_000 # in bytes - -# Uncomment unix_socket_path to listen on a UNIX socket at the specified path. -# If listening on a UNIX socket, you must remove/comment the 'address' key if defined and add your -# reverse proxy to the 'conduwuit' group, unless world RW permissions are specified with unix_socket_perms (666 minimum). -#unix_socket_path = "/run/conduwuit/conduwuit.sock" -#unix_socket_perms = 660 - -# Set this to true for conduwuit to compress HTTP response bodies using zstd. -# This option does nothing if conduwuit was not built with `zstd_compression` feature. -# Please be aware that enabling HTTP compression may weaken TLS. -# Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. -zstd_compression = false - -# Set this to true for conduwuit to compress HTTP response bodies using gzip. -# This option does nothing if conduwuit was not built with `gzip_compression` feature. -# Please be aware that enabling HTTP compression may weaken TLS. -# Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. -gzip_compression = false - -# Set this to true for conduwuit to compress HTTP response bodies using brotli. -# This option does nothing if conduwuit was not built with `brotli_compression` feature. -# Please be aware that enabling HTTP compression may weaken TLS. -# Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. -brotli_compression = false - -# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you do not want conduwuit to send outbound requests to. 
-# Defaults to RFC1918, unroutable, loopback, multicast, and testnet addresses for security. -# -# To disable, set this to be an empty vector (`[]`). -# Please be aware that this is *not* a guarantee. You should be using a firewall with zones as doing this on the application layer may have bypasses. -# -# Currently this does not account for proxies in use like Synapse does. -ip_range_denylist = [] - - -### Moderation / Privacy / Security - -# Set to true to allow user type "guest" registrations. Element attempts to register guest users automatically. -# For private homeservers, this is best at false. -allow_guest_registration = true - -# Vector list of servers that conduwuit will refuse to download remote media from. -# No default. -# prevent_media_downloads_from = ["example.com", "example.local"] - -# Enable complement tests being able to register -allow_registration = true - -# DO NOT USE THIS IN ON REAL SERVERS -yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true - -# controls whether federation is allowed or not -# defaults to true -allow_federation = true - -# controls whether users are allowed to create rooms. -# appservices and admins are always allowed to create rooms -# defaults to true -allow_room_creation = true - -# controls whether non-admin local users are forbidden from sending room invites (local and remote), -# and if non-admin users can receive remote room invites. admins are always allowed to send and receive all room invites. -# defaults to false -block_non_admin_invites = false - -# List of forbidden username patterns/strings. Values in this list are matched as *contains*. -# This is checked upon username availability check, registration, and startup as warnings if any local users in your database -# have a forbidden username. -# No default. -# forbidden_usernames = [] - -# List of forbidden room aliases and room IDs as patterns/strings. Values in this list are matched as *contains*. -# This is checked upon room alias creation, custom room ID creation if used, and startup as warnings if any room aliases -# in your database have a forbidden room alias/ID. -# No default. -# forbidden_alias_names = [] - -# Set this to true to allow your server's public room directory to be federated. -# Set this to false to protect against /publicRooms spiders, but will forbid external users -# from viewing your server's public room directory. If federation is disabled entirely -# (`allow_federation`), this is inherently false. -allow_public_room_directory_over_federation = true - -# Set this to true to allow your server's public room directory to be queried without client -# authentication (access token) through the Client APIs. Set this to false to protect against /publicRooms spiders. -allow_public_room_directory_without_auth = true - -# Set this to true to lock down your server's public room directory and only allow admins to publish rooms to the room directory. -# Unpublishing is still allowed by all users with this enabled. -# -# Defaults to false -lockdown_public_room_directory = false - -# Set this to true to allow federating device display names / allow external users to see your device display name. -# If federation is disabled entirely (`allow_federation`), this is inherently false. For privacy, this is best disabled. -allow_device_name_federation = true - -# Vector list of domains allowed to send requests to for URL previews. Defaults to none. -# Note: this is a *contains* match, not an explicit match. 
Putting "google.com" will match "https://google.com" and "http://mymaliciousdomainexamplegoogle.com" -# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. -url_preview_domain_contains_allowlist = ["*"] - -# Vector list of explicit domains allowed to send requests to for URL previews. Defaults to none. -# Note: This is an *explicit* match, not a ccontains match. Putting "google.com" will match "https://google.com", "http://google.com", but not "https://mymaliciousdomainexamplegoogle.com" -# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. -url_preview_domain_explicit_allowlist = [] - -# Vector list of URLs allowed to send requests to for URL previews. Defaults to none. -# Note that this is a *contains* match, not an explicit match. Putting "google.com" will match "https://google.com/", "https://google.com/url?q=https://mymaliciousdomainexample.com", and "https://mymaliciousdomainexample.com/hi/google.com" -# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. -url_preview_url_contains_allowlist = [] - -# Maximum amount of bytes allowed in a URL preview body size when spidering. Defaults to 384KB (384_000 bytes) -url_preview_max_spider_size = 384_000 - -# Option to decide whether you would like to run the domain allowlist checks (contains and explicit) on the root domain or not. Does not apply to URL contains allowlist. Defaults to false. -# Example: If this is enabled and you have "wikipedia.org" allowed in the explicit and/or contains domain allowlist, it will allow all subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is checked and matched. -# Useful if the domain contains allowlist is still too broad for you but you still want to allow all the subdomains under a root domain. -url_preview_check_root_domain = false - -# A single contact and/or support page for /.well-known/matrix/support -# All options here are strings. Currently only supports 1 single contact. -# No default. -#well_known_support_page = "" -#well_known_support_role = "" -#well_known_support_email = "" -#well_known_support_mxid = "" - -# Config option to allow or disallow incoming federation requests that obtain the profiles -# of our local users from `/_matrix/federation/v1/query/profile` -# -# This is inherently false if `allow_federation` is disabled -# -# Defaults to true -allow_profile_lookup_federation_requests = true - - -### Misc - -# max log level for conduwuit. allows debug, info, warn, or error -# see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives -# For release builds, the maximum log level for conduwuit is info. For debug builds, it is "trace". -# Defaults to "warn" -log = "trace" - -# controls whether encrypted rooms and events are allowed (default true) -#allow_encryption = false - -# if enabled, conduwuit will send a simple GET request periodically to `https://pupbrain.dev/check-for-updates/stable` -# for any new announcements made. Despite the name, this is not an update check -# endpoint, it is simply an announcement check endpoint. -# Defaults to false. 
-allow_check_for_updates = false - -# If you are using delegation via well-known files and you cannot serve them from your reverse proxy, you can -# uncomment these to serve them directly from conduwuit. This requires proxying all requests to conduwuit, not just `/_matrix` to work. -# Note that whatever you put will show up in the well-known JSON values. - -# Set to false to disable users from joining or creating room versions that aren't 100% officially supported by conduwuit. -# conduwuit officially supports room versions 6 - 10. conduwuit has experimental/unstable support for 3 - 5, and 11. -# Defaults to true. -allow_unstable_room_versions = true - -# Option to control adding arbitrary text to the end of the user's displayname upon registration with a space before the text. -# This was the lightning bolt emoji option, just replaced with support for adding your own custom text or emojis. -# To disable, set this to "" (an empty string) -# Defaults to "🏳️‍⚧️" (trans pride flag) -#new_user_displayname_suffix = "" - -# Option to control whether conduwuit will query your list of trusted notary key servers (`trusted_servers`) for -# remote homeserver signing keys it doesn't know *first*, or query the individual servers first before falling back to the trusted -# key servers. -# -# The former/default behaviour makes federated/remote rooms joins generally faster because we're querying a single (or list of) server -# that we know works, is reasonably fast, and is reliable for just about all the homeserver signing keys in the room. Querying individual -# servers may take longer depending on the general infrastructure of everyone in there, how many dead servers there are, etc. -# -# However, this does create an increased reliance on one single or multiple large entities as `trusted_servers` should generally -# contain long-term and large servers who know a very large number of homeservers. -# -# If you don't know what any of this means, leave this and `trusted_servers` alone to their defaults. -# -# Defaults to true as this is the fastest option for federation. -query_trusted_key_servers_first = true - -# List/vector of room **IDs** that conduwuit will make newly registered users join. -# The room IDs specified must be rooms that you have joined at least once on the server, and must be public. -# -# No default. -#auto_join_rooms = [] - -# Retry failed and incomplete messages to remote servers immediately upon startup. This is called bursting. -# If this is disabled, said messages may not be delivered until more messages are queued for that server. -# Do not change this option unless server resources are extremely limited or the scale of the server's -# deployment is huge. Do not disable this unless you know what you are doing. -startup_netburst = true - -# Limit the startup netburst to the most recent (default: 50) messages queued for each remote server. All older -# messages are dropped and not reattempted. The `startup_netburst` option must be enabled for this value to have -# any effect. Do not change this value unless you know what you are doing. Set this value to -1 to reattempt -# every message without trimming the queues; this may consume significant disk. Set this value to 0 to drop all -# messages without any attempt at redelivery. -#startup_netburst_keep = 50 - - -### Generic database options - -# Set this to any float value to multiply conduwuit's in-memory LRU caches with. -# May be useful if you have significant memory to spare to increase performance. -# Defaults to 1.0. 
-#conduit_cache_capacity_modifier = 1.0 - -# Set this to any float value in megabytes for conduwuit to tell the database engine that this much memory is available for database-related caches. -# May be useful if you have significant memory to spare to increase performance. -# Defaults to 256.0 -#db_cache_capacity_mb = 256.0 - -# Interval in seconds when conduwuit will run database cleanup operations. -# -# For SQLite: this will flush the WAL by executing `PRAGMA wal_checkpoint(RESTART)` (https://www.sqlite.org/pragma.html#pragma_wal_checkpoint) -# For RocksDB: this will run `flush_opt` to flush database memtables to SST files on disk (https://docs.rs/rocksdb/latest/rocksdb/struct.DBCommon.html#method.flush_opt) -# These operations always run on shutdown. -# -# Defaults to 30 minutes (1800 seconds) to avoid IO amplification from too frequent cleanups -#cleanup_second_interval = 1800 - - -### RocksDB options - -# Set this to true to use RocksDB config options that are tailored to HDDs (slower device storage) -# -# It is worth noting that by default, conduwuit will use RocksDB with Direct IO enabled. *Generally* speaking this improves performance as it bypasses buffered I/O (system page cache). -# However there is a potential chance that Direct IO may cause issues with database operations if your setup is uncommon. This has been observed with FUSE filesystems, and possibly ZFS filesystem. -# RocksDB generally deals/corrects these issues but it cannot account for all setups. -# If you experience any weird RocksDB issues, try enabling this option as it turns off Direct IO and feel free to report in the conduwuit Matrix room if this option fixes your DB issues. -# See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information. -# -# Defaults to false -#rocksdb_optimize_for_spinning_disks = false - -# RocksDB log level. This is not the same as conduwuit's log level. This is the log level for the RocksDB engine/library -# which show up in your database folder/path as `LOG` files. Defaults to error. conduwuit will typically log RocksDB errors as normal. -#rocksdb_log_level = "error" - -# Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB. -#rocksdb_max_log_file_size = 4194304 - -# Time in seconds before RocksDB will forcibly rotate logs. Defaults to 0. -#rocksdb_log_time_to_roll = 0 - -# Amount of threads that RocksDB will use for parallelism on database operatons such as cleanup, sync, flush, compaction, etc. Set to 0 to use all your physical cores. -# -# Defaults to your CPU physical core count (not logical threads). -#rocksdb_parallelism_threads = 0 - -# Maximum number of LOG files RocksDB will keep. This must *not* be set to 0. It must be at least 1. -# Defaults to 3 as these are not very useful. -#rocksdb_max_log_files = 3 - -# Type of RocksDB database compression to use. -# Available options are "zstd", "zlib", "bz2" and "lz4" -# It is best to use ZSTD as an overall good balance between speed/performance, storage, IO amplification, and CPU usage. -# For more performance but less compression (more storage used) and less CPU usage, use LZ4. -# See https://github.com/facebook/rocksdb/wiki/Compression for more details. -# -# Defaults to "zstd" -#rocksdb_compression_algo = "zstd" - -# Level of compression the specified compression algorithm for RocksDB to use. -# Default is 32767, which is internally read by RocksDB as the default magic number and -# translated to the library's default compression level as they all differ. -# See their `kDefaultCompressionLevel`. 
-# -#rocksdb_compression_level = 32767 - -# Level of compression the specified compression algorithm for the bottommost level/data for RocksDB to use. -# Default is 32767, which is internally read by RocksDB as the default magic number and -# translated to the library's default compression level as they all differ. -# See their `kDefaultCompressionLevel`. -# -# Since this is the bottommost level (generally old and least used data), it may be desirable to have a very -# high compression level here as it's lesss likely for this data to be used. Research your chosen compression algorithm. -# -#rocksdb_bottommost_compression_level = 32767 - -# Whether to enable RocksDB "bottommost_compression". -# At the expense of more CPU usage, this will further compress the database to reduce more storage. -# It is recommended to use ZSTD compression with this for best compression results. -# See https://github.com/facebook/rocksdb/wiki/Compression for more details. -# -# Defaults to false as this uses more CPU when compressing. -#rocksdb_bottommost_compression = false - -# Database recovery mode (for RocksDB WAL corruption) -# -# Use this option when the server reports corruption and refuses to start. Set mode 2 (PointInTime) -# to cleanly recover from this corruption. The server will continue from the last good state, -# several seconds or minutes prior to the crash. Clients may have to run "clear-cache & reload" to -# account for the rollback. Upon success, you may reset the mode back to default and restart again. -# Please note in some cases the corruption error may not be cleared for at least 30 minutes of -# operation in PointInTime mode. -# -# As a very last ditch effort, if PointInTime does not fix or resolve anything, you can try mode -# 3 (SkipAnyCorruptedRecord) but this will leave the server in a potentially inconsistent state. -# -# The default mode 1 (TolerateCorruptedTailRecords) will automatically drop the last entry in the -# database if corrupted during shutdown, but nothing more. It is extraordinarily unlikely this will -# desynchronize clients. To disable any form of silent rollback set mode 0 (AbsoluteConsistency). -# -# The options are: -# 0 = AbsoluteConsistency -# 1 = TolerateCorruptedTailRecords (default) -# 2 = PointInTime (use me if trying to recover) -# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) -# -# See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information -# -# Defaults to 1 (TolerateCorruptedTailRecords) -#rocksdb_recovery_mode = 1 - -# Controls whether memory buffers are written to storage at the fixed interval set by `cleanup_period_interval` -# even when they are not full. Setting this will increase load on the storage backplane and is never advised -# under normal circumstances. -#rocksdb_periodic_cleanup = false - - -### Domain Name Resolution and Caching - -# Maximum entries stored in DNS memory-cache. The size of an entry may vary so please take care if -# raising this value excessively. Only decrease this when using an external DNS cache. Please note -# that systemd does *not* count as an external cache, even when configured to do so. -#dns_cache_entries = 12288 - -# Minimum time-to-live in seconds for entries in the DNS cache. The default may appear high to most -# administrators; this is by design. Only decrease this if you are using an external DNS cache. -#dns_min_ttl = 60 * 90 - -# Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. 
This value is critical for -# the server to federate efficiently. NXDOMAIN's are assumed to not be returning to the federation -# and aggressively cached rather than constantly rechecked. -#dns_min_ttl_nxdomain = 60 * 60 * 24 * 3 - -# The number of seconds to wait for a reply to a DNS query. Please note that recursive queries can -# take up to several seconds for some domains, so this value should not be too low. -#dns_timeout = 5 - -# Number of retries after a timeout. -#dns_attempts = 5 - -# Enable to query all nameservers until the domain is found. Referred to as "trust_negative_responses" in hickory_resolver. -# This can avoid useless DNS queries if the first nameserver responds with NXDOMAIN or an empty NOERROR response. -# -# The default is to query one nameserver and stop (false). -#query_all_nameservers = false - - -### Request Timeouts, Connection Timeouts, and Connection Pooling - -## Request Timeouts are HTTP response timeouts -## Connection Timeouts are TCP connection timeouts -## -## Connection Pooling Timeouts are timeouts for keeping an open idle connection alive. -## Connection pooling and keepalive is very useful for federation or other places where for performance reasons, -## we want to keep connections open that we will re-use frequently due to TCP and TLS 1.3 overhead/expensiveness. -## -## Generally these defaults are the best, but if you find a reason to need to change these they are here. - -# Default/base connection timeout -# This is used only by URL previews and update/news endpoint checks -# -# Defaults to 10 seconds -#request_conn_timeout = 10 - -# Default/base request timeout -# This is used only by URL previews and update/news endpoint checks -# -# Defaults to 35 seconds -#request_timeout = 35 - -# Default/base max idle connections per host -# This is used only by URL previews and update/news endpoint checks -# -# Defaults to 1 as generally the same open connection can be re-used -#request_idle_per_host = 1 - -# Default/base idle connection pool timeout -# This is used only by URL previews and update/news endpoint checks -# -# Defaults to 5 seconds -#request_idle_timeout = 5 - -# Federation well-known resolution connection timeout -# -# Defaults to 6 seconds -#well_known_conn_timeout = 6 - -# Federation HTTP well-known resolution request timeout -# -# Defaults to 10 seconds -#well_known_timeout = 10 - -# Federation client/server request timeout -# You most definitely want this to be high to account for extremely large room joins, slow homeservers, your own resources etc. -# -# Defaults to 300 seconds -#federation_timeout = 300 - -# Federation client/sender max idle connections per host -# -# Defaults to 1 as generally the same open connection can be re-used -#federation_idle_per_host = 1 - -# Federation client/sender idle connection pool timeout -# -# Defaults to 25 seconds -#federation_idle_timeout = 25 - -# Appservice URL request connection timeout -# -# Defaults to 120 seconds -#appservice_timeout = 120 - -# Appservice URL idle connection pool timeout -# -# Defaults to 300 seconds -#appservice_idle_timeout = 300 - -# Notification gateway pusher idle connection pool timeout -# -# Defaults to 15 seconds -#pusher_idle_timeout = 15 - - -### Presence / Typing Indicators / Read Receipts - -# Config option to control local (your server only) presence updates/requests. Defaults to true. -# Note that presence on conduwuit is very fast unlike Synapse's. -# If using outgoing presence, this MUST be enabled. 
-# -allow_local_presence = true - -# Config option to control incoming federated presence updates/requests. Defaults to true. -# This option receives presence updates from other servers, but does not send any unless `allow_outgoing_presence` is true. -# Note that presence on conduwuit is very fast unlike Synapse's. -# -allow_incoming_presence = true - -# Config option to control outgoing presence updates/requests. Defaults to true. -# This option sends presence updates to other servers, but does not receive any unless `allow_incoming_presence` is true. -# Note that presence on conduwuit is very fast unlike Synapse's. -# If using outgoing presence, you MUST enable `allow_local_presence` as well. -# -allow_outgoing_presence = true - -# Config option to control how many seconds before presence updates that you are idle. Defaults to 5 minutes. -#presence_idle_timeout_s = 300 - -# Config option to control how many seconds before presence updates that you are offline. Defaults to 30 minutes. -#presence_offline_timeout_s = 1800 - -# Config option to control whether we should receive remote incoming read receipts. -# Defaults to true. -allow_incoming_read_receipts = true - -# Config option to control outgoing typing updates to federation. Defaults to true. -allow_outgoing_typing = true - -# Config option to control incoming typing updates from federation. Defaults to true. -allow_incoming_typing = true - -# Config option to control maximum time federation user can indicate typing. -#typing_federation_timeout_s = 30 - -# Config option to control minimum time local client can indicate typing. This does not override -# a client's request to stop typing. It only enforces a minimum value in case of no stop request. -#typing_client_timeout_min_s = 15 - -# Config option to control maximum time local client can indicate typing. -#typing_client_timeout_max_s = 45 - - -# Other options not in [global]: -# -# -# Enables running conduwuit with direct TLS support -# It is strongly recommended you use a reverse proxy instead. This is primarily relevant for test suites like complement that require a private CA setup. -[global.tls] -certs = "/conduwuit/certificate.crt" -key = "/conduwuit/private_key.key" - -# -# Whether to listen and allow for HTTP and HTTPS connections (insecure!) 
-# This config option is only available if conduwuit was built with `axum_dual_protocol` feature (not default feature) -# Defaults to false -dual_protocol = true diff --git a/tests/complement/failed_tests.jsonl b/tests/complement/failed_tests.jsonl deleted file mode 100644 index 5c9ce5cb..00000000 --- a/tests/complement/failed_tests.jsonl +++ /dev/null @@ -1,596 +0,0 @@ -{ - "Action": "fail", - "Test": "TestBannedUserCannotSendJoin" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/join_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV1" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV1/leave_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV1/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV2" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV2/leave_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV2/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/invite_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/join_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/leave_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/non-state_membership_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/invite_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/join_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/knock_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/non-state_membership_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/event_with_mismatched_state_key" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/invite_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/join_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/knock_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/regular_event" -} -{ - "Action": "fail", - "Test": 
"TestClientSpacesSummary" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummaryJoinRules" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/max_depth" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/pagination" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/query_whole_graph" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/redact_link" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/suggested_only" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederation" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederation/good_connectivity" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederation/interrupted_connectivity" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederationOnRoomJoin" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederation/stopped_server" -} -{ - "Action": "fail", - "Test": "TestEventAuth" -} -{ - "Action": "fail", - "Test": "TestFederationKeyUploadQuery" -} -{ - "Action": "fail", - "Test": "TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST" -} -{ - "Action": "fail", - "Test": "TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST" -} -{ - "Action": "fail", - "Test": "TestFederationRejectInvite" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata" -} -{ - "Action": "fail", - "Test": "TestGetMissingEventsGapFilling" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_shared_visibility" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_world_readable_visibility" -} -{ - "Action": "fail", - "Test": "TestInboundFederationRejectsEventsWithRejectedAuthEvents" -} -{ - "Action": "fail", - "Test": "TestJoinFederatedRoomFromApplicationServiceBridgeUser" -} -{ - "Action": "fail", - "Test": "TestJumpToDateEndpoint" -} -{ - "Action": "fail", - "Test": "TestKnocking" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to" -} -{ - "Action": "fail", - "Test": 
"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01" -} -{ - "Action": "fail", - "Test": 
"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail" -} -{ - "Action": "fail", - "Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed" -} -{ - "Action": "fail", - "Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock" -} -{ - "Action": "fail", - "Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnockRoomsInPublicRoomsDirectory" -} -{ - "Action": "fail", - "Test": "TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestLocalPngThumbnail" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/ASCII" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/Unicode" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation" -} -{ - "Action": "fail", - "Test": "TestNetworkPartitionOrdering" -} -{ - "Action": "fail", - "Test": "TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6" -} -{ - "Action": "fail", - "Test": "TestRemotePresence" -} -{ - "Action": "fail", - "Test": "TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members" -} -{ - "Action": "fail", - "Test": "TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsLocalJoin" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoin" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinFailOver" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinLocalUser" -} -{ - "Action": "fail", - "Test": 
"TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsSpacesSummaryFederation" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsSpacesSummaryLocal" -} -{ - "Action": "fail", - "Test": "TestToDeviceMessagesOverFederation" -} -{ - "Action": "fail", - "Test": "TestToDeviceMessagesOverFederation/interrupted_connectivity" -} -{ - "Action": "fail", - "Test": "TestToDeviceMessagesOverFederation/stopped_server" -} -{ - "Action": "fail", - "Test": "TestUnbanViaInvite" -} -{ - "Action": "fail", - "Test": "TestUnknownEndpoints" -} -{ - "Action": "fail", - "Test": "TestUnknownEndpoints/Key_endpoints" -} -{ - "Action": "fail", - "Test": "TestUnrejectRejectedEvents" -} diff --git a/tests/complement/full_results.jsonl b/tests/complement/full_results.jsonl deleted file mode 100644 index 0b0b9521..00000000 --- a/tests/complement/full_results.jsonl +++ /dev/null @@ -1,896 +0,0 @@ -{ - "Action": "fail", - "Test": "TestBannedUserCannotSendJoin" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/join_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV1" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV1/leave_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV1/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV2" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV2/leave_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonJoinViaSendJoinV2/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/invite_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/join_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/leave_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/non-state_membership_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonKnockViaSendKnock/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/invite_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/join_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/knock_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/non-state_membership_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV1/regular_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2" -} -{ - "Action": "fail", - "Test": 
"TestCannotSendNonLeaveViaSendLeaveV2/event_with_mismatched_state_key" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/invite_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/join_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/knock_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event" -} -{ - "Action": "fail", - "Test": "TestCannotSendNonLeaveViaSendLeaveV2/regular_event" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummaryJoinRules" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/max_depth" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/pagination" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/query_whole_graph" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/redact_link" -} -{ - "Action": "fail", - "Test": "TestClientSpacesSummary/suggested_only" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederation" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederation/good_connectivity" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederation/interrupted_connectivity" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederationOnRoomJoin" -} -{ - "Action": "fail", - "Test": "TestDeviceListsUpdateOverFederation/stopped_server" -} -{ - "Action": "fail", - "Test": "TestEventAuth" -} -{ - "Action": "fail", - "Test": "TestFederationKeyUploadQuery" -} -{ - "Action": "fail", - "Test": "TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST" -} -{ - "Action": "fail", - "Test": "TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST" -} -{ - "Action": "fail", - "Test": "TestFederationRejectInvite" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining" -} -{ - "Action": "fail", - "Test": "TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata" -} -{ - "Action": "fail", - "Test": "TestGetMissingEventsGapFilling" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_shared_visibility" -} -{ - "Action": "fail", - "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_world_readable_visibility" -} -{ - "Action": "fail", - "Test": "TestInboundFederationRejectsEventsWithRejectedAuthEvents" -} -{ - "Action": "fail", - "Test": "TestJoinFederatedRoomFromApplicationServiceBridgeUser" -} -{ - "Action": "fail", - "Test": "TestJumpToDateEndpoint" -} -{ - "Action": "fail", - "Test": "TestKnocking" -} -{ - "Action": "fail", - "Test": 
"TestKnocking/A_user_can_knock_on_a_room_without_a_reason" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it" -} -{ - "Action": "fail", - "Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01" -} -{ - "Action": "fail", - "Test": 
"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock" -} -{ - "Action": "fail", - "Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail" -} -{ - "Action": "fail", - "Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed" -} -{ - "Action": "fail", - "Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01" -} -{ - "Action": "fail", - "Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock" -} -{ - "Action": "fail", - "Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01" -} -{ - "Action": "fail", - "Test": "TestKnockRoomsInPublicRoomsDirectory" -} -{ - "Action": "fail", - "Test": "TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestLocalPngThumbnail" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/ASCII" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/Unicode" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally" -} -{ - "Action": "fail", - "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation" -} -{ - "Action": "fail", - "Test": "TestNetworkPartitionOrdering" -} -{ - "Action": "fail", - "Test": "TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6" -} -{ - "Action": "fail", - "Test": "TestRemotePresence" -} -{ - "Action": "fail", - "Test": "TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members" -} -{ - "Action": "fail", - "Test": "TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsLocalJoin" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoin" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinFailOver" -} -{ - "Action": "fail", - "Test": 
"TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinLocalUser" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsSpacesSummaryFederation" -} -{ - "Action": "fail", - "Test": "TestRestrictedRoomsSpacesSummaryLocal" -} -{ - "Action": "fail", - "Test": "TestToDeviceMessagesOverFederation" -} -{ - "Action": "fail", - "Test": "TestToDeviceMessagesOverFederation/interrupted_connectivity" -} -{ - "Action": "fail", - "Test": "TestToDeviceMessagesOverFederation/stopped_server" -} -{ - "Action": "fail", - "Test": "TestUnbanViaInvite" -} -{ - "Action": "fail", - "Test": "TestUnknownEndpoints" -} -{ - "Action": "fail", - "Test": "TestUnknownEndpoints/Key_endpoints" -} -{ - "Action": "fail", - "Test": "TestUnrejectRejectedEvents" -} -{ - "Action": "pass", - "Test": "TestACLs" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV1/invite_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV1/knock_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV1/non-state_membership_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV2/event_with_mismatched_state_key" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV2/invite_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV2/knock_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event" -} -{ - "Action": "pass", - "Test": "TestFederatedClientSpaces" -} -{ - "Action": "pass", - "Test": "TestFederationRedactSendsWithoutEvent" -} -{ - "Action": "pass", - "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room" -} -{ - "Action": "pass", - "Test": "TestInboundFederationKeys" -} -{ - "Action": "pass", - "Test": "TestInboundFederationProfile" -} -{ - "Action": "pass", - "Test": "TestInboundFederationProfile/Inbound_federation_can_query_profile_data" -} -{ - "Action": "pass", - "Test": "TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected" -} -{ - "Action": "pass", - "Test": "TestIsDirectFlagFederation" -} -{ - "Action": "pass", - "Test": "TestIsDirectFlagLocal" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomFailOver" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join" -} -{ - "Action": 
"pass", - "Test": "TestJoinViaRoomIDAndServerName" -} -{ - "Action": "pass", - "Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail" -} -{ - "Action": "pass", - "Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01" -} -{ - "Action": "pass", - "Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock" -} -{ - "Action": "pass", - "Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock#01" -} -{ - "Action": "pass", - "Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'" -} -{ - "Action": "pass", - "Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01" -} -{ - "Action": "pass", - "Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'" -} -{ - "Action": "pass", - "Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01" -} -{ - "Action": "pass", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'" -} -{ - "Action": "pass", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name" -} -{ - "Action": "pass", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_upload_with_ASCII_file_name" -} -{ - "Action": "pass", - "Test": "TestMediaFilenames/Parallel/Unicode/Can_upload_with_Unicode_file_name" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName/parallel" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_locally" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_over_federation" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName/parallel/Can_upload_without_a_file_name" -} -{ - "Action": "pass", - "Test": "TestOutboundFederationProfile" -} -{ - "Action": "pass", - "Test": "TestOutboundFederationProfile/Outbound_federation_can_query_profile_data" -} -{ - "Action": "pass", - "Test": "TestOutboundFederationSend" -} -{ - "Action": "pass", - "Test": "TestRemoteAliasRequestsUnderstandUnicode" -} -{ - "Action": "pass", - "Test": "TestRemotePngThumbnail" -} -{ - "Action": "pass", - "Test": "TestRemoteTyping" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_initially" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules" -} -{ - "Action": "pass", - 
"Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_initially" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited" -} -{ - "Action": "pass", - "Test": "TestToDeviceMessagesOverFederation/good_connectivity" -} -{ - "Action": "pass", - "Test": "TestUnknownEndpoints/Client-server_endpoints" -} -{ - "Action": "pass", - "Test": "TestUnknownEndpoints/Media_endpoints" -} -{ - "Action": "pass", - "Test": "TestUnknownEndpoints/Server-server_endpoints" -} -{ - "Action": "pass", - "Test": "TestUnknownEndpoints/Unknown_prefix" -} -{ - "Action": "pass", - "Test": "TestUserAppearsInChangedDeviceListOnJoinOverFederation" -} -{ - "Action": "pass", - "Test": "TestWriteMDirectAccountData" -} -{ - "Action": "skip", - "Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_as_inline" -} -{ - "Action": "skip", - "Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_with_parameters_as_inline" -} -{ - "Action": "skip", - "Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_unsafe_media_types_as_attachments" -} -{ - "Action": "skip", - "Test": "TestSendJoinPartialStateResponse" -} diff --git a/tests/complement/passed_tests.jsonl b/tests/complement/passed_tests.jsonl deleted file mode 100644 index 98022b31..00000000 --- a/tests/complement/passed_tests.jsonl +++ /dev/null @@ -1,284 +0,0 @@ -{ - "Action": "pass", - "Test": "TestACLs" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV1/invite_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV1/knock_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV1/non-state_membership_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV2/event_with_mismatched_state_key" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV2/invite_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV2/knock_event" -} -{ - "Action": "pass", - "Test": "TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event" -} -{ - "Action": "pass", - "Test": "TestFederatedClientSpaces" -} -{ - "Action": "pass", - "Test": "TestFederationRedactSendsWithoutEvent" -} -{ - "Action": "pass", - "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room" -} -{ - "Action": "pass", - "Test": "TestInboundFederationKeys" -} -{ - "Action": "pass", - "Test": "TestInboundFederationProfile" -} -{ - "Action": "pass", - "Test": "TestInboundFederationProfile/Inbound_federation_can_query_profile_data" -} -{ - "Action": "pass", - "Test": "TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected" -} -{ - "Action": "pass", - "Test": "TestIsDirectFlagFederation" -} -{ - "Action": "pass", - "Test": "TestIsDirectFlagLocal" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomFailOver" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join" 
-} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join" -} -{ - "Action": "pass", - "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join" -} -{ - "Action": "pass", - "Test": "TestJoinViaRoomIDAndServerName" -} -{ - "Action": "pass", - "Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail" -} -{ - "Action": "pass", - "Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01" -} -{ - "Action": "pass", - "Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock" -} -{ - "Action": "pass", - "Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock#01" -} -{ - "Action": "pass", - "Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'" -} -{ - "Action": "pass", - "Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01" -} -{ - "Action": "pass", - "Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'" -} -{ - "Action": "pass", - "Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01" -} -{ - "Action": "pass", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'" -} -{ - "Action": "pass", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name" -} -{ - "Action": "pass", - "Test": "TestMediaFilenames/Parallel/ASCII/Can_upload_with_ASCII_file_name" -} -{ - "Action": "pass", - "Test": "TestMediaFilenames/Parallel/Unicode/Can_upload_with_Unicode_file_name" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName/parallel" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_locally" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_over_federation" -} -{ - "Action": "pass", - "Test": "TestMediaWithoutFileName/parallel/Can_upload_without_a_file_name" -} -{ - "Action": "pass", - "Test": "TestOutboundFederationProfile" -} -{ - "Action": "pass", - "Test": "TestOutboundFederationProfile/Outbound_federation_can_query_profile_data" -} -{ - "Action": "pass", - "Test": "TestOutboundFederationSend" -} -{ - "Action": "pass", - "Test": "TestRemoteAliasRequestsUnderstandUnicode" -} -{ - "Action": "pass", - "Test": "TestRemotePngThumbnail" -} -{ - "Action": "pass", - "Test": "TestRemoteTyping" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_initially" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room" -} -{ - "Action": "pass", - "Test": 
"TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_initially" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules" -} -{ - "Action": "pass", - "Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited" -} -{ - "Action": "pass", - "Test": "TestToDeviceMessagesOverFederation/good_connectivity" -} -{ - "Action": "pass", - "Test": "TestUnknownEndpoints/Client-server_endpoints" -} -{ - "Action": "pass", - "Test": "TestUnknownEndpoints/Media_endpoints" -} -{ - "Action": "pass", - "Test": "TestUnknownEndpoints/Server-server_endpoints" -} -{ - "Action": "pass", - "Test": "TestUnknownEndpoints/Unknown_prefix" -} -{ - "Action": "pass", - "Test": "TestUserAppearsInChangedDeviceListOnJoinOverFederation" -} -{ - "Action": "pass", - "Test": "TestWriteMDirectAccountData" -} From 60756b16d5485aff0d6d37c949f191e685898a64 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Sun, 21 Apr 2024 22:43:53 -0700 Subject: [PATCH 43/45] add cargo-audit to the devshell Apparently github actions VMs ship with it and that's how it was working before? Cursed. We should control our own supply chain and also ensure that local development uses the same version as CI. 
---
 flake.nix | 1 +
 1 file changed, 1 insertion(+)

diff --git a/flake.nix b/flake.nix
index e512236c..9dadcff3 100644
--- a/flake.nix
+++ b/flake.nix
@@ -161,6 +161,7 @@
         ]
         ++ (with pkgsHost; [
           engage
+          cargo-audit
 
           # Needed for producing Debian packages
           cargo-deb

From f59e47065c7e014ed01a5c8da01f31fb9e9c6702 Mon Sep 17 00:00:00 2001
From: Charles Hall
Date: Sun, 21 Apr 2024 22:44:45 -0700
Subject: [PATCH 44/45] update flake.lock
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Flake lock file updates:

• Updated input 'attic':
    'github:zhaofengli/attic/6eabc3f02fae3683bffab483e614bebfcd476b21?narHash=sha256-wSZjK%2BrOXn%2BUQiP1NbdNn5/UW6UcBxjvlqr2wh%2B%2BMbM%3D' (2024-02-14)
  → 'github:zhaofengli/attic/4dbdbee45728d8ce5788db6461aaaa89d98081f0?narHash=sha256-0O4v6e4a1toxXZ2gf5INhg4WPE5C5T%2BSVvsBt%2B45Mcc%3D' (2024-03-29)
• Updated input 'attic/nixpkgs':
    'github:NixOS/nixpkgs/aa9d4729cbc99dabacb50e3994dcefb3ea0f7447?narHash=sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U%3D' (2023-12-14)
  → 'github:NixOS/nixpkgs/07262b18b97000d16a4bdb003418bd2fb067a932?narHash=sha256-QoQqXoj8ClGo0sqD/qWKFWezgEwUL0SUh37/vY2jNhc%3D' (2024-03-25)
• Updated input 'attic/nixpkgs-stable':
    'github:NixOS/nixpkgs/1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f?narHash=sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA%3D' (2023-12-17)
  → 'github:NixOS/nixpkgs/44733514b72e732bd49f5511bd0203dea9b9a434?narHash=sha256-akSgjDZL6pVHEfSE6sz1DNSXuYX6hq%2BP/1Z5IoYWs7E%3D' (2024-03-26)
• Updated input 'crane':
    'github:ipetkov/crane/55f4939ac59ff8f89c6a4029730a2d49ea09105f?narHash=sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU%3D' (2024-04-21)
  → 'github:ipetkov/crane/f6c6a2fb1b8bd9b65d65ca9342dd0eb180a63f11?narHash=sha256-qd/MuLm7OfKQKyd4FAMqV4H6zYyOfef5lLzRrmXwKJM%3D' (2024-04-21)
• Updated input 'fenix':
    'github:nix-community/fenix/aa45c3e901ea42d6633af083c0c555efaf948b17?narHash=sha256-nTaO7ZDL4D02dVC5ktqnXNiNuODBUHyE4qEcFjAUCQY%3D' (2024-03-28)
  → 'github:nix-community/fenix/19aaa94a73cc670a4d87e84f0909966cd8f8cd79?narHash=sha256-3pbv7UgAgetwz9YdjzIT/lZ6Rgj6wj6MR4mphBLyDjU%3D' (2024-04-21)
• Updated input 'fenix/rust-analyzer-src':
    'github:rust-lang/rust-analyzer/ad51a17c627b4ca57f83f0dc1f3bb5f3f17e6d0b?narHash=sha256-s/YOyBM0vumhkqCFi8CnV5imFlC5JJrGia8CmEXyQkM%3D' (2024-03-27)
  → 'github:rust-lang/rust-analyzer/55d9a533b309119c8acd13061581b43ae8840823?narHash=sha256-iN5QUlUq527lswmBC%2BRopfXdu6Xx7mmTaBSH2l59FtM%3D' (2024-04-20)
• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/2726f127c15a4cc9810843b96cad73c7eb39e443?narHash=sha256-UKcYiHWHQynzj6CN/vTcix4yd1eCu1uFdsuarupdCQQ%3D' (2024-03-27)
  → 'github:NixOS/nixpkgs/5c24cf2f0a12ad855f444c30b2421d044120c66f?narHash=sha256-XtTSSIB2DA6tOv%2Bl0FhvfDMiyCmhoRbNB%2B0SeInZkbk%3D' (2024-04-19)
---
 flake.lock | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/flake.lock b/flake.lock
index 98ef9259..e0289e53 100644
--- a/flake.lock
+++ b/flake.lock
@@ -9,11 +9,11 @@
         "nixpkgs-stable": "nixpkgs-stable"
       },
       "locked": {
-        "lastModified": 1707922053,
-        "narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
+        "lastModified": 1711742460,
+        "narHash": "sha256-0O4v6e4a1toxXZ2gf5INhg4WPE5C5T+SVvsBt+45Mcc=",
         "owner": "zhaofengli",
         "repo": "attic",
-        "rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
+        "rev": "4dbdbee45728d8ce5788db6461aaaa89d98081f0",
         "type": "github"
       },
       "original": {
@@ -67,11 +67,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1713721181,
-        "narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=",
+        "lastModified": 1713738183,
+        "narHash": "sha256-qd/MuLm7OfKQKyd4FAMqV4H6zYyOfef5lLzRrmXwKJM=",
         "owner": "ipetkov",
         "repo": "crane",
-        "rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f",
+        "rev": "f6c6a2fb1b8bd9b65d65ca9342dd0eb180a63f11",
         "type": "github"
       },
       "original": {
@@ -89,11 +89,11 @@
         "rust-analyzer-src": "rust-analyzer-src"
       },
       "locked": {
-        "lastModified": 1711606966,
-        "narHash": "sha256-nTaO7ZDL4D02dVC5ktqnXNiNuODBUHyE4qEcFjAUCQY=",
+        "lastModified": 1713680591,
+        "narHash": "sha256-3pbv7UgAgetwz9YdjzIT/lZ6Rgj6wj6MR4mphBLyDjU=",
         "owner": "nix-community",
         "repo": "fenix",
-        "rev": "aa45c3e901ea42d6633af083c0c555efaf948b17",
+        "rev": "19aaa94a73cc670a4d87e84f0909966cd8f8cd79",
         "type": "github"
       },
       "original": {
@@ -184,11 +184,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1702539185,
-        "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
+        "lastModified": 1711401922,
+        "narHash": "sha256-QoQqXoj8ClGo0sqD/qWKFWezgEwUL0SUh37/vY2jNhc=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
+        "rev": "07262b18b97000d16a4bdb003418bd2fb067a932",
         "type": "github"
       },
       "original": {
@@ -200,11 +200,11 @@
     },
     "nixpkgs-stable": {
       "locked": {
-        "lastModified": 1702780907,
-        "narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
+        "lastModified": 1711460390,
+        "narHash": "sha256-akSgjDZL6pVHEfSE6sz1DNSXuYX6hq+P/1Z5IoYWs7E=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
+        "rev": "44733514b72e732bd49f5511bd0203dea9b9a434",
         "type": "github"
       },
       "original": {
@@ -216,11 +216,11 @@
     },
     "nixpkgs_2": {
      "locked": {
-        "lastModified": 1711523803,
-        "narHash": "sha256-UKcYiHWHQynzj6CN/vTcix4yd1eCu1uFdsuarupdCQQ=",
+        "lastModified": 1713537308,
+        "narHash": "sha256-XtTSSIB2DA6tOv+l0FhvfDMiyCmhoRbNB+0SeInZkbk=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "2726f127c15a4cc9810843b96cad73c7eb39e443",
+        "rev": "5c24cf2f0a12ad855f444c30b2421d044120c66f",
         "type": "github"
       },
       "original": {
@@ -263,11 +263,11 @@
     "rust-analyzer-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1711562745,
-        "narHash": "sha256-s/YOyBM0vumhkqCFi8CnV5imFlC5JJrGia8CmEXyQkM=",
+        "lastModified": 1713628977,
+        "narHash": "sha256-iN5QUlUq527lswmBC+RopfXdu6Xx7mmTaBSH2l59FtM=",
         "owner": "rust-lang",
         "repo": "rust-analyzer",
-        "rev": "ad51a17c627b4ca57f83f0dc1f3bb5f3f17e6d0b",
+        "rev": "55d9a533b309119c8acd13061581b43ae8840823",
         "type": "github"
       },
       "original": {

From af6ed05b6a5920806bdc3ae342342a7ab08901a1 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Mon, 22 Apr 2024 02:54:06 -0400
Subject: [PATCH 45/45] don't allow admin room to be made world readable

Signed-off-by: strawberry
---
 src/api/client_server/state.rs | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs
index f9e910af..1476b892 100644
--- a/src/api/client_server/state.rs
+++ b/src/api/client_server/state.rs
@@ -8,6 +8,7 @@ use ruma::{
 	events::{
 		room::{
 			canonical_alias::RoomCanonicalAliasEventContent,
+			history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
 			join_rules::{JoinRule, RoomJoinRulesEventContent},
 		},
 		AnyStateEventContent, StateEventType,
@@ -251,6 +252,23 @@ async fn send_state_event_for_key_helper(
 			}
 		}
 	},
+	// admin room is a sensitive room, it should not ever be made world readable
+	StateEventType::RoomHistoryVisibility => {
+		if let Some(admin_room_id) = service::admin::Service::get_admin_room()? {
+			if admin_room_id == room_id {
+				if let Ok(visibility_content) =
+					serde_json::from_str::<RoomHistoryVisibilityEventContent>(json.json().get())
+				{
+					if visibility_content.history_visibility == HistoryVisibility::WorldReadable {
+						return Err(Error::BadRequest(
+							ErrorKind::forbidden(),
+							"Admin room is not allowed to be made world readable (public room history).",
+						));
+					}
+				}
+			}
+		}
+	},
 	// TODO: allow alias if it previously existed
 	StateEventType::RoomCanonicalAlias => {
 		if let Ok(canonical_alias) = serde_json::from_str::<RoomCanonicalAliasEventContent>(json.json().get()) {
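// ---------------------------------------------------------------------------
// A minimal, standalone sketch of the guard above, added here as reviewer
// reference. The enum and struct below are simplified stand-ins for ruma's
// `HistoryVisibility` and `RoomHistoryVisibilityEventContent`, the error type
// is reduced to a plain `String`, and the room IDs in `main` are made up for
// illustration; only the control flow mirrors the actual hunk. Assumes the
// `serde` (with the derive feature) and `serde_json` crates.
// ---------------------------------------------------------------------------
use serde::Deserialize;

#[derive(Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
enum HistoryVisibility {
	Invited,
	Joined,
	Shared,
	WorldReadable,
}

#[derive(Deserialize)]
struct RoomHistoryVisibilityEventContent {
	history_visibility: HistoryVisibility,
}

/// Rejects an `m.room.history_visibility` state event that would make the
/// admin room's history world readable. Content that fails to parse simply
/// skips this guard, mirroring the `if let Ok` in the hunk.
fn deny_world_readable_admin_room(
	admin_room_id: Option<&str>,
	room_id: &str,
	raw_event_content: &str,
) -> Result<(), String> {
	if admin_room_id == Some(room_id) {
		if let Ok(content) =
			serde_json::from_str::<RoomHistoryVisibilityEventContent>(raw_event_content)
		{
			if content.history_visibility == HistoryVisibility::WorldReadable {
				return Err(
					"Admin room is not allowed to be made world readable (public room history)."
						.to_owned(),
				);
			}
		}
	}
	Ok(())
}

fn main() {
	// A world_readable payload against the (hypothetical) admin room is refused...
	assert!(deny_world_readable_admin_room(
		Some("!admins:example.com"),
		"!admins:example.com",
		r#"{"history_visibility":"world_readable"}"#,
	)
	.is_err());
	// ...while the same payload against any other room passes this guard.
	assert!(deny_world_readable_admin_room(
		Some("!admins:example.com"),
		"!general:example.com",
		r#"{"history_visibility":"world_readable"}"#,
	)
	.is_ok());
}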