From f4cfc77a577e0dfd152878830294c8eb3fe47861 Mon Sep 17 00:00:00 2001
From: Jayryn <77808206+Jayryn@users.noreply.github.com>
Date: Tue, 28 May 2024 09:14:30 +0200
Subject: [PATCH 0001/2091] Check if database symlink already exists
test -L $object [object exists and is a symbolic link (same as -h)]
Using -h is not recommended; the test(1) documentation states:
[True if file exists and is a symbolic link. This operator is retained for compatibility with previous versions of this program. Do not rely on its existence; use -L instead.]
---
debian/postinst | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/debian/postinst b/debian/postinst
index f82cbfff..223d322b 100644
--- a/debian/postinst
+++ b/debian/postinst
@@ -25,8 +25,10 @@ case "$1" in
# and permissions for the config.
mkdir -v -p "$CONDUWUIT_DATABASE_PATH"
- # symlink the previous location for compatibility
- ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
+ # symlink the previous location for compatibility if it does not exist yet.
+ if ! test -L "/var/lib/matrix-conduit" ; then
+ ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
+ fi
chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"
From dd1d8fa76013ba7a923b657053a2681cf884cc43 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Wed, 29 May 2024 17:21:59 +0000
Subject: [PATCH 0002/2091] upgrade to hyper-util 0.1.5
Signed-off-by: Jason Volk
---
Cargo.lock | 48 +++++++++++++++++++++++++-----------------------
Cargo.toml | 9 ++++++++-
2 files changed, 33 insertions(+), 24 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 16315310..29b03a71 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,9 +4,9 @@ version = 3
[[package]]
name = "addr2line"
-version = "0.21.0"
+version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
+checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678"
dependencies = [
"gimli",
]
@@ -97,9 +97,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002"
[[package]]
name = "async-compression"
-version = "0.4.10"
+version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c90a406b4495d129f00461241616194cb8a032c8d1c53c657f0961d5f8e0498"
+checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5"
dependencies = [
"brotli",
"flate2",
@@ -328,9 +328,9 @@ dependencies = [
[[package]]
name = "backtrace"
-version = "0.3.71"
+version = "0.3.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d"
+checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11"
dependencies = [
"addr2line",
"cc",
@@ -514,9 +514,9 @@ dependencies = [
[[package]]
name = "clang-sys"
-version = "1.8.0"
+version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a483f3cbf7cec2e153d424d0e92329d816becc6421389bd494375c6065921b9b"
+checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
dependencies = [
"glob",
"libc",
@@ -740,6 +740,8 @@ dependencies = [
"conduit_service",
"http 1.1.0",
"http-body-util",
+ "hyper 1.3.1",
+ "hyper-util",
"log",
"regex",
"ruma",
@@ -1244,9 +1246,9 @@ dependencies = [
[[package]]
name = "gimli"
-version = "0.28.1"
+version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
+checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd"
[[package]]
name = "glob"
@@ -1623,9 +1625,9 @@ dependencies = [
[[package]]
name = "hyper-util"
-version = "0.1.4"
+version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d8d52be92d09acc2e01dddb7fde3ad983fc6489c7db4837e605bc3fca4cb63e"
+checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56"
dependencies = [
"bytes",
"futures-channel",
@@ -1927,9 +1929,9 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "loole"
-version = "0.3.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6725f0feab07fcf90f6de5417c06d7fef976fa6e5912fa9e21cb5e4dc6ae5da"
+checksum = "ad95468e4700cb37d8d1f198050db18cebe55e4b4c8aa9180a715deedb2f8965"
[[package]]
name = "lru-cache"
@@ -2177,9 +2179,9 @@ dependencies = [
[[package]]
name = "object"
-version = "0.32.2"
+version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
+checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e"
dependencies = [
"memchr",
]
@@ -3741,9 +3743,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.37.0"
+version = "1.38.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
+checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
dependencies = [
"backtrace",
"bytes",
@@ -3770,9 +3772,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
-version = "2.2.0"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
+checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
dependencies = [
"proc-macro2",
"quote",
@@ -3889,7 +3891,7 @@ dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
- "winnow 0.6.8",
+ "winnow 0.6.9",
]
[[package]]
@@ -4564,9 +4566,9 @@ dependencies = [
[[package]]
name = "winnow"
-version = "0.6.8"
+version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d"
+checksum = "86c949fede1d13936a99f14fafd3e76fd642b556dd2ce96287fbe2e0151bfac6"
dependencies = [
"memchr",
]
diff --git a/Cargo.toml b/Cargo.toml
index bcfbda32..83f81817 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -216,7 +216,14 @@ features = [
]
[workspace.dependencies.hyper-util]
-version = "0.1.4"
+version = "0.1.5"
+features = [
+ "client",
+ "server-auto",
+ "server-graceful",
+ "service",
+ "tokio",
+]
# to support multiple variations of setting a config option
[workspace.dependencies.either]
From faa2b95c84794f993fde68588c2ff6d59d66fdcf Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sat, 25 May 2024 21:24:17 +0000
Subject: [PATCH 0003/2091] add unwrap_infallible tool
Signed-off-by: Jason Volk
---
src/core/utils/mod.rs | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs
index 5d4e5cba..582263a9 100644
--- a/src/core/utils/mod.rs
+++ b/src/core/utils/mod.rs
@@ -279,3 +279,15 @@ pub fn available_parallelism() -> usize {
.expect("Unable to query for available parallelism.")
.get()
}
+
+/// Boilerplate for wraps which are typed to never error.
+///
+/// *
+#[must_use]
+#[inline(always)]
+pub fn unwrap_infallible(result: Result) -> T {
+ match result {
+ Ok(val) => val,
+ Err(err) => match err {},
+ }
+}
From 0baa57f5d95c081136625596ace67d2e93e81410 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Wed, 29 May 2024 16:59:20 +0000
Subject: [PATCH 0004/2091] add back unix socket listener.
Signed-off-by: Jason Volk
---
src/router/Cargo.toml | 2 +
src/router/run.rs | 12 ++--
src/router/serve.rs | 133 +++++++++++++++++++++++++++++++-----------
3 files changed, 108 insertions(+), 39 deletions(-)
diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml
index dae9d14c..4b197d0a 100644
--- a/src/router/Cargo.toml
+++ b/src/router/Cargo.toml
@@ -66,6 +66,8 @@ bytes.workspace = true
clap.workspace = true
http-body-util.workspace = true
http.workspace = true
+hyper.workspace = true
+hyper-util.workspace = true
regex.workspace = true
ruma.workspace = true
sentry.optional = true
diff --git a/src/router/run.rs b/src/router/run.rs
index 2603c04a..e6149fa2 100644
--- a/src/router/run.rs
+++ b/src/router/run.rs
@@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration};
use axum_server::Handle as ServerHandle;
use tokio::{
signal,
- sync::oneshot::{self, Sender},
+ sync::broadcast::{self, Sender},
};
use tracing::{debug, info, warn};
@@ -40,14 +40,16 @@ pub(crate) async fn run(server: Arc) -> Result<(), Error> {
.insert(handle.clone());
server.interrupt.store(false, Ordering::Release);
- let (tx, rx) = oneshot::channel::<()>();
- let sigs = server.runtime().spawn(sighandle(server.clone(), tx));
+ let (tx, _) = broadcast::channel::<()>(1);
+ let sigs = server
+ .runtime()
+ .spawn(sighandle(server.clone(), tx.clone()));
// Prepare to serve http clients
let res;
// Serve clients
if cfg!(unix) && config.unix_socket_path.is_some() {
- res = serve::unix_socket(&server, app, rx).await;
+ res = serve::unix_socket(&server, app, tx.subscribe()).await;
} else if config.tls.is_some() {
res = serve::tls(&server, app, handle.clone(), addrs).await;
} else {
@@ -66,7 +68,7 @@ pub(crate) async fn run(server: Arc) -> Result<(), Error> {
_ = services().admin.handle.lock().await.take();
debug_info!("Finished");
- Ok(res?)
+ res
}
/// Async initializations
diff --git a/src/router/serve.rs b/src/router/serve.rs
index 37ed9902..bddd9a2d 100644
--- a/src/router/serve.rs
+++ b/src/router/serve.rs
@@ -1,27 +1,31 @@
-#[cfg(unix)]
-use std::fs::Permissions; // only for UNIX sockets stuff and *nix container checks
-#[cfg(unix)]
-use std::os::unix::fs::PermissionsExt as _;
use std::{
- io,
net::SocketAddr,
+ path::Path,
sync::{atomic::Ordering, Arc},
};
-use axum::Router;
+use axum::{extract::Request, routing::IntoMakeService, Router};
use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle};
#[cfg(feature = "axum_dual_protocol")]
use axum_server_dual_protocol::ServerExt;
-use conduit::{debug_info, Server};
+use conduit::{debug_error, debug_info, utils, Error, Result, Server};
+use hyper::{body::Incoming, service::service_fn};
+use hyper_util::{
+ rt::{TokioExecutor, TokioIo},
+ server,
+};
use tokio::{
- sync::oneshot::{self},
+ fs,
+ sync::broadcast::{self},
task::JoinSet,
};
+use tower::{Service, ServiceExt};
use tracing::{debug, info, warn};
+use utils::unwrap_infallible;
pub(crate) async fn plain(
- server: &Arc, app: axum::routing::IntoMakeService, handle: ServerHandle, addrs: Vec,
-) -> io::Result<()> {
+ server: &Arc, app: IntoMakeService, handle: ServerHandle, addrs: Vec,
+) -> Result<()> {
let mut join_set = JoinSet::new();
for addr in &addrs {
join_set.spawn_on(bind(*addr).handle(handle.clone()).serve(app.clone()), server.runtime());
@@ -48,8 +52,8 @@ pub(crate) async fn plain(
}
pub(crate) async fn tls(
- server: &Arc, app: axum::routing::IntoMakeService, handle: ServerHandle, addrs: Vec,
-) -> io::Result<()> {
+ server: &Arc, app: IntoMakeService, handle: ServerHandle, addrs: Vec,
+) -> Result<()> {
let config = &server.config;
let tls = config.tls.as_ref().expect("TLS configuration");
@@ -107,31 +111,92 @@ pub(crate) async fn tls(
}
#[cfg(unix)]
-#[allow(unused_variables)]
pub(crate) async fn unix_socket(
- server: &Arc, app: axum::routing::IntoMakeService, rx: oneshot::Receiver<()>,
-) -> io::Result<()> {
- let config = &server.config;
- let path = config.unix_socket_path.as_ref().unwrap();
-
- if path.exists() {
- warn!(
- "UNIX socket path {:#?} already exists (unclean shutdown?), attempting to remove it.",
- path.display()
- );
- tokio::fs::remove_file(&path).await?;
+ server: &Arc, app: IntoMakeService, mut shutdown: broadcast::Receiver<()>,
+) -> Result<()> {
+ let mut tasks = JoinSet::<()>::new();
+ let executor = TokioExecutor::new();
+ let builder = server::conn::auto::Builder::new(executor);
+ let listener = unix_socket_init(server).await?;
+ loop {
+ let app = app.clone();
+ let builder = builder.clone();
+ tokio::select! {
+ _sig = shutdown.recv() => break,
+ accept = listener.accept() => match accept {
+ Ok(conn) => unix_socket_accept(server, &listener, &mut tasks, app, builder, conn).await,
+ Err(err) => debug_error!(?listener, "accept error: {err}"),
+ },
+ }
}
- tokio::fs::create_dir_all(path.parent().unwrap()).await?;
-
- let socket_perms = config.unix_socket_perms.to_string();
- let octal_perms = u32::from_str_radix(&socket_perms, 8).unwrap();
- tokio::fs::set_permissions(&path, Permissions::from_mode(octal_perms))
- .await
- .unwrap();
-
- let bind = tokio::net::UnixListener::bind(path)?;
- info!("Listening at {:?}", path);
+ drop(listener);
+ tasks.shutdown().await;
Ok(())
}
+
+#[cfg(unix)]
+async fn unix_socket_accept(
+ server: &Arc, listener: &tokio::net::UnixListener, tasks: &mut JoinSet<()>,
+ mut app: IntoMakeService, builder: server::conn::auto::Builder,
+ conn: (tokio::net::UnixStream, tokio::net::unix::SocketAddr),
+) {
+ let (socket, remote) = conn;
+ let socket = TokioIo::new(socket);
+ debug!(?listener, ?socket, ?remote, "accepted");
+
+ let called = unwrap_infallible(app.call(()).await);
+ let handler = service_fn(move |req: Request| called.clone().oneshot(req));
+
+ let task = async move {
+ builder
+ .serve_connection(socket, handler)
+ .await
+ .map_err(|e| debug_error!(?remote, "connection error: {e}"))
+ .expect("connection error");
+ };
+
+ _ = tasks.spawn_on(task, server.runtime());
+ while tasks.try_join_next().is_some() {}
+}
+
+#[cfg(unix)]
+async fn unix_socket_init(server: &Arc) -> Result {
+ use std::os::unix::fs::PermissionsExt;
+
+ let config = &server.config;
+ let path = config
+ .unix_socket_path
+ .as_ref()
+ .expect("failed to extract configured unix socket path");
+
+ if path.exists() {
+ warn!("Removing existing UNIX socket {:#?} (unclean shutdown?)...", path.display());
+ fs::remove_file(&path)
+ .await
+ .map_err(|e| warn!("Failed to remove existing UNIX socket: {e}"))
+ .unwrap();
+ }
+
+ let dir = path.parent().unwrap_or_else(|| Path::new("/"));
+ if let Err(e) = fs::create_dir_all(dir).await {
+ return Err(Error::Err(format!("Failed to create {dir:?} for socket {path:?}: {e}")));
+ }
+
+ let listener = tokio::net::UnixListener::bind(path);
+ if let Err(e) = listener {
+ return Err(Error::Err(format!("Failed to bind listener {path:?}: {e}")));
+ }
+
+ let socket_perms = config.unix_socket_perms.to_string();
+ let octal_perms = u32::from_str_radix(&socket_perms, 8).expect("failed to convert octal permissions");
+ let perms = std::fs::Permissions::from_mode(octal_perms);
+ if let Err(e) = fs::set_permissions(&path, perms).await {
+ return Err(Error::Err(format!("Failed to set socket {path:?} permissions: {e}")));
+ }
+
+ info!("Listening at {:?}", path);
+
+ Ok(listener.unwrap())
+}
From 2e45cb281a8728eceaf4605b0d8ce16b6b30613a Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Thu, 30 May 2024 22:38:39 +0000
Subject: [PATCH 0005/2091] split router::serve units.
Signed-off-by: Jason Volk
---
src/router/mod.rs | 10 +-
src/router/run.rs | 12 +--
src/router/serve.rs | 202 --------------------------------------
src/router/serve/mod.rs | 26 +++++
src/router/serve/plain.rs | 38 +++++++
src/router/serve/tls.rs | 68 +++++++++++++
src/router/serve/unix.rs | 107 ++++++++++++++++++++
7 files changed, 245 insertions(+), 218 deletions(-)
delete mode 100644 src/router/serve.rs
create mode 100644 src/router/serve/mod.rs
create mode 100644 src/router/serve/plain.rs
create mode 100644 src/router/serve/tls.rs
create mode 100644 src/router/serve/unix.rs
diff --git a/src/router/mod.rs b/src/router/mod.rs
index 6467d5ee..9bc27356 100644
--- a/src/router/mod.rs
+++ b/src/router/mod.rs
@@ -1,8 +1,8 @@
-pub(crate) mod layers;
-pub(crate) mod request;
-pub(crate) mod router;
-pub(crate) mod run;
-pub(crate) mod serve;
+mod layers;
+mod request;
+mod router;
+mod run;
+mod serve;
extern crate conduit_core as conduit;
diff --git a/src/router/run.rs b/src/router/run.rs
index e6149fa2..d20f8669 100644
--- a/src/router/run.rs
+++ b/src/router/run.rs
@@ -24,9 +24,7 @@ use crate::{layers, serve};
#[tracing::instrument(skip_all)]
#[allow(clippy::let_underscore_must_use)] // various of these are intended
pub(crate) async fn run(server: Arc) -> Result<(), Error> {
- let config = &server.config;
let app = layers::build(&server)?;
- let addrs = config.get_bind_addrs();
// Install the admin room callback here for now
_ = services().admin.handle.lock().await.insert(admin::handle);
@@ -45,16 +43,8 @@ pub(crate) async fn run(server: Arc) -> Result<(), Error> {
.runtime()
.spawn(sighandle(server.clone(), tx.clone()));
- // Prepare to serve http clients
- let res;
// Serve clients
- if cfg!(unix) && config.unix_socket_path.is_some() {
- res = serve::unix_socket(&server, app, tx.subscribe()).await;
- } else if config.tls.is_some() {
- res = serve::tls(&server, app, handle.clone(), addrs).await;
- } else {
- res = serve::plain(&server, app, handle.clone(), addrs).await;
- }
+ let res = serve::serve(&server, app, handle, tx.subscribe()).await;
// Join the signal handler before we leave.
sigs.abort();
diff --git a/src/router/serve.rs b/src/router/serve.rs
deleted file mode 100644
index bddd9a2d..00000000
--- a/src/router/serve.rs
+++ /dev/null
@@ -1,202 +0,0 @@
-use std::{
- net::SocketAddr,
- path::Path,
- sync::{atomic::Ordering, Arc},
-};
-
-use axum::{extract::Request, routing::IntoMakeService, Router};
-use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle};
-#[cfg(feature = "axum_dual_protocol")]
-use axum_server_dual_protocol::ServerExt;
-use conduit::{debug_error, debug_info, utils, Error, Result, Server};
-use hyper::{body::Incoming, service::service_fn};
-use hyper_util::{
- rt::{TokioExecutor, TokioIo},
- server,
-};
-use tokio::{
- fs,
- sync::broadcast::{self},
- task::JoinSet,
-};
-use tower::{Service, ServiceExt};
-use tracing::{debug, info, warn};
-use utils::unwrap_infallible;
-
-pub(crate) async fn plain(
- server: &Arc, app: IntoMakeService, handle: ServerHandle, addrs: Vec,
-) -> Result<()> {
- let mut join_set = JoinSet::new();
- for addr in &addrs {
- join_set.spawn_on(bind(*addr).handle(handle.clone()).serve(app.clone()), server.runtime());
- }
-
- info!("Listening on {addrs:?}");
- while join_set.join_next().await.is_some() {}
-
- let spawn_active = server.requests_spawn_active.load(Ordering::Relaxed);
- let handle_active = server.requests_handle_active.load(Ordering::Relaxed);
- debug_info!(
- spawn_finished = server.requests_spawn_finished.load(Ordering::Relaxed),
- handle_finished = server.requests_handle_finished.load(Ordering::Relaxed),
- panics = server.requests_panic.load(Ordering::Relaxed),
- spawn_active,
- handle_active,
- "Stopped listening on {addrs:?}",
- );
-
- debug_assert!(spawn_active == 0, "active request tasks are not joined");
- debug_assert!(handle_active == 0, "active request handles still pending");
-
- Ok(())
-}
-
-pub(crate) async fn tls(
- server: &Arc, app: IntoMakeService, handle: ServerHandle, addrs: Vec,
-) -> Result<()> {
- let config = &server.config;
- let tls = config.tls.as_ref().expect("TLS configuration");
-
- debug!(
- "Using direct TLS. Certificate path {} and certificate private key path {}",
- &tls.certs, &tls.key
- );
- info!(
- "Note: It is strongly recommended that you use a reverse proxy instead of running conduwuit directly with TLS."
- );
- let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?;
-
- if cfg!(feature = "axum_dual_protocol") {
- info!(
- "conduwuit was built with axum_dual_protocol feature to listen on both HTTP and HTTPS. This will only \
- take effect if `dual_protocol` is enabled in `[global.tls]`"
- );
- }
-
- let mut join_set = JoinSet::new();
- if cfg!(feature = "axum_dual_protocol") && tls.dual_protocol {
- #[cfg(feature = "axum_dual_protocol")]
- for addr in &addrs {
- join_set.spawn_on(
- axum_server_dual_protocol::bind_dual_protocol(*addr, conf.clone())
- .set_upgrade(false)
- .handle(handle.clone())
- .serve(app.clone()),
- server.runtime(),
- );
- }
- } else {
- for addr in &addrs {
- join_set.spawn_on(
- bind_rustls(*addr, conf.clone())
- .handle(handle.clone())
- .serve(app.clone()),
- server.runtime(),
- );
- }
- }
-
- if cfg!(feature = "axum_dual_protocol") && tls.dual_protocol {
- warn!(
- "Listening on {:?} with TLS certificate {} and supporting plain text (HTTP) connections too (insecure!)",
- addrs, &tls.certs
- );
- } else {
- info!("Listening on {:?} with TLS certificate {}", addrs, &tls.certs);
- }
-
- while join_set.join_next().await.is_some() {}
-
- Ok(())
-}
-
-#[cfg(unix)]
-pub(crate) async fn unix_socket(
- server: &Arc, app: IntoMakeService, mut shutdown: broadcast::Receiver<()>,
-) -> Result<()> {
- let mut tasks = JoinSet::<()>::new();
- let executor = TokioExecutor::new();
- let builder = server::conn::auto::Builder::new(executor);
- let listener = unix_socket_init(server).await?;
- loop {
- let app = app.clone();
- let builder = builder.clone();
- tokio::select! {
- _sig = shutdown.recv() => break,
- accept = listener.accept() => match accept {
- Ok(conn) => unix_socket_accept(server, &listener, &mut tasks, app, builder, conn).await,
- Err(err) => debug_error!(?listener, "accept error: {err}"),
- },
- }
- }
-
- drop(listener);
- tasks.shutdown().await;
-
- Ok(())
-}
-
-#[cfg(unix)]
-async fn unix_socket_accept(
- server: &Arc, listener: &tokio::net::UnixListener, tasks: &mut JoinSet<()>,
- mut app: IntoMakeService, builder: server::conn::auto::Builder,
- conn: (tokio::net::UnixStream, tokio::net::unix::SocketAddr),
-) {
- let (socket, remote) = conn;
- let socket = TokioIo::new(socket);
- debug!(?listener, ?socket, ?remote, "accepted");
-
- let called = unwrap_infallible(app.call(()).await);
- let handler = service_fn(move |req: Request| called.clone().oneshot(req));
-
- let task = async move {
- builder
- .serve_connection(socket, handler)
- .await
- .map_err(|e| debug_error!(?remote, "connection error: {e}"))
- .expect("connection error");
- };
-
- _ = tasks.spawn_on(task, server.runtime());
- while tasks.try_join_next().is_some() {}
-}
-
-#[cfg(unix)]
-async fn unix_socket_init(server: &Arc) -> Result {
- use std::os::unix::fs::PermissionsExt;
-
- let config = &server.config;
- let path = config
- .unix_socket_path
- .as_ref()
- .expect("failed to extract configured unix socket path");
-
- if path.exists() {
- warn!("Removing existing UNIX socket {:#?} (unclean shutdown?)...", path.display());
- fs::remove_file(&path)
- .await
- .map_err(|e| warn!("Failed to remove existing UNIX socket: {e}"))
- .unwrap();
- }
-
- let dir = path.parent().unwrap_or_else(|| Path::new("/"));
- if let Err(e) = fs::create_dir_all(dir).await {
- return Err(Error::Err(format!("Failed to create {dir:?} for socket {path:?}: {e}")));
- }
-
- let listener = tokio::net::UnixListener::bind(path);
- if let Err(e) = listener {
- return Err(Error::Err(format!("Failed to bind listener {path:?}: {e}")));
- }
-
- let socket_perms = config.unix_socket_perms.to_string();
- let octal_perms = u32::from_str_radix(&socket_perms, 8).expect("failed to convert octal permissions");
- let perms = std::fs::Permissions::from_mode(octal_perms);
- if let Err(e) = fs::set_permissions(&path, perms).await {
- return Err(Error::Err(format!("Failed to set socket {path:?} permissions: {e}")));
- }
-
- info!("Listening at {:?}", path);
-
- Ok(listener.unwrap())
-}
diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs
new file mode 100644
index 00000000..f9d1ad6e
--- /dev/null
+++ b/src/router/serve/mod.rs
@@ -0,0 +1,26 @@
+mod plain;
+mod tls;
+mod unix;
+
+use std::sync::Arc;
+
+use axum::{routing::IntoMakeService, Router};
+use axum_server::Handle as ServerHandle;
+use conduit::{Error, Result, Server};
+use tokio::sync::broadcast;
+
+/// Serve clients
+pub(super) async fn serve(
+ server: &Arc, app: IntoMakeService, handle: ServerHandle, shutdown: broadcast::Receiver<()>,
+) -> Result<(), Error> {
+ let config = &server.config;
+ let addrs = config.get_bind_addrs();
+
+ if cfg!(unix) && config.unix_socket_path.is_some() {
+ unix::serve(server, app, shutdown).await
+ } else if config.tls.is_some() {
+ tls::serve(server, app, handle, addrs).await
+ } else {
+ plain::serve(server, app, handle, addrs).await
+ }
+}
diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs
new file mode 100644
index 00000000..339f8940
--- /dev/null
+++ b/src/router/serve/plain.rs
@@ -0,0 +1,38 @@
+use std::{
+ net::SocketAddr,
+ sync::{atomic::Ordering, Arc},
+};
+
+use axum::{routing::IntoMakeService, Router};
+use axum_server::{bind, Handle as ServerHandle};
+use conduit::{debug_info, Result, Server};
+use tokio::task::JoinSet;
+use tracing::info;
+
+pub(super) async fn serve(
+ server: &Arc, app: IntoMakeService, handle: ServerHandle, addrs: Vec,
+) -> Result<()> {
+ let mut join_set = JoinSet::new();
+ for addr in &addrs {
+ join_set.spawn_on(bind(*addr).handle(handle.clone()).serve(app.clone()), server.runtime());
+ }
+
+ info!("Listening on {addrs:?}");
+ while join_set.join_next().await.is_some() {}
+
+ let spawn_active = server.requests_spawn_active.load(Ordering::Relaxed);
+ let handle_active = server.requests_handle_active.load(Ordering::Relaxed);
+ debug_info!(
+ spawn_finished = server.requests_spawn_finished.load(Ordering::Relaxed),
+ handle_finished = server.requests_handle_finished.load(Ordering::Relaxed),
+ panics = server.requests_panic.load(Ordering::Relaxed),
+ spawn_active,
+ handle_active,
+ "Stopped listening on {addrs:?}",
+ );
+
+ debug_assert!(spawn_active == 0, "active request tasks are not joined");
+ debug_assert!(handle_active == 0, "active request handles still pending");
+
+ Ok(())
+}
diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs
new file mode 100644
index 00000000..e4edeb32
--- /dev/null
+++ b/src/router/serve/tls.rs
@@ -0,0 +1,68 @@
+use std::{net::SocketAddr, sync::Arc};
+
+use axum::{routing::IntoMakeService, Router};
+use axum_server::{bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle};
+#[cfg(feature = "axum_dual_protocol")]
+use axum_server_dual_protocol::ServerExt;
+use conduit::{Result, Server};
+use tokio::task::JoinSet;
+use tracing::{debug, info, warn};
+
+pub(super) async fn serve(
+ server: &Arc, app: IntoMakeService, handle: ServerHandle, addrs: Vec,
+) -> Result<()> {
+ let config = &server.config;
+ let tls = config.tls.as_ref().expect("TLS configuration");
+
+ debug!(
+ "Using direct TLS. Certificate path {} and certificate private key path {}",
+ &tls.certs, &tls.key
+ );
+ info!(
+ "Note: It is strongly recommended that you use a reverse proxy instead of running conduwuit directly with TLS."
+ );
+ let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?;
+
+ if cfg!(feature = "axum_dual_protocol") {
+ info!(
+ "conduwuit was built with axum_dual_protocol feature to listen on both HTTP and HTTPS. This will only \
+ take effect if `dual_protocol` is enabled in `[global.tls]`"
+ );
+ }
+
+ let mut join_set = JoinSet::new();
+ if cfg!(feature = "axum_dual_protocol") && tls.dual_protocol {
+ #[cfg(feature = "axum_dual_protocol")]
+ for addr in &addrs {
+ join_set.spawn_on(
+ axum_server_dual_protocol::bind_dual_protocol(*addr, conf.clone())
+ .set_upgrade(false)
+ .handle(handle.clone())
+ .serve(app.clone()),
+ server.runtime(),
+ );
+ }
+ } else {
+ for addr in &addrs {
+ join_set.spawn_on(
+ bind_rustls(*addr, conf.clone())
+ .handle(handle.clone())
+ .serve(app.clone()),
+ server.runtime(),
+ );
+ }
+ }
+
+ if cfg!(feature = "axum_dual_protocol") && tls.dual_protocol {
+ warn!(
+ "Listening on {:?} with TLS certificate {} and supporting plain text (HTTP) connections too (insecure!)",
+ addrs, &tls.certs
+ );
+ } else {
+ info!("Listening on {:?} with TLS certificate {}", addrs, &tls.certs);
+ }
+
+ while join_set.join_next().await.is_some() {}
+
+ Ok(())
+}
diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs
new file mode 100644
index 00000000..2f0cf9cf
--- /dev/null
+++ b/src/router/serve/unix.rs
@@ -0,0 +1,107 @@
+#![cfg(unix)]
+
+use std::{path::Path, sync::Arc};
+
+use axum::{extract::Request, routing::IntoMakeService, Router};
+use conduit::{debug_error, utils, Error, Result, Server};
+use hyper::{body::Incoming, service::service_fn};
+use hyper_util::{
+ rt::{TokioExecutor, TokioIo},
+ server,
+};
+use tokio::{
+ fs,
+ sync::broadcast::{self},
+ task::JoinSet,
+};
+use tower::{Service, ServiceExt};
+use tracing::{debug, info, warn};
+use utils::unwrap_infallible;
+
+pub(super) async fn serve(
+ server: &Arc, app: IntoMakeService, mut shutdown: broadcast::Receiver<()>,
+) -> Result<()> {
+ let mut tasks = JoinSet::<()>::new();
+ let executor = TokioExecutor::new();
+ let builder = server::conn::auto::Builder::new(executor);
+ let listener = init(server).await?;
+ loop {
+ let app = app.clone();
+ let builder = builder.clone();
+ tokio::select! {
+ _sig = shutdown.recv() => break,
+ conn = listener.accept() => match conn {
+ Ok(conn) => accept(server, &listener, &mut tasks, app, builder, conn).await,
+ Err(err) => debug_error!(?listener, "accept error: {err}"),
+ },
+ }
+ }
+
+ drop(listener);
+ tasks.shutdown().await;
+
+ Ok(())
+}
+
+async fn accept(
+ server: &Arc, listener: &tokio::net::UnixListener, tasks: &mut JoinSet<()>,
+ mut app: IntoMakeService, builder: server::conn::auto::Builder,
+ conn: (tokio::net::UnixStream, tokio::net::unix::SocketAddr),
+) {
+ let (socket, remote) = conn;
+ let socket = TokioIo::new(socket);
+ debug!(?listener, ?socket, ?remote, "accepted");
+
+ let called = unwrap_infallible(app.call(()).await);
+ let handler = service_fn(move |req: Request| called.clone().oneshot(req));
+
+ let task = async move {
+ builder
+ .serve_connection(socket, handler)
+ .await
+ .map_err(|e| debug_error!(?remote, "connection error: {e}"))
+ .expect("connection error");
+ };
+
+ _ = tasks.spawn_on(task, server.runtime());
+ while tasks.try_join_next().is_some() {}
+}
+
+async fn init(server: &Arc) -> Result {
+ use std::os::unix::fs::PermissionsExt;
+
+ let config = &server.config;
+ let path = config
+ .unix_socket_path
+ .as_ref()
+ .expect("failed to extract configured unix socket path");
+
+ if path.exists() {
+ warn!("Removing existing UNIX socket {:#?} (unclean shutdown?)...", path.display());
+ fs::remove_file(&path)
+ .await
+ .map_err(|e| warn!("Failed to remove existing UNIX socket: {e}"))
+ .unwrap();
+ }
+
+ let dir = path.parent().unwrap_or_else(|| Path::new("/"));
+ if let Err(e) = fs::create_dir_all(dir).await {
+ return Err(Error::Err(format!("Failed to create {dir:?} for socket {path:?}: {e}")));
+ }
+
+ let listener = tokio::net::UnixListener::bind(path);
+ if let Err(e) = listener {
+ return Err(Error::Err(format!("Failed to bind listener {path:?}: {e}")));
+ }
+
+ let socket_perms = config.unix_socket_perms.to_string();
+ let octal_perms = u32::from_str_radix(&socket_perms, 8).expect("failed to convert octal permissions");
+ let perms = std::fs::Permissions::from_mode(octal_perms);
+ if let Err(e) = fs::set_permissions(&path, perms).await {
+ return Err(Error::Err(format!("Failed to set socket {path:?} permissions: {e}")));
+ }
+
+ info!("Listening at {:?}", path);
+
+ Ok(listener.unwrap())
+}
From 6d1144bb697f42cd513dffff49cb913a795ac183 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Fri, 31 May 2024 00:43:30 +0000
Subject: [PATCH 0006/2091] move unix socket unlink from services to router
Signed-off-by: Jason Volk
---
src/router/serve/unix.rs | 29 ++++++++++++++++++++++-------
src/service/globals/mod.rs | 2 --
src/service/services.rs | 14 ++------------
3 files changed, 24 insertions(+), 21 deletions(-)
diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs
index 2f0cf9cf..ae768d24 100644
--- a/src/router/serve/unix.rs
+++ b/src/router/serve/unix.rs
@@ -11,6 +11,7 @@ use hyper_util::{
};
use tokio::{
fs,
+ net::{unix::SocketAddr, UnixListener, UnixStream},
sync::broadcast::{self},
task::JoinSet,
};
@@ -37,16 +38,14 @@ pub(super) async fn serve(
}
}
- drop(listener);
- tasks.shutdown().await;
+ fini(listener, tasks).await;
Ok(())
}
async fn accept(
- server: &Arc, listener: &tokio::net::UnixListener, tasks: &mut JoinSet<()>,
- mut app: IntoMakeService, builder: server::conn::auto::Builder,
- conn: (tokio::net::UnixStream, tokio::net::unix::SocketAddr),
+ server: &Arc, listener: &UnixListener, tasks: &mut JoinSet<()>, mut app: IntoMakeService,
+ builder: server::conn::auto::Builder, conn: (UnixStream, SocketAddr),
) {
let (socket, remote) = conn;
let socket = TokioIo::new(socket);
@@ -67,7 +66,7 @@ async fn accept(
while tasks.try_join_next().is_some() {}
}
-async fn init(server: &Arc) -> Result {
+async fn init(server: &Arc) -> Result {
use std::os::unix::fs::PermissionsExt;
let config = &server.config;
@@ -89,7 +88,7 @@ async fn init(server: &Arc) -> Result {
return Err(Error::Err(format!("Failed to create {dir:?} for socket {path:?}: {e}")));
}
- let listener = tokio::net::UnixListener::bind(path);
+ let listener = UnixListener::bind(path);
if let Err(e) = listener {
return Err(Error::Err(format!("Failed to bind listener {path:?}: {e}")));
}
@@ -105,3 +104,19 @@ async fn init(server: &Arc) -> Result {
Ok(listener.unwrap())
}
+
+async fn fini(listener: UnixListener, mut tasks: JoinSet<()>) {
+ let local = listener.local_addr();
+
+ drop(listener);
+ tasks.shutdown().await;
+
+ if let Ok(local) = local {
+ if let Some(path) = local.as_pathname() {
+ debug!(?path, "Removing unix socket file.");
+ if let Err(e) = fs::remove_file(path).await {
+ warn!(?path, "Failed to remove UNIX socket file: {e}");
+ }
+ }
+ }
+}
diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs
index 8f035fcd..723979da 100644
--- a/src/service/globals/mod.rs
+++ b/src/service/globals/mod.rs
@@ -395,8 +395,6 @@ impl Service {
pub fn well_known_server(&self) -> &Option { &self.config.well_known.server }
- pub fn unix_socket_path(&self) -> &Option { &self.config.unix_socket_path }
-
pub fn valid_cidr_range(&self, ip: &IPAddress) -> bool {
for cidr in &self.cidr_range_denylist {
if cidr.includes(ip) {
diff --git a/src/service/services.rs b/src/service/services.rs
index a2a9ca0b..a9356562 100644
--- a/src/service/services.rs
+++ b/src/service/services.rs
@@ -6,11 +6,8 @@ use std::{
use conduit::{debug_info, Result, Server};
use database::KeyValueDatabase;
use lru_cache::LruCache;
-use tokio::{
- fs,
- sync::{broadcast, Mutex, RwLock},
-};
-use tracing::{debug, info, trace, warn};
+use tokio::sync::{broadcast, Mutex, RwLock};
+use tracing::{debug, info, trace};
use crate::{
account_data, admin, appservice, globals, key_backups, media, presence, pusher, rooms, sending, transaction_ids,
@@ -321,13 +318,6 @@ bad_signature_ratelimiter: {bad_signature_ratelimiter}
info!("Shutting down services");
self.interrupt().await;
- debug!("Removing unix socket file.");
- if let Some(path) = self.globals.unix_socket_path().as_ref() {
- if let Err(e) = fs::remove_file(path).await {
- warn!("Failed to remove UNIX socket file: {e}");
- }
- }
-
debug!("Waiting for update worker...");
if let Some(updates_handle) = self.globals.updates_handle.lock().await.take() {
updates_handle.abort();
From c3a0d2830914bb87d57e5721f40a2ecc1147a1cf Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Fri, 31 May 2024 03:00:55 +0000
Subject: [PATCH 0007/2091] add tracing span; mute connection errors.
Signed-off-by: Jason Volk
---
src/router/serve/unix.rs | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs
index ae768d24..a526ba29 100644
--- a/src/router/serve/unix.rs
+++ b/src/router/serve/unix.rs
@@ -3,7 +3,7 @@
use std::{path::Path, sync::Arc};
use axum::{extract::Request, routing::IntoMakeService, Router};
-use conduit::{debug_error, utils, Error, Result, Server};
+use conduit::{debug_error, trace, utils, Error, Result, Server};
use hyper::{body::Incoming, service::service_fn};
use hyper_util::{
rt::{TokioExecutor, TokioIo},
@@ -19,6 +19,7 @@ use tower::{Service, ServiceExt};
use tracing::{debug, info, warn};
use utils::unwrap_infallible;
+#[tracing::instrument(skip_all)]
pub(super) async fn serve(
server: &Arc, app: IntoMakeService, mut shutdown: broadcast::Receiver<()>,
) -> Result<()> {
@@ -43,23 +44,21 @@ pub(super) async fn serve(
Ok(())
}
+#[allow(clippy::let_underscore_must_use)]
async fn accept(
server: &Arc, listener: &UnixListener, tasks: &mut JoinSet<()>, mut app: IntoMakeService,
builder: server::conn::auto::Builder, conn: (UnixStream, SocketAddr),
) {
let (socket, remote) = conn;
let socket = TokioIo::new(socket);
- debug!(?listener, ?socket, ?remote, "accepted");
+ trace!(?listener, ?socket, ?remote, "accepted");
let called = unwrap_infallible(app.call(()).await);
let handler = service_fn(move |req: Request| called.clone().oneshot(req));
let task = async move {
- builder
- .serve_connection(socket, handler)
- .await
- .map_err(|e| debug_error!(?remote, "connection error: {e}"))
- .expect("connection error");
+ // bug on darwin causes all results to be errors. do not unwrap this
+ _ = builder.serve_connection(socket, handler).await;
};
_ = tasks.spawn_on(task, server.runtime());
From eed8a2a801194b14364f074dc37e6ad11db29ba3 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sat, 1 Jun 2024 10:19:26 +0000
Subject: [PATCH 0008/2091] add and sort incomplete package metadata
Signed-off-by: Jason Volk
---
Cargo.toml | 14 ++++++++------
deps/rust-rocksdb/Cargo.toml | 8 +++++++-
src/admin/Cargo.toml | 8 +++++++-
src/api/Cargo.toml | 8 +++++++-
src/core/Cargo.toml | 8 +++++++-
src/database/Cargo.toml | 8 +++++++-
src/main/Cargo.toml | 14 ++++++++------
src/router/Cargo.toml | 8 +++++++-
src/service/Cargo.toml | 8 +++++++-
9 files changed, 65 insertions(+), 19 deletions(-)
diff --git a/Cargo.toml b/Cargo.toml
index 83f81817..de6878f0 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,19 +6,21 @@ members = ["src/*"]
default-members = ["src/*"]
[workspace.package]
-description = "a very cool fork of Conduit, a Matrix homeserver written in Rust"
-license = "Apache-2.0"
authors = [
"strawberry ",
"timokoesters ",
]
-version = "0.4.1"
+categories = ["network-programming"]
+description = "a very cool fork of Conduit, a Matrix homeserver written in Rust"
edition = "2021"
-# See also `rust-toolchain.toml`
-rust-version = "1.77.0"
homepage = "https://conduwuit.puppyirl.gay/"
-repository = "https://github.com/girlbossceo/conduwuit"
+keywords = ["chat", "matrix", "server"]
+license = "Apache-2.0"
+# See also `rust-toolchain.toml`
readme = "README.md"
+repository = "https://github.com/girlbossceo/conduwuit"
+rust-version = "1.77.0"
+version = "0.4.1"
[workspace.metadata.crane]
name = "conduit"
diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml
index 1a39281d..4dce69d1 100644
--- a/deps/rust-rocksdb/Cargo.toml
+++ b/deps/rust-rocksdb/Cargo.toml
@@ -1,7 +1,13 @@
[package]
name = "rust-rocksdb-uwu"
-version = "0.0.1"
+categories.workspace = true
+description = "dylib wrapper for rust-rocksdb"
edition = "2021"
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version = "0.0.1"
[features]
default = ["snappy", "lz4", "zstd", "zlib", "bzip2"]
diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml
index 49ec4267..244cf7d2 100644
--- a/src/admin/Cargo.toml
+++ b/src/admin/Cargo.toml
@@ -1,7 +1,13 @@
[package]
name = "conduit_admin"
-version.workspace = true
+categories.workspace = true
+description.workspace = true
edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version.workspace = true
[lib]
path = "mod.rs"
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index 890d66af..315fa76c 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -1,7 +1,13 @@
[package]
name = "conduit_api"
-version.workspace = true
+categories.workspace = true
+description.workspace = true
edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version.workspace = true
[lib]
path = "mod.rs"
diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml
index 6b700f1f..988a810b 100644
--- a/src/core/Cargo.toml
+++ b/src/core/Cargo.toml
@@ -1,7 +1,13 @@
[package]
name = "conduit_core"
-version.workspace = true
+categories.workspace = true
+description.workspace = true
edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version.workspace = true
[lib]
path = "mod.rs"
diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml
index 1855cbff..ea013298 100644
--- a/src/database/Cargo.toml
+++ b/src/database/Cargo.toml
@@ -1,7 +1,13 @@
[package]
name = "conduit_database"
-version.workspace = true
+categories.workspace = true
+description.workspace = true
edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version.workspace = true
[lib]
path = "mod.rs"
diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml
index cecdeb58..22c6b2ac 100644
--- a/src/main/Cargo.toml
+++ b/src/main/Cargo.toml
@@ -2,15 +2,17 @@
# TODO: when can we rename to conduwuit?
name = "conduit"
default-run = "conduit"
-description.workspace = true
-license.workspace = true
authors.workspace = true
-homepage.workspace = true
-repository.workspace = true
-readme.workspace = true
-version.workspace = true
+categories.workspace = true
+description.workspace = true
edition.workspace = true
+homepage.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
rust-version.workspace = true
+version.workspace = true
metadata.crane.workspace = true
[package.metadata.deb]
diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml
index 4b197d0a..11028112 100644
--- a/src/router/Cargo.toml
+++ b/src/router/Cargo.toml
@@ -1,7 +1,13 @@
[package]
name = "conduit_router"
-version.workspace = true
+categories.workspace = true
+description.workspace = true
edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version.workspace = true
[lib]
path = "mod.rs"
diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml
index 580706ff..f2990a46 100644
--- a/src/service/Cargo.toml
+++ b/src/service/Cargo.toml
@@ -1,7 +1,13 @@
[package]
name = "conduit_service"
-version.workspace = true
+categories.workspace = true
+description.workspace = true
edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version.workspace = true
[lib]
path = "mod.rs"
From 14039d9df4b57e485924e996b57b212274fc7ead Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sat, 1 Jun 2024 10:26:05 +0000
Subject: [PATCH 0009/2091] cleanup unused extern crates.
Signed-off-by: Jason Volk
---
Cargo.lock | 25 -------------------------
src/core/Cargo.toml | 8 --------
src/database/Cargo.toml | 13 -------------
src/main/Cargo.toml | 4 ----
src/service/Cargo.toml | 32 --------------------------------
5 files changed, 82 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 29b03a71..375440ef 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -572,12 +572,8 @@ name = "conduit"
version = "0.4.1"
dependencies = [
"clap",
- "conduit_admin",
- "conduit_api",
"conduit_core",
- "conduit_database",
"conduit_router",
- "conduit_service",
"console-subscriber",
"hardened_malloc-rs",
"log",
@@ -655,16 +651,11 @@ dependencies = [
name = "conduit_core"
version = "0.4.1"
dependencies = [
- "async-trait",
"axum 0.7.5",
"axum-server",
- "base64 0.22.1",
"bytes",
- "clap",
- "cyborgtime",
"either",
"figment",
- "futures-util",
"hardened_malloc-rs",
"http 1.1.0",
"http-body-util",
@@ -674,7 +665,6 @@ dependencies = [
"itertools 0.13.0",
"libloading",
"log",
- "lru-cache",
"nix",
"parking_lot",
"rand",
@@ -688,8 +678,6 @@ dependencies = [
"serde",
"serde_json",
"serde_regex",
- "serde_yaml",
- "sha-1",
"thiserror",
"thread_local",
"tikv-jemalloc-ctl",
@@ -708,7 +696,6 @@ version = "0.4.1"
dependencies = [
"chrono",
"conduit_core",
- "futures-util",
"log",
"lru-cache",
"parking_lot",
@@ -716,9 +703,6 @@ dependencies = [
"rusqlite",
"rust-rocksdb-uwu",
"thread_local",
- "tikv-jemalloc-ctl",
- "tikv-jemalloc-sys",
- "tikv-jemallocator",
"tokio",
"tracing",
"zstd",
@@ -764,13 +748,11 @@ dependencies = [
"async-trait",
"base64 0.22.1",
"bytes",
- "clap",
"conduit_core",
"conduit_database",
"cyborgtime",
"futures-util",
"hickory-resolver",
- "hmac",
"http 1.1.0",
"image",
"ipaddress",
@@ -786,21 +768,14 @@ dependencies = [
"ruma",
"ruma-identifiers-validation",
"rusqlite",
- "rust-rocksdb-uwu",
"serde",
"serde_json",
"serde_yaml",
- "sha-1",
"sha2",
"thread_local",
- "tikv-jemalloc-ctl",
- "tikv-jemalloc-sys",
- "tikv-jemallocator",
"tokio",
"tracing",
- "tracing-subscriber",
"url",
- "webpage",
"zstd",
]
diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml
index 988a810b..a0c8a060 100644
--- a/src/core/Cargo.toml
+++ b/src/core/Cargo.toml
@@ -71,16 +71,11 @@ perf_measurements = []
sentry_telemetry = []
[dependencies]
-async-trait.workspace = true
axum-server.workspace = true
axum.workspace = true
-base64.workspace = true
bytes.workspace = true
-clap.workspace = true
-cyborgtime.workspace = true
either.workspace = true
figment.workspace = true
-futures-util.workspace = true
http-body-util.workspace = true
http.workspace = true
image.workspace = true
@@ -89,7 +84,6 @@ ipaddress.workspace = true
itertools.workspace = true
libloading.workspace = true
log.workspace = true
-lru-cache.workspace = true
parking_lot.optional = true
parking_lot.workspace = true
rand.workspace = true
@@ -105,8 +99,6 @@ sanitize-filename.workspace = true
serde_json.workspace = true
serde_regex.workspace = true
serde.workspace = true
-serde_yaml.workspace = true
-sha-1.workspace = true
thiserror.workspace = true
thread_local.optional = true
thread_local.workspace = true
diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml
index ea013298..e23d2d7b 100644
--- a/src/database/Cargo.toml
+++ b/src/database/Cargo.toml
@@ -41,14 +41,8 @@ rocksdb = [
"dep:rust-rocksdb",
]
jemalloc = [
- "dep:tikv-jemalloc-sys",
- "dep:tikv-jemalloc-ctl",
- "dep:tikv-jemallocator",
"rust-rocksdb/jemalloc",
]
-jemalloc_prof = [
- "tikv-jemalloc-sys/profiling",
-]
io_uring = [
"rust-rocksdb/io-uring",
]
@@ -59,7 +53,6 @@ zstd_compression = [
[dependencies]
chrono.workspace = true
conduit-core.workspace = true
-futures-util.workspace = true
log.workspace = true
lru-cache.workspace = true
parking_lot.optional = true
@@ -71,12 +64,6 @@ rust-rocksdb.optional = true
rust-rocksdb.workspace = true
thread_local.optional = true
thread_local.workspace = true
-tikv-jemallocator.optional = true
-tikv-jemallocator.workspace = true
-tikv-jemalloc-ctl.optional = true
-tikv-jemalloc-ctl.workspace = true
-tikv-jemalloc-sys.optional = true
-tikv-jemalloc-sys.workspace = true
tokio.workspace = true
tracing.workspace = true
zstd.optional = true
diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml
index 22c6b2ac..06c8c697 100644
--- a/src/main/Cargo.toml
+++ b/src/main/Cargo.toml
@@ -69,10 +69,6 @@ perf_measurements = [
[dependencies]
conduit-router.workspace = true
-conduit-admin.workspace = true
-conduit-api.workspace = true
-conduit-service.workspace = true
-conduit-database.workspace = true
conduit-core.workspace = true
tokio.workspace = true
diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml
index f2990a46..e4189125 100644
--- a/src/service/Cargo.toml
+++ b/src/service/Cargo.toml
@@ -18,11 +18,7 @@ crate-type = [
[features]
default = [
- "rocksdb",
- "io_uring",
- "jemalloc",
"gzip_compression",
- "zstd_compression",
"brotli_compression",
"release_max_log_level",
]
@@ -39,21 +35,6 @@ sqlite = [
"dep:parking_lot",
"dep:thread_local",
]
-rocksdb = [
- "dep:rust-rocksdb",
-]
-jemalloc = [
- "dep:tikv-jemalloc-sys",
- "dep:tikv-jemalloc-ctl",
- "dep:tikv-jemallocator",
- "rust-rocksdb/jemalloc",
-]
-io_uring = [
- "rust-rocksdb/io-uring",
-]
-zstd_compression = [
- "rust-rocksdb/zstd",
-]
gzip_compression = [
"reqwest/gzip",
]
@@ -69,13 +50,11 @@ argon2.workspace = true
async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
-clap.workspace = true
conduit-core.workspace = true
conduit-database.workspace = true
cyborgtime.workspace = true
futures-util.workspace = true
hickory-resolver.workspace = true
-hmac.workspace = true
http.workspace = true
image.workspace = true
ipaddress.workspace = true
@@ -93,27 +72,16 @@ ruma-identifiers-validation.workspace = true
ruma.workspace = true
rusqlite.optional = true
rusqlite.workspace = true
-rust-rocksdb.optional = true
-rust-rocksdb.workspace = true
serde_json.workspace = true
serde.workspace = true
serde_yaml.workspace = true
-sha-1.workspace = true
sha2.optional = true
sha2.workspace = true
thread_local.optional = true
thread_local.workspace = true
-tikv-jemallocator.optional = true
-tikv-jemallocator.workspace = true
-tikv-jemalloc-ctl.optional = true
-tikv-jemalloc-ctl.workspace = true
-tikv-jemalloc-sys.optional = true
-tikv-jemalloc-sys.workspace = true
tokio.workspace = true
-tracing-subscriber.workspace = true
tracing.workspace = true
url.workspace = true
-webpage.workspace = true
zstd.optional = true
zstd.workspace = true
From 9e51525c25fa1d06e3e5fd30389c0edd7f83dc8c Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:15:02 +0000
Subject: [PATCH 0010/2091] Fix uninlined-format-args
Signed-off-by: Jason Volk
---
src/admin/appservice/appservice_command.rs | 2 +-
src/admin/debug/debug_commands.rs | 3 +-
src/admin/fsck/fsck_commands.rs | 5 ++--
src/admin/query/account_data.rs | 14 +++------
src/admin/query/appservice.rs | 14 +++------
src/admin/query/globals.rs | 35 +++++++---------------
src/admin/query/presence.rs | 14 +++------
src/admin/query/room_alias.rs | 21 ++++---------
src/admin/query/sending.rs | 28 +++++------------
src/admin/query/users.rs | 7 ++---
src/admin/room/room_alias_commands.rs | 22 +++++++-------
src/admin/room/room_moderation_commands.rs | 11 +++----
src/core/error.rs | 2 +-
src/core/utils/content_disposition.rs | 4 +--
14 files changed, 59 insertions(+), 123 deletions(-)
diff --git a/src/admin/appservice/appservice_command.rs b/src/admin/appservice/appservice_command.rs
index 409ef83b..d15cb7a6 100644
--- a/src/admin/appservice/appservice_command.rs
+++ b/src/admin/appservice/appservice_command.rs
@@ -47,7 +47,7 @@ pub(crate) async fn show(_body: Vec<&str>, appservice_identifier: String) -> Res
{
Some(config) => {
let config_str = serde_yaml::to_string(&config).expect("config should've been validated on register");
- let output = format!("Config for {}:\n\n```yaml\n{}\n```", appservice_identifier, config_str,);
+ let output = format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",);
let output_html = format!(
"Config for {}:\n\n{}
",
escape_html(&appservice_identifier),
diff --git a/src/admin/debug/debug_commands.rs b/src/admin/debug/debug_commands.rs
index a658b84d..74313089 100644
--- a/src/admin/debug/debug_commands.rs
+++ b/src/admin/debug/debug_commands.rs
@@ -294,8 +294,7 @@ pub(crate) async fn ping(_body: Vec<&str>, server: Box) -> Result {
diff --git a/src/admin/fsck/fsck_commands.rs b/src/admin/fsck/fsck_commands.rs
index dec407d8..2eab94de 100644
--- a/src/admin/fsck/fsck_commands.rs
+++ b/src/admin/fsck/fsck_commands.rs
@@ -17,9 +17,8 @@ pub(crate) async fn check_all_users(_body: Vec<&str>) -> Result ResultQuery completed in {query_time:?}:
\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
AccountData::Get {
@@ -39,11 +36,8 @@ pub(crate) async fn account_data(subcommand: AccountData) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
}
diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs
index 88c74d5e..d108ca8d 100644
--- a/src/admin/query/appservice.rs
+++ b/src/admin/query/appservice.rs
@@ -17,11 +17,8 @@ pub(crate) async fn appservice(subcommand: Appservice) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
Appservice::All => {
@@ -30,11 +27,8 @@ pub(crate) async fn appservice(subcommand: Appservice) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
}
diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs
index 37a549ad..e5249118 100644
--- a/src/admin/query/globals.rs
+++ b/src/admin/query/globals.rs
@@ -12,11 +12,8 @@ pub(crate) async fn globals(subcommand: Globals) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
Globals::CurrentCount => {
@@ -25,11 +22,8 @@ pub(crate) async fn globals(subcommand: Globals) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
Globals::LastCheckForUpdatesId => {
@@ -38,11 +32,8 @@ pub(crate) async fn globals(subcommand: Globals) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
Globals::LoadKeypair => {
@@ -51,11 +42,8 @@ pub(crate) async fn globals(subcommand: Globals) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
Globals::SigningKeysFor {
@@ -66,11 +54,8 @@ pub(crate) async fn globals(subcommand: Globals) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
}
diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs
index b77febfa..f69dd19a 100644
--- a/src/admin/query/presence.rs
+++ b/src/admin/query/presence.rs
@@ -14,11 +14,8 @@ pub(crate) async fn presence(subcommand: Presence) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
Presence::PresenceSince {
@@ -31,11 +28,8 @@ pub(crate) async fn presence(subcommand: Presence) -> Result = results.collect();
Ok(RoomMessageEventContent::text_html(
- format!("Query completed in {query_time:?}:\n\n```\n{:?}```", presence_since),
- format!(
- "Query completed in {query_time:?}:
\n{:?}\n
",
- presence_since
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{presence_since:?}```"),
+ format!("Query completed in {query_time:?}:
\n{presence_since:?}\n
"),
))
},
}
diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs
index eb2bac8a..62d05899 100644
--- a/src/admin/query/room_alias.rs
+++ b/src/admin/query/room_alias.rs
@@ -14,11 +14,8 @@ pub(crate) async fn room_alias(subcommand: RoomAlias) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
RoomAlias::LocalAliasesForRoom {
@@ -31,11 +28,8 @@ pub(crate) async fn room_alias(subcommand: RoomAlias) -> Result = results.collect();
Ok(RoomMessageEventContent::text_html(
- format!("Query completed in {query_time:?}:\n\n```\n{:?}```", aliases),
- format!(
- "Query completed in {query_time:?}:
\n{:?}\n
",
- aliases
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{aliases:?}```"),
+ format!("Query completed in {query_time:?}:
\n{aliases:?}\n
"),
))
},
RoomAlias::AllLocalAliases => {
@@ -46,11 +40,8 @@ pub(crate) async fn room_alias(subcommand: RoomAlias) -> Result = results.collect();
Ok(RoomMessageEventContent::text_html(
- format!("Query completed in {query_time:?}:\n\n```\n{:?}```", aliases),
- format!(
- "Query completed in {query_time:?}:
\n{:?}\n
",
- aliases
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{aliases:?}```"),
+ format!("Query completed in {query_time:?}:
\n{aliases:?}\n
"),
))
},
}
diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs
index 19d73cf6..5a21ec87 100644
--- a/src/admin/query/sending.rs
+++ b/src/admin/query/sending.rs
@@ -14,11 +14,8 @@ pub(crate) async fn sending(subcommand: Sending) -> Result> = results.collect();
Ok(RoomMessageEventContent::text_html(
- format!("Query completed in {query_time:?}:\n\n```\n{:?}```", active_requests),
- format!(
- "Query completed in {query_time:?}:
\n{:?}\n
",
- active_requests
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{active_requests:?}```"),
+ format!("Query completed in {query_time:?}:
\n{active_requests:?}\n
"),
))
},
Sending::QueuedRequests {
@@ -96,11 +93,8 @@ pub(crate) async fn sending(subcommand: Sending) -> Result>>();
Ok(RoomMessageEventContent::text_html(
- format!("Query completed in {query_time:?}:\n\n```\n{:?}```", queued_requests),
- format!(
- "Query completed in {query_time:?}:
\n{:?}\n
",
- queued_requests
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{queued_requests:?}```"),
+ format!("Query completed in {query_time:?}:
\n{queued_requests:?}\n
"),
))
},
Sending::ActiveRequestsFor {
@@ -178,11 +172,8 @@ pub(crate) async fn sending(subcommand: Sending) -> Result>>();
Ok(RoomMessageEventContent::text_html(
- format!("Query completed in {query_time:?}:\n\n```\n{:?}```", active_requests),
- format!(
- "Query completed in {query_time:?}:
\n{:?}\n
",
- active_requests
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{active_requests:?}```"),
+ format!("Query completed in {query_time:?}:
\n{active_requests:?}\n
"),
))
},
Sending::GetLatestEduCount {
@@ -193,11 +184,8 @@ pub(crate) async fn sending(subcommand: Sending) -> ResultQuery completed in {query_time:?}:\n{:?}\n
",
- results
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{results:?}```"),
+ format!("Query completed in {query_time:?}:
\n{results:?}\n
"),
))
},
}
diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs
index 2b624ed3..ef99d880 100644
--- a/src/admin/query/users.rs
+++ b/src/admin/query/users.rs
@@ -14,11 +14,8 @@ pub(crate) async fn users(subcommand: Users) -> Result
let users = results.collect::>();
Ok(RoomMessageEventContent::text_html(
- format!("Query completed in {query_time:?}:\n\n```\n{:?}```", users),
- format!(
- "Query completed in {query_time:?}:
\n{:?}\n
",
- users
- ),
+ format!("Query completed in {query_time:?}:\n\n```\n{users:?}```"),
+ format!("Query completed in {query_time:?}:
\n{users:?}\n
"),
))
},
}
diff --git a/src/admin/room/room_alias_commands.rs b/src/admin/room/room_alias_commands.rs
index b767251a..33c165b8 100644
--- a/src/admin/room/room_alias_commands.rs
+++ b/src/admin/room/room_alias_commands.rs
@@ -20,7 +20,7 @@ pub(crate) async fn process(command: RoomAliasCommand, _body: Vec<&str>) -> Resu
let room_alias_str = format!("#{}:{}", room_alias_localpart, services().globals.server_name());
let room_alias = match RoomAliasId::parse_box(room_alias_str) {
Ok(alias) => alias,
- Err(err) => return Ok(RoomMessageEventContent::text_plain(format!("Failed to parse alias: {}", err))),
+ Err(err) => return Ok(RoomMessageEventContent::text_plain(format!("Failed to parse alias: {err}"))),
};
match command {
RoomAliasCommand::Set {
@@ -30,14 +30,12 @@ pub(crate) async fn process(command: RoomAliasCommand, _body: Vec<&str>) -> Resu
} => match (force, services().rooms.alias.resolve_local_alias(&room_alias)) {
(true, Ok(Some(id))) => match services().rooms.alias.set_alias(&room_alias, &room_id) {
Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
- "Successfully overwrote alias (formerly {})",
- id
+ "Successfully overwrote alias (formerly {id})"
))),
- Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {}", err))),
+ Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))),
},
(false, Ok(Some(id))) => Ok(RoomMessageEventContent::text_plain(format!(
- "Refusing to overwrite in use alias for {}, use -f or --force to overwrite",
- id
+ "Refusing to overwrite in use alias for {id}, use -f or --force to overwrite"
))),
(_, Ok(None)) => match services().rooms.alias.set_alias(&room_alias, &room_id) {
Ok(()) => Ok(RoomMessageEventContent::text_plain("Successfully set alias")),
@@ -49,18 +47,18 @@ pub(crate) async fn process(command: RoomAliasCommand, _body: Vec<&str>) -> Resu
..
} => match services().rooms.alias.resolve_local_alias(&room_alias) {
Ok(Some(id)) => match services().rooms.alias.remove_alias(&room_alias) {
- Ok(()) => Ok(RoomMessageEventContent::text_plain(format!("Removed alias from {}", id))),
- Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {}", err))),
+ Ok(()) => Ok(RoomMessageEventContent::text_plain(format!("Removed alias from {id}"))),
+ Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))),
},
Ok(None) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
- Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {}", err))),
+ Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))),
},
RoomAliasCommand::Which {
..
} => match services().rooms.alias.resolve_local_alias(&room_alias) {
- Ok(Some(id)) => Ok(RoomMessageEventContent::text_plain(format!("Alias resolves to {}", id))),
+ Ok(Some(id)) => Ok(RoomMessageEventContent::text_plain(format!("Alias resolves to {id}"))),
Ok(None) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
- Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {}", err))),
+ Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))),
},
RoomAliasCommand::List {
..
@@ -93,7 +91,7 @@ pub(crate) async fn process(command: RoomAliasCommand, _body: Vec<&str>) -> Resu
let html = format!("Aliases for {room_id}:\n");
Ok(RoomMessageEventContent::text_html(plain, html))
},
- Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to list aliases: {}", err))),
+ Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to list aliases: {err}"))),
}
} else {
let aliases = services()
diff --git a/src/admin/room/room_moderation_commands.rs b/src/admin/room/room_moderation_commands.rs
index 14ea090d..88b05fd0 100644
--- a/src/admin/room/room_moderation_commands.rs
+++ b/src/admin/room/room_moderation_commands.rs
@@ -481,7 +481,7 @@ pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) ->
// as the room name if we dont have it TODO: do same if we have a room alias for
// this
let plain_list = room_ids.iter().fold(String::new(), |mut output, room_id| {
- writeln!(output, "- `{}`", room_id).unwrap();
+ writeln!(output, "- `{room_id}`").unwrap();
output
});
@@ -490,16 +490,13 @@ pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) ->
output
});
- let plain = format!("Rooms:\n{}", plain_list);
- let html = format!("Rooms:\n", html_list);
+ let plain = format!("Rooms:\n{plain_list}");
+ let html = format!("Rooms:\n");
Ok(RoomMessageEventContent::text_html(plain, html))
},
Err(e) => {
error!("Failed to list banned rooms: {}", e);
- Ok(RoomMessageEventContent::text_plain(format!(
- "Unable to list room aliases: {}",
- e
- )))
+ Ok(RoomMessageEventContent::text_plain(format!("Unable to list room aliases: {e}")))
},
}
},
diff --git a/src/core/error.rs b/src/core/error.rs
index 40ad8ecc..7049571a 100644
--- a/src/core/error.rs
+++ b/src/core/error.rs
@@ -135,7 +135,7 @@ impl From for Error {
}
impl fmt::Debug for Error {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self) }
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{self}") }
}
#[derive(Clone)]
diff --git a/src/core/utils/content_disposition.rs b/src/core/utils/content_disposition.rs
index 2a2a6d18..e1d40bec 100644
--- a/src/core/utils/content_disposition.rs
+++ b/src/core/utils/content_disposition.rs
@@ -127,9 +127,9 @@ mod tests {
};
// cargo test -- --nocapture
- println!("{}", SAMPLE);
+ println!("{SAMPLE}");
println!("{}", sanitize_filename::sanitize_with_options(SAMPLE, options.clone()));
- println!("{:?}", SAMPLE);
+ println!("{SAMPLE:?}");
println!("{:?}", sanitize_filename::sanitize_with_options(SAMPLE, options.clone()));
assert_eq!(SANITISED, sanitize_filename::sanitize_with_options(SAMPLE, options.clone()));
From b3fc8516ed392282e93d3395c4f335944ac2badd Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:16:29 +0000
Subject: [PATCH 0011/2091] Fix unnested-or-patterns
Signed-off-by: Jason Volk
---
src/api/client_server/membership.rs | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs
index 7ccea350..34af9cdf 100644
--- a/src/api/client_server/membership.rs
+++ b/src/api/client_server/membership.rs
@@ -995,10 +995,7 @@ pub async fn join_room_by_id_helper(
let restriction_rooms = match join_rules_event_content {
Some(RoomJoinRulesEventContent {
- join_rule: JoinRule::Restricted(restricted),
- })
- | Some(RoomJoinRulesEventContent {
- join_rule: JoinRule::KnockRestricted(restricted),
+ join_rule: JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted),
}) => restricted
.allow
.into_iter()
From 02081b66c47218b6ab3d537217727be949a46d4e Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:18:57 +0000
Subject: [PATCH 0012/2091] Fix some unnecessary-unwraps with additional
cleanup/simplification.
Signed-off-by: Jason Volk
---
src/api/client_server/sync.rs | 2 +-
src/service/globals/data.rs | 18 ++++--------------
src/service/globals/mod.rs | 26 ++++++++------------------
src/service/rooms/timeline/mod.rs | 2 +-
src/service/sending/mod.rs | 10 +++++-----
src/service/sending/sender.rs | 6 +++---
6 files changed, 22 insertions(+), 42 deletions(-)
diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs
index 5925fc53..fb087489 100644
--- a/src/api/client_server/sync.rs
+++ b/src/api/client_server/sync.rs
@@ -142,7 +142,7 @@ pub(crate) async fn sync_events_route(
.collect::>();
// Coalesce database writes for the remainder of this scope.
- let _cork = services().globals.cork_and_flush()?;
+ let _cork = services().globals.db.cork_and_flush();
for room_id in all_joined_rooms {
let room_id = room_id?;
diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs
index f1fa621d..5b26598b 100644
--- a/src/service/globals/data.rs
+++ b/src/service/globals/data.rs
@@ -25,15 +25,9 @@ pub trait Data: Send + Sync {
async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>;
fn cleanup(&self) -> Result<()>;
- /// TODO: use this?
- #[allow(dead_code)]
- fn flush(&self) -> Result<()>;
- fn cork(&self) -> Result<Cork>;
- fn cork_and_flush(&self) -> Result<Cork>;
+ fn cork(&self) -> Cork;
+ fn cork_and_flush(&self) -> Cork;
- /// TODO: use this?
- #[allow(dead_code)]
- fn cork_and_sync(&self) -> Result<Cork>;
fn memory_usage(&self) -> String;
fn clear_caches(&self, amount: u32);
fn load_keypair(&self) -> Result;
@@ -177,13 +171,9 @@ impl Data for KeyValueDatabase {
fn cleanup(&self) -> Result<()> { self.db.cleanup() }
- fn flush(&self) -> Result<()> { self.db.flush() }
+ fn cork(&self) -> Cork { Cork::new(&self.db, false, false) }
- fn cork(&self) -> Result<Cork> { Ok(Cork::new(&self.db, false, false)) }
-
- fn cork_and_flush(&self) -> Result<Cork> { Ok(Cork::new(&self.db, true, false)) }
-
- fn cork_and_sync(&self) -> Result<Cork> { Ok(Cork::new(&self.db, true, true)) }
+ fn cork_and_flush(&self) -> Cork { Cork::new(&self.db, true, false) }
fn memory_usage(&self) -> String {
let auth_chain_cache = self.auth_chain_cache.lock().unwrap().len();
diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs
index 723979da..2621ae24 100644
--- a/src/service/globals/mod.rs
+++ b/src/service/globals/mod.rs
@@ -1,3 +1,10 @@
+mod client;
+mod data;
+pub(super) mod emerg_access;
+pub(super) mod migrations;
+mod resolver;
+pub(super) mod updates;
+
use std::{
collections::{BTreeMap, HashMap},
fs,
@@ -29,14 +36,7 @@ use tokio::{
use tracing::{error, trace};
use url::Url;
-use crate::{database::Cork, services, Config, Result};
-
-mod client;
-mod data;
-pub(crate) mod emerg_access;
-pub(crate) mod migrations;
-mod resolver;
-pub(crate) mod updates;
+use crate::{services, Config, Result};
type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
@@ -194,16 +194,6 @@ impl Service {
self.db.watch(user_id, device_id).await
}
- pub fn cleanup(&self) -> Result<()> { self.db.cleanup() }
-
- /// TODO: use this?
- #[allow(dead_code)]
- pub fn flush(&self) -> Result<()> { self.db.flush() }
-
- pub fn cork(&self) -> Result<Cork> { self.db.cork() }
-
- pub fn cork_and_flush(&self) -> Result<Cork> { self.db.cork_and_flush() }
-
pub fn server_name(&self) -> &ServerName { self.config.server_name.as_ref() }
pub fn max_request_size(&self) -> u32 { self.config.max_request_size }
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs
index 4d91375f..04f03242 100644
--- a/src/service/rooms/timeline/mod.rs
+++ b/src/service/rooms/timeline/mod.rs
@@ -195,7 +195,7 @@ impl Service {
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
) -> Result> {
// Coalesce database writes for the remainder of this scope.
- let _cork = services().globals.cork_and_flush()?;
+ let _cork = services().globals.db.cork_and_flush();
let shortroomid = services()
.rooms
diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs
index a9f64f7b..654ec523 100644
--- a/src/service/sending/mod.rs
+++ b/src/service/sending/mod.rs
@@ -81,7 +81,7 @@ impl Service {
pub fn send_pdu_push(&self, pdu_id: &[u8], user: &UserId, pushkey: String) -> Result<()> {
let dest = Destination::Push(user.to_owned(), pushkey);
let event = SendingEvent::Pdu(pdu_id.to_owned());
- let _cork = services().globals.cork()?;
+ let _cork = services().globals.db.cork();
let keys = self.db.queue_requests(&[(&dest, event.clone())])?;
self.dispatch(Msg {
dest,
@@ -94,7 +94,7 @@ impl Service {
pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: Vec<u8>) -> Result<()> {
let dest = Destination::Appservice(appservice_id);
let event = SendingEvent::Pdu(pdu_id);
- let _cork = services().globals.cork()?;
+ let _cork = services().globals.db.cork();
let keys = self.db.queue_requests(&[(&dest, event.clone())])?;
self.dispatch(Msg {
dest,
@@ -121,7 +121,7 @@ impl Service {
.into_iter()
.map(|server| (Destination::Normal(server), SendingEvent::Pdu(pdu_id.to_owned())))
.collect::>();
- let _cork = services().globals.cork()?;
+ let _cork = services().globals.db.cork();
let keys = self.db.queue_requests(
&requests
.iter()
@@ -143,7 +143,7 @@ impl Service {
pub fn send_edu_server(&self, server: &ServerName, serialized: Vec<u8>) -> Result<()> {
let dest = Destination::Normal(server.to_owned());
let event = SendingEvent::Edu(serialized);
- let _cork = services().globals.cork()?;
+ let _cork = services().globals.db.cork();
let keys = self.db.queue_requests(&[(&dest, event.clone())])?;
self.dispatch(Msg {
dest,
@@ -170,7 +170,7 @@ impl Service {
.into_iter()
.map(|server| (Destination::Normal(server), SendingEvent::Edu(serialized.clone())))
.collect::>();
- let _cork = services().globals.cork()?;
+ let _cork = services().globals.db.cork();
let keys = self.db.queue_requests(
&requests
.iter()
diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs
index 8bb93105..3d1d89da 100644
--- a/src/service/sending/sender.rs
+++ b/src/service/sending/sender.rs
@@ -100,7 +100,7 @@ impl Service {
fn handle_response_ok(
&self, dest: &Destination, futures: &mut SendingFutures<'_>, statuses: &mut CurTransactionStatus,
) {
- let _cork = services().globals.cork();
+ let _cork = services().globals.db.cork();
self.db
.delete_all_active_requests_for(dest)
.expect("all active requests deleted");
@@ -173,7 +173,7 @@ impl Service {
return Ok(None);
}
- let _cork = services().globals.cork();
+ let _cork = services().globals.db.cork();
let mut events = Vec::new();
// Must retry any previous transaction for this remote.
@@ -187,7 +187,7 @@ impl Service {
}
// Compose the next transaction
- let _cork = services().globals.cork();
+ let _cork = services().globals.db.cork();
if !new_events.is_empty() {
self.db.mark_as_active(&new_events)?;
for (e, _) in new_events {
From 2ab427fe99f09883ead44922999af44de19e5a25 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:21:08 +0000
Subject: [PATCH 0013/2091] Fix default-trait-access
---
src/core/alloc/default.rs | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/core/alloc/default.rs b/src/core/alloc/default.rs
index 6e4128bf..4e2f8d7e 100644
--- a/src/core/alloc/default.rs
+++ b/src/core/alloc/default.rs
@@ -2,8 +2,8 @@
/// Always returns the empty string
#[must_use]
-pub fn memory_stats() -> String { Default::default() }
+pub fn memory_stats() -> String { String::default() }
/// Always returns the empty string
#[must_use]
-pub fn memory_usage() -> String { Default::default() }
+pub fn memory_usage() -> String { String::default() }
From 68cbf191540db71568f9272ff88a8d493ee647ff Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:22:48 +0000
Subject: [PATCH 0014/2091] Fix items-after-statements
Signed-off-by: Jason Volk
---
src/api/client_server/sync.rs | 3 ++-
src/database/rocksdb/kvtree.rs | 6 +++---
src/database/rocksdb/mod.rs | 3 ++-
src/database/rocksdb/opts.rs | 3 ++-
src/service/pusher/mod.rs | 7 +++----
src/service/sending/appservice.rs | 5 ++---
6 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs
index fb087489..933f7b58 100644
--- a/src/api/client_server/sync.rs
+++ b/src/api/client_server/sync.rs
@@ -494,6 +494,8 @@ async fn handle_left_room(
async fn process_presence_updates(
presence_updates: &mut HashMap, since: u64, syncing_user: &OwnedUserId,
) -> Result<()> {
+ use crate::service::presence::Presence;
+
// Take presence updates
for (user_id, _, presence_bytes) in services().presence.presence_since(since) {
if !services()
@@ -504,7 +506,6 @@ async fn process_presence_updates(
continue;
}
- use crate::service::presence::Presence;
let presence_event = Presence::from_json_bytes_to_event(&presence_bytes, &user_id)?;
match presence_updates.entry(user_id) {
Entry::Vacant(slot) => {
diff --git a/src/database/rocksdb/kvtree.rs b/src/database/rocksdb/kvtree.rs
index 02a0f3bf..00d01f8e 100644
--- a/src/database/rocksdb/kvtree.rs
+++ b/src/database/rocksdb/kvtree.rs
@@ -23,13 +23,13 @@ impl KvTree for RocksDbEngineTree<'_> {
}
fn multi_get(&self, keys: &[&[u8]]) -> Result<Vec<Option<Vec<u8>>>> {
- let mut readoptions = rust_rocksdb::ReadOptions::default();
- readoptions.set_total_order_seek(true);
-
// Optimization can be `true` if key vector is pre-sorted **by the column
// comparator**.
const SORTED: bool = false;
+ let mut readoptions = rust_rocksdb::ReadOptions::default();
+ readoptions.set_total_order_seek(true);
+
let mut ret: Vec<Option<Vec<u8>>> = Vec::with_capacity(keys.len());
for res in self
.db
diff --git a/src/database/rocksdb/mod.rs b/src/database/rocksdb/mod.rs
index 5198e6b7..28ce5072 100644
--- a/src/database/rocksdb/mod.rs
+++ b/src/database/rocksdb/mod.rs
@@ -261,8 +261,9 @@ impl KeyValueDatabaseEngine for Arc {
impl Drop for Engine {
fn drop(&mut self) {
- debug!("Waiting for background tasks to finish...");
const BLOCKING: bool = true;
+
+ debug!("Waiting for background tasks to finish...");
self.rocks.cancel_all_background_work(BLOCKING);
debug!("Shutting down background threads");
diff --git a/src/database/rocksdb/opts.rs b/src/database/rocksdb/opts.rs
index afed79a5..06c90a77 100644
--- a/src/database/rocksdb/opts.rs
+++ b/src/database/rocksdb/opts.rs
@@ -17,13 +17,14 @@ use super::{
/// columns, therefor columns should only be opened after passing this result
/// through cf_options().
pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_cache: &Cache) -> Options {
+ const MIN_PARALLELISM: usize = 2;
+
let mut opts = Options::default();
// Logging
set_logging_defaults(&mut opts, config);
// Processing
- const MIN_PARALLELISM: usize = 2;
let threads = if config.rocksdb_parallelism_threads == 0 {
cmp::max(MIN_PARALLELISM, utils::available_parallelism())
} else {
diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs
index 261d69dd..636ba4dd 100644
--- a/src/service/pusher/mod.rs
+++ b/src/service/pusher/mod.rs
@@ -49,12 +49,11 @@ impl Service {
where
T: OutgoingRequest + Debug,
{
- let dest = dest.replace(services().globals.notification_push_path(), "");
-
- trace!("Push gateway destination: {dest}");
-
const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_0];
+ let dest = dest.replace(services().globals.notification_push_path(), "");
+ trace!("Push gateway destination: {dest}");
+
let http_request = request
.try_into_http_request::(&dest, SendAccessToken::IfRequired(""), &VERSIONS)
.map_err(|e| {
diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs
index 4764f889..4df5b934 100644
--- a/src/service/sending/appservice.rs
+++ b/src/service/sending/appservice.rs
@@ -14,6 +14,8 @@ pub(crate) async fn send_request(registration: Registration, request: T) -> R
where
T: OutgoingRequest + Debug,
{
+ const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_0];
+
let Some(dest) = registration.url else {
return Ok(None);
};
@@ -21,9 +23,6 @@ where
trace!("Appservice URL \"{dest}\", Appservice ID: {}", registration.id);
let hs_token = registration.hs_token.as_str();
-
- const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_0];
-
let mut http_request = request
.try_into_http_request::(&dest, SendAccessToken::IfRequired(hs_token), &VERSIONS)
.map_err(|e| {
From e5eccb3a0c43892587ea1a3e32b51b851eafd65f Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:23:21 +0000
Subject: [PATCH 0015/2091] Fix unreadable-literal
Signed-off-by: Jason Volk
---
src/core/config/check.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/core/config/check.rs b/src/core/config/check.rs
index 99ca3cfd..a631c4f8 100644
--- a/src/core/config/check.rs
+++ b/src/core/config/check.rs
@@ -91,7 +91,7 @@ pub fn check(config: &Config) -> Result<(), Error> {
return Err(Error::bad_config("Registration token was specified but is empty (\"\")"));
}
- if config.max_request_size < 5120000 {
+ if config.max_request_size < 5_120_000 {
return Err(Error::bad_config("Max request size is less than 5MB. Please increase it."));
}
From 0f3d43153b6e85082f641bafdfb5fd900a443c00 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:25:22 +0000
Subject: [PATCH 0016/2091] Fix unused-self
Signed-off-by: Jason Volk
---
src/core/config/mod.rs | 4 ++--
src/service/rooms/event_handler/mod.rs | 24 ++++++++++++------------
src/service/rooms/typing/mod.rs | 8 ++++----
src/service/sending/sender.rs | 4 ++--
4 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index b4e2886a..665cbe20 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -411,7 +411,7 @@ impl Config {
};
// don't start if we're listening on both UNIX sockets and TCP at same time
- if config.is_dual_listening(&raw_config) {
+ if Self::is_dual_listening(&raw_config) {
return Err(Error::bad_config("dual listening on UNIX and TCP sockets not allowed."));
};
@@ -455,7 +455,7 @@ impl Config {
/// Checks the presence of the `address` and `unix_socket_path` keys in the
/// raw_config, exiting the process if both keys were detected.
- fn is_dual_listening(&self, raw_config: &Figment) -> bool {
+ fn is_dual_listening(raw_config: &Figment) -> bool {
let check_address = raw_config.find_value("address");
let check_unix_socket = raw_config.find_value("unix_socket_path");
diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs
index 15e3d8d3..754365c3 100644
--- a/src/service/rooms/event_handler/mod.rs
+++ b/src/service/rooms/event_handler/mod.rs
@@ -118,7 +118,7 @@ impl Service {
.ok_or_else(|| Error::bad_database("Failed to find create event in db."))?;
// Procure the room version
- let room_version_id = self.get_room_version_id(&create_event)?;
+ let room_version_id = Self::get_room_version_id(&create_event)?;
let first_pdu_in_room = services()
.rooms
@@ -130,7 +130,7 @@ impl Service {
.handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false, pub_key_map)
.await?;
- self.check_room_id(room_id, &incoming_pdu)?;
+ Self::check_room_id(room_id, &incoming_pdu)?;
// 8. if not timeline event: stop
if !is_timeline_event {
@@ -308,7 +308,7 @@ impl Service {
// 2. Check signatures, otherwise drop
// 3. check content hash, redact if doesn't match
- let room_version_id = self.get_room_version_id(create_event)?;
+ let room_version_id = Self::get_room_version_id(create_event)?;
let guard = pub_key_map.read().await;
let mut val = match ruma::signatures::verify_event(&guard, &value, &room_version_id) {
@@ -347,7 +347,7 @@ impl Service {
)
.map_err(|_| Error::bad_database("Event is not a valid PDU."))?;
- self.check_room_id(room_id, &incoming_pdu)?;
+ Self::check_room_id(room_id, &incoming_pdu)?;
if !auth_events_known {
// 4. fetch any missing auth events doing all checks listed here starting at 1.
@@ -382,7 +382,7 @@ impl Service {
continue;
};
- self.check_room_id(room_id, &auth_event)?;
+ Self::check_room_id(room_id, &auth_event)?;
match auth_events.entry((
auth_event.kind.to_string().into(),
@@ -417,7 +417,7 @@ impl Service {
}
if !state_res::event_auth::auth_check(
- &self.to_room_version(&room_version_id),
+ &Self::to_room_version(&room_version_id),
&incoming_pdu,
None::, // TODO: third party invite
|k, s| auth_events.get(&(k.to_string().into(), s.to_owned())),
@@ -460,7 +460,7 @@ impl Service {
debug!("Upgrading to timeline pdu");
let timer = tokio::time::Instant::now();
- let room_version_id = self.get_room_version_id(create_event)?;
+ let room_version_id = Self::get_room_version_id(create_event)?;
// 10. Fetch missing state and auth chain events by calling /state_ids at
// backwards extremities doing all the checks in this list starting at 1.
@@ -488,7 +488,7 @@ impl Service {
}
let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above");
- let room_version = self.to_room_version(&room_version_id);
+ let room_version = Self::to_room_version(&room_version_id);
debug!("Performing auth check");
// 11. Check the auth of the event passes based on the state of the event
@@ -1221,7 +1221,7 @@ impl Service {
.await
.pop()
{
- self.check_room_id(room_id, &pdu)?;
+ Self::check_room_id(room_id, &pdu)?;
if amount > services().globals.max_fetch_prev_events() {
// Max limit reached
@@ -1329,7 +1329,7 @@ impl Service {
}
}
- fn check_room_id(&self, room_id: &RoomId, pdu: &PduEvent) -> Result<()> {
+ fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> Result<()> {
if pdu.room_id != room_id {
warn!("Found event from room {} in room {}", pdu.room_id, room_id);
return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has wrong room id"));
@@ -1337,7 +1337,7 @@ impl Service {
Ok(())
}
- fn get_room_version_id(&self, create_event: &PduEvent) -> Result {
+ fn get_room_version_id(create_event: &PduEvent) -> Result {
let create_event_content: RoomCreateEventContent =
serde_json::from_str(create_event.content.get()).map_err(|e| {
error!("Invalid create event: {}", e);
@@ -1347,7 +1347,7 @@ impl Service {
Ok(create_event_content.room_version)
}
- fn to_room_version(&self, room_version_id: &RoomVersionId) -> RoomVersion {
+ fn to_room_version(room_version_id: &RoomVersionId) -> RoomVersion {
RoomVersion::new(room_version_id).expect("room version is supported")
}
}
diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs
index 37876f3f..0b89dfdd 100644
--- a/src/service/rooms/typing/mod.rs
+++ b/src/service/rooms/typing/mod.rs
@@ -44,7 +44,7 @@ impl Service {
// update federation
if user_is_local(user_id) {
- self.federation_send(room_id, user_id, true)?;
+ Self::federation_send(room_id, user_id, true)?;
}
Ok(())
@@ -70,7 +70,7 @@ impl Service {
// update federation
if user_is_local(user_id) {
- self.federation_send(room_id, user_id, false)?;
+ Self::federation_send(room_id, user_id, false)?;
}
Ok(())
@@ -126,7 +126,7 @@ impl Service {
// update federation
for user in removable {
if user_is_local(&user) {
- self.federation_send(room_id, &user, false)?;
+ Self::federation_send(room_id, &user, false)?;
}
}
}
@@ -163,7 +163,7 @@ impl Service {
})
}
- fn federation_send(&self, room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> {
+ fn federation_send(room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> {
debug_assert!(user_is_local(user_id), "tried to broadcast typing status of remote user",);
if !services().globals.config.allow_outgoing_typing {
return Ok(());
diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs
index 3d1d89da..9e107df8 100644
--- a/src/service/sending/sender.rs
+++ b/src/service/sending/sender.rs
@@ -80,12 +80,12 @@ impl Service {
) {
match response {
Ok(dest) => self.handle_response_ok(&dest, futures, statuses),
- Err((dest, e)) => self.handle_response_err(dest, futures, statuses, &e),
+ Err((dest, e)) => Self::handle_response_err(dest, futures, statuses, &e),
};
}
fn handle_response_err(
- &self, dest: Destination, _futures: &mut SendingFutures<'_>, statuses: &mut CurTransactionStatus, e: &Error,
+ dest: Destination, _futures: &mut SendingFutures<'_>, statuses: &mut CurTransactionStatus, e: &Error,
) {
debug!(dest = ?dest, "{e:?}");
statuses.entry(dest).and_modify(|e| {
From 4521e93d04e25a9c218a1840c7d18f1b6e2bfe6e Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:27:03 +0000
Subject: [PATCH 0017/2091] Fix stable-sort-primitive
Signed-off-by: Jason Volk
---
src/service/rooms/auth_chain/mod.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs
index 511b762e..d8e38c85 100644
--- a/src/service/rooms/auth_chain/mod.rs
+++ b/src/service/rooms/auth_chain/mod.rs
@@ -121,7 +121,7 @@ impl Service {
);
}
- full_auth_chain.sort();
+ full_auth_chain.sort_unstable();
full_auth_chain.dedup();
debug!(
chain_length = ?full_auth_chain.len(),
From 050841a871a80670b8e52ab4b7f15cc0c4e68cb5 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:29:45 +0000
Subject: [PATCH 0018/2091] Fix inefficient-to-string
Signed-off-by: Jason Volk
---
src/router/layers.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/router/layers.rs b/src/router/layers.rs
index a6e30e98..33d0cb20 100644
--- a/src/router/layers.rs
+++ b/src/router/layers.rs
@@ -160,7 +160,7 @@ fn catch_panic(err: Box) -> http::Response() {
s.clone()
} else if let Some(s) = err.downcast_ref::<&str>() {
- s.to_string()
+ (*s).to_owned()
} else {
"Unknown internal server error occurred.".to_owned()
};
From 38ab1083e31f94d6932a5f3671539785ff1ac5e0 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 00:30:04 +0000
Subject: [PATCH 0019/2091] Fix ptr-cast-constness
Signed-off-by: Jason Volk
---
src/core/alloc/je.rs | 5 ++++-
src/router/run.rs | 2 +-
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs
index 4092d815..ebf3575f 100644
--- a/src/core/alloc/je.rs
+++ b/src/core/alloc/je.rs
@@ -30,7 +30,10 @@ pub fn memory_stats() -> String {
let mut str = String::new();
let opaque = std::ptr::from_mut(&mut str).cast::();
- let opts_p: *const c_char = std::ffi::CString::new(opts_s).expect("cstring").into_raw() as *const c_char;
+ let opts_p: *const c_char = std::ffi::CString::new(opts_s)
+ .expect("cstring")
+ .into_raw()
+ .cast_const();
// SAFETY: calls malloc_stats_print() with our string instance which must remain
// in this frame. https://docs.rs/tikv-jemalloc-sys/latest/tikv_jemalloc_sys/fn.malloc_stats_print.html
diff --git a/src/router/run.rs b/src/router/run.rs
index d20f8669..e6238853 100644
--- a/src/router/run.rs
+++ b/src/router/run.rs
@@ -97,7 +97,7 @@ pub(crate) async fn stop(_server: Arc) -> Result<(), Error> {
.take()
.unwrap();
- let s = std::ptr::from_ref(s) as *mut Services;
+ let s: *mut Services = std::ptr::from_ref(s).cast_mut();
//SAFETY: Services was instantiated in start() and leaked into the SERVICES
// global perusing as 'static for the duration of run_server(). Now we reclaim
// it to drop it before unloading the module. If this is not done there will be
From 53fe2362fcf04d0478d6f223b58da7a8e5b9569c Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 01:43:25 +0000
Subject: [PATCH 0020/2091] Fix path-buf-push-overwrite
Signed-off-by: Jason Volk
---
src/service/media/mod.rs | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs
index 5521011d..0697577f 100644
--- a/src/service/media/mod.rs
+++ b/src/service/media/mod.rs
@@ -559,9 +559,7 @@ mod tests {
.db
.create_file_metadata(None, mxc, width, height, Some(content_disposition), Some(content_type))
.unwrap();
- let mut r = PathBuf::new();
- r.push("/tmp");
- r.push("media");
+ let mut r = PathBuf::from("/tmp/media");
// r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD));
// use the sha256 hash of the key as the file name instead of the key itself
// this is because the base64 encoded key can be longer than 255 characters.
From ee52d2f751b6a4394b11af2de123f9bfbe592f79 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sat, 1 Jun 2024 09:20:37 +0000
Subject: [PATCH 0021/2091] refactor lints into categories. lints are now more
strict.
rust:
* ALL lints which rustc defaults to "allow" have been set to "warn".
* NEW "warn" lints which produce a warning as of this commit have been
explicitly identified and commented with a TODO for later review.
clippy:
* ALL categories (sans restriction) now fully enabled to "warn".
* lints individually set to "warn" that are already covered by categories now at "warn" have been removed as redundant.
* previous "allow" sadness moved into respective categories.
* new warnings produced as of this commit have been explicitly identified:
- nursery lints set to "allow" marked with TODO for later review.
- pedantic lints set to "allow"
Signed-off-by: Jason Volk
---
Cargo.toml | 254 ++++++++++++++++++++++++++++++-----------------------
1 file changed, 145 insertions(+), 109 deletions(-)
diff --git a/Cargo.toml b/Cargo.toml
index de6878f0..cabd47a2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -644,145 +644,181 @@ opt-level = 'z'
[profile.test]
incremental = false
+###############################################################################
+#
+# Linting
+#
+
[workspace.lints.rust]
-missing_abi = "warn"
-noop_method_call = "warn"
-pointer_structural_match = "warn"
-explicit_outlives_requirements = "warn"
-unused_extern_crates = "warn"
-unused_import_braces = "warn"
-unused_lifetimes = "warn"
-unused_qualifications = "warn"
-unused_macro_rules = "warn"
-dead_code = "warn"
-elided_lifetimes_in_paths = "warn"
-macro_use_extern_crate = "warn"
-single_use_lifetimes = "warn"
-unsafe_op_in_unsafe_fn = "warn"
-unreachable_pub = "warn"
+absolute-paths-not-starting-with-crate = "warn"
+#box-pointers = "warn" # TODO
+deprecated-in-future = "warn"
+elided-lifetimes-in-paths = "warn"
+explicit-outlives-requirements = "warn"
+ffi-unwind-calls = "warn"
+keyword-idents = "warn"
+macro-use-extern-crate = "warn"
+meta-variable-misuse = "warn"
+missing-abi = "warn"
+#missing-copy-implementations = "warn" # TODO
+#missing-debug-implementations = "warn" # TODO
+non-ascii-idents = "warn"
+rust-2021-incompatible-closure-captures = "warn"
+rust-2021-incompatible-or-patterns = "warn"
+rust-2021-prefixes-incompatible-syntax = "warn"
+rust-2021-prelude-collisions = "warn"
+single-use-lifetimes = "warn"
+#trivial-casts = "warn" # TODO
+trivial-numeric-casts = "warn"
+unit-bindings = "warn"
+#unnameable-types = "warn" # TODO
+unreachable-pub = "warn"
+unsafe-op-in-unsafe-fn = "warn"
+unstable-features = "warn"
+unused-extern-crates = "warn"
+unused-import-braces = "warn"
+unused-lifetimes = "warn"
+unused-macro-rules = "warn"
+unused-qualifications = "warn"
+#unused-results = "warn" # TODO
-# this seems to suggest broken code and is not working correctly
-unused_braces = "allow"
-
-# cfgs cannot be limited to features or cargo build --all-features panics for unsuspecting users.
+## some sadness
+let_underscore_drop = "allow"
+missing_docs = "allow"
# cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.
# tokio_unstable will warn.
unexpected_cfgs = "allow"
+# this seems to suggest broken code and is not working correctly
+unused_braces = "allow"
+# buggy, but worth checking on occasionally
+unused_crate_dependencies = "allow"
+unsafe_code = "allow"
+variant_size_differences = "allow"
-# some sadness
-missing_docs = "allow"
+#######################################
+#
+# Clippy lints
+#
[workspace.lints.clippy]
-# pedantic = "warn"
-suspicious = "warn" # assume deny in practice
-perf = "warn" # assume deny in practice
+###################
+cargo = "warn"
-redundant_clone = "warn"
-cloned_instead_of_copied = "warn"
-expl_impl_clone_on_copy = "warn"
-unnecessary_cast = "warn"
-cast_lossless = "warn"
-ptr_as_ptr = "warn"
-mut_mut = "warn"
-char_lit_as_u8 = "warn"
-dbg_macro = "warn"
-empty_structs_with_brackets = "warn"
-get_unwrap = "warn"
-negative_feature_names = "warn"
-pub_without_shorthand = "warn"
-rc_buffer = "warn"
-rc_mutex = "warn"
-redundant_feature_names = "warn"
-redundant_type_annotations = "warn"
-rest_pat_in_fully_bound_structs = "warn"
-str_to_string = "warn"
-string_to_string = "warn"
-tests_outside_test_module = "warn"
-undocumented_unsafe_blocks = "warn"
-unneeded_field_pattern = "warn"
-unseparated_literal_suffix = "warn"
-wildcard_dependencies = "warn"
-or_fun_call = "warn"
-unnecessary_lazy_evaluations = "warn"
+## some sadness
+multiple_crate_versions = { level = "allow", priority = 1 }
+
+###################
+complexity = "warn"
+
+###################
+correctness = "warn"
+
+###################
+nursery = "warn"
+
+### some sadness
+branches_sharing_code = { level = "allow", priority = 1 } # TODO
+cognitive_complexity = { level = "allow", priority = 1 } # TODO
+derive_partial_eq_without_eq = { level = "allow", priority = 1 } # TODO
+equatable_if_let = { level = "allow", priority = 1 } # TODO
+future_not_send = { level = "allow", priority = 1 } # TODO
+missing_const_for_fn = { level = "allow", priority = 1 } # TODO
+needless_collect = { level = "allow", priority = 1 } # TODO
+needless_pass_by_ref_mut = { level = "allow", priority = 1 } # TODO
+option_if_let_else = { level = "allow", priority = 1 } # TODO
+redundant_pub_crate = { level = "allow", priority = 1 } # TODO
+significant_drop_in_scrutinee = { level = "allow", priority = 1 } # TODO
+significant_drop_tightening = { level = "allow", priority = 1 } # TODO
+suboptimal_flops = { level = "allow", priority = 1 } # TODO
+use_self = { level = "allow", priority = 1 } # TODO
+useless_let_if_seq = { level = "allow", priority = 1 } # TODO
+
+###################
+pedantic = "warn"
+
+## some sadness
+cast_possible_truncation = "allow"
+cast_precision_loss = "allow"
+cast_sign_loss = "allow"
+doc_markdown = "allow"
+error_impl_error = "allow"
+expect_used = "allow"
+if_not_else = "allow"
+if_then_some_else_none = "allow"
+implicit_return = "allow"
+inline_always = "allow"
+map_err_ignore = "allow"
+missing_docs_in_private_items = "allow"
+missing_errors_doc = "allow"
+missing_panics_doc = "allow"
+mod_module_files = "allow"
+module_name_repetitions = "allow"
+multiple_inherent_impl = "allow"
+no_effect_underscore_binding = "allow"
+ref_patterns = "allow"
+same_name_method = "allow"
+similar_names = { level = "allow", priority = 1 }
+single_call_fn = "allow"
+string_add = "allow"
+string_slice = "allow"
+struct_field_names = { level = "allow", priority = 1 }
+unnecessary_wraps = { level = "allow", priority = 1 }
+unused_async = { level = "allow", priority = 1 }
+unwrap_used = "allow"
+
+###################
+perf = "warn"
+
+###################
+#restriction = "warn"
+
+#arithmetic_side_effects = "warn" # TODO
+#as_conversions = "warn" # TODO
assertions_on_result_states = "warn"
+dbg_macro = "warn"
default_union_representation = "warn"
deref_by_slicing = "warn"
empty_drop = "warn"
+empty_structs_with_brackets = "warn"
exit = "warn"
filetype_is_file = "warn"
float_cmp_const = "warn"
+fn_to_numeric_cast_any = "warn"
format_push_string = "warn"
+get_unwrap = "warn"
impl_trait_in_params = "warn"
+let_underscore_must_use = "warn"
+let_underscore_untyped = "warn"
lossy_float_literal = "warn"
mem_forget = "warn"
missing_assert_message = "warn"
mutex_atomic = "warn"
+pub_without_shorthand = "warn"
+rc_buffer = "warn"
+rc_mutex = "warn"
+redundant_type_annotations = "warn"
+rest_pat_in_fully_bound_structs = "warn"
semicolon_outside_block = "warn"
-fn_to_numeric_cast = "warn"
-fn_to_numeric_cast_with_truncation = "warn"
+str_to_string = "warn"
string_lit_chars_any = "warn"
+string_to_string = "warn"
suspicious_xor_used_as_pow = "warn"
+tests_outside_test_module = "warn"
try_err = "warn"
+undocumented_unsafe_blocks = "warn"
unnecessary_safety_comment = "warn"
unnecessary_safety_doc = "warn"
unnecessary_self_imports = "warn"
+unneeded_field_pattern = "warn"
+unseparated_literal_suffix = "warn"
verbose_file_reads = "warn"
-cast_possible_wrap = "warn"
-redundant_closure_for_method_calls = "warn"
-large_futures = "warn"
-semicolon_if_nothing_returned = "warn"
-match_bool = "warn"
-struct_excessive_bools = "warn"
-must_use_candidate = "warn"
-collapsible_else_if = "warn"
-inconsistent_struct_constructor = "warn"
-manual_string_new = "warn"
-zero_sized_map_values = "warn"
-unnecessary_box_returns = "warn"
-map_unwrap_or = "warn"
-implicit_clone = "warn"
-match_wildcard_for_single_variants = "warn"
-match_same_arms = "warn"
-ignored_unit_patterns = "warn"
-redundant_else = "warn"
-explicit_into_iter_loop = "warn"
-used_underscore_binding = "warn"
-needless_pass_by_value = "warn"
-too_many_lines = "warn"
-let_underscore_untyped = "warn"
-single_match = "warn"
-single_match_else = "warn"
-explicit_deref_methods = "warn"
-explicit_iter_loop = "warn"
-manual_let_else = "warn"
-trivially_copy_pass_by_ref = "warn"
-wildcard_imports = "warn"
-checked_conversions = "warn"
-let_underscore_must_use = "warn"
-#integer_arithmetic = "warn"
-#as_conversions = "warn"
-# some sadness
-missing_errors_doc = "allow"
-missing_panics_doc = "allow"
-module_name_repetitions = "allow"
-if_not_else = "allow"
-doc_markdown = "allow"
-cast_possible_truncation = "allow"
-cast_precision_loss = "allow"
-cast_sign_loss = "allow"
-same_name_method = "allow"
-mod_module_files = "allow"
-unwrap_used = "allow"
-expect_used = "allow"
-if_then_some_else_none = "allow"
+###################
+style = "warn"
+
+###################
+suspicious = "warn"
+
+## some sadness
let_underscore_future = "allow"
-map_err_ignore = "allow"
-missing_docs_in_private_items = "allow"
-multiple_inherent_impl = "allow"
-error_impl_error = "allow"
-string_add = "allow"
-string_slice = "allow"
-ref_patterns = "allow"
-unnecessary_wraps = "allow"
From 9df5265c00c5083a3567c8be176ca149adf2c833 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Fri, 31 May 2024 00:14:22 +0000
Subject: [PATCH 0022/2091] split sending resolver into unit.
Signed-off-by: Jason Volk
---
src/admin/debug/debug_commands.rs | 2 +-
src/service/sending/mod.rs | 13 +-
src/service/sending/resolve.rs | 460 +++++++++++++++++++++++++++++
src/service/sending/send.rs | 465 +-----------------------------
4 files changed, 474 insertions(+), 466 deletions(-)
create mode 100644 src/service/sending/resolve.rs
diff --git a/src/admin/debug/debug_commands.rs b/src/admin/debug/debug_commands.rs
index 74313089..873a23e7 100644
--- a/src/admin/debug/debug_commands.rs
+++ b/src/admin/debug/debug_commands.rs
@@ -5,7 +5,7 @@ use ruma::{
api::client::error::ErrorKind, events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId,
RoomId, RoomVersionId, ServerName,
};
-use service::{rooms::event_handler::parse_incoming_pdu, sending::send::resolve_actual_dest, services, PduEvent};
+use service::{rooms::event_handler::parse_incoming_pdu, sending::resolve::resolve_actual_dest, services, PduEvent};
use tokio::sync::RwLock;
use tracing::{debug, info, warn};
use tracing_subscriber::EnvFilter;
diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs
index 654ec523..4ad40f67 100644
--- a/src/service/sending/mod.rs
+++ b/src/service/sending/mod.rs
@@ -1,6 +1,13 @@
+mod appservice;
+mod data;
+pub mod resolve;
+mod send;
+mod sender;
+
use std::{fmt::Debug, sync::Arc};
use data::Data;
+pub use resolve::FedDest;
use ruma::{
api::{appservice::Registration, OutgoingRequest},
OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
@@ -10,12 +17,6 @@ use tracing::{error, warn};
use crate::{server_is_ours, services, Config, Error, Result};
-mod appservice;
-mod data;
-pub mod send;
-pub mod sender;
-pub use send::FedDest;
-
pub struct Service {
pub db: Arc,
diff --git a/src/service/sending/resolve.rs b/src/service/sending/resolve.rs
new file mode 100644
index 00000000..d452a555
--- /dev/null
+++ b/src/service/sending/resolve.rs
@@ -0,0 +1,460 @@
+use std::{
+ fmt::Debug,
+ net::{IpAddr, SocketAddr},
+};
+
+use hickory_resolver::{error::ResolveError, lookup::SrvLookup};
+use ipaddress::IPAddress;
+use ruma::ServerName;
+use tracing::{debug, error, trace};
+
+use crate::{debug_error, debug_info, debug_warn, services, Error, Result};
+
+/// Wraps either a literal IP address plus port, or a hostname plus complement
+/// (colon-plus-port if it was specified).
+///
+/// Note: A `FedDest::Named` might contain an IP address in string form if there
+/// was no port specified to construct a `SocketAddr` with.
+///
+/// # Examples:
+/// ```rust
+/// # use conduit_service::sending::FedDest;
+/// # fn main() -> Result<(), std::net::AddrParseError> {
+/// FedDest::Literal("198.51.100.3:8448".parse()?);
+/// FedDest::Literal("[2001:db8::4:5]:443".parse()?);
+/// FedDest::Named("matrix.example.org".to_owned(), String::new());
+/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned());
+/// FedDest::Named("198.51.100.5".to_owned(), String::new());
+/// # Ok(())
+/// # }
+/// ```
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum FedDest {
+ Literal(SocketAddr),
+ Named(String, String),
+}
+
+pub(crate) struct ActualDest {
+ pub(crate) dest: FedDest,
+ pub(crate) host: String,
+ pub(crate) string: String,
+ pub(crate) cached: bool,
+}
+
+#[tracing::instrument(skip_all, name = "resolve")]
+pub(crate) async fn get_actual_dest(server_name: &ServerName) -> Result {
+ let cached;
+ let cached_result = services()
+ .globals
+ .actual_destinations()
+ .read()
+ .await
+ .get(server_name)
+ .cloned();
+
+ let (dest, host) = if let Some(result) = cached_result {
+ cached = true;
+ result
+ } else {
+ cached = false;
+ validate_dest(server_name)?;
+ resolve_actual_dest(server_name, false).await?
+ };
+
+ let string = dest.clone().into_https_string();
+ Ok(ActualDest {
+ dest,
+ host,
+ string,
+ cached,
+ })
+}
+
+/// Returns: `actual_destination`, host header
+/// Implemented according to the specification at
+/// Numbers in comments below refer to bullet points in linked section of
+/// specification
+pub async fn resolve_actual_dest(dest: &ServerName, no_cache_dest: bool) -> Result<(FedDest, String)> {
+ trace!("Finding actual destination for {dest}");
+ let dest_str = dest.as_str().to_owned();
+ let mut hostname = dest_str.clone();
+
+ #[allow(clippy::single_match_else)]
+ let actual_dest = match get_ip_with_port(&dest_str) {
+ Some(host_port) => {
+ debug!("1: IP literal with provided or default port");
+ host_port
+ },
+ None => {
+ if let Some(pos) = dest_str.find(':') {
+ debug!("2: Hostname with included port");
+ let (host, port) = dest_str.split_at(pos);
+ if !no_cache_dest {
+ query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await?;
+ }
+
+ FedDest::Named(host.to_owned(), port.to_owned())
+ } else {
+ trace!("Requesting well known for {dest}");
+ if let Some(delegated_hostname) = request_well_known(dest.as_str()).await? {
+ debug!("3: A .well-known file is available");
+ hostname = add_port_to_hostname(&delegated_hostname).into_uri_string();
+ match get_ip_with_port(&delegated_hostname) {
+ Some(host_and_port) => {
+ debug!("3.1: IP literal in .well-known file");
+ host_and_port
+ },
+ None => {
+ if let Some(pos) = delegated_hostname.find(':') {
+ debug!("3.2: Hostname with port in .well-known file");
+ let (host, port) = delegated_hostname.split_at(pos);
+ if !no_cache_dest {
+ query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await?;
+ }
+
+ FedDest::Named(host.to_owned(), port.to_owned())
+ } else {
+ trace!("Delegated hostname has no port in this branch");
+ if let Some(hostname_override) = query_srv_record(&delegated_hostname).await? {
+ debug!("3.3: SRV lookup successful");
+ let force_port = hostname_override.port();
+ if !no_cache_dest {
+ query_and_cache_override(
+ &delegated_hostname,
+ &hostname_override.hostname(),
+ force_port.unwrap_or(8448),
+ )
+ .await?;
+ }
+
+ if let Some(port) = force_port {
+ FedDest::Named(delegated_hostname, format!(":{port}"))
+ } else {
+ add_port_to_hostname(&delegated_hostname)
+ }
+ } else {
+ debug!("3.4: No SRV records, just use the hostname from .well-known");
+ if !no_cache_dest {
+ query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448)
+ .await?;
+ }
+
+ add_port_to_hostname(&delegated_hostname)
+ }
+ }
+ },
+ }
+ } else {
+ trace!("4: No .well-known or an error occured");
+ if let Some(hostname_override) = query_srv_record(&dest_str).await? {
+ debug!("4: No .well-known; SRV record found");
+ let force_port = hostname_override.port();
+
+ if !no_cache_dest {
+ query_and_cache_override(
+ &hostname,
+ &hostname_override.hostname(),
+ force_port.unwrap_or(8448),
+ )
+ .await?;
+ }
+
+ if let Some(port) = force_port {
+ FedDest::Named(hostname.clone(), format!(":{port}"))
+ } else {
+ add_port_to_hostname(&hostname)
+ }
+ } else {
+ debug!("4: No .well-known; 5: No SRV record found");
+ if !no_cache_dest {
+ query_and_cache_override(&dest_str, &dest_str, 8448).await?;
+ }
+
+ add_port_to_hostname(&dest_str)
+ }
+ }
+ }
+ },
+ };
+
+ // Can't use get_ip_with_port here because we don't want to add a port
+ // to an IP address if it wasn't specified
+ let hostname = if let Ok(addr) = hostname.parse::() {
+ FedDest::Literal(addr)
+ } else if let Ok(addr) = hostname.parse::() {
+ FedDest::Named(addr.to_string(), ":8448".to_owned())
+ } else if let Some(pos) = hostname.find(':') {
+ let (host, port) = hostname.split_at(pos);
+ FedDest::Named(host.to_owned(), port.to_owned())
+ } else {
+ FedDest::Named(hostname, ":8448".to_owned())
+ };
+
+ debug!("Actual destination: {actual_dest:?} hostname: {hostname:?}");
+ Ok((actual_dest, hostname.into_uri_string()))
+}
+
+#[tracing::instrument(skip_all, name = "well-known")]
+async fn request_well_known(dest: &str) -> Result> {
+ if !services()
+ .globals
+ .resolver
+ .overrides
+ .read()
+ .unwrap()
+ .contains_key(dest)
+ {
+ query_and_cache_override(dest, dest, 8448).await?;
+ }
+
+ let response = services()
+ .globals
+ .client
+ .well_known
+ .get(&format!("https://{dest}/.well-known/matrix/server"))
+ .send()
+ .await;
+
+ trace!("response: {:?}", response);
+ if let Err(e) = &response {
+ debug!("error: {e:?}");
+ return Ok(None);
+ }
+
+ let response = response?;
+ if !response.status().is_success() {
+ debug!("response not 2XX");
+ return Ok(None);
+ }
+
+ let text = response.text().await?;
+ trace!("response text: {:?}", text);
+ if text.len() >= 12288 {
+ debug_warn!("response contains junk");
+ return Ok(None);
+ }
+
+ let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default();
+
+ let m_server = body
+ .get("m.server")
+ .unwrap_or(&serde_json::Value::Null)
+ .as_str()
+ .unwrap_or_default();
+
+ if ruma_identifiers_validation::server_name::validate(m_server).is_err() {
+ debug_error!("response content missing or invalid");
+ return Ok(None);
+ }
+
+ debug_info!("{:?} found at {:?}", dest, m_server);
+ Ok(Some(m_server.to_owned()))
+}
+
+#[tracing::instrument(skip_all, name = "ip")]
+async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> {
+ match services()
+ .globals
+ .dns_resolver()
+ .lookup_ip(hostname.to_owned())
+ .await
+ {
+ Err(e) => handle_resolve_error(&e),
+ Ok(override_ip) => {
+ if hostname != overname {
+ debug_info!("{:?} overriden by {:?}", overname, hostname);
+ }
+ services()
+ .globals
+ .resolver
+ .overrides
+ .write()
+ .unwrap()
+ .insert(overname.to_owned(), (override_ip.iter().collect(), port));
+
+ Ok(())
+ },
+ }
+}
+
+#[tracing::instrument(skip_all, name = "srv")]
+async fn query_srv_record(hostname: &'_ str) -> Result > {
+ fn handle_successful_srv(srv: &SrvLookup) -> Option {
+ srv.iter().next().map(|result| {
+ FedDest::Named(
+ result.target().to_string().trim_end_matches('.').to_owned(),
+ format!(":{}", result.port()),
+ )
+ })
+ }
+
+ async fn lookup_srv(hostname: &str) -> Result {
+ debug!("querying SRV for {:?}", hostname);
+ let hostname = hostname.trim_end_matches('.');
+ services()
+ .globals
+ .dns_resolver()
+ .srv_lookup(hostname.to_owned())
+ .await
+ }
+
+ let hostnames = [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
+
+ for hostname in hostnames {
+ match lookup_srv(&hostname).await {
+ Ok(result) => return Ok(handle_successful_srv(&result)),
+ Err(e) => handle_resolve_error(&e)?,
+ }
+ }
+
+ Ok(None)
+}
+
+#[allow(clippy::single_match_else)]
+fn handle_resolve_error(e: &ResolveError) -> Result<()> {
+ use hickory_resolver::error::ResolveErrorKind;
+
+ match *e.kind() {
+ ResolveErrorKind::NoRecordsFound {
+ ..
+ } => {
+ // Raise to debug_warn if we can find out the result wasn't from cache
+ debug!("{e}");
+ Ok(())
+ },
+ _ => {
+ error!("DNS {e}");
+ Err(Error::Err(e.to_string()))
+ },
+ }
+}
+
+fn validate_dest(dest: &ServerName) -> Result<()> {
+ if dest == services().globals.server_name() {
+ return Err(Error::bad_config("Won't send federation request to ourselves"));
+ }
+
+ if dest.is_ip_literal() || IPAddress::is_valid(dest.host()) {
+ validate_dest_ip_literal(dest)?;
+ }
+
+ Ok(())
+}
+
+fn validate_dest_ip_literal(dest: &ServerName) -> Result<()> {
+ trace!("Destination is an IP literal, checking against IP range denylist.",);
+ debug_assert!(
+ dest.is_ip_literal() || !IPAddress::is_valid(dest.host()),
+ "Destination is not an IP literal."
+ );
+ let ip = IPAddress::parse(dest.host()).map_err(|e| {
+ debug_error!("Failed to parse IP literal from string: {}", e);
+ Error::BadServerResponse("Invalid IP address")
+ })?;
+
+ validate_ip(&ip)?;
+
+ Ok(())
+}
+
+pub(crate) fn validate_ip(ip: &IPAddress) -> Result<()> {
+ if !services().globals.valid_cidr_range(ip) {
+ return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
+ }
+
+ Ok(())
+}
+
+fn get_ip_with_port(dest_str: &str) -> Option {
+ if let Ok(dest) = dest_str.parse::() {
+ Some(FedDest::Literal(dest))
+ } else if let Ok(ip_addr) = dest_str.parse::() {
+ Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448)))
+ } else {
+ None
+ }
+}
+
+fn add_port_to_hostname(dest_str: &str) -> FedDest {
+ let (host, port) = match dest_str.find(':') {
+ None => (dest_str, ":8448"),
+ Some(pos) => dest_str.split_at(pos),
+ };
+
+ FedDest::Named(host.to_owned(), port.to_owned())
+}
+
+impl FedDest {
+ fn into_https_string(self) -> String {
+ match self {
+ Self::Literal(addr) => format!("https://{addr}"),
+ Self::Named(host, port) => format!("https://{host}{port}"),
+ }
+ }
+
+ fn into_uri_string(self) -> String {
+ match self {
+ Self::Literal(addr) => addr.to_string(),
+ Self::Named(host, port) => format!("{host}{port}"),
+ }
+ }
+
+ fn hostname(&self) -> String {
+ match &self {
+ Self::Literal(addr) => addr.ip().to_string(),
+ Self::Named(host, _) => host.clone(),
+ }
+ }
+
+ fn port(&self) -> Option {
+ match &self {
+ Self::Literal(addr) => Some(addr.port()),
+ Self::Named(_, port) => port[1..].parse().ok(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{add_port_to_hostname, get_ip_with_port, FedDest};
+
+ #[test]
+ fn ips_get_default_ports() {
+ assert_eq!(
+ get_ip_with_port("1.1.1.1"),
+ Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap()))
+ );
+ assert_eq!(
+ get_ip_with_port("dead:beef::"),
+ Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap()))
+ );
+ }
+
+ #[test]
+ fn ips_keep_custom_ports() {
+ assert_eq!(
+ get_ip_with_port("1.1.1.1:1234"),
+ Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap()))
+ );
+ assert_eq!(
+ get_ip_with_port("[dead::beef]:8933"),
+ Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap()))
+ );
+ }
+
+ #[test]
+ fn hostnames_get_default_ports() {
+ assert_eq!(
+ add_port_to_hostname("example.com"),
+ FedDest::Named(String::from("example.com"), String::from(":8448"))
+ );
+ }
+
+ #[test]
+ fn hostnames_keep_custom_ports() {
+ assert_eq!(
+ add_port_to_hostname("example.com:1337"),
+ FedDest::Named(String::from("example.com"), String::from(":1337"))
+ );
+ }
+}
diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs
index a835e438..e8432d12 100644
--- a/src/service/sending/send.rs
+++ b/src/service/sending/send.rs
@@ -1,10 +1,5 @@
-use std::{
- fmt::Debug,
- mem,
- net::{IpAddr, SocketAddr},
-};
+use std::{fmt::Debug, mem};
-use hickory_resolver::{error::ResolveError, lookup::SrvLookup};
use http::{header::AUTHORIZATION, HeaderValue};
use ipaddress::IPAddress;
use reqwest::{Client, Method, Request, Response, Url};
@@ -15,40 +10,10 @@ use ruma::{
},
OwnedServerName, ServerName,
};
-use tracing::{debug, error, trace};
+use tracing::{debug, trace};
-use crate::{debug_error, debug_info, debug_warn, services, Error, Result};
-
-/// Wraps either an literal IP address plus port, or a hostname plus complement
-/// (colon-plus-port if it was specified).
-///
-/// Note: A `FedDest::Named` might contain an IP address in string form if there
-/// was no port specified to construct a `SocketAddr` with.
-///
-/// # Examples:
-/// ```rust
-/// # use conduit_service::sending::FedDest;
-/// # fn main() -> Result<(), std::net::AddrParseError> {
-/// FedDest::Literal("198.51.100.3:8448".parse()?);
-/// FedDest::Literal("[2001:db8::4:5]:443".parse()?);
-/// FedDest::Named("matrix.example.org".to_owned(), String::new());
-/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned());
-/// FedDest::Named("198.51.100.5".to_owned(), String::new());
-/// # Ok(())
-/// # }
-/// ```
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum FedDest {
- Literal(SocketAddr),
- Named(String, String),
-}
-
-struct ActualDest {
- dest: FedDest,
- host: String,
- string: String,
- cached: bool,
-}
+use super::{resolve, resolve::ActualDest};
+use crate::{debug_error, debug_warn, services, Error, Result};
#[tracing::instrument(skip_all, name = "send")]
pub async fn send(client: &Client, dest: &ServerName, req: T) -> Result
@@ -59,7 +24,7 @@ where
return Err(Error::bad_config("Federation is disabled."));
}
- let actual = get_actual_dest(dest).await?;
+ let actual = resolve::get_actual_dest(dest).await?;
let request = prepare::(dest, &actual, req).await?;
execute::(client, dest, &actual, request).await
}
@@ -177,294 +142,6 @@ where
Err(e.into())
}
-#[tracing::instrument(skip_all, name = "resolve")]
-async fn get_actual_dest(server_name: &ServerName) -> Result {
- let cached;
- let cached_result = services()
- .globals
- .actual_destinations()
- .read()
- .await
- .get(server_name)
- .cloned();
-
- let (dest, host) = if let Some(result) = cached_result {
- cached = true;
- result
- } else {
- cached = false;
- validate_dest(server_name)?;
- resolve_actual_dest(server_name, false).await?
- };
-
- let string = dest.clone().into_https_string();
- Ok(ActualDest {
- dest,
- host,
- string,
- cached,
- })
-}
-
-/// Returns: `actual_destination`, host header
-/// Implemented according to the specification at
-/// Numbers in comments below refer to bullet points in linked section of
-/// specification
-pub async fn resolve_actual_dest(dest: &ServerName, no_cache_dest: bool) -> Result<(FedDest, String)> {
- trace!("Finding actual destination for {dest}");
- let dest_str = dest.as_str().to_owned();
- let mut hostname = dest_str.clone();
-
- #[allow(clippy::single_match_else)]
- let actual_dest = match get_ip_with_port(&dest_str) {
- Some(host_port) => {
- debug!("1: IP literal with provided or default port");
- host_port
- },
- None => {
- if let Some(pos) = dest_str.find(':') {
- debug!("2: Hostname with included port");
- let (host, port) = dest_str.split_at(pos);
- if !no_cache_dest {
- query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await?;
- }
-
- FedDest::Named(host.to_owned(), port.to_owned())
- } else {
- trace!("Requesting well known for {dest}");
- if let Some(delegated_hostname) = request_well_known(dest.as_str()).await? {
- debug!("3: A .well-known file is available");
- hostname = add_port_to_hostname(&delegated_hostname).into_uri_string();
- match get_ip_with_port(&delegated_hostname) {
- Some(host_and_port) => {
- debug!("3.1: IP literal in .well-known file");
- host_and_port
- },
- None => {
- if let Some(pos) = delegated_hostname.find(':') {
- debug!("3.2: Hostname with port in .well-known file");
- let (host, port) = delegated_hostname.split_at(pos);
- if !no_cache_dest {
- query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await?;
- }
-
- FedDest::Named(host.to_owned(), port.to_owned())
- } else {
- trace!("Delegated hostname has no port in this branch");
- if let Some(hostname_override) = query_srv_record(&delegated_hostname).await? {
- debug!("3.3: SRV lookup successful");
- let force_port = hostname_override.port();
- if !no_cache_dest {
- query_and_cache_override(
- &delegated_hostname,
- &hostname_override.hostname(),
- force_port.unwrap_or(8448),
- )
- .await?;
- }
-
- if let Some(port) = force_port {
- FedDest::Named(delegated_hostname, format!(":{port}"))
- } else {
- add_port_to_hostname(&delegated_hostname)
- }
- } else {
- debug!("3.4: No SRV records, just use the hostname from .well-known");
- if !no_cache_dest {
- query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448)
- .await?;
- }
-
- add_port_to_hostname(&delegated_hostname)
- }
- }
- },
- }
- } else {
- trace!("4: No .well-known or an error occured");
- if let Some(hostname_override) = query_srv_record(&dest_str).await? {
- debug!("4: No .well-known; SRV record found");
- let force_port = hostname_override.port();
-
- if !no_cache_dest {
- query_and_cache_override(
- &hostname,
- &hostname_override.hostname(),
- force_port.unwrap_or(8448),
- )
- .await?;
- }
-
- if let Some(port) = force_port {
- FedDest::Named(hostname.clone(), format!(":{port}"))
- } else {
- add_port_to_hostname(&hostname)
- }
- } else {
- debug!("4: No .well-known; 5: No SRV record found");
- if !no_cache_dest {
- query_and_cache_override(&dest_str, &dest_str, 8448).await?;
- }
-
- add_port_to_hostname(&dest_str)
- }
- }
- }
- },
- };
-
- // Can't use get_ip_with_port here because we don't want to add a port
- // to an IP address if it wasn't specified
- let hostname = if let Ok(addr) = hostname.parse::() {
- FedDest::Literal(addr)
- } else if let Ok(addr) = hostname.parse::() {
- FedDest::Named(addr.to_string(), ":8448".to_owned())
- } else if let Some(pos) = hostname.find(':') {
- let (host, port) = hostname.split_at(pos);
- FedDest::Named(host.to_owned(), port.to_owned())
- } else {
- FedDest::Named(hostname, ":8448".to_owned())
- };
-
- debug!("Actual destination: {actual_dest:?} hostname: {hostname:?}");
- Ok((actual_dest, hostname.into_uri_string()))
-}
-
-#[tracing::instrument(skip_all, name = "well-known")]
-async fn request_well_known(dest: &str) -> Result> {
- if !services()
- .globals
- .resolver
- .overrides
- .read()
- .unwrap()
- .contains_key(dest)
- {
- query_and_cache_override(dest, dest, 8448).await?;
- }
-
- let response = services()
- .globals
- .client
- .well_known
- .get(&format!("https://{dest}/.well-known/matrix/server"))
- .send()
- .await;
-
- trace!("response: {:?}", response);
- if let Err(e) = &response {
- debug!("error: {e:?}");
- return Ok(None);
- }
-
- let response = response?;
- if !response.status().is_success() {
- debug!("response not 2XX");
- return Ok(None);
- }
-
- let text = response.text().await?;
- trace!("response text: {:?}", text);
- if text.len() >= 12288 {
- debug_warn!("response contains junk");
- return Ok(None);
- }
-
- let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default();
-
- let m_server = body
- .get("m.server")
- .unwrap_or(&serde_json::Value::Null)
- .as_str()
- .unwrap_or_default();
-
- if ruma_identifiers_validation::server_name::validate(m_server).is_err() {
- debug_error!("response content missing or invalid");
- return Ok(None);
- }
-
- debug_info!("{:?} found at {:?}", dest, m_server);
- Ok(Some(m_server.to_owned()))
-}
-
-#[tracing::instrument(skip_all, name = "ip")]
-async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> {
- match services()
- .globals
- .dns_resolver()
- .lookup_ip(hostname.to_owned())
- .await
- {
- Err(e) => handle_resolve_error(&e),
- Ok(override_ip) => {
- if hostname != overname {
- debug_info!("{:?} overriden by {:?}", overname, hostname);
- }
- services()
- .globals
- .resolver
- .overrides
- .write()
- .unwrap()
- .insert(overname.to_owned(), (override_ip.iter().collect(), port));
-
- Ok(())
- },
- }
-}
-
-#[tracing::instrument(skip_all, name = "srv")]
-async fn query_srv_record(hostname: &'_ str) -> Result > {
- fn handle_successful_srv(srv: &SrvLookup) -> Option {
- srv.iter().next().map(|result| {
- FedDest::Named(
- result.target().to_string().trim_end_matches('.').to_owned(),
- format!(":{}", result.port()),
- )
- })
- }
-
- async fn lookup_srv(hostname: &str) -> Result {
- debug!("querying SRV for {:?}", hostname);
- let hostname = hostname.trim_end_matches('.');
- services()
- .globals
- .dns_resolver()
- .srv_lookup(hostname.to_owned())
- .await
- }
-
- let hostnames = [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
-
- for hostname in hostnames {
- match lookup_srv(&hostname).await {
- Ok(result) => return Ok(handle_successful_srv(&result)),
- Err(e) => handle_resolve_error(&e)?,
- }
- }
-
- Ok(None)
-}
-
-#[allow(clippy::single_match_else)]
-fn handle_resolve_error(e: &ResolveError) -> Result<()> {
- use hickory_resolver::error::ResolveErrorKind;
-
- match *e.kind() {
- ResolveErrorKind::NoRecordsFound {
- ..
- } => {
- // Raise to debug_warn if we can find out the result wasn't from cache
- debug!("{e}");
- Ok(())
- },
- _ => {
- error!("DNS {e}");
- Err(Error::Err(e.to_string()))
- },
- }
-}
-
fn sign_request<T>(dest: &ServerName, http_request: &mut http::Request<Vec<u8>>)
where
T: OutgoingRequest + Debug,
@@ -533,139 +210,9 @@ fn validate_url(url: &Url) -> Result<()> {
if let Some(url_host) = url.host_str() {
if let Ok(ip) = IPAddress::parse(url_host) {
trace!("Checking request URL IP {ip:?}");
- validate_ip(&ip)?;
+ resolve::validate_ip(&ip)?;
}
}
Ok(())
}
-
-fn validate_dest(dest: &ServerName) -> Result<()> {
- if dest == services().globals.server_name() {
- return Err(Error::bad_config("Won't send federation request to ourselves"));
- }
-
- if dest.is_ip_literal() || IPAddress::is_valid(dest.host()) {
- validate_dest_ip_literal(dest)?;
- }
-
- Ok(())
-}
-
-fn validate_dest_ip_literal(dest: &ServerName) -> Result<()> {
- trace!("Destination is an IP literal, checking against IP range denylist.",);
- debug_assert!(
- dest.is_ip_literal() || !IPAddress::is_valid(dest.host()),
- "Destination is not an IP literal."
- );
- let ip = IPAddress::parse(dest.host()).map_err(|e| {
- debug_error!("Failed to parse IP literal from string: {}", e);
- Error::BadServerResponse("Invalid IP address")
- })?;
-
- validate_ip(&ip)?;
-
- Ok(())
-}
-
-fn validate_ip(ip: &IPAddress) -> Result<()> {
- if !services().globals.valid_cidr_range(ip) {
- return Err(Error::BadServerResponse("Not allowed to send requests to this IP"));
- }
-
- Ok(())
-}
-
-fn get_ip_with_port(dest_str: &str) -> Option<FedDest> {
- if let Ok(dest) = dest_str.parse::<SocketAddr>() {
- Some(FedDest::Literal(dest))
- } else if let Ok(ip_addr) = dest_str.parse::<IpAddr>() {
- Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448)))
- } else {
- None
- }
-}
-
-fn add_port_to_hostname(dest_str: &str) -> FedDest {
- let (host, port) = match dest_str.find(':') {
- None => (dest_str, ":8448"),
- Some(pos) => dest_str.split_at(pos),
- };
-
- FedDest::Named(host.to_owned(), port.to_owned())
-}
-
-impl FedDest {
- fn into_https_string(self) -> String {
- match self {
- Self::Literal(addr) => format!("https://{addr}"),
- Self::Named(host, port) => format!("https://{host}{port}"),
- }
- }
-
- fn into_uri_string(self) -> String {
- match self {
- Self::Literal(addr) => addr.to_string(),
- Self::Named(host, port) => format!("{host}{port}"),
- }
- }
-
- fn hostname(&self) -> String {
- match &self {
- Self::Literal(addr) => addr.ip().to_string(),
- Self::Named(host, _) => host.clone(),
- }
- }
-
- fn port(&self) -> Option<u16> {
- match &self {
- Self::Literal(addr) => Some(addr.port()),
- Self::Named(_, port) => port[1..].parse().ok(),
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::{add_port_to_hostname, get_ip_with_port, FedDest};
-
- #[test]
- fn ips_get_default_ports() {
- assert_eq!(
- get_ip_with_port("1.1.1.1"),
- Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap()))
- );
- assert_eq!(
- get_ip_with_port("dead:beef::"),
- Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap()))
- );
- }
-
- #[test]
- fn ips_keep_custom_ports() {
- assert_eq!(
- get_ip_with_port("1.1.1.1:1234"),
- Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap()))
- );
- assert_eq!(
- get_ip_with_port("[dead::beef]:8933"),
- Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap()))
- );
- }
-
- #[test]
- fn hostnames_get_default_ports() {
- assert_eq!(
- add_port_to_hostname("example.com"),
- FedDest::Named(String::from("example.com"), String::from(":8448"))
- );
- }
-
- #[test]
- fn hostnames_keep_custom_ports() {
- assert_eq!(
- add_port_to_hostname("example.com:1337"),
- FedDest::Named(String::from("example.com"), String::from(":1337"))
- );
- }
-}
From ba48758b89dba451551fd243ac2abd1276e01df4 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Fri, 31 May 2024 01:21:15 +0000
Subject: [PATCH 0023/2091] impl fmt::Display for FedDest
Signed-off-by: Jason Volk
---
src/admin/debug/debug_commands.rs | 2 +-
src/service/sending/resolve.rs | 10 ++++++++++
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/src/admin/debug/debug_commands.rs b/src/admin/debug/debug_commands.rs
index 873a23e7..27f259a6 100644
--- a/src/admin/debug/debug_commands.rs
+++ b/src/admin/debug/debug_commands.rs
@@ -448,7 +448,7 @@ pub(crate) async fn resolve_true_destination(
let (actual_dest, hostname_uri) = resolve_actual_dest(&server_name, no_cache).await?;
Ok(RoomMessageEventContent::text_plain(format!(
- "Actual destination: {actual_dest:?} | Hostname URI: {hostname_uri}"
+ "Actual destination: {actual_dest} | Hostname URI: {hostname_uri}"
)))
}
diff --git a/src/service/sending/resolve.rs b/src/service/sending/resolve.rs
index d452a555..243180ed 100644
--- a/src/service/sending/resolve.rs
+++ b/src/service/sending/resolve.rs
@@ -1,4 +1,5 @@
use std::{
+ fmt,
fmt::Debug,
net::{IpAddr, SocketAddr},
};
@@ -414,6 +415,15 @@ impl FedDest {
}
}
+impl fmt::Display for FedDest {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ FedDest::Named(host, port) => write!(f, "{host}{port}"),
+ FedDest::Literal(addr) => write!(f, "{addr}"),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use super::{add_port_to_hostname, get_ip_with_port, FedDest};
From f1d1366129ffc64e0aad1400664fed3cab15a98f Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Fri, 31 May 2024 05:08:00 +0000
Subject: [PATCH 0024/2091] split resolve_actual_dest
Signed-off-by: Jason Volk
---
src/admin/debug/debug_commands.rs | 2 +-
src/service/sending/resolve.rs | 211 +++++++++++++++---------------
2 files changed, 108 insertions(+), 105 deletions(-)
diff --git a/src/admin/debug/debug_commands.rs b/src/admin/debug/debug_commands.rs
index 27f259a6..6a5cee35 100644
--- a/src/admin/debug/debug_commands.rs
+++ b/src/admin/debug/debug_commands.rs
@@ -445,7 +445,7 @@ pub(crate) async fn resolve_true_destination(
));
}
- let (actual_dest, hostname_uri) = resolve_actual_dest(&server_name, no_cache).await?;
+ let (actual_dest, hostname_uri) = resolve_actual_dest(&server_name, !no_cache).await?;
Ok(RoomMessageEventContent::text_plain(format!(
"Actual destination: {actual_dest} | Hostname URI: {hostname_uri}"
diff --git a/src/service/sending/resolve.rs b/src/service/sending/resolve.rs
index 243180ed..b79f18c8 100644
--- a/src/service/sending/resolve.rs
+++ b/src/service/sending/resolve.rs
@@ -59,7 +59,7 @@ pub(crate) async fn get_actual_dest(server_name: &ServerName) -> Result Result
/// Numbers in comments below refer to bullet points in linked section of
/// specification
-pub async fn resolve_actual_dest(dest: &ServerName, no_cache_dest: bool) -> Result<(FedDest, String)> {
+pub async fn resolve_actual_dest(dest: &ServerName, cache: bool) -> Result<(FedDest, String)> {
trace!("Finding actual destination for {dest}");
- let dest_str = dest.as_str().to_owned();
- let mut hostname = dest_str.clone();
-
- #[allow(clippy::single_match_else)]
- let actual_dest = match get_ip_with_port(&dest_str) {
- Some(host_port) => {
- debug!("1: IP literal with provided or default port");
- host_port
- },
+ let mut host = dest.as_str().to_owned();
+ let actual_dest = match get_ip_with_port(dest.as_str()) {
+ Some(host_port) => actual_dest_1(host_port)?,
None => {
- if let Some(pos) = dest_str.find(':') {
- debug!("2: Hostname with included port");
- let (host, port) = dest_str.split_at(pos);
- if !no_cache_dest {
- query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await?;
- }
-
- FedDest::Named(host.to_owned(), port.to_owned())
+ if let Some(pos) = dest.as_str().find(':') {
+ actual_dest_2(dest, cache, pos).await?
+ } else if let Some(delegated) = request_well_known(dest.as_str()).await? {
+ actual_dest_3(&mut host, cache, delegated).await?
+ } else if let Some(overrider) = query_srv_record(dest.as_str()).await? {
+ actual_dest_4(&host, cache, overrider).await?
} else {
- trace!("Requesting well known for {dest}");
- if let Some(delegated_hostname) = request_well_known(dest.as_str()).await? {
- debug!("3: A .well-known file is available");
- hostname = add_port_to_hostname(&delegated_hostname).into_uri_string();
- match get_ip_with_port(&delegated_hostname) {
- Some(host_and_port) => {
- debug!("3.1: IP literal in .well-known file");
- host_and_port
- },
- None => {
- if let Some(pos) = delegated_hostname.find(':') {
- debug!("3.2: Hostname with port in .well-known file");
- let (host, port) = delegated_hostname.split_at(pos);
- if !no_cache_dest {
- query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await?;
- }
-
- FedDest::Named(host.to_owned(), port.to_owned())
- } else {
- trace!("Delegated hostname has no port in this branch");
- if let Some(hostname_override) = query_srv_record(&delegated_hostname).await? {
- debug!("3.3: SRV lookup successful");
- let force_port = hostname_override.port();
- if !no_cache_dest {
- query_and_cache_override(
- &delegated_hostname,
- &hostname_override.hostname(),
- force_port.unwrap_or(8448),
- )
- .await?;
- }
-
- if let Some(port) = force_port {
- FedDest::Named(delegated_hostname, format!(":{port}"))
- } else {
- add_port_to_hostname(&delegated_hostname)
- }
- } else {
- debug!("3.4: No SRV records, just use the hostname from .well-known");
- if !no_cache_dest {
- query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448)
- .await?;
- }
-
- add_port_to_hostname(&delegated_hostname)
- }
- }
- },
- }
- } else {
- trace!("4: No .well-known or an error occured");
- if let Some(hostname_override) = query_srv_record(&dest_str).await? {
- debug!("4: No .well-known; SRV record found");
- let force_port = hostname_override.port();
-
- if !no_cache_dest {
- query_and_cache_override(
- &hostname,
- &hostname_override.hostname(),
- force_port.unwrap_or(8448),
- )
- .await?;
- }
-
- if let Some(port) = force_port {
- FedDest::Named(hostname.clone(), format!(":{port}"))
- } else {
- add_port_to_hostname(&hostname)
- }
- } else {
- debug!("4: No .well-known; 5: No SRV record found");
- if !no_cache_dest {
- query_and_cache_override(&dest_str, &dest_str, 8448).await?;
- }
-
- add_port_to_hostname(&dest_str)
- }
- }
+ actual_dest_5(dest, cache).await?
}
},
};
// Can't use get_ip_with_port here because we don't want to add a port
// to an IP address if it wasn't specified
- let hostname = if let Ok(addr) = hostname.parse::<SocketAddr>() {
+ let host = if let Ok(addr) = host.parse::<SocketAddr>() {
FedDest::Literal(addr)
- } else if let Ok(addr) = hostname.parse::<IpAddr>() {
+ } else if let Ok(addr) = host.parse::<IpAddr>() {
FedDest::Named(addr.to_string(), ":8448".to_owned())
- } else if let Some(pos) = hostname.find(':') {
- let (host, port) = hostname.split_at(pos);
+ } else if let Some(pos) = host.find(':') {
+ let (host, port) = host.split_at(pos);
FedDest::Named(host.to_owned(), port.to_owned())
} else {
- FedDest::Named(hostname, ":8448".to_owned())
+ FedDest::Named(host, ":8448".to_owned())
};
- debug!("Actual destination: {actual_dest:?} hostname: {hostname:?}");
- Ok((actual_dest, hostname.into_uri_string()))
+ debug!("Actual destination: {actual_dest:?} hostname: {host:?}");
+ Ok((actual_dest, host.into_uri_string()))
+}
+
+fn actual_dest_1(host_port: FedDest) -> Result<FedDest> {
+ debug!("1: IP literal with provided or default port");
+ Ok(host_port)
+}
+
+async fn actual_dest_2(dest: &ServerName, cache: bool, pos: usize) -> Result<FedDest> {
+ debug!("2: Hostname with included port");
+ let (host, port) = dest.as_str().split_at(pos);
+ conditional_query_and_cache_override(host, host, port.parse::<u16>().unwrap_or(8448), cache).await?;
+ Ok(FedDest::Named(host.to_owned(), port.to_owned()))
+}
+
+async fn actual_dest_3(host: &mut String, cache: bool, delegated: String) -> Result<FedDest> {
+ debug!("3: A .well-known file is available");
+ *host = add_port_to_hostname(&delegated).into_uri_string();
+ match get_ip_with_port(&delegated) {
+ Some(host_and_port) => actual_dest_3_1(host_and_port),
+ None => {
+ if let Some(pos) = delegated.find(':') {
+ actual_dest_3_2(cache, delegated, pos).await
+ } else {
+ trace!("Delegated hostname has no port in this branch");
+ if let Some(overrider) = query_srv_record(&delegated).await? {
+ actual_dest_3_3(cache, delegated, overrider).await
+ } else {
+ actual_dest_3_4(cache, delegated).await
+ }
+ }
+ },
+ }
+}
+
+fn actual_dest_3_1(host_and_port: FedDest) -> Result<FedDest> {
+ debug!("3.1: IP literal in .well-known file");
+ Ok(host_and_port)
+}
+
+async fn actual_dest_3_2(cache: bool, delegated: String, pos: usize) -> Result<FedDest> {
+ debug!("3.2: Hostname with port in .well-known file");
+ let (host, port) = delegated.split_at(pos);
+ conditional_query_and_cache_override(host, host, port.parse::<u16>().unwrap_or(8448), cache).await?;
+ Ok(FedDest::Named(host.to_owned(), port.to_owned()))
+}
+
+async fn actual_dest_3_3(cache: bool, delegated: String, overrider: FedDest) -> Result<FedDest> {
+ debug!("3.3: SRV lookup successful");
+ let force_port = overrider.port();
+ conditional_query_and_cache_override(&delegated, &overrider.hostname(), force_port.unwrap_or(8448), cache).await?;
+ if let Some(port) = force_port {
+ Ok(FedDest::Named(delegated, format!(":{port}")))
+ } else {
+ Ok(add_port_to_hostname(&delegated))
+ }
+}
+
+async fn actual_dest_3_4(cache: bool, delegated: String) -> Result<FedDest> {
+ debug!("3.4: No SRV records, just use the hostname from .well-known");
+ conditional_query_and_cache_override(&delegated, &delegated, 8448, cache).await?;
+ Ok(add_port_to_hostname(&delegated))
+}
+
+async fn actual_dest_4(host: &str, cache: bool, overrider: FedDest) -> Result<FedDest> {
+ debug!("4: No .well-known; SRV record found");
+ let force_port = overrider.port();
+ conditional_query_and_cache_override(host, &overrider.hostname(), force_port.unwrap_or(8448), cache).await?;
+ if let Some(port) = force_port {
+ Ok(FedDest::Named(host.to_owned(), format!(":{port}")))
+ } else {
+ Ok(add_port_to_hostname(host))
+ }
+}
+
+async fn actual_dest_5(dest: &ServerName, cache: bool) -> Result<FedDest> {
+ debug!("5: No SRV record found");
+ conditional_query_and_cache_override(dest.as_str(), dest.as_str(), 8448, cache).await?;
+ Ok(add_port_to_hostname(dest.as_str()))
}
#[tracing::instrument(skip_all, name = "well-known")]
async fn request_well_known(dest: &str) -> Result<Option<String>> {
+ trace!("Requesting well known for {dest}");
if !services()
.globals
.resolver
@@ -252,6 +246,15 @@ async fn request_well_known(dest: &str) -> Result > {
Ok(Some(m_server.to_owned()))
}
+#[inline]
+async fn conditional_query_and_cache_override(overname: &str, hostname: &str, port: u16, cache: bool) -> Result<()> {
+ if cache {
+ query_and_cache_override(overname, hostname, port).await
+ } else {
+ Ok(())
+ }
+}
+
#[tracing::instrument(skip_all, name = "ip")]
async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) -> Result<()> {
match services()
From 5fe5ab279c99a5cf0b99d452f16bb1427591d7f0 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Tue, 28 May 2024 04:52:58 +0000
Subject: [PATCH 0025/2091] split RouterExt impl related into ruma_wrapper
unit.
slightly restrict client_server mod index.
Signed-off-by: Jason Volk
---
src/api/client_server/mod.rs | 140 ++++++++++++++++-----------------
src/api/mod.rs | 4 +-
src/api/router.rs | 75 ++----------------
src/api/ruma_wrapper/mod.rs | 3 +
src/api/ruma_wrapper/router.rs | 69 ++++++++++++++++
5 files changed, 149 insertions(+), 142 deletions(-)
create mode 100644 src/api/ruma_wrapper/router.rs
diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs
index 171e9bbe..ced2bde5 100644
--- a/src/api/client_server/mod.rs
+++ b/src/api/client_server/mod.rs
@@ -1,76 +1,76 @@
-pub(crate) mod account;
-pub(crate) mod alias;
-pub(crate) mod backup;
-pub(crate) mod capabilities;
-pub(crate) mod config;
-pub(crate) mod context;
-pub(crate) mod device;
-pub(crate) mod directory;
-pub(crate) mod filter;
-pub(crate) mod keys;
-pub(crate) mod media;
-pub(crate) mod membership;
-pub(crate) mod message;
-pub(crate) mod presence;
-pub(crate) mod profile;
-pub(crate) mod push;
-pub(crate) mod read_marker;
-pub(crate) mod redact;
-pub(crate) mod relations;
-pub(crate) mod report;
-pub(crate) mod room;
-pub(crate) mod search;
-pub(crate) mod session;
-pub(crate) mod space;
-pub(crate) mod state;
-pub(crate) mod sync;
-pub(crate) mod tag;
-pub(crate) mod thirdparty;
-pub(crate) mod threads;
-pub(crate) mod to_device;
-pub(crate) mod typing;
-pub(crate) mod unstable;
-pub(crate) mod unversioned;
-pub(crate) mod user_directory;
-pub(crate) mod voip;
+pub(super) mod account;
+pub(super) mod alias;
+pub(super) mod backup;
+pub(super) mod capabilities;
+pub(super) mod config;
+pub(super) mod context;
+pub(super) mod device;
+pub(super) mod directory;
+pub(super) mod filter;
+pub(super) mod keys;
+pub(super) mod media;
+pub(super) mod membership;
+pub(super) mod message;
+pub(super) mod presence;
+pub(super) mod profile;
+pub(super) mod push;
+pub(super) mod read_marker;
+pub(super) mod redact;
+pub(super) mod relations;
+pub(super) mod report;
+pub(super) mod room;
+pub(super) mod search;
+pub(super) mod session;
+pub(super) mod space;
+pub(super) mod state;
+pub(super) mod sync;
+pub(super) mod tag;
+pub(super) mod thirdparty;
+pub(super) mod threads;
+pub(super) mod to_device;
+pub(super) mod typing;
+pub(super) mod unstable;
+pub(super) mod unversioned;
+pub(super) mod user_directory;
+pub(super) mod voip;
-pub(crate) use account::*;
+pub(super) use account::*;
pub use alias::get_alias_helper;
-pub(crate) use alias::*;
-pub(crate) use backup::*;
-pub(crate) use capabilities::*;
-pub(crate) use config::*;
-pub(crate) use context::*;
-pub(crate) use device::*;
-pub(crate) use directory::*;
-pub(crate) use filter::*;
-pub(crate) use keys::*;
-pub(crate) use media::*;
-pub(crate) use membership::*;
+pub(super) use alias::*;
+pub(super) use backup::*;
+pub(super) use capabilities::*;
+pub(super) use config::*;
+pub(super) use context::*;
+pub(super) use device::*;
+pub(super) use directory::*;
+pub(super) use filter::*;
+pub(super) use keys::*;
+pub(super) use media::*;
+pub(super) use membership::*;
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room};
-pub(crate) use message::*;
-pub(crate) use presence::*;
-pub(crate) use profile::*;
-pub(crate) use push::*;
-pub(crate) use read_marker::*;
-pub(crate) use redact::*;
-pub(crate) use relations::*;
-pub(crate) use report::*;
-pub(crate) use room::*;
-pub(crate) use search::*;
-pub(crate) use session::*;
-pub(crate) use space::*;
-pub(crate) use state::*;
-pub(crate) use sync::*;
-pub(crate) use tag::*;
-pub(crate) use thirdparty::*;
-pub(crate) use threads::*;
-pub(crate) use to_device::*;
-pub(crate) use typing::*;
-pub(crate) use unstable::*;
-pub(crate) use unversioned::*;
-pub(crate) use user_directory::*;
-pub(crate) use voip::*;
+pub(super) use message::*;
+pub(super) use presence::*;
+pub(super) use profile::*;
+pub(super) use push::*;
+pub(super) use read_marker::*;
+pub(super) use redact::*;
+pub(super) use relations::*;
+pub(super) use report::*;
+pub(super) use room::*;
+pub(super) use search::*;
+pub(super) use session::*;
+pub(super) use space::*;
+pub(super) use state::*;
+pub(super) use sync::*;
+pub(super) use tag::*;
+pub(super) use thirdparty::*;
+pub(super) use threads::*;
+pub(super) use to_device::*;
+pub(super) use typing::*;
+pub(super) use unstable::*;
+pub(super) use unversioned::*;
+pub(super) use user_directory::*;
+pub(super) use voip::*;
/// generated device ID length
const DEVICE_ID_LENGTH: usize = 10;
diff --git a/src/api/mod.rs b/src/api/mod.rs
index 956bcdf7..b835f536 100644
--- a/src/api/mod.rs
+++ b/src/api/mod.rs
@@ -7,8 +7,8 @@ extern crate conduit_core as conduit;
extern crate conduit_service as service;
pub use client_server::membership::{join_room_by_id_helper, leave_all_rooms};
-pub(crate) use conduit::{debug_error, debug_info, debug_warn, error::RumaResponse, utils, Error, Result};
-pub(crate) use ruma_wrapper::Ruma;
+pub(crate) use conduit::{debug_error, debug_info, debug_warn, utils, Error, Result};
+pub(crate) use ruma_wrapper::{Ruma, RumaResponse};
pub(crate) use service::{pdu::PduEvent, services, user_is_local};
conduit::mod_ctor! {}
diff --git a/src/api/router.rs b/src/api/router.rs
index 6081a089..90d69873 100644
--- a/src/api/router.rs
+++ b/src/api/router.rs
@@ -1,15 +1,13 @@
-use std::future::Future;
-
use axum::{
response::IntoResponse,
- routing::{any, get, on, post, MethodFilter},
+ routing::{any, get, post},
Router,
};
-use conduit::{Error, Result, Server};
-use http::{Method, Uri};
-use ruma::api::{client::error::ErrorKind, IncomingRequest};
+use conduit::{Error, Server};
+use http::Uri;
+use ruma::api::client::error::ErrorKind;
-use crate::{client_server, server_server, Ruma, RumaResponse};
+use crate::{client_server, ruma_wrapper::RouterExt, server_server};
pub fn build(router: Router, server: &Server) -> Router {
let config = &server.config;
@@ -234,66 +232,3 @@ async fn initial_sync(_uri: Uri) -> impl IntoResponse {
}
async fn federation_disabled() -> impl IntoResponse { Error::bad_config("Federation is disabled.") }
-
-trait RouterExt {
- fn ruma_route(self, handler: H) -> Self
- where
- H: RumaHandler,
- T: 'static;
-}
-
-impl RouterExt for Router {
- #[inline(always)]
- fn ruma_route(self, handler: H) -> Self
- where
- H: RumaHandler,
- T: 'static,
- {
- handler.add_routes(self)
- }
-}
-
-trait RumaHandler {
- fn add_routes(&self, router: Router) -> Router;
-
- fn add_route(&self, router: Router, path: &str) -> Router;
-}
-
-impl<Req, E, F, Fut> RumaHandler<Ruma<Req>> for F
-where
- Req: IncomingRequest + Send + 'static,
- F: FnOnce(Ruma<Req>) -> Fut + Clone + Send + Sync + 'static,
- Fut: Future<Output = Result<Req::OutgoingResponse, E>> + Send,
- E: IntoResponse,
-{
- #[inline(always)]
- fn add_routes(&self, router: Router) -> Router {
- Req::METADATA
- .history
- .all_paths()
- .fold(router, |router, path| self.add_route(router, path))
- }
-
- #[inline(always)]
- fn add_route(&self, router: Router, path: &str) -> Router {
- let handle = self.clone();
- let method = method_to_filter(Req::METADATA.method);
- let action = |req| async { handle(req).await.map(RumaResponse) };
- router.route(path, on(method, action))
- }
-}
-
-#[inline]
-fn method_to_filter(method: Method) -> MethodFilter {
- match method {
- Method::DELETE => MethodFilter::DELETE,
- Method::GET => MethodFilter::GET,
- Method::HEAD => MethodFilter::HEAD,
- Method::OPTIONS => MethodFilter::OPTIONS,
- Method::PATCH => MethodFilter::PATCH,
- Method::POST => MethodFilter::POST,
- Method::PUT => MethodFilter::PUT,
- Method::TRACE => MethodFilter::TRACE,
- m => panic!("Unsupported HTTP method: {m:?}"),
- }
-}
diff --git a/src/api/ruma_wrapper/mod.rs b/src/api/ruma_wrapper/mod.rs
index 1e12995f..a130ddd9 100644
--- a/src/api/ruma_wrapper/mod.rs
+++ b/src/api/ruma_wrapper/mod.rs
@@ -1,11 +1,14 @@
mod auth;
mod request;
+mod router;
mod xmatrix;
use std::ops::Deref;
+pub(super) use conduit::error::RumaResponse;
use ruma::{CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId};
+pub(super) use self::router::RouterExt;
use crate::service::appservice::RegistrationInfo;
/// Extractor for Ruma request structs
diff --git a/src/api/ruma_wrapper/router.rs b/src/api/ruma_wrapper/router.rs
new file mode 100644
index 00000000..f769d2bb
--- /dev/null
+++ b/src/api/ruma_wrapper/router.rs
@@ -0,0 +1,69 @@
+use std::future::Future;
+
+use axum::{
+ response::IntoResponse,
+ routing::{on, MethodFilter},
+ Router,
+};
+use conduit::Result;
+use http::Method;
+use ruma::api::IncomingRequest;
+
+use super::{Ruma, RumaResponse};
+
+pub(in super::super) trait RouterExt {
+ fn ruma_route(self, handler: H) -> Self
+ where
+ H: RumaHandler;
+}
+
+impl RouterExt for Router {
+ fn ruma_route(self, handler: H) -> Self
+ where
+ H: RumaHandler,
+ {
+ handler.add_routes(self)
+ }
+}
+
+pub(in super::super) trait RumaHandler {
+ fn add_routes(&self, router: Router) -> Router;
+
+ fn add_route(&self, router: Router, path: &str) -> Router;
+}
+
+impl<Req, E, F, Fut> RumaHandler<Ruma<Req>> for F
+where
+ Req: IncomingRequest + Send + 'static,
+ F: FnOnce(Ruma<Req>) -> Fut + Clone + Send + Sync + 'static,
+ Fut: Future<Output = Result<Req::OutgoingResponse, E>> + Send,
+ E: IntoResponse,
+{
+ fn add_routes(&self, router: Router) -> Router {
+ Req::METADATA
+ .history
+ .all_paths()
+ .fold(router, |router, path| self.add_route(router, path))
+ }
+
+ fn add_route(&self, router: Router, path: &str) -> Router {
+ let handle = self.clone();
+ let method = method_to_filter(&Req::METADATA.method);
+ let action = |req| async { handle(req).await.map(RumaResponse) };
+ router.route(path, on(method, action))
+ }
+}
+
+const fn method_to_filter(method: &Method) -> MethodFilter {
+ match *method {
+ Method::DELETE => MethodFilter::DELETE,
+ Method::GET => MethodFilter::GET,
+ Method::HEAD => MethodFilter::HEAD,
+ Method::OPTIONS => MethodFilter::OPTIONS,
+ Method::PATCH => MethodFilter::PATCH,
+ Method::POST => MethodFilter::POST,
+ Method::PUT => MethodFilter::PUT,
+ Method::TRACE => MethodFilter::TRACE,
+ _ => panic!("Unsupported HTTP method"),
+ }
+}
From 90d9a997a5096532f7fbaa279ec19fe43cce492b Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Mon, 27 May 2024 20:05:33 +0000
Subject: [PATCH 0026/2091] split / cleanup core utils.
Signed-off-by: Jason Volk
---
src/api/client_server/unversioned.rs | 6 +-
src/api/server_server.rs | 2 +-
src/core/debug.rs | 3 +
src/core/mod.rs | 2 +-
src/core/utils/debug.rs | 40 ++++++
src/core/utils/html.rs | 37 ++++++
src/core/utils/json.rs | 39 ++++++
src/core/utils/mod.rs | 191 ++-------------------------
src/core/utils/sys.rs | 36 +++++
src/core/version.rs | 28 ++++
src/main/clap.rs | 3 +-
src/main/server.rs | 5 +-
src/service/globals/client.rs | 4 +-
13 files changed, 205 insertions(+), 191 deletions(-)
create mode 100644 src/core/utils/debug.rs
create mode 100644 src/core/utils/html.rs
create mode 100644 src/core/utils/json.rs
create mode 100644 src/core/utils/sys.rs
create mode 100644 src/core/version.rs
diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs
index 69a477a6..56ab9a90 100644
--- a/src/api/client_server/unversioned.rs
+++ b/src/api/client_server/unversioned.rs
@@ -10,7 +10,7 @@ use ruma::api::client::{
error::ErrorKind,
};
-use crate::{services, utils::conduwuit_version, Error, Result, Ruma};
+use crate::{services, Error, Result, Ruma};
/// # `GET /_matrix/client/versions`
///
@@ -145,7 +145,7 @@ pub(crate) async fn syncv3_client_server_json() -> Result {
Ok(Json(serde_json::json!({
"server": server_url,
- "version": conduwuit_version(),
+ "version": conduit::version::conduwuit(),
})))
}
@@ -156,7 +156,7 @@ pub(crate) async fn syncv3_client_server_json() -> Result {
pub(crate) async fn conduwuit_server_version() -> Result {
Ok(Json(serde_json::json!({
"name": "conduwuit",
- "version": conduwuit_version(),
+ "version": conduit::version::conduwuit(),
})))
}
diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index f0d2ef01..72429752 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -74,7 +74,7 @@ pub(crate) async fn get_server_version_route(
Ok(get_server_version::v1::Response {
server: Some(get_server_version::v1::Server {
name: Some("Conduwuit".to_owned()),
- version: Some(utils::conduwuit_version()),
+ version: Some(conduit::version::conduwuit()),
}),
})
}
diff --git a/src/core/debug.rs b/src/core/debug.rs
index 6db9a4dd..207f08fa 100644
--- a/src/core/debug.rs
+++ b/src/core/debug.rs
@@ -2,6 +2,9 @@
use std::{panic, panic::PanicInfo};
+/// Export all of the ancillary tools from here as well.
+pub use crate::utils::debug::*;
+
/// Log event at given level in debug-mode (when debug-assertions are enabled).
/// In release-mode it becomes DEBUG level, and possibly subject to elision.
///
diff --git a/src/core/mod.rs b/src/core/mod.rs
index 5911e027..baebbb8f 100644
--- a/src/core/mod.rs
+++ b/src/core/mod.rs
@@ -7,12 +7,12 @@ pub mod mods;
pub mod pducount;
pub mod server;
pub mod utils;
+pub mod version;
pub use config::Config;
pub use error::{Error, Result, RumaResponse};
pub use pducount::PduCount;
pub use server::Server;
-pub use utils::conduwuit_version;
#[cfg(not(conduit_mods))]
pub mod mods {
diff --git a/src/core/utils/debug.rs b/src/core/utils/debug.rs
new file mode 100644
index 00000000..e4151f39
--- /dev/null
+++ b/src/core/utils/debug.rs
@@ -0,0 +1,40 @@
+use std::fmt;
+
+/// Debug-formats the given slice, but only up to the first `max_len` elements.
+/// Any further elements are replaced by an ellipsis.
+///
+/// See also [`slice_truncated()`],
+pub struct TruncatedSlice<'a, T> {
+ inner: &'a [T],
+ max_len: usize,
+}
+
+impl<T: fmt::Debug> fmt::Debug for TruncatedSlice<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.inner.len() <= self.max_len {
+ write!(f, "{:?}", self.inner)
+ } else {
+ f.debug_list()
+ .entries(&self.inner[..self.max_len])
+ .entry(&"...")
+ .finish()
+ }
+ }
+}
+
+/// See [`TruncatedSlice`]. Useful for `#[instrument]`:
+///
+/// ```
+/// use conduit_core::utils::debug::slice_truncated;
+///
+/// #[tracing::instrument(fields(foos = slice_truncated(foos, 42)))]
+/// fn bar(foos: &[&str]);
+/// ```
+pub fn slice_truncated<T: fmt::Debug>(
+ slice: &[T], max_len: usize,
+) -> tracing::field::DebugValue<TruncatedSlice<'_, T>> {
+ tracing::field::debug(TruncatedSlice {
+ inner: slice,
+ max_len,
+ })
+}
diff --git a/src/core/utils/html.rs b/src/core/utils/html.rs
new file mode 100644
index 00000000..3b44a31b
--- /dev/null
+++ b/src/core/utils/html.rs
@@ -0,0 +1,37 @@
+use std::fmt;
+
+/// Wrapper struct which will emit the HTML-escaped version of the contained
+/// string when passed to a format string.
+pub struct Escape<'a>(pub &'a str);
+
+/// Copied from librustdoc:
+/// *
+impl fmt::Display for Escape<'_> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Because the internet is always right, turns out there's not that many
+ // characters to escape: http://stackoverflow.com/questions/7381974
+ let Escape(s) = *self;
+ let pile_o_bits = s;
+ let mut last = 0;
+ for (i, ch) in s.char_indices() {
+ let s = match ch {
+ '>' => ">",
+ '<' => "<",
+ '&' => "&",
+ '\'' => "'",
+ '"' => """,
+ _ => continue,
+ };
+ fmt.write_str(&pile_o_bits[last..i])?;
+ fmt.write_str(s)?;
+ // NOTE: we only expect single byte characters here - which is fine as long as
+ // we only match single byte characters
+ last = i + 1;
+ }
+
+ if last < s.len() {
+ fmt.write_str(&pile_o_bits[last..])?;
+ }
+ Ok(())
+ }
+}
diff --git a/src/core/utils/json.rs b/src/core/utils/json.rs
new file mode 100644
index 00000000..a9adad54
--- /dev/null
+++ b/src/core/utils/json.rs
@@ -0,0 +1,39 @@
+use std::{fmt, str::FromStr};
+
+use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject};
+
+use crate::Result;
+
+/// Fallible conversion from any value that implements `Serialize` to a
+/// `CanonicalJsonObject`.
+///
+/// `value` must serialize to an `serde_json::Value::Object`.
+pub fn to_canonical_object<T: serde::Serialize>(value: T) -> Result<CanonicalJsonObject> {
+ use serde::ser::Error;
+
+ match serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? {
+ serde_json::Value::Object(map) => try_from_json_map(map),
+ _ => Err(CanonicalJsonError::SerDe(serde_json::Error::custom("Value must be an object"))),
+ }
+}
+
+pub fn deserialize_from_str<'de, D: serde::de::Deserializer<'de>, T: FromStr<Err = E>, E: fmt::Display>(
+ deserializer: D,
+) -> Result<T, D::Error> {
+ struct Visitor<T: FromStr<Err = E>, E>(std::marker::PhantomData<T>);
+ impl<T: FromStr<Err = Err>, Err: fmt::Display> serde::de::Visitor<'_> for Visitor<T, Err> {
+ type Value = T;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(formatter, "a parsable string")
+ }
+
+ fn visit_str(self, v: &str) -> Result<Self::Value, E>
+ where
+ E: serde::de::Error,
+ {
+ v.parse().map_err(serde::de::Error::custom)
+ }
+ }
+ deserializer.deserialize_str(Visitor(std::marker::PhantomData))
+}
diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs
index 582263a9..e85080f1 100644
--- a/src/core/utils/mod.rs
+++ b/src/core/utils/mod.rs
@@ -1,21 +1,26 @@
+pub mod content_disposition;
+pub mod debug;
+pub mod defer;
+pub mod html;
+pub mod json;
+pub mod sys;
+
use std::{
cmp,
cmp::Ordering,
- fmt,
- str::FromStr,
time::{SystemTime, UNIX_EPOCH},
};
+pub use debug::slice_truncated as debug_slice_truncated;
+pub use html::Escape as HtmlEscape;
+pub use json::{deserialize_from_str, to_canonical_object};
use rand::prelude::*;
use ring::digest;
-use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject, OwnedUserId};
-use tracing::debug;
+use ruma::OwnedUserId;
+pub use sys::available_parallelism;
use crate::{Error, Result};
-pub mod content_disposition;
-pub mod defer;
-
pub fn clamp(val: T, min: T, max: T) -> T { cmp::min(cmp::max(val, min), max) }
#[must_use]
@@ -108,178 +113,6 @@ pub fn common_elements(
}))
}
-/// Fallible conversion from any value that implements `Serialize` to a
-/// `CanonicalJsonObject`.
-///
-/// `value` must serialize to an `serde_json::Value::Object`.
-pub fn to_canonical_object(value: T) -> Result {
- use serde::ser::Error;
-
- match serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? {
- serde_json::Value::Object(map) => try_from_json_map(map),
- _ => Err(CanonicalJsonError::SerDe(serde_json::Error::custom("Value must be an object"))),
- }
-}
-
-pub fn deserialize_from_str<'de, D: serde::de::Deserializer<'de>, T: FromStr, E: fmt::Display>(
- deserializer: D,
-) -> Result {
- struct Visitor, E>(std::marker::PhantomData);
- impl, Err: fmt::Display> serde::de::Visitor<'_> for Visitor {
- type Value = T;
-
- fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(formatter, "a parsable string")
- }
-
- fn visit_str(self, v: &str) -> Result
- where
- E: serde::de::Error,
- {
- v.parse().map_err(serde::de::Error::custom)
- }
- }
- deserializer.deserialize_str(Visitor(std::marker::PhantomData))
-}
-
-// Copied from librustdoc:
-// https://github.com/rust-lang/rust/blob/cbaeec14f90b59a91a6b0f17fc046c66fa811892/src/librustdoc/html/escape.rs
-
-/// Wrapper struct which will emit the HTML-escaped version of the contained
-/// string when passed to a format string.
-pub struct HtmlEscape<'a>(pub &'a str);
-
-impl fmt::Display for HtmlEscape<'_> {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- // Because the internet is always right, turns out there's not that many
- // characters to escape: http://stackoverflow.com/questions/7381974
- let HtmlEscape(s) = *self;
- let pile_o_bits = s;
- let mut last = 0;
- for (i, ch) in s.char_indices() {
- let s = match ch {
- '>' => ">",
- '<' => "<",
- '&' => "&",
- '\'' => "'",
- '"' => """,
- _ => continue,
- };
- fmt.write_str(&pile_o_bits[last..i])?;
- fmt.write_str(s)?;
- // NOTE: we only expect single byte characters here - which is fine as long as
- // we only match single byte characters
- last = i + 1;
- }
-
- if last < s.len() {
- fmt.write_str(&pile_o_bits[last..])?;
- }
- Ok(())
- }
-}
-
-/// one true function for returning the conduwuit version with the necessary
-/// CONDUWUIT_VERSION_EXTRA env variables used if specified
-///
-/// Set the environment variable `CONDUWUIT_VERSION_EXTRA` to any UTF-8 string
-/// to include it in parenthesis after the SemVer version. A common value are
-/// git commit hashes.
-#[must_use]
-pub fn conduwuit_version() -> String {
- match option_env!("CONDUWUIT_VERSION_EXTRA") {
- Some(extra) => {
- if extra.is_empty() {
- env!("CARGO_PKG_VERSION").to_owned()
- } else {
- format!("{} ({})", env!("CARGO_PKG_VERSION"), extra)
- }
- },
- None => match option_env!("CONDUIT_VERSION_EXTRA") {
- Some(extra) => {
- if extra.is_empty() {
- env!("CARGO_PKG_VERSION").to_owned()
- } else {
- format!("{} ({})", env!("CARGO_PKG_VERSION"), extra)
- }
- },
- None => env!("CARGO_PKG_VERSION").to_owned(),
- },
- }
-}
-
-/// Debug-formats the given slice, but only up to the first `max_len` elements.
-/// Any further elements are replaced by an ellipsis.
-///
-/// See also [`debug_slice_truncated()`],
-pub struct TruncatedDebugSlice<'a, T> {
- inner: &'a [T],
- max_len: usize,
-}
-
-impl fmt::Debug for TruncatedDebugSlice<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- if self.inner.len() <= self.max_len {
- write!(f, "{:?}", self.inner)
- } else {
- f.debug_list()
- .entries(&self.inner[..self.max_len])
- .entry(&"...")
- .finish()
- }
- }
-}
-
-/// See [`TruncatedDebugSlice`]. Useful for `#[instrument]`:
-///
-/// ```
-/// use conduit_core::utils::debug_slice_truncated;
-///
-/// #[tracing::instrument(fields(foos = debug_slice_truncated(foos, 42)))]
-/// fn bar(foos: &[&str]);
-/// ```
-pub fn debug_slice_truncated(
- slice: &[T], max_len: usize,
-) -> tracing::field::DebugValue> {
- tracing::field::debug(TruncatedDebugSlice {
- inner: slice,
- max_len,
- })
-}
-
-/// This is needed for opening lots of file descriptors, which tends to
-/// happen more often when using RocksDB and making lots of federation
-/// connections at startup. The soft limit is usually 1024, and the hard
-/// limit is usually 512000; I've personally seen it hit >2000.
-///
-/// *
-/// *
-#[cfg(unix)]
-pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> {
- use nix::sys::resource::{getrlimit, setrlimit, Resource::RLIMIT_NOFILE as NOFILE};
-
- let (soft_limit, hard_limit) = getrlimit(NOFILE)?;
- if soft_limit < hard_limit {
- setrlimit(NOFILE, hard_limit, hard_limit)?;
- assert_eq!((hard_limit, hard_limit), getrlimit(NOFILE)?, "getrlimit != setrlimit");
- debug!(to = hard_limit, from = soft_limit, "Raised RLIMIT_NOFILE",);
- }
-
- Ok(())
-}
-
-/// Get the number of threads which could execute in parallel based on the
-/// hardware and administrative constraints of this system. This value should be
-/// used to hint the size of thread-pools and divide-and-conquer algorithms.
-///
-/// *
-#[must_use]
-pub fn available_parallelism() -> usize {
- std::thread::available_parallelism()
- .expect("Unable to query for available parallelism.")
- .get()
-}
-
/// Boilerplate for wraps which are typed to never error.
///
/// *
diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs
new file mode 100644
index 00000000..825ec903
--- /dev/null
+++ b/src/core/utils/sys.rs
@@ -0,0 +1,36 @@
+use tracing::debug;
+
+use crate::Result;
+
+/// This is needed for opening lots of file descriptors, which tends to
+/// happen more often when using RocksDB and making lots of federation
+/// connections at startup. The soft limit is usually 1024, and the hard
+/// limit is usually 512000; I've personally seen it hit >2000.
+///
+/// *
+/// *
+#[cfg(unix)]
+pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> {
+ use nix::sys::resource::{getrlimit, setrlimit, Resource::RLIMIT_NOFILE as NOFILE};
+
+ let (soft_limit, hard_limit) = getrlimit(NOFILE)?;
+ if soft_limit < hard_limit {
+ setrlimit(NOFILE, hard_limit, hard_limit)?;
+ assert_eq!((hard_limit, hard_limit), getrlimit(NOFILE)?, "getrlimit != setrlimit");
+ debug!(to = hard_limit, from = soft_limit, "Raised RLIMIT_NOFILE",);
+ }
+
+ Ok(())
+}
+
+/// Get the number of threads which could execute in parallel based on the
+/// hardware and administrative constraints of this system. This value should be
+/// used to hint the size of thread-pools and divide-and-conquer algorithms.
+///
+/// *
+#[must_use]
+pub fn available_parallelism() -> usize {
+ std::thread::available_parallelism()
+ .expect("Unable to query for available parallelism.")
+ .get()
+}
diff --git a/src/core/version.rs b/src/core/version.rs
new file mode 100644
index 00000000..f65bac99
--- /dev/null
+++ b/src/core/version.rs
@@ -0,0 +1,28 @@
+/// one true function for returning the conduwuit version with the necessary
+/// CONDUWUIT_VERSION_EXTRA env variables used if specified
+///
+/// Set the environment variable `CONDUWUIT_VERSION_EXTRA` to any UTF-8 string
+/// to include it in parentheses after the SemVer version. Common values are
+/// git commit hashes.
+#[must_use]
+pub fn conduwuit() -> String {
+ match option_env!("CONDUWUIT_VERSION_EXTRA") {
+ Some(extra) => {
+ if extra.is_empty() {
+ env!("CARGO_PKG_VERSION").to_owned()
+ } else {
+ format!("{} ({})", env!("CARGO_PKG_VERSION"), extra)
+ }
+ },
+ None => match option_env!("CONDUIT_VERSION_EXTRA") {
+ Some(extra) => {
+ if extra.is_empty() {
+ env!("CARGO_PKG_VERSION").to_owned()
+ } else {
+ format!("{} ({})", env!("CARGO_PKG_VERSION"), extra)
+ }
+ },
+ None => env!("CARGO_PKG_VERSION").to_owned(),
+ },
+ }
+}
diff --git a/src/main/clap.rs b/src/main/clap.rs
index 81a6da72..a2fb588e 100644
--- a/src/main/clap.rs
+++ b/src/main/clap.rs
@@ -3,11 +3,10 @@
use std::path::PathBuf;
use clap::Parser;
-use conduit_core::utils::conduwuit_version;
/// Commandline arguments
#[derive(Parser, Debug)]
-#[clap(version = conduwuit_version(), about, long_about = None)]
+#[clap(version = conduit::version::conduwuit(), about, long_about = None)]
pub(crate) struct Args {
#[arg(short, long)]
/// Optional argument to the path of a conduwuit config TOML file
diff --git a/src/main/server.rs b/src/main/server.rs
index c3f6a928..2395469b 100644
--- a/src/main/server.rs
+++ b/src/main/server.rs
@@ -1,11 +1,10 @@
use std::sync::Arc;
use conduit::{
- conduwuit_version,
config::Config,
info,
log::{LogLevelReloadHandles, ReloadHandle},
- utils::maximize_fd_limit,
+ utils::sys::maximize_fd_limit,
Error, Result,
};
use tokio::runtime;
@@ -43,7 +42,7 @@ impl Server {
database_path = ?config.database_path,
log_levels = %config.log,
"{}",
- conduwuit_version(),
+ conduit::version::conduwuit(),
);
Ok(Arc::new(Server {
diff --git a/src/service/globals/client.rs b/src/service/globals/client.rs
index 82747ae7..33f6d85f 100644
--- a/src/service/globals/client.rs
+++ b/src/service/globals/client.rs
@@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration};
use reqwest::redirect;
-use crate::{service::globals::resolver, utils::conduwuit_version, Config, Result};
+use crate::{service::globals::resolver, Config, Result};
pub struct Client {
pub default: reqwest::Client,
@@ -87,7 +87,7 @@ impl Client {
}
fn base(config: &Config) -> Result {
- let version = conduwuit_version();
+ let version = conduit::version::conduwuit();
let user_agent = format!("Conduwuit/{version}");
From 1108235c632476a746a19ef39822e926f7d4b94f Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sat, 25 May 2024 20:16:28 +0000
Subject: [PATCH 0027/2091] misc simplifications and cleanup
Signed-off-by: Jason Volk
---
src/main/main.rs | 17 ++++++++---------
src/main/server.rs | 6 ++----
2 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/src/main/main.rs b/src/main/main.rs
index a96945a7..ba1cb2d3 100644
--- a/src/main/main.rs
+++ b/src/main/main.rs
@@ -25,21 +25,20 @@ fn main() -> Result<(), Error> {
.build()
.expect("built runtime");
- let handle = runtime.handle();
- let server: Arc = Server::build(args, Some(handle))?;
- runtime.block_on(async { async_main(server.clone()).await })?;
+ let server: Arc = Server::build(args, Some(runtime.handle()))?;
+ runtime.block_on(async_main(&server))?;
// explicit drop here to trace thread and tls dtors
drop(runtime);
-
debug_info!("Exit");
+
Ok(())
}
/// Operate the server normally in release-mode static builds. This will start,
/// run and stop the server within the asynchronous runtime.
#[cfg(not(conduit_mods))]
-async fn async_main(server: Arc) -> Result<(), Error> {
+async fn async_main(server: &Arc) -> Result<(), Error> {
extern crate conduit_router as router;
use tracing::error;
@@ -66,22 +65,22 @@ async fn async_main(server: Arc) -> Result<(), Error> {
/// and hot-reload portions of the server as-needed before returning for an
/// actual shutdown. This is not available in release-mode or static builds.
#[cfg(conduit_mods)]
-async fn async_main(server: Arc) -> Result<(), Error> {
+async fn async_main(server: &Arc) -> Result<(), Error> {
let mut starts = true;
let mut reloads = true;
while reloads {
- if let Err(error) = mods::open(&server).await {
+ if let Err(error) = mods::open(server).await {
error!("Loading router: {error}");
return Err(error);
}
- let result = mods::run(&server, starts).await;
+ let result = mods::run(server, starts).await;
if let Ok(result) = result {
(starts, reloads) = result;
}
let force = !reloads || result.is_err();
- if let Err(error) = mods::close(&server, force).await {
+ if let Err(error) = mods::close(server, force).await {
error!("Unloading router: {error}");
return Err(error);
}
diff --git a/src/main/server.rs b/src/main/server.rs
index 2395469b..d960bfac 100644
--- a/src/main/server.rs
+++ b/src/main/server.rs
@@ -139,9 +139,7 @@ fn init_tracing(config: &Config) -> (LogLevelReloadHandles, TracingFlameGuard) {
let (flame_layer, flame_guard) =
match tracing_flame::FlameLayer::with_file(&config.tracing_flame_output_path) {
Ok(ok) => ok,
- Err(e) => {
- panic!("failed to initialize tracing-flame: {e}");
- },
+ Err(e) => panic!("failed to initialize tracing-flame: {e}"),
};
let flame_layer = flame_layer
.with_empty_samples(false)
@@ -175,7 +173,7 @@ fn init_tracing(config: &Config) -> (LogLevelReloadHandles, TracingFlameGuard) {
#[cfg_attr(not(feature = "perf_measurements"), allow(clippy::let_unit_value))]
let flame_guard = ();
- tracing::subscriber::set_global_default(subscriber).unwrap();
+ tracing::subscriber::set_global_default(subscriber).expect("failed to set global tracing subscriber");
#[cfg(all(feature = "tokio_console", feature = "release_max_log_level", tokio_unstable))]
tracing::error!(
From 89ab687f16e519499cde88efd5e4da375d403653 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sat, 1 Jun 2024 08:03:20 +0000
Subject: [PATCH 0028/2091] move signal handling out to main
Signed-off-by: Jason Volk
---
src/core/server.rs | 6 ++++-
src/main/main.rs | 37 +++++++++++++++++++++++++--
src/router/run.rs | 63 +++++++++++-----------------------------------
3 files changed, 55 insertions(+), 51 deletions(-)
diff --git a/src/core/server.rs b/src/core/server.rs
index 4bfff340..9e4497ae 100644
--- a/src/core/server.rs
+++ b/src/core/server.rs
@@ -6,7 +6,7 @@ use std::{
time::SystemTime,
};
-use tokio::runtime;
+use tokio::{runtime, sync::broadcast};
use crate::{config::Config, log::LogLevelReloadHandles};
@@ -22,6 +22,9 @@ pub struct Server {
/// command to initiate shutdown.
pub shutdown: Mutex>,
+ /// Reload/shutdown signal
+ pub signal: broadcast::Sender<&'static str>,
+
/// Reload/shutdown desired indicator; when false, shutdown is desired. This
/// is an observable used on shutdown and modifying is not recommended.
pub reload: AtomicBool,
@@ -51,6 +54,7 @@ impl Server {
config,
started: SystemTime::now(),
shutdown: Mutex::new(None),
+ signal: broadcast::channel::<&'static str>(1).0,
reload: AtomicBool::new(false),
interrupt: AtomicBool::new(false),
runtime,
diff --git a/src/main/main.rs b/src/main/main.rs
index ba1cb2d3..73e6e690 100644
--- a/src/main/main.rs
+++ b/src/main/main.rs
@@ -6,9 +6,10 @@ extern crate conduit_core as conduit;
use std::{cmp, sync::Arc, time::Duration};
-use conduit::{debug_info, error, utils::available_parallelism, Error, Result};
+use conduit::{debug_error, debug_info, error, utils::available_parallelism, warn, Error, Result};
use server::Server;
-use tokio::runtime;
+use tokio::{runtime, signal};
+use tracing::debug;
const WORKER_NAME: &str = "conduwuit:worker";
const WORKER_MIN: usize = 2;
@@ -26,6 +27,7 @@ fn main() -> Result<(), Error> {
.expect("built runtime");
let server: Arc = Server::build(args, Some(runtime.handle()))?;
+ runtime.spawn(signal(server.clone()));
runtime.block_on(async_main(&server))?;
// explicit drop here to trace thread and tls dtors
@@ -94,3 +96,34 @@ async fn async_main(server: &Arc) -> Result<(), Error> {
debug_info!("Exit runtime");
Ok(())
}
+
+#[tracing::instrument(skip_all)]
+async fn signal(server: Arc) {
+ let (mut term, mut quit);
+ #[cfg(unix)]
+ {
+ use signal::unix;
+ quit = unix::signal(unix::SignalKind::quit()).expect("SIGQUIT handler");
+ term = unix::signal(unix::SignalKind::terminate()).expect("SIGTERM handler");
+ };
+
+ loop {
+ debug!("Installed signal handlers");
+ let sig: &'static str;
+ #[cfg(unix)]
+ tokio::select! {
+ _ = term.recv() => { sig = "SIGTERM"; },
+ _ = quit.recv() => { sig = "Ctrl+\\"; },
+ _ = signal::ctrl_c() => { sig = "Ctrl+C"; },
+ }
+ #[cfg(not(unix))]
+ tokio::select! {
+ _ = signal::ctrl_c() => { sig = "Ctrl+C"; },
+ }
+
+ warn!("Received signal {}", sig);
+ if let Err(e) = server.server.signal.send(sig) {
+ debug_error!("signal channel: {e}");
+ }
+ }
+}
diff --git a/src/router/run.rs b/src/router/run.rs
index e6238853..8afb2dc3 100644
--- a/src/router/run.rs
+++ b/src/router/run.rs
@@ -1,11 +1,8 @@
use std::{sync::Arc, time::Duration};
use axum_server::Handle as ServerHandle;
-use tokio::{
- signal,
- sync::broadcast::{self, Sender},
-};
-use tracing::{debug, info, warn};
+use tokio::sync::broadcast::{self, Sender};
+use tracing::{debug, error, info};
extern crate conduit_admin as admin;
extern crate conduit_core as conduit;
@@ -39,9 +36,7 @@ pub(crate) async fn run(server: Arc) -> Result<(), Error> {
server.interrupt.store(false, Ordering::Release);
let (tx, _) = broadcast::channel::<()>(1);
- let sigs = server
- .runtime()
- .spawn(sighandle(server.clone(), tx.clone()));
+ let sigs = server.runtime().spawn(signal(server.clone(), tx.clone()));
// Serve clients
let res = serve::serve(&server, app, handle, tx.subscribe()).await;
@@ -115,51 +110,25 @@ pub(crate) async fn stop(_server: Arc) -> Result<(), Error> {
}
#[tracing::instrument(skip_all)]
-async fn sighandle(server: Arc, tx: Sender<()>) -> Result<(), Error> {
- let ctrl_c = async {
- signal::ctrl_c()
- .await
- .expect("failed to install Ctrl+C handler");
+async fn signal(server: Arc, tx: Sender<()>) {
+ let sig: &'static str = server
+ .signal
+ .subscribe()
+ .recv()
+ .await
+ .expect("channel error");
+ debug!("Received signal {}", sig);
+ if sig == "Ctrl+C" {
let reload = cfg!(unix) && cfg!(debug_assertions);
server.reload.store(reload, Ordering::Release);
- };
-
- #[cfg(unix)]
- let ctrl_bs = async {
- signal::unix::signal(signal::unix::SignalKind::quit())
- .expect("failed to install Ctrl+\\ handler")
- .recv()
- .await;
- };
-
- #[cfg(unix)]
- let terminate = async {
- signal::unix::signal(signal::unix::SignalKind::terminate())
- .expect("failed to install SIGTERM handler")
- .recv()
- .await;
- };
-
- debug!("Installed signal handlers");
- let sig: &str;
- #[cfg(unix)]
- tokio::select! {
- () = ctrl_c => { sig = "Ctrl+C"; },
- () = ctrl_bs => { sig = "Ctrl+\\"; },
- () = terminate => { sig = "SIGTERM"; },
}
- #[cfg(not(unix))]
- tokio::select! {
- _ = ctrl_c => { sig = "Ctrl+C"; },
- }
-
- warn!("Received {}", sig);
server.interrupt.store(true, Ordering::Release);
services().globals.rotate.fire();
- tx.send(())
- .expect("failed sending shutdown transaction to oneshot channel");
+ if let Err(e) = tx.send(()) {
+ error!("failed sending shutdown transaction to channel: {e}");
+ }
if let Some(handle) = server.shutdown.lock().expect("locked").as_ref() {
let pending = server.requests_spawn_active.load(Ordering::Relaxed);
@@ -172,6 +141,4 @@ async fn sighandle(server: Arc, tx: Sender<()>) -> Result<(), Error> {
handle.shutdown();
}
}
-
- Ok(())
}
From 102bd1b4a6a0ab3c955fda06c86aed214d583a51 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 03:40:31 +0000
Subject: [PATCH 0029/2091] use debug_warn for parse_incoming_pdu err results.
Signed-off-by: Jason Volk
---
src/api/server_server.rs | 2 +-
src/service/rooms/event_handler/parse_incoming_pdu.rs | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index 72429752..d5edd03b 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -244,7 +244,7 @@ pub(crate) async fn send_transaction_message_route(
parsed_pdus.push(match parse_incoming_pdu(pdu) {
Ok(t) => t,
Err(e) => {
- warn!("Could not parse PDU: {e}");
+ debug_warn!("Could not parse PDU: {e}");
continue;
},
});
diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs
index 133ab66e..d9d9f063 100644
--- a/src/service/rooms/event_handler/parse_incoming_pdu.rs
+++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs
@@ -6,7 +6,7 @@ use crate::{service::pdu::gen_event_id_canonical_json, services, Error, Result};
pub fn parse_incoming_pdu(pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
- warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
+ warn!("Error parsing incoming event {pdu:?}: {e:?}");
Error::BadServerResponse("Invalid PDU in server response")
})?;
From 7d2f510cc3d390c8596d0f1614bbe0ba4dc5a875 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Fri, 31 May 2024 05:39:51 +0000
Subject: [PATCH 0030/2091] single-source for default log filter string
Signed-off-by: Jason Volk
---
src/core/config/mod.rs | 3 ++-
src/main/server.rs | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index 665cbe20..bb13923b 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -975,7 +975,8 @@ fn default_tracing_flame_output_path() -> String { "./tracing.folded".to_owned()
fn default_trusted_servers() -> Vec { vec![OwnedServerName::try_from("matrix.org").unwrap()] }
-fn default_log() -> String {
+#[must_use]
+pub fn default_log() -> String {
// do debug logging by default for debug builds
if cfg!(debug_assertions) {
"debug".to_owned()
diff --git a/src/main/server.rs b/src/main/server.rs
index d960bfac..8e61d502 100644
--- a/src/main/server.rs
+++ b/src/main/server.rs
@@ -1,6 +1,7 @@
use std::sync::Arc;
use conduit::{
+ config,
config::Config,
info,
log::{LogLevelReloadHandles, ReloadHandle},
@@ -103,7 +104,7 @@ fn init_tracing(config: &Config) -> (LogLevelReloadHandles, TracingFlameGuard) {
Ok(s) => s,
Err(e) => {
eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}");
- EnvFilter::try_new("warn").unwrap()
+ EnvFilter::try_new(config::default_log()).expect("failed to set default EnvFilter")
},
};
From c2586737ae552ef3fa1c8d9911c3a7b822ef387a Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sat, 1 Jun 2024 01:36:37 +0000
Subject: [PATCH 0031/2091] accept receipts prior to events
Signed-off-by: Jason Volk
---
src/api/mod.rs | 2 +-
src/api/server_server.rs | 22 +++-------------------
2 files changed, 4 insertions(+), 20 deletions(-)
diff --git a/src/api/mod.rs b/src/api/mod.rs
index b835f536..84207d71 100644
--- a/src/api/mod.rs
+++ b/src/api/mod.rs
@@ -7,7 +7,7 @@ extern crate conduit_core as conduit;
extern crate conduit_service as service;
pub use client_server::membership::{join_room_by_id_helper, leave_all_rooms};
-pub(crate) use conduit::{debug_error, debug_info, debug_warn, utils, Error, Result};
+pub(crate) use conduit::{debug_info, debug_warn, utils, Error, Result};
pub(crate) use ruma_wrapper::{Ruma, RumaResponse};
pub(crate) use service::{pdu::PduEvent, services, user_is_local};
diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index d5edd03b..1fe1ed1b 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -54,7 +54,6 @@ use tracing::{debug, error, trace, warn};
use crate::{
client_server::{self, claim_keys_helper, get_keys_helper},
- debug_error,
service::{
pdu::{gen_event_id_canonical_json, PduBuilder},
rooms::event_handler::parse_incoming_pdu,
@@ -369,22 +368,9 @@ pub(crate) async fn send_transaction_message_route(
}
if services().rooms.state_cache.is_joined(&user_id, &room_id)? {
- if let Some((event_id, _)) = user_updates
- .event_ids
- .iter()
- .filter_map(|id| {
- services()
- .rooms
- .timeline
- .get_pdu_count(id)
- .ok()
- .flatten()
- .map(|r| (id, r))
- })
- .max_by_key(|(_, count)| *count)
- {
+ for event_id in &user_updates.event_ids {
let mut user_receipts = BTreeMap::new();
- user_receipts.insert(user_id.clone(), user_updates.data);
+ user_receipts.insert(user_id.clone(), user_updates.data.clone());
let mut receipts = BTreeMap::new();
receipts.insert(ReceiptType::Read, user_receipts);
@@ -396,13 +382,11 @@ pub(crate) async fn send_transaction_message_route(
content: ReceiptEventContent(receipt_content),
room_id: room_id.clone(),
};
+
services()
.rooms
.read_receipt
.readreceipt_update(&user_id, &room_id, event)?;
- } else {
- // TODO fetch missing events
- debug_error!("No known event ids in read receipt: {:?}", user_updates);
}
} else {
debug_warn!(%user_id, %room_id, "received read receipt EDU for user not in room");
From 887496d040be61fe9a78169b4fa89febbec08812 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sat, 1 Jun 2024 12:10:25 +0000
Subject: [PATCH 0032/2091] consolidate default cargo features
Signed-off-by: Jason Volk
---
Cargo.lock | 7 +--
src/admin/Cargo.toml | 8 ---
src/api/Cargo.toml | 7 ---
src/core/Cargo.toml | 11 ----
src/database/Cargo.toml | 8 ---
src/main/Cargo.toml | 117 ++++++++++++++++++++++++++++++++++------
src/router/Cargo.toml | 9 ----
src/service/Cargo.toml | 17 ------
8 files changed, 104 insertions(+), 80 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 375440ef..b2975e33 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -572,8 +572,12 @@ name = "conduit"
version = "0.4.1"
dependencies = [
"clap",
+ "conduit_admin",
+ "conduit_api",
"conduit_core",
+ "conduit_database",
"conduit_router",
+ "conduit_service",
"console-subscriber",
"hardened_malloc-rs",
"log",
@@ -761,18 +765,15 @@ dependencies = [
"log",
"loole",
"lru-cache",
- "parking_lot",
"rand",
"regex",
"reqwest",
"ruma",
"ruma-identifiers-validation",
- "rusqlite",
"serde",
"serde_json",
"serde_yaml",
"sha2",
- "thread_local",
"tokio",
"tracing",
"url",
diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml
index 244cf7d2..42327430 100644
--- a/src/admin/Cargo.toml
+++ b/src/admin/Cargo.toml
@@ -17,14 +17,6 @@ crate-type = [
]
[features]
-default = [
- "rocksdb",
- "io_uring",
- "jemalloc",
- "zstd_compression",
- "release_max_log_level",
-]
-
dev_release_log_level = []
release_max_log_level = [
"tracing/max_level_trace",
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index 315fa76c..a80a254e 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -17,13 +17,6 @@ crate-type = [
]
[features]
-default = [
- "element_hacks",
- "gzip_compression",
- "brotli_compression",
- "release_max_log_level",
-]
-
element_hacks = []
dev_release_log_level = []
release_max_log_level = [
diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml
index a0c8a060..dd17946f 100644
--- a/src/core/Cargo.toml
+++ b/src/core/Cargo.toml
@@ -17,17 +17,6 @@ crate-type = [
]
[features]
-default = [
- "rocksdb",
- "io_uring",
- "jemalloc",
- "gzip_compression",
- "zstd_compression",
- "brotli_compression",
- "sentry_telemetry",
- "release_max_log_level",
-]
-
dev_release_log_level = []
release_max_log_level = [
"tracing/max_level_trace",
diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml
index e23d2d7b..113414cb 100644
--- a/src/database/Cargo.toml
+++ b/src/database/Cargo.toml
@@ -17,14 +17,6 @@ crate-type = [
]
[features]
-default = [
- "rocksdb",
- "io_uring",
- "jemalloc",
- "zstd_compression",
- "release_max_log_level",
-]
-
dev_release_log_level = []
release_max_log_level = [
"tracing/max_level_trace",
diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml
index 06c8c697..94c62656 100644
--- a/src/main/Cargo.toml
+++ b/src/main/Cargo.toml
@@ -37,27 +37,57 @@ assets = [
[features]
default = [
- "sentry_telemetry",
+ "brotli_compression",
+ "element_hacks",
+ "gzip_compression",
+ "io_uring",
+ "jemalloc",
"release_max_log_level",
+ "rocksdb",
+ "sentry_telemetry",
+ "systemd",
+ "zstd_compression",
]
-# increases performance, reduces build times, and reduces binary size by not compiling or
-# genreating code for log level filters that users will generally not use (debug and trace)
-release_max_log_level = [
- "tracing/max_level_trace",
- "tracing/release_max_level_info",
- "log/max_level_trace",
- "log/release_max_level_info",
+axum_dual_protocol = [
+ "conduit-router/axum_dual_protocol",
]
-sentry_telemetry = [
- "dep:sentry",
- "dep:sentry-tracing",
- "dep:sentry-tower",
+brotli_compression = [
+ "conduit-api/brotli_compression",
+ "conduit-core/brotli_compression",
+ "conduit-router/brotli_compression",
+ "conduit-service/brotli_compression",
]
-# enable the tokio_console server ncompatible with release_max_log_level
-tokio_console = [
- "dep:console-subscriber",
- "tokio/tracing",
+dev_release_log_level = [
+ "conduit-api/dev_release_log_level",
+ "conduit-core/dev_release_log_level",
+ "conduit-database/dev_release_log_level",
+ "conduit-router/dev_release_log_level",
+ "conduit-service/dev_release_log_level",
+]
+element_hacks = [
+ "conduit-api/element_hacks",
+]
+gzip_compression = [
+ "conduit-api/gzip_compression",
+ "conduit-router/gzip_compression",
+ "conduit-service/gzip_compression",
+]
+hardened_malloc = [
+ "conduit-core/hardened_malloc",
+]
+io_uring = [
+ "conduit-admin/io_uring",
+ "conduit-core/io_uring",
+ "conduit-database/io_uring",
+]
+jemalloc = [
+ "conduit-admin/jemalloc",
+ "conduit-core/jemalloc",
+ "conduit-database/jemalloc",
+]
+jemalloc_prof = [
+ "conduit-core/jemalloc_prof",
]
perf_measurements = [
"dep:opentelemetry",
@@ -65,11 +95,64 @@ perf_measurements = [
"dep:tracing-opentelemetry",
"dep:opentelemetry_sdk",
"dep:opentelemetry-jaeger",
+ "conduit-core/perf_measurements",
+ "conduit-core/sentry_telemetry",
+]
+# increases performance, reduces build times, and reduces binary size by not compiling or
+# generating code for log level filters that users will generally not use (debug and trace)
+release_max_log_level = [
+ "tracing/max_level_trace",
+ "tracing/release_max_level_info",
+ "log/max_level_trace",
+ "log/release_max_level_info",
+ "conduit-admin/release_max_log_level",
+ "conduit-api/release_max_log_level",
+ "conduit-core/release_max_log_level",
+ "conduit-database/release_max_log_level",
+ "conduit-router/release_max_log_level",
+ "conduit-service/release_max_log_level",
+]
+rocksdb = [
+ "conduit-admin/rocksdb",
+ "conduit-core/rocksdb",
+ "conduit-database/rocksdb",
+]
+sentry_telemetry = [
+ "dep:sentry",
+ "dep:sentry-tracing",
+ "dep:sentry-tower",
+ "conduit-core/sentry_telemetry",
+ "conduit-router/sentry_telemetry",
+]
+sha256_media = [
+ "conduit-service/sha256_media",
+]
+sqlite = [
+ "conduit-core/sqlite",
+ "conduit-database/sqlite",
+]
+systemd = [
+ "conduit-router/systemd",
+]
+# enable the tokio_console server, incompatible with release_max_log_level
+tokio_console = [
+ "dep:console-subscriber",
+ "tokio/tracing",
+]
+zstd_compression = [
+ "conduit-admin/zstd_compression",
+ "conduit-core/zstd_compression",
+ "conduit-database/zstd_compression",
+ "conduit-router/zstd_compression",
]
[dependencies]
-conduit-router.workspace = true
+conduit-admin.workspace = true
+conduit-api.workspace = true
conduit-core.workspace = true
+conduit-database.workspace = true
+conduit-router.workspace = true
+conduit-service.workspace = true
tokio.workspace = true
log.workspace = true
diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml
index 11028112..6084aa07 100644
--- a/src/router/Cargo.toml
+++ b/src/router/Cargo.toml
@@ -17,15 +17,6 @@ crate-type = [
]
[features]
-default = [
- "systemd",
- "sentry_telemetry",
- "gzip_compression",
- "zstd_compression",
- "brotli_compression",
- "release_max_log_level",
-]
-
dev_release_log_level = []
release_max_log_level = [
"tracing/max_level_trace",
diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml
index e4189125..f6f682a9 100644
--- a/src/service/Cargo.toml
+++ b/src/service/Cargo.toml
@@ -17,12 +17,6 @@ crate-type = [
]
[features]
-default = [
- "gzip_compression",
- "brotli_compression",
- "release_max_log_level",
-]
-
dev_release_log_level = []
release_max_log_level = [
"tracing/max_level_trace",
@@ -30,11 +24,6 @@ release_max_log_level = [
"log/max_level_trace",
"log/release_max_level_info",
]
-sqlite = [
- "dep:rusqlite",
- "dep:parking_lot",
- "dep:thread_local",
-]
gzip_compression = [
"reqwest/gzip",
]
@@ -63,22 +52,16 @@ jsonwebtoken.workspace = true
log.workspace = true
loole.workspace = true
lru-cache.workspace = true
-parking_lot.optional = true
-parking_lot.workspace = true
rand.workspace = true
regex.workspace = true
reqwest.workspace = true
ruma-identifiers-validation.workspace = true
ruma.workspace = true
-rusqlite.optional = true
-rusqlite.workspace = true
serde_json.workspace = true
serde.workspace = true
serde_yaml.workspace = true
sha2.optional = true
sha2.workspace = true
-thread_local.optional = true
-thread_local.workspace = true
tokio.workspace = true
tracing.workspace = true
url.workspace = true
From 95ca9d00a2c8d61f429b477247b73655af0831a9 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Sun, 2 Jun 2024 21:08:52 +0000
Subject: [PATCH 0033/2091] nix: get all features from main crate
---
nix/pkgs/main/default.nix | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix
index 86595691..f91a9cdd 100644
--- a/nix/pkgs/main/default.nix
+++ b/nix/pkgs/main/default.nix
@@ -23,8 +23,6 @@
let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
-workspaceMembers = builtins.map (member: "${inputs.self}/src/${member}")
- (builtins.attrNames (builtins.readDir "${inputs.self}/src"));
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features) ++
@@ -34,10 +32,8 @@ crateFeatures = path:
manifest.dependencies);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
-allDefaultFeatures = lib.unique
- (lib.flatten (builtins.map crateDefaultFeatures workspaceMembers));
-allFeatures = lib.unique
- (lib.flatten (builtins.map crateFeatures workspaceMembers));
+allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
+allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
From de79b66cea4937d68f57f0e0490e5c04a9fc6151 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Sun, 2 Jun 2024 18:00:38 -0400
Subject: [PATCH 0034/2091] misc docs changes
Signed-off-by: strawberry
---
docs/appservices.md | 22 ++++++----------------
docs/deploying/generic.md | 4 +++-
2 files changed, 9 insertions(+), 17 deletions(-)
diff --git a/docs/appservices.md b/docs/appservices.md
index 84385ab9..8d646921 100644
--- a/docs/appservices.md
+++ b/docs/appservices.md
@@ -12,9 +12,9 @@ and later starting it.
At some point the appservice guide should ask you to add a registration yaml
file to the homeserver. In Synapse you would do this by adding the path to the
-homeserver.yaml, but in Conduit you can do this from within Matrix:
+homeserver.yaml, but in conduwuit you can do this from within Matrix:
-First, go into the #admins room of your homeserver. The first person that
+First, go into the `#admins` room of your homeserver. The first person that
registered on the homeserver automatically joins it. Then send a message into
the room like this:
@@ -31,13 +31,13 @@ the room like this:
```
You can confirm it worked by sending a message like this:
-`@conduit:your.server.name: appservices list`
+`!admin appservices list`
The `@conduit` bot should answer with `Appservices (1): your-bridge`
-Then you are done. Conduit will send messages to the appservices and the
+Then you are done. conduwuit will send messages to the appservices and the
appservice can send requests to the homeserver. You don't need to restart
-Conduit, but if it doesn't work, restarting while the appservice is running
+conduwuit, but if it doesn't work, restarting while the appservice is running
could help.
## Appservice-specific instructions
@@ -46,16 +46,6 @@ could help.
To remove an appservice go to your admin room and execute
-`@conduit:your.server.name: appservices unregister `
+`!admin appservices unregister `
where `` is one of the outputs of `appservices list`.
-
-### Tested appservices
-
-These appservices have been tested and work with Conduit without any extra steps:
-
-- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord)
-- [mautrix-hangouts](https://github.com/mautrix/hangouts/)
-- [mautrix-telegram](https://github.com/mautrix/telegram/)
-- [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward.
-- [heisenbridge](https://github.com/hifi/heisenbridge/)
diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md
index e23c75d4..dc47f51b 100644
--- a/docs/deploying/generic.md
+++ b/docs/deploying/generic.md
@@ -49,7 +49,7 @@ The systemd unit for conduwuit can be found [here](../configuration.md#example-s
Now we need to create the conduwuit's config file in `/etc/conduwuit/conduwuit.toml`. The example config can be found at [conduwuit-example.toml](../configuration.md). **Please take a moment to read it. You need to change at least the server name.**
-RocksDB (`rocksdb`) is the only supported database backend. SQLite only exists for historical reasons and is not recommended. Any performance issues, storage issues, database issues, etc will not be assisted if using SQLite and you will be asked to migrate to RocksDB first.
+RocksDB is the only supported database backend. SQLite only exists for historical reasons, is not recommended, and will be removed soon (likely in v0.5.0). Any performance issues, storage issues, database issues, etc will not be assisted if using SQLite and you will be asked to migrate to RocksDB first.
## Setting the correct file permissions
@@ -74,6 +74,8 @@ sudo chmod 700 /var/lib/conduwuit/
Refer to the documentation or various guides online of your chosen reverse proxy software. A [Caddy](https://caddyserver.com/) example will be provided as this is the recommended reverse proxy for new users and is very trivial to use (handles TLS, reverse proxy headers, etc transparently with proper defaults).
+Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization header, making federation non-functional. If using Apache, you need to use `nocanon` to prevent this.
+
### Caddy
Create `/etc/caddy/conf.d/conduwuit_caddyfile` and enter this (substitute for your server name).
From f09e0dc1379055c0a2fe6e112a022e723fb447c7 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Sun, 2 Jun 2024 19:17:23 -0400
Subject: [PATCH 0035/2091] add conduwuit community code of conduct
this is a code of conduct that the moderation team made up,
specific to conduwuit's community spaces such as the matrix rooms.
the matrix foundation and contributor's covenant still apply, but
having conduwuit-specific additional guidelines help
Signed-off-by: strawberry
---
docs/SUMMARY.md | 1 +
docs/conduwuit_coc.md | 46 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 47 insertions(+)
create mode 100644 docs/conduwuit_coc.md
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index bafe88dc..6f3bd570 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -17,3 +17,4 @@
- [Contributing](contributing.md)
- [Testing](development/testing.md)
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
+- [conduwuit Community Code of Conduct](conduwuit_coc.md)
diff --git a/docs/conduwuit_coc.md b/docs/conduwuit_coc.md
new file mode 100644
index 00000000..858884c5
--- /dev/null
+++ b/docs/conduwuit_coc.md
@@ -0,0 +1,46 @@
+This page serves as a referenceable link to conduwuit's Code of Conduct for the Matrix rooms and any further community spaces related to conduwuit.
+
+# Conduwuit Community Code of Conduct
+
+Welcome to the Conduwuit community! We’re excited to have you here. This space is dedicated to fostering a positive, supportive, and inclusive environment for everyone. Here are our guidelines to help maintain the welcoming atmosphere that sets Conduwuit apart.
+
+For the foundational rules, please refer to the [Matrix.org Code of Conduct](https://matrix.org/legal/code-of-conduct/) and the [Contributor's Covenant](https://github.com/girlbossceo/conduwuit/blob/main/CODE_OF_CONDUCT.md). Below are additional guidelines specific to the Conduwuit community.
+
+## Our Values and Guidelines
+
+1. **Respect and Inclusivity**: We are committed to maintaining a community where everyone feels safe and respected. Discrimination, harassment, or hate speech of any kind will not be tolerated. Recognise that each community member experiences the world differently based on their past experiences, background, and identity. Share your own experiences and be open to learning about others' diverse perspectives.
+
+2. **Positivity and Constructiveness**: Engage in constructive discussions and support each other. If you are feeling angry, negative, or aggressive, take a break until you can participate in a positive and constructive manner. Process intense feelings with a friend or in a private setting before engaging in community conversations to help maintain a supportive and focused environment.
+
+3. **Clarity and Understanding**: We understand that our community includes neurodivergent individuals and those who may not appreciate sarcasm or subtlety. Communicate clearly and kindly, avoiding sarcasm and ensuring your messages are easily understood by all. Additionally, avoid putting the burden of education on marginalized groups by doing your own research before asking for explanations.
+
+4. **Be Open to Inclusivity**: Actively engage in conversations about making our community more inclusive. Report discriminatory behaviour to the moderators and be open to constructive critiques that aim to improve our community. Understand that discussing discrimination and negative experiences can be emotionally taxing, so focus on the message rather than critiquing the tone used.
+
+5. **Commit to Inclusivity**: Building an inclusive community requires time, energy, and resources. Recognise that addressing discrimination and bias is an ongoing process that necessitates commitment and action from all community members.
+
+## Community Spaces
+
+This Code of Conduct applies to the entire [Conduwuit Space](https://matrix.to/#/#conduwuit-space:puppygock.gay) and its rooms:
+
+### [Conduwuit](https://matrix.to/#/#conduwuit:puppygock.gay)
+
+This room is for support and discussions about Conduwuit. Ask questions, share insights, and help each other out.
+
+### [Conduwuit Offtopic](https://matrix.to/#/#conduwuit-offtopic:girlboss.ceo)
+
+For off-topic community conversations about any subject. While this room allows for a wide range of topics, the same CoC applies. Keep discussions respectful and inclusive, and avoid divisive subjects like country/world politics. General topics, such as world events, are welcome as long as they follow the CoC.
+
+### [Conduwuit Development](https://matrix.to/#/#conduwuit-dev:puppygock.gay)
+
+This room is dedicated to discussing active development of Conduwuit. Posting requires an elevated power level, which can be requested in one of the other rooms. Use this space to collaborate and innovate.
+
+## Enforcement
+
+We have a zero-tolerance policy for violations of this Code of Conduct. If someone’s behavior makes you uncomfortable, please report it to the moderators. Actions we may take include:
+
+1. **Warning**: A warning given directly in the room or via a private message from the moderators, identifying the violation and requesting corrective action.
+2. **Kick or Ban**: Egregious behavior may result in an immediate kick or ban to protect other community members. Bans are considered permanent and will only be reversed in exceptional circumstances after proven good behaviour.
+
+Together, let’s build a community where everyone feels valued and respected.
+
+- The Conduwuit Moderation Team
From 1a4736d40bcd5aa26d86e3d66cd64ec1bdcd055e Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Sun, 2 Jun 2024 22:36:00 +0000
Subject: [PATCH 0036/2091] support configuring multiple bind hosts; default to
dual localhost.
Signed-off-by: Jason Volk
---
src/core/config/check.rs | 69 +++++++++++++++++++---------------------
src/core/config/mod.rs | 52 +++++++++++++++++++-----------
2 files changed, 67 insertions(+), 54 deletions(-)
diff --git a/src/core/config/check.rs b/src/core/config/check.rs
index a631c4f8..403aa27c 100644
--- a/src/core/config/check.rs
+++ b/src/core/config/check.rs
@@ -27,46 +27,43 @@ pub fn check(config: &Config) -> Result<(), Error> {
));
}
- if config.address.is_loopback() && cfg!(unix) {
- debug!(
- "Found loopback listening address {}, running checks if we're in a container.",
- config.address
- );
+ config.get_bind_addrs().iter().for_each(|addr| {
+ if addr.ip().is_loopback() && cfg!(unix) {
+ debug!("Found loopback listening address {addr}, running checks if we're in a container.",);
- #[cfg(unix)]
- if Path::new("/proc/vz").exists() /* Guest */ && !Path::new("/proc/bz").exists()
- /* Host */
- {
- error!(
- "You are detected using OpenVZ with a loopback/localhost listening address of {}. If you are using \
- OpenVZ for containers and you use NAT-based networking to communicate with the host and guest, this \
- will NOT work. Please change this to \"0.0.0.0\". If this is expected, you can ignore.",
- config.address
- );
- }
+ #[cfg(unix)]
+ if Path::new("/proc/vz").exists() /* Guest */ && !Path::new("/proc/bz").exists()
+ /* Host */
+ {
+ error!(
+ "You are detected using OpenVZ with a loopback/localhost listening address of {addr}. If you are \
+ using OpenVZ for containers and you use NAT-based networking to communicate with the host and \
+ guest, this will NOT work. Please change this to \"0.0.0.0\". If this is expected, you can \
+ ignore.",
+ );
+ }
- #[cfg(unix)]
- if Path::new("/.dockerenv").exists() {
- error!(
- "You are detected using Docker with a loopback/localhost listening address of {}. If you are using a \
- reverse proxy on the host and require communication to conduwuit in the Docker container via \
- NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". If this is expected, \
- you can ignore.",
- config.address
- );
- }
+ #[cfg(unix)]
+ if Path::new("/.dockerenv").exists() {
+ error!(
+ "You are detected using Docker with a loopback/localhost listening address of {addr}. If you are \
+ using a reverse proxy on the host and require communication to conduwuit in the Docker container \
+ via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". If this is \
+ expected, you can ignore.",
+ );
+ }
- #[cfg(unix)]
- if Path::new("/run/.containerenv").exists() {
- error!(
- "You are detected using Podman with a loopback/localhost listening address of {}. If you are using a \
- reverse proxy on the host and require communication to conduwuit in the Podman container via \
- NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". If this is expected, \
- you can ignore.",
- config.address
- );
+ #[cfg(unix)]
+ if Path::new("/run/.containerenv").exists() {
+ error!(
+ "You are detected using Podman with a loopback/localhost listening address of {addr}. If you are \
+ using a reverse proxy on the host and require communication to conduwuit in the Podman container \
+ via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". If this is \
+ expected, you can ignore.",
+ );
+ }
}
- }
+ });
// rocksdb does not allow max_log_files to be 0
if config.rocksdb_max_log_files == 0 && cfg!(feature = "rocksdb") {
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index bb13923b..67b83975 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1,7 +1,7 @@
use std::{
collections::BTreeMap,
fmt::{self, Write as _},
- net::{IpAddr, Ipv6Addr, SocketAddr},
+ net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
path::PathBuf,
};
@@ -36,13 +36,20 @@ struct ListeningPort {
ports: Either>,
}
+#[derive(Deserialize, Clone, Debug)]
+#[serde(transparent)]
+struct ListeningAddr {
+ #[serde(with = "either::serde_untagged")]
+ addrs: Either>,
+}
+
/// all the config options for conduwuit
#[derive(Clone, Debug, Deserialize)]
#[allow(clippy::struct_excessive_bools)]
pub struct Config {
/// [`IpAddr`] conduwuit will listen on (can be IPv4 or IPv6)
#[serde(default = "default_address")]
- pub address: IpAddr,
+ address: ListeningAddr,
/// default TCP port(s) conduwuit will listen on
#[serde(default = "default_port")]
port: ListeningPort,
@@ -471,22 +478,27 @@ impl Config {
#[must_use]
pub fn get_bind_addrs(&self) -> Vec {
- match &self.port.ports {
- Left(port) => {
- // Left is only 1 value, so make a vec with 1 value only
- let port_vec = [port];
+ let mut addrs = Vec::new();
+ for host in &self.get_bind_hosts() {
+ for port in &self.get_bind_ports() {
+ addrs.push(SocketAddr::new(*host, *port));
+ }
+ }
- port_vec
- .iter()
- .copied()
- .map(|port| SocketAddr::from((self.address, *port)))
- .collect::>()
- },
- Right(ports) => ports
- .iter()
- .copied()
- .map(|port| SocketAddr::from((self.address, port)))
- .collect::>(),
+ addrs
+ }
+
+ fn get_bind_hosts(&self) -> Vec {
+ match &self.address.addrs {
+ Left(addr) => vec![*addr],
+ Right(addrs) => addrs.clone(),
+ }
+ }
+
+ fn get_bind_ports(&self) -> Vec {
+ match &self.port.ports {
+ Left(port) => vec![*port],
+ Right(ports) => ports.clone(),
}
}
@@ -875,7 +887,11 @@ impl fmt::Display for Config {
fn true_fn() -> bool { true }
-fn default_address() -> IpAddr { Ipv6Addr::LOCALHOST.into() }
+fn default_address() -> ListeningAddr {
+ ListeningAddr {
+ addrs: Right(vec![Ipv4Addr::LOCALHOST.into(), Ipv6Addr::LOCALHOST.into()]),
+ }
+}
fn default_port() -> ListeningPort {
ListeningPort {
From aef25ea1f752b60d1e8db9ed54b94089aed02101 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Tue, 28 May 2024 02:28:45 -0400
Subject: [PATCH 0037/2091] enable tracing feature for axum
Signed-off-by: strawberry
---
Cargo.lock | 2 ++
Cargo.toml | 1 +
2 files changed, 3 insertions(+)
diff --git a/Cargo.lock b/Cargo.lock
index b2975e33..a6384b35 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -224,6 +224,7 @@ dependencies = [
"tower",
"tower-layer",
"tower-service",
+ "tracing",
]
[[package]]
@@ -261,6 +262,7 @@ dependencies = [
"sync_wrapper 0.1.2",
"tower-layer",
"tower-service",
+ "tracing",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index cabd47a2..cc6d5960 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -75,6 +75,7 @@ features = [
"json",
"matched-path",
"tokio",
+ "tracing",
]
[workspace.dependencies.axum-extra]
From 4aead5de7a8890a0bebd4794c75a7e3e385012b3 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Tue, 28 May 2024 12:51:22 -0400
Subject: [PATCH 0038/2091] relax a couple restrictions on custom room IDs and
aliases
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
🙃
Signed-off-by: strawberry
---
src/api/client_server/room.rs | 16 ----------------
1 file changed, 16 deletions(-)
diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs
index 4593b2e4..cb8930ec 100644
--- a/src/api/client_server/room.rs
+++ b/src/api/client_server/room.rs
@@ -900,20 +900,6 @@ async fn room_alias_check(
ErrorKind::InvalidParam,
"Room alias contained spaces which is not a valid room alias.",
));
- } else if room_alias_name.len() > 255 {
- // there is nothing spec-wise saying to check the limit of this,
- // however absurdly long room aliases are guaranteed to be unreadable or done
- // maliciously. there is no reason a room alias should even exceed 100
- // characters as is. generally in spec, 255 is matrix's fav number
- return Err(Error::BadRequest(
- ErrorKind::InvalidParam,
- "Room alias is excessively long, clients may not be able to handle this. Please shorten it.",
- ));
- } else if room_alias_name.contains('"') {
- return Err(Error::BadRequest(
- ErrorKind::InvalidParam,
- "Room alias contained `\"` which is not allowed.",
- ));
}
// check if room alias is forbidden
@@ -979,8 +965,6 @@ fn custom_room_id_check(custom_room_id: &String) -> Result {
ErrorKind::InvalidParam,
"Custom room ID contained spaces which is not valid.",
));
- } else if custom_room_id.len() > 255 {
- return Err(Error::BadRequest(ErrorKind::InvalidParam, "Custom room ID is too long."));
}
let full_room_id = format!("!{}:{}", custom_room_id, services().globals.config.server_name);
From 884cbab13557ddd34c2a37f7cbb88522095332f8 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Tue, 28 May 2024 12:53:39 -0400
Subject: [PATCH 0039/2091] ci: comment out hardened_malloc clippy check for
now
i need to either fix static linking, or just make it
dynamic always (but then kinda useless idk)
https://gitlab.com/conduwuit/conduwuit/-/jobs/6953419673#L3155
Signed-off-by: strawberry
---
engage.toml | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/engage.toml b/engage.toml
index 1ce015be..9a2ae731 100644
--- a/engage.toml
+++ b/engage.toml
@@ -115,18 +115,18 @@ cargo clippy \
-D warnings
"""
-[[task]]
-name = "clippy/hardened_malloc"
-group = "lints"
-script = """
-cargo clippy \
- --workspace \
- --features hardened_malloc \
- --all-targets \
- --color=always \
- -- \
- -D warnings
-"""
+#[[task]]
+#name = "clippy/hardened_malloc"
+#group = "lints"
+#script = """
+#cargo clippy \
+# --workspace \
+# --features hardened_malloc \
+# --all-targets \
+# --color=always \
+# -- \
+# -D warnings
+#"""
[[task]]
name = "lychee"
From 68f42f5a2fc0a5e84c59aa7a72dbe7855ea2de27 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Tue, 28 May 2024 15:07:30 -0400
Subject: [PATCH 0040/2091] fed: relax read receipt EDU check
so in theory: guest users, peaking over federation,
and world readable rooms should be allowed to send
read receipts even if they're not joined.
relaxing this check to only allow the read receipt if
the server has at least 1 member in the room makes
some of this still work
Signed-off-by: strawberry
---
src/api/server_server.rs | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index 1fe1ed1b..7eb4d7cf 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -367,7 +367,13 @@ pub(crate) async fn send_transaction_message_route(
continue;
}
- if services().rooms.state_cache.is_joined(&user_id, &room_id)? {
+ if services()
+ .rooms
+ .state_cache
+ .room_members(&room_id)
+ .filter_map(Result::ok)
+ .any(|member| member.server_name() == user_id.server_name())
+ {
for event_id in &user_updates.event_ids {
let mut user_receipts = BTreeMap::new();
user_receipts.insert(user_id.clone(), user_updates.data.clone());
@@ -389,7 +395,7 @@ pub(crate) async fn send_transaction_message_route(
.readreceipt_update(&user_id, &room_id, event)?;
}
} else {
- debug_warn!(%user_id, %room_id, "received read receipt EDU for user not in room");
+ debug_warn!(%user_id, %room_id, %origin, "received read receipt EDU from server who does not have a single member from their server in the room");
continue;
}
}
@@ -411,7 +417,7 @@ pub(crate) async fn send_transaction_message_route(
.acl_check(typing.user_id.server_name(), &typing.room_id)
.is_err()
{
- debug_warn!(%typing.user_id, %typing.room_id, "received typing EDU for ACL'd user's server");
+ debug_warn!(%typing.user_id, %typing.room_id, %origin, "received typing EDU for ACL'd user's server");
continue;
}
@@ -441,7 +447,7 @@ pub(crate) async fn send_transaction_message_route(
.await?;
}
} else {
- debug_warn!(%typing.user_id, %typing.room_id, "received typing EDU for user not in room");
+ debug_warn!(%typing.user_id, %typing.room_id, %origin, "received typing EDU for user not in room");
continue;
}
},
From f0533e07ef1cf2257e5250be988226630cfcf8f1 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Tue, 28 May 2024 15:59:08 -0400
Subject: [PATCH 0041/2091] fed: remove unnecessary mutables, use with_capacity
in couple more places
Signed-off-by: strawberry
---
src/api/server_server.rs | 37 ++++++++++++++++++++++---------------
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index 7eb4d7cf..277c971d 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -88,15 +88,15 @@ pub(crate) async fn get_server_version_route(
// Response type for this endpoint is Json because we need to calculate a
// signature for the response
pub(crate) async fn get_server_keys_route() -> Result {
- let mut verify_keys: BTreeMap = BTreeMap::new();
- verify_keys.insert(
+ let verify_keys: BTreeMap = BTreeMap::from([(
format!("ed25519:{}", services().globals.keypair().version())
.try_into()
.expect("found invalid server signing keys in DB"),
VerifyKey {
key: Base64::new(services().globals.keypair().public_key().to_vec()),
},
- );
+ )]);
+
let mut response = serde_json::from_slice(
get_server_keys::v2::Response {
server_key: Raw::new(&ServerSigningKeys {
@@ -375,14 +375,11 @@ pub(crate) async fn send_transaction_message_route(
.any(|member| member.server_name() == user_id.server_name())
{
for event_id in &user_updates.event_ids {
- let mut user_receipts = BTreeMap::new();
- user_receipts.insert(user_id.clone(), user_updates.data.clone());
+ let user_receipts = BTreeMap::from([(user_id.clone(), user_updates.data.clone())]);
- let mut receipts = BTreeMap::new();
- receipts.insert(ReceiptType::Read, user_receipts);
+ let receipts = BTreeMap::from([(ReceiptType::Read, user_receipts)]);
- let mut receipt_content = BTreeMap::new();
- receipt_content.insert(event_id.to_owned(), receipts);
+ let receipt_content = BTreeMap::from([(event_id.to_owned(), receipts)]);
let event = ReceiptEvent {
content: ReceiptEventContent(receipt_content),
@@ -633,13 +630,17 @@ pub(crate) async fn get_backfill_route(body: Ruma) ->
.max()
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Event not found."))?;
- let limit = body.limit.min(uint!(100));
+ let limit = body
+ .limit
+ .min(uint!(100))
+ .try_into()
+ .expect("UInt could not be converted to usize");
let all_events = services()
.rooms
.timeline
.pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
- .take(limit.try_into().unwrap());
+ .take(limit);
let events = all_events
.filter_map(Result::ok)
@@ -685,11 +686,17 @@ pub(crate) async fn get_missing_events_route(
.event_handler
.acl_check(origin, &body.room_id)?;
- let mut queued_events = body.latest_events.clone();
- let mut events = Vec::new();
+ let limit = body
+ .limit
+ .try_into()
+ .expect("UInt could not be converted to usize");
- let mut i = 0;
- while i < queued_events.len() && events.len() < u64::from(body.limit) as usize {
+ let mut queued_events = body.latest_events.clone();
+ // the vec will never have more entries than the limit
+ let mut events = Vec::with_capacity(limit);
+
+ let mut i: usize = 0;
+ while i < queued_events.len() && events.len() < limit {
if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? {
let room_id_str = pdu
.get("room_id")
From 1ac72ab914f959c7beb438459b532819b831b273 Mon Sep 17 00:00:00 2001
From: strawberry
Date: Tue, 28 May 2024 16:55:08 -0400
Subject: [PATCH 0042/2091] init a few state hashmaps using with_capacity
Signed-off-by: strawberry
---
src/service/rooms/event_handler/mod.rs | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs
index 754365c3..e523ce79 100644
--- a/src/service/rooms/event_handler/mod.rs
+++ b/src/service/rooms/event_handler/mod.rs
@@ -375,7 +375,7 @@ impl Service {
// auth events
debug!("Checking based on auth events");
// Build map of auth events
- let mut auth_events = HashMap::new();
+ let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len());
for id in &incoming_pdu.auth_events {
let Some(auth_event) = services().rooms.timeline.get_pdu(id)? else {
warn!("Could not find auth event {}", id);
@@ -814,7 +814,7 @@ impl Service {
&self, incoming_pdu: &Arc