Better span traces

More information, fewer useless spans.

The span on request_spawn() can never emit any events; it only creates a useless empty root span for every request.
This commit is contained in:
Xiretza 2024-05-06 17:38:59 +00:00
parent 56edd5b037
commit 1e31428470
10 changed files with 17 additions and 6 deletions

View file

@ -138,6 +138,7 @@ pub(crate) async fn sync_events_route(
result
}
#[tracing::instrument(skip(body, tx))]
async fn sync_helper_wrapper(
sender_user: OwnedUserId, sender_device: OwnedDeviceId, body: sync_events::v3::Request,
tx: Sender<Option<Result<sync_events::v3::Response>>>,

View file

@ -421,7 +421,6 @@ impl KeyValueDatabase {
Ok(())
}
#[tracing::instrument]
fn start_check_for_updates_task() {
let timer_interval = Duration::from_secs(7200); // 2 hours
@ -472,7 +471,6 @@ impl KeyValueDatabase {
Ok(())
}
#[tracing::instrument]
fn start_cleanup_task() {
let timer_interval = Duration::from_secs(u64::from(services().globals.config.cleanup_second_interval));
@ -520,6 +518,7 @@ impl KeyValueDatabase {
});
}
#[tracing::instrument]
fn perform_cleanup() {
if !services().globals.config.rocksdb_periodic_cleanup {
return;

View file

@ -67,7 +67,10 @@ pub(crate) async fn build(server: &Server) -> io::Result<axum::routing::IntoMake
}
}
#[tracing::instrument(skip_all, name = "spawn")]
/// Ensures the request runs in a new tokio thread.
///
/// The axum request handler task gets cancelled if the connection is shut down;
by spawning our own task, processing continues after the client disconnects.
async fn request_spawn(
req: http::Request<axum::body::Body>, next: axum::middleware::Next,
) -> Result<axum::response::Response, StatusCode> {

View file

@ -161,6 +161,7 @@ impl Service {
self.sender.send(message).expect("message sent");
}
#[tracing::instrument(skip(self))]
async fn handle_event(&self, event: AdminRoomEvent, admin_room: &RoomId, server_user: &UserId) -> Result<()> {
let (mut message_content, reply) = match event {
AdminRoomEvent::SendMessage(content) => (content, None),

View file

@ -218,6 +218,7 @@ async fn presence_timer(user_id: OwnedUserId, timeout: Duration) -> OwnedUserId
user_id
}
#[tracing::instrument]
fn process_presence_timer(user_id: &OwnedUserId) -> Result<()> {
let idle_timeout = services().globals.config.presence_idle_timeout_s * 1_000;
let offline_timeout = services().globals.config.presence_offline_timeout_s * 1_000;

View file

@ -11,6 +11,7 @@ pub(crate) struct Service {
impl Service {
/// Replaces the previous read receipt.
#[tracing::instrument(skip(self, event))]
pub(crate) fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: ReceiptEvent) -> Result<()> {
self.db.readreceipt_update(user_id, room_id, event)?;
services().sending.flush_room(room_id)?;

View file

@ -628,6 +628,7 @@ impl Service {
Ok(pdu_id)
}
#[tracing::instrument(skip_all)]
pub(crate) fn create_hash_and_sign_event(
&self,
pdu_builder: PduBuilder,

View file

@ -10,6 +10,7 @@ use crate::{debug_error, services, utils, Error, Result};
///
/// Only returns Ok(None) if there is no url specified in the appservice
/// registration file
#[tracing::instrument(skip_all, fields(appservice = &registration.id))]
pub(crate) async fn send_request<T>(registration: Registration, request: T) -> Result<Option<T::IncomingResponse>>
where
T: OutgoingRequest + Debug,

View file

@ -176,7 +176,7 @@ impl Service {
Ok(())
}
#[tracing::instrument(skip(self, room_id))]
#[tracing::instrument(skip(self))]
pub(crate) fn flush_room(&self, room_id: &RoomId) -> Result<()> {
let servers = services()
.rooms
@ -234,6 +234,7 @@ impl Service {
Ok(())
}
#[tracing::instrument(skip(self))]
fn dispatch(&self, msg: Msg) -> Result<()> {
debug_assert!(!self.sender.is_full(), "channel full");
debug_assert!(!self.sender.is_closed(), "channel closed");

View file

@ -26,7 +26,7 @@ use super::{appservice, send, Destination, Msg, SendingEvent, Service};
use crate::{
service::presence::Presence,
services,
utils::{calculate_hash, user_id::user_is_local},
utils::{calculate_hash, debug_slice_truncated, user_id::user_is_local},
Error, PduEvent, Result,
};
@ -78,6 +78,7 @@ impl Service {
};
}
#[tracing::instrument(skip(self, _futures, statuses))]
fn handle_response_err(
&self, dest: Destination, _futures: &mut SendingFutures<'_>, statuses: &mut CurTransactionStatus, e: &Error,
) {
@ -91,6 +92,7 @@ impl Service {
});
}
#[tracing::instrument(skip(self, futures, statuses))]
fn handle_response_ok(
&self, dest: &Destination, futures: &mut SendingFutures<'_>, statuses: &mut CurTransactionStatus,
) {
@ -155,7 +157,7 @@ impl Service {
}
}
#[tracing::instrument(skip(self, dest, new_events, statuses))]
#[tracing::instrument(skip(self, dest, statuses), fields(new_events = debug_slice_truncated(&new_events, 3)))]
fn select_events(
&self,
dest: &Destination,