Compare commits

...

8 commits

Author SHA1 Message Date
Jade Ellis f62d8a42d4 ci: Don't specify container for image builder 2025-05-22 14:07:22 +01:00
    Some checks failed:
    Release Docker Image / define-variables (push) Failing after 1s
    Release Docker Image / build-image (linux/amd64, linux-amd64) (push) Has been skipped
    Release Docker Image / build-image (linux/arm64, linux-arm64) (push) Has been skipped
    Release Docker Image / merge (push) Has been skipped
    Rust Checks / Format (push) Failing after 2s
    Rust Checks / Clippy (push) Failing after 10s
    Rust Checks / Cargo Test (push) Failing after 9s
Jade Ellis f7dd4c6928 ci: Don't install rustup if it's already there 2025-05-22 14:01:16 +01:00
Jade Ellis d1cb893db1 chore: Fix more complicated clippy warnings 2025-05-22 13:49:22 +01:00
Jade Ellis ebad3c78c6 fixup! feat: Add command to purge sync tokens for empty rooms 2025-05-22 13:29:47 +01:00
Jade Ellis c338fd8453 feat: Add command to purge sync tokens for empty rooms 2025-05-22 13:21:10 +01:00
Jade Ellis a331790058 feat: Add admin command to delete sync tokens from a room 2025-05-22 13:21:09 +01:00
Jade Ellis 2ccbd7d60b fix: Reference config directly 2025-05-21 21:06:44 +01:00
Jade Ellis 60960c6e09 feat: Automatically set well-known support contacts 2025-05-21 20:32:53 +01:00
8 changed files with 372 additions and 31 deletions

View file

@@ -19,11 +19,20 @@ outputs:
rustc_version:
description: The rustc version installed
value: ${{ steps.rustc-version.outputs.version }}
rustup_version:
description: The rustup version installed
value: ${{ steps.rustup-version.outputs.version }}
runs:
using: composite
steps:
- name: Check if rustup is already installed
shell: bash
id: rustup-version
run: |
echo "version=$(rustup --version)" >> $GITHUB_OUTPUT
- name: Cache rustup toolchains
if: steps.rustup-version.outputs.version == ''
uses: actions/cache@v3
with:
path: |
@@ -33,6 +42,7 @@ runs:
# Requires repo to be cloned if toolchain is not specified
key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
- name: Install Rust toolchain
if: steps.rustup-version.outputs.version == ''
shell: bash
run: |
if ! command -v rustup &> /dev/null ; then
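For context, the rustup_version output added above can be read by any workflow step that invokes this composite action. A minimal sketch, assuming a hypothetical path for the action and a hypothetical step id:

      - name: Set up Rust
        id: rust-setup
        uses: ./.forgejo/actions/rust-toolchain  # hypothetical path to this composite action
      - name: Show detected rustup
        shell: bash
        run: echo "rustup reported: ${{ steps.rust-setup.outputs.rustup_version }}"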

View file

@@ -57,7 +57,6 @@ jobs:
build-image:
runs-on: dind
container: ghcr.io/catthehacker/ubuntu:act-latest
needs: define-variables
permissions:
contents: read
@@ -211,7 +210,6 @@ jobs:
merge:
runs-on: dind
container: ghcr.io/catthehacker/ubuntu:act-latest
needs: [define-variables, build-image]
steps:
- name: Download digests

View file

@@ -1641,19 +1641,29 @@
#
#server =
# This item is undocumented. Please contribute documentation for it.
# URL to a support page for the server, which will be served as part of
# the MSC1929 server support endpoint at /.well-known/matrix/support.
# Will be included alongside any contact information.
#
#support_page =
# This item is undocumented. Please contribute documentation for it.
# Role string for server support contacts, to be served as part of the
# MSC1929 server support endpoint at /.well-known/matrix/support.
#
#support_role =
#support_role = "m.role.admin"
# This item is undocumented. Please contribute documentation for it.
# Email address for server support contacts, to be served as part of the
# MSC1929 server support endpoint.
# This will be used along with support_mxid if specified.
#
#support_email =
# This item is undocumented. Please contribute documentation for it.
# Matrix ID for server support contacts, to be served as part of the
# MSC1929 server support endpoint.
# This will be used along with support_email if specified.
#
# If no email or mxid is specified, all of the server's admins will be
# listed.
#
#support_mxid =
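Taken together, a minimal sketch of how these options could be enabled (assuming they live under the usual [global.well_known] table; all values below are illustrative examples, not defaults):

[global.well_known]
support_page = "https://example.com/support"
support_role = "m.role.admin"
support_email = "admin@example.com"
support_mxid = "@admin:example.com"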

View file

@@ -1,6 +1,6 @@
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::OwnedRoomId;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId};
use crate::{PAGE_SIZE, admin_command, get_room_info};
@@ -66,3 +66,205 @@ pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result {
self.write_str(&format!("{result}")).await
}
#[admin_command]
pub(super) async fn purge_sync_tokens(&self, room: OwnedRoomOrAliasId) -> Result {
// Resolve the room ID from the room or alias ID
let room_id = self.services.rooms.alias.resolve(&room).await?;
// Delete all tokens for this room using the service method
let Ok(deleted_count) = self.services.rooms.user.delete_room_tokens(&room_id).await else {
return Err!("Failed to delete sync tokens for room {}", room_id);
};
self.write_str(&format!(
"Successfully deleted {deleted_count} sync tokens for room {room_id}"
))
.await
}
/// Target options for selecting which rooms to purge sync tokens from
#[derive(Default, Debug, clap::ValueEnum, Clone)]
pub(crate) enum RoomTargetOption {
#[default]
/// Target all rooms
All,
/// Target only disabled rooms
DisabledOnly,
/// Target only banned rooms
BannedOnly,
}
#[admin_command]
pub(super) async fn purge_empty_room_tokens(
&self,
yes: bool,
target_option: Option<RoomTargetOption>,
dry_run: bool,
) -> Result {
use conduwuit::{debug, info};
if !yes && !dry_run {
return Err!(
"Please confirm this operation with --yes as it may delete tokens from many rooms, \
or use --dry-run to simulate"
);
}
let mode = if dry_run { "Simulating" } else { "Starting" };
// Strictly, we should check whether these counters hit their max value after the loop and
// warn the user that the real count is too large to report accurately
let mut total_rooms_processed: usize = 0;
let mut empty_rooms_processed: u32 = 0;
let mut total_tokens_deleted: usize = 0;
let mut error_count: u32 = 0;
let mut skipped_rooms: u32 = 0;
info!("{} purge of sync tokens for rooms with no local users", mode);
// Get all rooms in the server
let all_rooms = self
.services
.rooms
.metadata
.iter_ids()
.collect::<Vec<_>>()
.await;
info!("Found {} rooms total on the server", all_rooms.len());
// Filter rooms based on options
let mut rooms = Vec::new();
for room_id in all_rooms {
if let Some(target) = &target_option {
match target {
| RoomTargetOption::DisabledOnly => {
if !self.services.rooms.metadata.is_disabled(room_id).await {
debug!("Skipping room {} as it's not disabled", room_id);
skipped_rooms = skipped_rooms.saturating_add(1);
continue;
}
},
| RoomTargetOption::BannedOnly => {
if !self.services.rooms.metadata.is_banned(room_id).await {
debug!("Skipping room {} as it's not banned", room_id);
skipped_rooms = skipped_rooms.saturating_add(1);
continue;
}
},
| RoomTargetOption::All => {},
}
}
rooms.push(room_id);
}
// Total number of rooms we'll be checking
let total_rooms = rooms.len();
info!(
"Processing {} rooms after filtering (skipped {} rooms)",
total_rooms, skipped_rooms
);
// Process each room
for room_id in rooms {
total_rooms_processed = total_rooms_processed.saturating_add(1);
// Count local users in this room
let local_users_count = self
.services
.rooms
.state_cache
.local_users_in_room(room_id)
.count()
.await;
// Only process rooms with no local users
if local_users_count == 0 {
empty_rooms_processed = empty_rooms_processed.saturating_add(1);
// In dry run mode, just count what would be deleted, don't actually delete
debug!(
"Room {} has no local users, {}",
room_id,
if dry_run {
"would purge sync tokens"
} else {
"purging sync tokens"
}
);
if dry_run {
// For dry run mode, count tokens without deleting
match self.services.rooms.user.count_room_tokens(room_id).await {
| Ok(count) =>
if count > 0 {
debug!("Would delete {} sync tokens for room {}", count, room_id);
total_tokens_deleted = total_tokens_deleted.saturating_add(count);
} else {
debug!("No sync tokens found for room {}", room_id);
},
| Err(e) => {
debug!("Error counting sync tokens for room {}: {:?}", room_id, e);
error_count = error_count.saturating_add(1);
},
}
} else {
// Real deletion mode
match self.services.rooms.user.delete_room_tokens(room_id).await {
| Ok(count) =>
if count > 0 {
debug!("Deleted {} sync tokens for room {}", count, room_id);
total_tokens_deleted = total_tokens_deleted.saturating_add(count);
} else {
debug!("No sync tokens found for room {}", room_id);
},
| Err(e) => {
debug!("Error purging sync tokens for room {}: {:?}", room_id, e);
error_count = error_count.saturating_add(1);
},
}
}
} else {
debug!("Room {} has {} local users, skipping", room_id, local_users_count);
}
// Log progress periodically
if total_rooms_processed % 100 == 0 || total_rooms_processed == total_rooms {
info!(
"Progress: {}/{} rooms processed, {} empty rooms found, {} tokens {}",
total_rooms_processed,
total_rooms,
empty_rooms_processed,
total_tokens_deleted,
if dry_run { "would be deleted" } else { "deleted" }
);
}
}
let action = if dry_run { "would be deleted" } else { "deleted" };
info!(
"Finished {}: processed {} empty rooms out of {} total, {} tokens {}, errors: {}",
if dry_run {
"purge simulation"
} else {
"purging sync tokens"
},
empty_rooms_processed,
total_rooms,
total_tokens_deleted,
action,
error_count
);
let mode_msg = if dry_run { "DRY RUN: " } else { "" };
self.write_str(&format!(
"{}Successfully processed {empty_rooms_processed} empty rooms (out of {total_rooms} \
total rooms), {total_tokens_deleted} tokens {}. Skipped {skipped_rooms} rooms based on \
filters. Failed for {error_count} rooms.",
mode_msg,
if dry_run { "would be deleted" } else { "deleted" }
))
.await
}

View file

@@ -5,8 +5,9 @@ mod info;
mod moderation;
use clap::Subcommand;
use commands::RoomTargetOption;
use conduwuit::Result;
use ruma::OwnedRoomId;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId};
use self::{
alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand,
@@ -56,4 +57,28 @@ pub(super) enum RoomCommand {
Exists {
room_id: OwnedRoomId,
},
/// - Delete all sync tokens for a room
PurgeSyncTokens {
/// Room ID or alias to purge sync tokens for
#[arg(value_parser)]
room: OwnedRoomOrAliasId,
},
/// - Delete sync tokens for all rooms that have no local users
///
/// By default, processes all empty rooms.
PurgeEmptyRoomTokens {
/// Confirm you want to delete tokens from potentially many rooms
#[arg(long)]
yes: bool,
/// Target specific room types
#[arg(long, value_enum)]
target_option: Option<RoomTargetOption>,
/// Perform a dry run without actually deleting any tokens
#[arg(long)]
dry_run: bool,
},
}
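Assuming clap's default kebab-case naming for subcommands and value enums, the new admin commands would be invoked roughly as follows from the admin room (room ID and flag values are illustrative):

rooms purge-sync-tokens !someroom:example.com
rooms purge-empty-room-tokens --dry-run
rooms purge-empty-room-tokens --yes --target-option banned-only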

View file

@@ -1,5 +1,6 @@
use axum::{Json, extract::State, response::IntoResponse};
use conduwuit::{Error, Result};
use futures::StreamExt;
use ruma::api::client::{
discovery::{
discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},
@@ -17,7 +18,7 @@ pub(crate) async fn well_known_client(
State(services): State<crate::State>,
_body: Ruma<discover_homeserver::Request>,
) -> Result<discover_homeserver::Response> {
let client_url = match services.server.config.well_known.client.as_ref() {
let client_url = match services.config.well_known.client.as_ref() {
| Some(url) => url.to_string(),
| None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
};
@@ -33,44 +34,63 @@ pub(crate) async fn well_known_client(
/// # `GET /.well-known/matrix/support`
///
/// Server support contact and support page of a homeserver's domain.
/// Implements MSC1929 (homeserver admin contact and support page discovery).
/// If no configuration is set, uses admin users as contacts.
pub(crate) async fn well_known_support(
State(services): State<crate::State>,
_body: Ruma<discover_support::Request>,
) -> Result<discover_support::Response> {
let support_page = services
.server
.config
.well_known
.support_page
.as_ref()
.map(ToString::to_string);
let role = services.server.config.well_known.support_role.clone();
// support page or role must be either defined for this to be valid
if support_page.is_none() && role.is_none() {
return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
}
let email_address = services.server.config.well_known.support_email.clone();
let matrix_id = services.server.config.well_known.support_mxid.clone();
// if a role is specified, an email address or matrix id is required
if role.is_some() && (email_address.is_none() && matrix_id.is_none()) {
return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
}
let email_address = services.config.well_known.support_email.clone();
let matrix_id = services.config.well_known.support_mxid.clone();
// TODO: support defining multiple contacts in the config
let mut contacts: Vec<Contact> = vec![];
if let Some(role) = role {
let contact = Contact { role, email_address, matrix_id };
let role_value = services
.config
.well_known
.support_role
.clone()
.unwrap_or_else(|| "m.role.admin".to_owned().into());
contacts.push(contact);
// Add configured contact if at least one contact method is specified
if email_address.is_some() || matrix_id.is_some() {
contacts.push(Contact {
role: role_value.clone(),
email_address: email_address.clone(),
matrix_id: matrix_id.clone(),
});
}
// Try to add admin users as contacts if no contacts are configured
if contacts.is_empty() {
if let Ok(admin_room) = services.admin.get_admin_room().await {
let admin_users = services.rooms.state_cache.room_members(&admin_room);
let mut stream = admin_users;
while let Some(user_id) = stream.next().await {
// Skip server user
if *user_id == services.globals.server_user {
break;
}
contacts.push(Contact {
role: role_value.clone(),
email_address: None,
matrix_id: Some(user_id.to_owned()),
});
}
}
}
// Either the support page or contacts must be defined for this to be valid
if contacts.is_empty() && support_page.is_none() {
// No admin room, no configured contacts, and no support page
return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
}
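Once contacts are populated (from the config or from admin-room members), the endpoint serves an MSC1929 support document roughly along these lines; a sketch with illustrative values:

{
  "contacts": [
    { "matrix_id": "@admin:example.com", "role": "m.role.admin" }
  ],
  "support_page": "https://example.com/support"
}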
@@ -84,9 +104,9 @@ pub(crate) async fn well_known_support(
pub(crate) async fn syncv3_client_server_json(
State(services): State<crate::State>,
) -> Result<impl IntoResponse> {
let server_url = match services.server.config.well_known.client.as_ref() {
let server_url = match services.config.well_known.client.as_ref() {
| Some(url) => url.to_string(),
| None => match services.server.config.well_known.server.as_ref() {
| None => match services.config.well_known.server.as_ref() {
| Some(url) => url.to_string(),
| None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
},

View file

@@ -1897,12 +1897,28 @@ pub struct WellKnownConfig {
/// example: "matrix.example.com:443"
pub server: Option<OwnedServerName>,
/// URL to a support page for the server, which will be served as part of
/// the MSC1929 server support endpoint at /.well-known/matrix/support.
/// Will be included alongside any contact information.
pub support_page: Option<Url>,
/// Role string for server support contacts, to be served as part of the
/// MSC1929 server support endpoint at /.well-known/matrix/support.
///
/// default: "m.role.admin"
pub support_role: Option<ContactRole>,
/// Email address for server support contacts, to be served as part of the
/// MSC1929 server support endpoint.
/// This will be used along with support_mxid if specified.
pub support_email: Option<String>,
/// Matrix ID for server support contacts, to be served as part of the
/// MSC1929 server support endpoint.
/// This will be used along with support_email if specified.
///
/// If no email or mxid is specified, all of the server's admins will be
/// listed.
pub support_mxid: Option<OwnedUserId>,
}

View file

@@ -127,3 +127,63 @@ pub async fn get_token_shortstatehash(
.await
.deserialized()
}
/// Count how many sync tokens exist for a room without deleting them
///
/// This is useful for dry runs to see how many tokens would be deleted
#[implement(Service)]
pub async fn count_room_tokens(&self, room_id: &RoomId) -> Result<usize> {
use futures::TryStreamExt;
let shortroomid = self.services.short.get_shortroomid(room_id).await?;
// Create a prefix to search by - all entries for this room will start with its
// short ID
let prefix = &[shortroomid];
// Collect all keys into a Vec and count them
let keys = self
.db
.roomsynctoken_shortstatehash
.keys_prefix_raw(prefix)
.map_ok(|_| ()) // We only need to count, not store the keys
.try_collect::<Vec<_>>()
.await?;
Ok(keys.len())
}
/// Delete all sync tokens associated with a room
///
/// This helps clean up the database as these tokens are never otherwise removed
#[implement(Service)]
pub async fn delete_room_tokens(&self, room_id: &RoomId) -> Result<usize> {
use futures::TryStreamExt;
let shortroomid = self.services.short.get_shortroomid(room_id).await?;
// Create a prefix to search by - all entries for this room will start with its
// short ID
let prefix = &[shortroomid];
// Collect all keys into a Vec first, then delete them
let keys = self
.db
.roomsynctoken_shortstatehash
.keys_prefix_raw(prefix)
.map_ok(|key| {
// Clone the key since we can't store references in the Vec
Vec::from(key)
})
.try_collect::<Vec<_>>()
.await?;
// Delete each key individually
for key in &keys {
self.db.roomsynctoken_shortstatehash.del(key);
}
let count = keys.len();
Ok(count)
}