Compare commits

...

18 commits

Author SHA1 Message Date
Jade Ellis
12e2706eff
fix: Use to_str methods on room IDs
Some checks failed
Release Docker Image / define-variables (push) Failing after 1s
Release Docker Image / build-image (linux/amd64, linux-amd64) (push) Has been skipped
Release Docker Image / build-image (linux/arm64, linux-arm64) (push) Has been skipped
Release Docker Image / merge (push) Has been skipped
Rust Checks / Format (push) Failing after 4s
Rust Checks / Clippy (push) Failing after 8s
Rust Checks / Cargo Test (push) Failing after 12s
2025-06-14 19:36:25 +01:00
Jade Ellis
986876c812
chore: cleanup 2025-06-14 19:36:24 +01:00
Jade Ellis
ae3858163f
chore: Fix more complicated clippy warnings 2025-06-14 19:36:24 +01:00
Jade Ellis
a173f7c091
feat: Add command to purge sync tokens for empty rooms 2025-06-14 19:36:24 +01:00
Jade Ellis
0d8a8bf02e
feat: Add admin command to delete sync tokens from a room 2025-06-14 19:36:22 +01:00
Jade Ellis
5d44653e3a
fix: Incorrect command descriptions
Some checks failed
Documentation / Build and Deploy Documentation (push) Failing after 2s
Release Docker Image / define-variables (push) Failing after 1s
Release Docker Image / build-image (linux/amd64, linux-amd64) (push) Has been skipped
Release Docker Image / build-image (linux/arm64, linux-arm64) (push) Has been skipped
Release Docker Image / merge (push) Has been skipped
Rust Checks / Format (push) Failing after 1s
Rust Checks / Clippy (push) Failing after 32s
Rust Checks / Cargo Test (push) Failing after 10s
2025-06-14 16:51:24 +01:00
Jade Ellis
44e60d0ea6
docs: Tiny phrasing changes to the security policy
Some checks failed
Documentation / Build and Deploy Documentation (push) Failing after 3s
Rust Checks / Format (push) Failing after 15s
Rust Checks / Clippy (push) Failing after 12s
Rust Checks / Cargo Test (push) Failing after 8s
2025-06-14 16:34:58 +01:00
Jade Ellis
d7514178ab
ci: Fix extra bracket in commit shorthash
Some checks failed
Release Docker Image / define-variables (push) Failing after 1s
Release Docker Image / build-image (linux/amd64, linux-amd64) (push) Has been skipped
Release Docker Image / build-image (linux/arm64, linux-arm64) (push) Has been skipped
Release Docker Image / merge (push) Has been skipped
Documentation / Build and Deploy Documentation (push) Failing after 3s
Rust Checks / Format (push) Failing after 1s
Rust Checks / Clippy (push) Failing after 12s
Rust Checks / Cargo Test (push) Failing after 8s
2025-06-13 14:30:26 +01:00
Jade Ellis
1d45e0b68c
feat: Add warning when admin users will be exposed as support contacts
Some checks failed
Documentation / Build and Deploy Documentation (push) Failing after 8s
Release Docker Image / define-variables (push) Failing after 1s
Release Docker Image / build-image (linux/amd64, linux-amd64) (push) Has been skipped
Release Docker Image / build-image (linux/arm64, linux-arm64) (push) Has been skipped
Release Docker Image / merge (push) Has been skipped
Rust Checks / Format (push) Failing after 20s
Rust Checks / Clippy (push) Failing after 21s
Rust Checks / Cargo Test (push) Failing after 10s
2025-06-13 13:39:50 +01:00
Jade Ellis
3c44dccd65
ci: HACK, disable saving to actions cache
Some checks failed
Documentation / Build and Deploy Documentation (push) Failing after 3s
Release Docker Image / define-variables (push) Failing after 1s
Release Docker Image / build-image (linux/amd64, linux-amd64) (push) Has been skipped
Release Docker Image / build-image (linux/arm64, linux-arm64) (push) Has been skipped
Release Docker Image / merge (push) Has been skipped
Rust Checks / Format (push) Failing after 1s
Rust Checks / Clippy (push) Failing after 9s
Rust Checks / Cargo Test (push) Failing after 9s
2025-05-26 19:16:50 +01:00
Jade Ellis
b57be072c7
build: Don't rerun on git changes 2025-05-26 19:16:05 +01:00
Jade Ellis
ea5dc8e09d
fix: Use correct brand in clap version string 2025-05-26 19:16:05 +01:00
Jade Ellis
b9d60c64e5
ci: Don't specify container for image builder 2025-05-26 19:16:04 +01:00
Jade Ellis
94ae824149
ci: Don't install rustup if it's already there 2025-05-26 19:16:03 +01:00
Jade Ellis
640714922b
feat: For knock_restricted rooms, automatically join rooms we meet restrictions for rather than knocking
2025-05-26 19:16:03 +01:00
Jade Ellis
2b268fdaf3
fix: Allow joining via invite for knock_restricted rooms 2025-05-26 19:16:01 +01:00
Jade Ellis
e8d823a653
docs: Apply feedback on security policy
Some checks failed
Rust Checks / Format (push) Failing after 7s
Rust Checks / Clippy (push) Failing after 18s
Rust Checks / Cargo Test (push) Failing after 9s
2025-05-26 15:01:58 +01:00
Jade Ellis
0ba77674c7
docs: Security policy 2025-05-25 00:36:28 +01:00
15 changed files with 475 additions and 17 deletions


@@ -19,11 +19,20 @@ outputs:
rustc_version:
description: The rustc version installed
value: ${{ steps.rustc-version.outputs.version }}
rustup_version:
description: The rustup version installed
value: ${{ steps.rustup-version.outputs.version }}
runs:
using: composite
steps:
- name: Check if rustup is already installed
shell: bash
id: rustup-version
run: |
echo "version=$(rustup --version)" >> $GITHUB_OUTPUT
- name: Cache rustup toolchains
if: steps.rustup-version.outputs.version == ''
uses: actions/cache@v3
with:
path: |
@@ -33,6 +42,7 @@ runs:
# Requires repo to be cloned if toolchain is not specified
key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
- name: Install Rust toolchain
if: steps.rustup-version.outputs.version == ''
shell: bash
run: |
if ! command -v rustup &> /dev/null ; then


@@ -57,7 +57,6 @@ jobs:
build-image:
runs-on: dind
container: ghcr.io/catthehacker/ubuntu:act-latest
needs: define-variables
permissions:
contents: read
@@ -181,14 +180,14 @@ jobs:
file: "docker/Dockerfile"
build-args: |
GIT_COMMIT_HASH=${{ github.sha }})
GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }})
GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
GIT_REMOTE_URL=${{github.event.repository.html_url }}
GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
platforms: ${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
cache-from: type=gha
cache-to: type=gha,mode=max
# cache-to: type=gha,mode=max
sbom: true
outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
env:
@@ -211,7 +210,6 @@ jobs:
merge:
runs-on: dind
container: ghcr.io/catthehacker/ubuntu:act-latest
needs: [define-variables, build-image]
steps:
- name: Download digests

SECURITY.md (new file, 63 lines)

@@ -0,0 +1,63 @@
# Security Policy for Continuwuity
This document outlines the security policy for Continuwuity. Our goal is to maintain a secure platform for all users, and we take security matters seriously.
## Supported Versions
We provide security updates for the following versions of Continuwuity:
| Version | Supported |
| -------------- |:----------------:|
| Latest release | ✅ |
| Main branch | ✅ |
| Older releases | ❌ |
We may backport fixes to the previous release at our discretion, but we don't guarantee this.
## Reporting a Vulnerability
### Responsible Disclosure
We appreciate the efforts of security researchers and the community in identifying and reporting vulnerabilities. To ensure that potential vulnerabilities are addressed properly, please follow these guidelines:
1. **Contact members of the team directly** over E2EE private message.
- [@jade:ellis.link](https://matrix.to/#/@jade:ellis.link)
- [@nex:nexy7574.co.uk](https://matrix.to/#/@nex:nexy7574.co.uk) <!-- ? -->
2. **Email the security team** at [security@continuwuity.org](mailto:security@continuwuity.org). This is not E2EE, so don't include sensitive details.
3. **Do not disclose the vulnerability publicly** until it has been addressed.
4. **Provide detailed information** about the vulnerability, including:
- A clear description of the issue
- Steps to reproduce
- Potential impact
- Any possible mitigations
- Version(s) affected, including specific commits if possible
If you have any doubts about a potential security vulnerability, contact us via private channels first! We'd much rather you bother us than have a vulnerability disclosed without a fix.
### What to Expect
When you report a security vulnerability:
1. **Acknowledgment**: We will acknowledge receipt of your report.
2. **Assessment**: We will assess the vulnerability and determine its impact on our users.
3. **Updates**: We will provide updates on our progress in addressing the vulnerability, and may request your help testing mitigations.
4. **Resolution**: Once resolved, we will notify you and discuss coordinated disclosure.
5. **Credit**: We will recognize your contribution (unless you prefer to remain anonymous).
## Security Update Process
When security vulnerabilities are identified:
1. We will develop and test fixes in a private fork.
2. Security updates will be released as soon as possible.
3. Release notes will include information about the vulnerabilities, avoiding details that could facilitate exploitation where possible.
4. Critical security updates may be backported to the previous stable release.
## Additional Resources
- [Matrix Security Disclosure Policy](https://matrix.org/security-disclosure-policy/)
- [Continuwuity Documentation](https://continuwuity.org/introduction)
---
This security policy was last updated on May 25, 2025.


@@ -20,3 +20,4 @@
- [Testing](development/testing.md)
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
- [Community (and Guidelines)](community.md)
- [Security](security.md)

docs/security.md (new file, 1 line)

@@ -0,0 +1 @@
{{#include ../SECURITY.md}}
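mdBook's `{{#include}}` directive inlines the repository-root SECURITY.md into the rendered documentation page, so the policy is maintained in a single file and mirrored into the book.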


@@ -125,13 +125,13 @@ pub(super) enum DebugCommand {
reset: bool,
},
/// - Verify json signatures
/// - Sign JSON blob
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.
SignJson,
/// - Verify json signatures
/// - Verify JSON signatures
///
/// This command needs a JSON blob provided in a Markdown code block below
/// the command.


@@ -1,6 +1,6 @@
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::OwnedRoomId;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId};
use crate::{PAGE_SIZE, admin_command, get_room_info};
@@ -66,3 +66,185 @@ pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result {
self.write_str(&format!("{result}")).await
}
#[admin_command]
pub(super) async fn purge_sync_tokens(&self, room: OwnedRoomOrAliasId) -> Result {
// Resolve the room ID from the room or alias ID
let room_id = self.services.rooms.alias.resolve(&room).await?;
// Delete all tokens for this room using the service method
let Ok(deleted_count) = self.services.rooms.user.delete_room_tokens(&room_id).await else {
return Err!("Failed to delete sync tokens for room {}", room_id.as_str());
};
self.write_str(&format!(
"Successfully deleted {deleted_count} sync tokens for room {}",
room_id.as_str()
))
.await
}
/// Target options for room purging
#[derive(Default, Debug, clap::ValueEnum, Clone)]
pub(crate) enum RoomTargetOption {
#[default]
/// Target all rooms
All,
/// Target only disabled rooms
DisabledOnly,
/// Target only banned rooms
BannedOnly,
}
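With `clap::ValueEnum`, the accepted command-line values are derived from the variant names and matched as kebab-case strings (`all`, `disabled-only`, `banned-only`) by default.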
#[admin_command]
pub(super) async fn purge_all_sync_tokens(
&self,
target_option: Option<RoomTargetOption>,
execute: bool,
) -> Result {
use conduwuit::{debug, info};
let mode = if !execute { "Simulating" } else { "Starting" };
// Strictly, we should check whether these reach their max value after the loop
// and warn the user that the count is too large
let mut total_rooms_checked: usize = 0;
let mut total_tokens_deleted: usize = 0;
let mut error_count: u32 = 0;
let mut skipped_rooms: usize = 0;
info!("{} purge of sync tokens", mode);
// Get all rooms in the server
let all_rooms = self
.services
.rooms
.metadata
.iter_ids()
.collect::<Vec<_>>()
.await;
info!("Found {} rooms total on the server", all_rooms.len());
// Filter rooms based on options
let mut rooms = Vec::new();
for room_id in all_rooms {
if let Some(target) = &target_option {
match target {
| RoomTargetOption::DisabledOnly => {
if !self.services.rooms.metadata.is_disabled(room_id).await {
debug!("Skipping room {} as it's not disabled", room_id.as_str());
skipped_rooms = skipped_rooms.saturating_add(1);
continue;
}
},
| RoomTargetOption::BannedOnly => {
if !self.services.rooms.metadata.is_banned(room_id).await {
debug!("Skipping room {} as it's not banned", room_id.as_str());
skipped_rooms = skipped_rooms.saturating_add(1);
continue;
}
},
| RoomTargetOption::All => {},
}
}
rooms.push(room_id);
}
// Total number of rooms we'll be checking
let total_rooms = rooms.len();
info!(
"Processing {} rooms after filtering (skipped {} rooms)",
total_rooms, skipped_rooms
);
// Process each room
for room_id in rooms {
total_rooms_checked = total_rooms_checked.saturating_add(1);
// Log progress periodically
if total_rooms_checked % 100 == 0 || total_rooms_checked == total_rooms {
info!(
"Progress: {}/{} rooms checked, {} tokens {}",
total_rooms_checked,
total_rooms,
total_tokens_deleted,
if !execute { "would be deleted" } else { "deleted" }
);
}
// In dry run mode, just count what would be deleted, don't actually delete
debug!(
"Room {}: {}",
room_id.as_str(),
if !execute {
"would purge sync tokens"
} else {
"purging sync tokens"
}
);
if !execute {
// For dry run mode, count tokens without deleting
match self.services.rooms.user.count_room_tokens(room_id).await {
| Ok(count) =>
if count > 0 {
debug!(
"Would delete {} sync tokens for room {}",
count,
room_id.as_str()
);
total_tokens_deleted = total_tokens_deleted.saturating_add(count);
} else {
debug!("No sync tokens found for room {}", room_id.as_str());
},
| Err(e) => {
debug!("Error counting sync tokens for room {}: {:?}", room_id.as_str(), e);
error_count = error_count.saturating_add(1);
},
}
} else {
// Real deletion mode
match self.services.rooms.user.delete_room_tokens(room_id).await {
| Ok(count) =>
if count > 0 {
debug!("Deleted {} sync tokens for room {}", count, room_id.as_str());
total_tokens_deleted = total_tokens_deleted.saturating_add(count);
} else {
debug!("No sync tokens found for room {}", room_id.as_str());
},
| Err(e) => {
debug!("Error purging sync tokens for room {}: {:?}", room_id.as_str(), e);
error_count = error_count.saturating_add(1);
},
}
}
}
let action = if !execute { "would be deleted" } else { "deleted" };
info!(
"Finished {}: checked {} rooms out of {} total, {} tokens {}, errors: {}",
if !execute {
"purge simulation"
} else {
"purging sync tokens"
},
total_rooms_checked,
total_rooms,
total_tokens_deleted,
action,
error_count
);
self.write_str(&format!(
"Finished {}: checked {} rooms out of {} total, {} tokens {}, errors: {}",
if !execute { "simulation" } else { "purging sync tokens" },
total_rooms_checked,
total_rooms,
total_tokens_deleted,
action,
error_count
))
.await
}
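The counters above use `saturating_add` rather than `+` throughout: if a count ever reached the integer's maximum it would pin there instead of overflowing, which in debug builds would otherwise panic. The comment near the top of the function notes the missing max-value warning as a known gap.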


@@ -5,8 +5,9 @@ mod info;
mod moderation;
use clap::Subcommand;
use commands::RoomTargetOption;
use conduwuit::Result;
use ruma::OwnedRoomId;
use ruma::{OwnedRoomId, OwnedRoomOrAliasId};
use self::{
alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand,
@@ -56,4 +57,25 @@ pub(super) enum RoomCommand {
Exists {
room_id: OwnedRoomId,
},
/// - Delete all sync tokens for a room
PurgeSyncTokens {
/// Room ID or alias to purge sync tokens for
#[arg(value_parser)]
room: OwnedRoomOrAliasId,
},
/// - Delete sync tokens for all rooms that have no local users
///
/// By default, processes all empty rooms.
PurgeAllSyncTokens {
/// Target specific room types
#[arg(long, value_enum)]
target_option: Option<RoomTargetOption>,
/// Execute token deletions. Otherwise, performs a dry run
/// without actually deleting any tokens
#[arg(long)]
execute: bool,
},
}
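For illustration, these would surface in the admin room roughly as `!admin rooms purge-sync-tokens <room ID or alias>` and `!admin rooms purge-all-sync-tokens --target-option disabled-only --execute`, assuming the usual `!admin` prefix and clap's default kebab-case naming; the exact invocation syntax is an assumption, not part of this diff.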


@@ -2162,6 +2162,109 @@ async fn knock_room_by_id_helper(
}
}
// For knock_restricted rooms, check if the user meets the restricted conditions
// If they do, attempt to join instead of knock
// This is not mentioned in the spec, but should be allowable (we're allowed to
// auto-join invites to knocked rooms)
let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
if let JoinRule::KnockRestricted(restricted) = &join_rule {
let restriction_rooms: Vec<_> = restricted
.allow
.iter()
.filter_map(|a| match a {
| AllowRule::RoomMembership(r) => Some(&r.room_id),
| _ => None,
})
.collect();
// Check if the user is in any of the allowed rooms
let mut user_meets_restrictions = false;
for restriction_room_id in &restriction_rooms {
if services
.rooms
.state_cache
.is_joined(sender_user, restriction_room_id)
.await
{
user_meets_restrictions = true;
break;
}
}
// If the user meets the restrictions, try joining instead
if user_meets_restrictions {
debug_info!(
"{sender_user} meets the restricted criteria in knock_restricted room \
{room_id}, attempting to join instead of knock"
);
// For this case, we need to drop the state lock and get a new one in
// join_room_by_id_helper. We need to release the lock here and let
// join_room_by_id_helper acquire it again
drop(state_lock);
match join_room_by_id_helper(
services,
sender_user,
room_id,
reason.clone(),
servers,
None,
&None,
)
.await
{
| Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
| Err(e) => {
debug_warn!(
"Failed to convert knock to join for {sender_user} in {room_id}: {e:?}"
);
// Get a new state lock for the remaining knock logic
let new_state_lock = services.rooms.state.mutex.lock(room_id).await;
let server_in_room = services
.rooms
.state_cache
.server_in_room(services.globals.server_name(), room_id)
.await;
let local_knock = server_in_room
|| servers.is_empty()
|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
if local_knock {
knock_room_helper_local(
services,
sender_user,
room_id,
reason,
servers,
new_state_lock,
)
.boxed()
.await?;
} else {
knock_room_helper_remote(
services,
sender_user,
room_id,
reason,
servers,
new_state_lock,
)
.boxed()
.await?;
}
return Ok(knock_room::v3::Response::new(room_id.to_owned()));
},
}
}
} else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
debug_warn!(
"{sender_user} attempted to knock on room {room_id} but its join rule is \
{join_rule:?}, not knock or knock_restricted"
);
}
let server_in_room = services
.rooms
.state_cache
@@ -2209,6 +2312,12 @@ async fn knock_room_helper_local(
return Err!(Request(Forbidden("This room does not support knocking.")));
}
// Verify that this room has a valid knock or knock_restricted join rule
let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
return Err!(Request(Forbidden("This room's join rule does not allow knocking.")));
}
let content = RoomMemberEventContent {
displayname: services.users.displayname(sender_user).await.ok(),
avatar_url: services.users.avatar_url(sender_user).await.ok(),


@@ -79,12 +79,12 @@ fn main() {
// --- Rerun Triggers ---
// TODO: The git rerun triggers seem to always run
// Rerun if the git HEAD changes
println!("cargo:rerun-if-changed=.git/HEAD");
// Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"]) {
println!("cargo:rerun-if-changed=.git/{ref_path}");
}
// // Rerun if the git HEAD changes
// println!("cargo:rerun-if-changed=.git/HEAD");
// // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
// if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"])
// { println!("cargo:rerun-if-changed=.git/{ref_path}");
// }
println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH");
println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH_SHORT");


@@ -219,6 +219,15 @@ pub fn check(config: &Config) -> Result {
));
}
// Check if support contact information is configured
if config.well_known.support_email.is_none() && config.well_known.support_mxid.is_none() {
warn!(
"No support contact information (support_email or support_mxid) is configured in \
the well_known section. Users in the admin room will be automatically listed as \
support contacts in the /.well-known/matrix/support endpoint."
);
}
if config
.url_preview_domain_contains_allowlist
.contains(&"*".to_owned())


@@ -638,7 +638,7 @@ fn valid_membership_change(
warn!(?target_user_membership_event_id, "Banned user can't join");
false
} else if (join_rules == JoinRule::Invite
|| room_version.allow_knocking && join_rules == JoinRule::Knock)
|| room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_))))
// If the join_rule is invite then allow if membership state is invite or join
&& (target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Invite)
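The widened condition means an invited user can now complete a join in a knock_restricted room just as they already could in a plain knock room; this is the auth-rule side of commit 2b268fdaf3 ("fix: Allow joining via invite for knock_restricted rooms") in the list above.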


@@ -21,7 +21,10 @@ pub use ::toml
pub use ::tracing;
pub use config::Config;
pub use error::Error;
pub use info::{rustc_flags_capture, version, version::version};
pub use info::{
rustc_flags_capture, version,
version::{name, version},
};
pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res};
pub use server::Server;
pub use utils::{ctor, dtor, implement, result, result::Result};


@@ -15,7 +15,7 @@ use conduwuit_core::{
#[clap(
about,
long_about = None,
name = "conduwuit",
name = conduwuit_core::name(),
version = conduwuit_core::version(),
)]
pub(crate) struct Args {


@@ -127,3 +127,63 @@ pub async fn get_token_shortstatehash(
.await
.deserialized()
}
/// Count how many sync tokens exist for a room without deleting them
///
/// This is useful for dry runs to see how many tokens would be deleted
#[implement(Service)]
pub async fn count_room_tokens(&self, room_id: &RoomId) -> Result<usize> {
use futures::TryStreamExt;
let shortroomid = self.services.short.get_shortroomid(room_id).await?;
// Create a prefix to search by - all entries for this room will start with its
// short ID
let prefix = &[shortroomid];
// Collect all keys into a Vec and count them
let keys = self
.db
.roomsynctoken_shortstatehash
.keys_prefix_raw(prefix)
.map_ok(|_| ()) // We only need to count, not store the keys
.try_collect::<Vec<_>>()
.await?;
Ok(keys.len())
}
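Since the stream items are mapped to `()`, the collected `Vec<()>` holds zero-sized elements and tracks only its length, so this counts keys without buffering them.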
/// Delete all sync tokens associated with a room
///
/// This helps clean up the database as these tokens are never otherwise removed
#[implement(Service)]
pub async fn delete_room_tokens(&self, room_id: &RoomId) -> Result<usize> {
use futures::TryStreamExt;
let shortroomid = self.services.short.get_shortroomid(room_id).await?;
// Create a prefix to search by - all entries for this room will start with its
// short ID
let prefix = &[shortroomid];
// Collect all keys into a Vec first, then delete them
let keys = self
.db
.roomsynctoken_shortstatehash
.keys_prefix_raw(prefix)
.map_ok(|key| {
// Clone the key since we can't store references in the Vec
Vec::from(key)
})
.try_collect::<Vec<_>>()
.await?;
// Delete each key individually
for key in &keys {
self.db.roomsynctoken_shortstatehash.del(key);
}
let count = keys.len();
Ok(count)
}
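Together, these two methods back the dry-run/execute split in the admin command above: `count_room_tokens` for simulation, `delete_room_tokens` for the real pass. A minimal sketch of a caller pairing them (the wrapper below is hypothetical, not part of this diff, and assumes the user service's `Service` type is in scope):

use conduwuit::Result;
use ruma::RoomId;

// Hypothetical wrapper mirroring the admin command's --execute flag:
// count on a dry run, delete otherwise. Returns the token count either way.
pub async fn purge_room_tokens(
    service: &Service,
    room_id: &RoomId,
    execute: bool,
) -> Result<usize> {
    if !execute {
        // Dry run: report how many tokens would be removed
        return service.count_room_tokens(room_id).await;
    }
    service.delete_room_tokens(room_id).await
}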