Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git (synced 2025-06-30 19:24:29 +02:00)
Compare commits: 32c83a89fb ... 12e2706eff (18 commits)

Commits:
12e2706eff
986876c812
ae3858163f
a173f7c091
0d8a8bf02e
5d44653e3a
44e60d0ea6
d7514178ab
1d45e0b68c
3c44dccd65
b57be072c7
ea5dc8e09d
b9d60c64e5
94ae824149
640714922b
2b268fdaf3
e8d823a653
0ba77674c7
15 changed files with 475 additions and 17 deletions
@@ -19,11 +19,20 @@ outputs:
   rustc_version:
     description: The rustc version installed
     value: ${{ steps.rustc-version.outputs.version }}
+  rustup_version:
+    description: The rustup version installed
+    value: ${{ steps.rustup-version.outputs.version }}

 runs:
   using: composite
   steps:
+    - name: Check if rustup is already installed
+      shell: bash
+      id: rustup-version
+      run: |
+        echo "version=$(rustup --version)" >> $GITHUB_OUTPUT
     - name: Cache rustup toolchains
+      if: steps.rustup-version.outputs.version == ''
       uses: actions/cache@v3
       with:
         path: |
@@ -33,6 +42,7 @@ runs:
         # Requires repo to be cloned if toolchain is not specified
         key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
     - name: Install Rust toolchain
+      if: steps.rustup-version.outputs.version == ''
       shell: bash
       run: |
         if ! command -v rustup &> /dev/null ; then
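Note: the new rustup-version probe makes the later steps conditional. Both the cache restore and the toolchain install now carry `if: steps.rustup-version.outputs.version == ''`, so they are skipped when the runner already ships rustup.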
@@ -57,7 +57,6 @@ jobs:

   build-image:
     runs-on: dind
-    container: ghcr.io/catthehacker/ubuntu:act-latest
     needs: define-variables
     permissions:
       contents: read
@@ -181,14 +180,14 @@ jobs:
           file: "docker/Dockerfile"
           build-args: |
             GIT_COMMIT_HASH=${{ github.sha }})
-            GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }})
+            GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
             GIT_REMOTE_URL=${{github.event.repository.html_url }}
             GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
           platforms: ${{ matrix.platform }}
           labels: ${{ steps.meta.outputs.labels }}
           annotations: ${{ steps.meta.outputs.annotations }}
           cache-from: type=gha
-          cache-to: type=gha,mode=max
+          # cache-to: type=gha,mode=max
           sbom: true
           outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
           env:
@@ -211,7 +210,6 @@ jobs:

   merge:
     runs-on: dind
-    container: ghcr.io/catthehacker/ubuntu:act-latest
     needs: [define-variables, build-image]
     steps:
       - name: Download digests
SECURITY.md (new file, 63 lines)
@@ -0,0 +1,63 @@
+# Security Policy for Continuwuity
+
+This document outlines the security policy for Continuwuity. Our goal is to maintain a secure platform for all users, and we take security matters seriously.
+
+## Supported Versions
+
+We provide security updates for the following versions of Continuwuity:
+
+| Version        | Supported |
+| -------------- |:---------:|
+| Latest release |     ✅     |
+| Main branch    |     ✅     |
+| Older releases |     ❌     |
+
+We may backport fixes to the previous release at our discretion, but we don't guarantee this.
+
+## Reporting a Vulnerability
+
+### Responsible Disclosure
+
+We appreciate the efforts of security researchers and the community in identifying and reporting vulnerabilities. To ensure that potential vulnerabilities are addressed properly, please follow these guidelines:
+
+1. **Contact members of the team directly** over E2EE private message.
+   - [@jade:ellis.link](https://matrix.to/#/@jade:ellis.link)
+   - [@nex:nexy7574.co.uk](https://matrix.to/#/@nex:nexy7574.co.uk) <!-- ? -->
+2. **Email the security team** at [security@continuwuity.org](mailto:security@continuwuity.org). This is not E2EE, so don't include sensitive details.
+3. **Do not disclose the vulnerability publicly** until it has been addressed.
+4. **Provide detailed information** about the vulnerability, including:
+   - A clear description of the issue
+   - Steps to reproduce
+   - Potential impact
+   - Any possible mitigations
+   - Version(s) affected, including specific commits if possible
+
+If you have any doubts about a potential security vulnerability, contact us via private channels first! We'd prefer that you bother us, instead of having a vulnerability disclosed without a fix.
+
+### What to Expect
+
+When you report a security vulnerability:
+
+1. **Acknowledgment**: We will acknowledge receipt of your report.
+2. **Assessment**: We will assess the vulnerability and determine its impact on our users.
+3. **Updates**: We will provide updates on our progress in addressing the vulnerability, and may request your help testing mitigations.
+4. **Resolution**: Once resolved, we will notify you and discuss coordinated disclosure.
+5. **Credit**: We will recognize your contribution (unless you prefer to remain anonymous).
+
+## Security Update Process
+
+When security vulnerabilities are identified:
+
+1. We will develop and test fixes in a private fork.
+2. Security updates will be released as soon as possible.
+3. Release notes will include information about the vulnerabilities, avoiding details that could facilitate exploitation where possible.
+4. Critical security updates may be backported to the previous stable release.
+
+## Additional Resources
+
+- [Matrix Security Disclosure Policy](https://matrix.org/security-disclosure-policy/)
+- [Continuwuity Documentation](https://continuwuity.org/introduction)
+
+---
+
+This security policy was last updated on May 25, 2025.
@@ -20,3 +20,4 @@
 - [Testing](development/testing.md)
 - [Hot Reloading ("Live" Development)](development/hot_reload.md)
 - [Community (and Guidelines)](community.md)
+- [Security](security.md)
docs/security.md (new file, 1 line)
@@ -0,0 +1 @@
+{{#include ../SECURITY.md}}
@@ -125,13 +125,13 @@ pub(super) enum DebugCommand {
 		reset: bool,
 	},

-	/// - Verify json signatures
+	/// - Sign JSON blob
 	///
 	/// This command needs a JSON blob provided in a Markdown code block below
 	/// the command.
 	SignJson,

-	/// - Verify json signatures
+	/// - Verify JSON signatures
 	///
 	/// This command needs a JSON blob provided in a Markdown code block below
 	/// the command.
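A hedged invocation sketch, assuming clap maps the variants to kebab-case: send `!admin debug sign-json` or `!admin debug verify-json`, with the JSON blob in a Markdown code block on the lines below the command, as the doc comments describe.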
@@ -1,6 +1,6 @@
 use conduwuit::{Err, Result};
 use futures::StreamExt;
-use ruma::OwnedRoomId;
+use ruma::{OwnedRoomId, OwnedRoomOrAliasId};

 use crate::{PAGE_SIZE, admin_command, get_room_info};
@@ -66,3 +66,185 @@ pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result {

 	self.write_str(&format!("{result}")).await
 }
+
+#[admin_command]
+pub(super) async fn purge_sync_tokens(&self, room: OwnedRoomOrAliasId) -> Result {
+	// Resolve the room ID from the room or alias ID
+	let room_id = self.services.rooms.alias.resolve(&room).await?;
+
+	// Delete all tokens for this room using the service method
+	let Ok(deleted_count) = self.services.rooms.user.delete_room_tokens(&room_id).await else {
+		return Err!("Failed to delete sync tokens for room {}", room_id.as_str());
+	};
+
+	self.write_str(&format!(
+		"Successfully deleted {deleted_count} sync tokens for room {}",
+		room_id.as_str()
+	))
+	.await
+}
+
+/// Target options for room purging
+#[derive(Default, Debug, clap::ValueEnum, Clone)]
+pub(crate) enum RoomTargetOption {
+	#[default]
+	/// Target all rooms
+	All,
+	/// Target only disabled rooms
+	DisabledOnly,
+	/// Target only banned rooms
+	BannedOnly,
+}
+
+#[admin_command]
+pub(super) async fn purge_all_sync_tokens(
+	&self,
+	target_option: Option<RoomTargetOption>,
+	execute: bool,
+) -> Result {
+	use conduwuit::{debug, info};
+
+	let mode = if !execute { "Simulating" } else { "Starting" };
+
+	// strictly, we should check if these reach the max value after the loop and
+	// warn the user that the count is too large
+	let mut total_rooms_checked: usize = 0;
+	let mut total_tokens_deleted: usize = 0;
+	let mut error_count: u32 = 0;
+	let mut skipped_rooms: usize = 0;
+
+	info!("{} purge of sync tokens", mode);
+
+	// Get all rooms in the server
+	let all_rooms = self
+		.services
+		.rooms
+		.metadata
+		.iter_ids()
+		.collect::<Vec<_>>()
+		.await;
+
+	info!("Found {} rooms total on the server", all_rooms.len());
+
+	// Filter rooms based on options
+	let mut rooms = Vec::new();
+	for room_id in all_rooms {
+		if let Some(target) = &target_option {
+			match target {
+				| RoomTargetOption::DisabledOnly => {
+					if !self.services.rooms.metadata.is_disabled(room_id).await {
+						debug!("Skipping room {} as it's not disabled", room_id.as_str());
+						skipped_rooms = skipped_rooms.saturating_add(1);
+						continue;
+					}
+				},
+				| RoomTargetOption::BannedOnly => {
+					if !self.services.rooms.metadata.is_banned(room_id).await {
+						debug!("Skipping room {} as it's not banned", room_id.as_str());
+						skipped_rooms = skipped_rooms.saturating_add(1);
+						continue;
+					}
+				},
+				| RoomTargetOption::All => {},
+			}
+		}
+
+		rooms.push(room_id);
+	}
+
+	// Total number of rooms we'll be checking
+	let total_rooms = rooms.len();
+	info!(
+		"Processing {} rooms after filtering (skipped {} rooms)",
+		total_rooms, skipped_rooms
+	);
+
+	// Process each room
+	for room_id in rooms {
+		total_rooms_checked = total_rooms_checked.saturating_add(1);
+
+		// Log progress periodically
+		if total_rooms_checked % 100 == 0 || total_rooms_checked == total_rooms {
+			info!(
+				"Progress: {}/{} rooms checked, {} tokens {}",
+				total_rooms_checked,
+				total_rooms,
+				total_tokens_deleted,
+				if !execute { "would be deleted" } else { "deleted" }
+			);
+		}
+
+		// In dry run mode, just count what would be deleted, don't actually delete
+		debug!(
+			"Room {}: {}",
+			room_id.as_str(),
+			if !execute {
+				"would purge sync tokens"
+			} else {
+				"purging sync tokens"
+			}
+		);
+
+		if !execute {
+			// For dry run mode, count tokens without deleting
+			match self.services.rooms.user.count_room_tokens(room_id).await {
+				| Ok(count) =>
+					if count > 0 {
+						debug!(
+							"Would delete {} sync tokens for room {}",
+							count,
+							room_id.as_str()
+						);
+						total_tokens_deleted = total_tokens_deleted.saturating_add(count);
+					} else {
+						debug!("No sync tokens found for room {}", room_id.as_str());
+					},
+				| Err(e) => {
+					debug!("Error counting sync tokens for room {}: {:?}", room_id.as_str(), e);
+					error_count = error_count.saturating_add(1);
+				},
+			}
+		} else {
+			// Real deletion mode
+			match self.services.rooms.user.delete_room_tokens(room_id).await {
+				| Ok(count) =>
+					if count > 0 {
+						debug!("Deleted {} sync tokens for room {}", count, room_id.as_str());
+						total_tokens_deleted = total_tokens_deleted.saturating_add(count);
+					} else {
+						debug!("No sync tokens found for room {}", room_id.as_str());
+					},
+				| Err(e) => {
+					debug!("Error purging sync tokens for room {}: {:?}", room_id.as_str(), e);
+					error_count = error_count.saturating_add(1);
+				},
+			}
+		}
+	}
+
+	let action = if !execute { "would be deleted" } else { "deleted" };
+	info!(
+		"Finished {}: checked {} rooms out of {} total, {} tokens {}, errors: {}",
+		if !execute {
+			"purge simulation"
+		} else {
+			"purging sync tokens"
+		},
+		total_rooms_checked,
+		total_rooms,
+		total_tokens_deleted,
+		action,
+		error_count
+	);
+
+	self.write_str(&format!(
+		"Finished {}: checked {} rooms out of {} total, {} tokens {}, errors: {}",
+		if !execute { "simulation" } else { "purging sync tokens" },
+		total_rooms_checked,
+		total_rooms,
+		total_tokens_deleted,
+		action,
+		error_count
+	))
+	.await
+}
@@ -5,8 +5,9 @@ mod info;
 mod moderation;

 use clap::Subcommand;
+use commands::RoomTargetOption;
 use conduwuit::Result;
-use ruma::OwnedRoomId;
+use ruma::{OwnedRoomId, OwnedRoomOrAliasId};

 use self::{
 	alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand,
@@ -56,4 +57,25 @@ pub(super) enum RoomCommand {
 	Exists {
 		room_id: OwnedRoomId,
 	},
+
+	/// - Delete all sync tokens for a room
+	PurgeSyncTokens {
+		/// Room ID or alias to purge sync tokens for
+		#[arg(value_parser)]
+		room: OwnedRoomOrAliasId,
+	},
+
+	/// - Delete sync tokens for all rooms that have no local users
+	///
+	/// By default, processes all empty rooms.
+	PurgeAllSyncTokens {
+		/// Target specific room types
+		#[arg(long, value_enum)]
+		target_option: Option<RoomTargetOption>,
+
+		/// Execute token deletions. Otherwise,
+		/// performs a dry run without actually deleting any tokens
+		#[arg(long)]
+		execute: bool,
+	},
 }
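A hedged usage sketch of the new subcommands, assuming clap's default kebab-case mapping for names and flags (the `!admin` prefix and the `rooms` path depend on the admin bot setup):

!admin rooms purge-sync-tokens #spam:example.com
!admin rooms purge-all-sync-tokens --target-option banned-only
!admin rooms purge-all-sync-tokens --target-option banned-only --execute

Without `--execute`, `purge-all-sync-tokens` performs a dry run and only reports how many tokens would be deleted; `purge-sync-tokens` always deletes.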
@@ -2162,6 +2162,109 @@ async fn knock_room_by_id_helper(
 		}
 	}
+
+	// For knock_restricted rooms, check if the user meets the restricted conditions
+	// If they do, attempt to join instead of knock
+	// This is not mentioned in the spec, but should be allowable (we're allowed to
+	// auto-join invites to knocked rooms)
+	let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
+	if let JoinRule::KnockRestricted(restricted) = &join_rule {
+		let restriction_rooms: Vec<_> = restricted
+			.allow
+			.iter()
+			.filter_map(|a| match a {
+				| AllowRule::RoomMembership(r) => Some(&r.room_id),
+				| _ => None,
+			})
+			.collect();
+
+		// Check if the user is in any of the allowed rooms
+		let mut user_meets_restrictions = false;
+		for restriction_room_id in &restriction_rooms {
+			if services
+				.rooms
+				.state_cache
+				.is_joined(sender_user, restriction_room_id)
+				.await
+			{
+				user_meets_restrictions = true;
+				break;
+			}
+		}
+
+		// If the user meets the restrictions, try joining instead
+		if user_meets_restrictions {
+			debug_info!(
+				"{sender_user} meets the restricted criteria in knock_restricted room \
+				 {room_id}, attempting to join instead of knock"
+			);
+			// For this case, we need to drop the state lock and get a new one in
+			// join_room_by_id_helper. We need to release the lock here and let
+			// join_room_by_id_helper acquire it again
+			drop(state_lock);
+			match join_room_by_id_helper(
+				services,
+				sender_user,
+				room_id,
+				reason.clone(),
+				servers,
+				None,
+				&None,
+			)
+			.await
+			{
+				| Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
+				| Err(e) => {
+					debug_warn!(
+						"Failed to convert knock to join for {sender_user} in {room_id}: {e:?}"
+					);
+					// Get a new state lock for the remaining knock logic
+					let new_state_lock = services.rooms.state.mutex.lock(room_id).await;
+
+					let server_in_room = services
+						.rooms
+						.state_cache
+						.server_in_room(services.globals.server_name(), room_id)
+						.await;
+
+					let local_knock = server_in_room
+						|| servers.is_empty()
+						|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
+
+					if local_knock {
+						knock_room_helper_local(
+							services,
+							sender_user,
+							room_id,
+							reason,
+							servers,
+							new_state_lock,
+						)
+						.boxed()
+						.await?;
+					} else {
+						knock_room_helper_remote(
+							services,
+							sender_user,
+							room_id,
+							reason,
+							servers,
+							new_state_lock,
+						)
+						.boxed()
+						.await?;
+					}
+
+					return Ok(knock_room::v3::Response::new(room_id.to_owned()));
+				},
+			}
+		}
+	} else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
+		debug_warn!(
+			"{sender_user} attempted to knock on room {room_id} but its join rule is \
+			 {join_rule:?}, not knock or knock_restricted"
+		);
+	}

 	let server_in_room = services
 		.rooms
 		.state_cache
@@ -2209,6 +2312,12 @@ async fn knock_room_helper_local(
 		return Err!(Request(Forbidden("This room does not support knocking.")));
 	}

+	// Verify that this room has a valid knock or knock_restricted join rule
+	let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
+	if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
+		return Err!(Request(Forbidden("This room's join rule does not allow knocking.")));
+	}
+
 	let content = RoomMemberEventContent {
 		displayname: services.users.displayname(sender_user).await.ok(),
 		avatar_url: services.users.avatar_url(sender_user).await.ok(),
@@ -79,12 +79,12 @@ fn main() {

 	// --- Rerun Triggers ---
 	// TODO: The git rerun triggers seem to always run
-	// Rerun if the git HEAD changes
-	println!("cargo:rerun-if-changed=.git/HEAD");
-	// Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
-	if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"]) {
-		println!("cargo:rerun-if-changed=.git/{ref_path}");
-	}
+	// // Rerun if the git HEAD changes
+	// println!("cargo:rerun-if-changed=.git/HEAD");
+	// // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
+	// if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"])
+	// { println!("cargo:rerun-if-changed=.git/{ref_path}");
+	// }

 	println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH");
 	println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH_SHORT");
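For context, a minimal sketch of what the `run_git_command` helper referenced above might look like. This is a hypothetical reconstruction (the real helper is defined elsewhere in build.rs), shown only so the commented-out trigger reads on its own:

use std::process::Command;

/// Hypothetical sketch: run `git <args>` and return trimmed stdout on success.
fn run_git_command(args: &[&str]) -> Option<String> {
	let output = Command::new("git").args(args).output().ok()?;
	if !output.status.success() {
		return None;
	}
	let stdout = String::from_utf8(output.stdout).ok()?;
	let trimmed = stdout.trim();
	if trimmed.is_empty() {
		None
	} else {
		Some(trimmed.to_owned())
	}
}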
@@ -219,6 +219,15 @@ pub fn check(config: &Config) -> Result {
 	));
 	}

+	// Check if support contact information is configured
+	if config.well_known.support_email.is_none() && config.well_known.support_mxid.is_none() {
+		warn!(
+			"No support contact information (support_email or support_mxid) is configured in \
+			 the well_known section. Users in the admin room will be automatically listed as \
+			 support contacts in the /.well-known/matrix/support endpoint."
+		);
+	}
+
 	if config
 		.url_preview_domain_contains_allowlist
 		.contains(&"*".to_owned())
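To silence the warning, set `support_email` and/or `support_mxid` in the `well_known` section of the server config; the field names are taken from the check above, and the exact section path in the config file is an assumption.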
@@ -638,7 +638,7 @@ fn valid_membership_change(
 		warn!(?target_user_membership_event_id, "Banned user can't join");
 		false
 	} else if (join_rules == JoinRule::Invite
-		|| room_version.allow_knocking && join_rules == JoinRule::Knock)
+		|| room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_))))
 	// If the join_rule is invite then allow if membership state is invite or join
 		&& (target_user_current_membership == MembershipState::Join
 			|| target_user_current_membership == MembershipState::Invite)
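In effect, a user whose current membership is invite (or join) can now pass membership validation in rooms whose join rule is knock_restricted, not just invite or plain knock.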
@@ -21,7 +21,10 @@ pub use ::toml;
 pub use ::tracing;
 pub use config::Config;
 pub use error::Error;
-pub use info::{rustc_flags_capture, version, version::version};
+pub use info::{
+	rustc_flags_capture, version,
+	version::{name, version},
+};
 pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res};
 pub use server::Server;
 pub use utils::{ctor, dtor, implement, result, result::Result};
@@ -15,7 +15,7 @@ use conduwuit_core::{
 #[clap(
 	about,
 	long_about = None,
-	name = "conduwuit",
+	name = conduwuit_core::name(),
 	version = conduwuit_core::version(),
 )]
 pub(crate) struct Args {
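Paired with the new `name` export from the lib.rs hunk above, the CLI now reports the build's own configured name rather than the hard-coded "conduwuit" string.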
@@ -127,3 +127,63 @@ pub async fn get_token_shortstatehash(
 	.await
 	.deserialized()
 }
+
+/// Count how many sync tokens exist for a room without deleting them
+///
+/// This is useful for dry runs to see how many tokens would be deleted
+#[implement(Service)]
+pub async fn count_room_tokens(&self, room_id: &RoomId) -> Result<usize> {
+	use futures::TryStreamExt;
+
+	let shortroomid = self.services.short.get_shortroomid(room_id).await?;
+
+	// Create a prefix to search by - all entries for this room will start with its
+	// short ID
+	let prefix = &[shortroomid];
+
+	// Collect all keys into a Vec and count them
+	let keys = self
+		.db
+		.roomsynctoken_shortstatehash
+		.keys_prefix_raw(prefix)
+		.map_ok(|_| ()) // We only need to count, not store the keys
+		.try_collect::<Vec<_>>()
+		.await?;
+
+	Ok(keys.len())
+}
+
+/// Delete all sync tokens associated with a room
+///
+/// This helps clean up the database as these tokens are never otherwise removed
+#[implement(Service)]
+pub async fn delete_room_tokens(&self, room_id: &RoomId) -> Result<usize> {
+	use futures::TryStreamExt;
+
+	let shortroomid = self.services.short.get_shortroomid(room_id).await?;
+
+	// Create a prefix to search by - all entries for this room will start with its
+	// short ID
+	let prefix = &[shortroomid];
+
+	// Collect all keys into a Vec first, then delete them
+	let keys = self
+		.db
+		.roomsynctoken_shortstatehash
+		.keys_prefix_raw(prefix)
+		.map_ok(|key| {
+			// Clone the key since we can't store references in the Vec
+			Vec::from(key)
+		})
+		.try_collect::<Vec<_>>()
+		.await?;
+
+	// Delete each key individually
+	for key in &keys {
+		self.db.roomsynctoken_shortstatehash.del(key);
+	}
+
+	let count = keys.len();
+
+	Ok(count)
+}
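Note: `delete_room_tokens` collects every key under the room's short-ID prefix and then issues one `del` per key; there is no ranged or batched delete here, so purging a very large room costs one write per token. A prefix range delete, if the database layer offers one, would be the obvious optimisation.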