Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git (synced 2025-07-05 17:25:47 +02:00)

Compare commits: dd372f77eb...f840372c92 (14 commits)
Commits:
- f840372c92
- f4b93e6fbf
- 66e6d09acc
- 9d9495d957
- b735ad0cf2
- 7ab623d694
- d5b3755242
- 3c44dccd65
- b57be072c7
- ea5dc8e09d
- b9d60c64e5
- 94ae824149
- 640714922b
- 2b268fdaf3
17 changed files with 200 additions and 61 deletions
@@ -19,11 +19,20 @@ outputs:
   rustc_version:
     description: The rustc version installed
     value: ${{ steps.rustc-version.outputs.version }}
+  rustup_version:
+    description: The rustup version installed
+    value: ${{ steps.rustup-version.outputs.version }}

 runs:
   using: composite
   steps:
+    - name: Check if rustup is already installed
+      shell: bash
+      id: rustup-version
+      run: |
+        echo "version=$(rustup --version)" >> $GITHUB_OUTPUT
     - name: Cache rustup toolchains
+      if: steps.rustup-version.outputs.version == ''
       uses: actions/cache@v3
       with:
         path: |
@@ -33,6 +42,7 @@ runs:
         # Requires repo to be cloned if toolchain is not specified
         key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
     - name: Install Rust toolchain
+      if: steps.rustup-version.outputs.version == ''
       shell: bash
       run: |
         if ! command -v rustup &> /dev/null ; then
@@ -57,7 +57,6 @@ jobs:

   build-image:
     runs-on: dind
-    container: ghcr.io/catthehacker/ubuntu:act-latest
     needs: define-variables
     permissions:
       contents: read
@@ -188,7 +187,7 @@ jobs:
           labels: ${{ steps.meta.outputs.labels }}
           annotations: ${{ steps.meta.outputs.annotations }}
           cache-from: type=gha
-          cache-to: type=gha,mode=max
+          # cache-to: type=gha,mode=max
           sbom: true
           outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
         env:
@@ -211,7 +210,6 @@ jobs:

   merge:
     runs-on: dind
-    container: ghcr.io/catthehacker/ubuntu:act-latest
     needs: [define-variables, build-image]
     steps:
       - name: Download digests
@@ -6,6 +6,7 @@ use conduwuit::{
     warn,
 };
 use futures::StreamExt;
+use futures::FutureExt;
 use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};

 use crate::{admin_command, admin_command_dispatch, get_room_info};
@@ -155,7 +156,10 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
             evicting admins too)",
         );

-        if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
+        if let Err(e) = leave_room(self.services, user_id, &room_id, None)
+            .boxed()
+            .await
+        {
             warn!("Failed to leave room: {e}");
         }
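The `.boxed()` calls added in this and the following hunks come from `futures::FutureExt`. The membership helpers call each other (`leave_room` can reach `full_user_deactivate`, which calls back into `leave_all_rooms`/`leave_room`), and mutually recursive async fns have infinitely sized future types unless one link in the cycle is heap-allocated; boxing at the call site also keeps these large futures off the caller's stack frame. A minimal sketch of the pattern, with a hypothetical `countdown` standing in for the real cycle:

    use futures::{FutureExt, future::BoxFuture};

    // Boxing erases the concrete future type so the compiler can size a
    // recursive future; `BoxFuture<'a, T>` is
    // `Pin<Box<dyn Future<Output = T> + Send + 'a>>`.
    fn countdown(n: u32) -> BoxFuture<'static, ()> {
        async move {
            if n > 0 {
                // Without the boxed return type this recursion would be
                // an infinitely sized type and fail to compile.
                countdown(n - 1).await;
            }
        }
        .boxed()
    }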
@@ -323,7 +327,10 @@ async fn ban_list_of_rooms(&self) -> Result {
             evicting admins too)",
         );

-        if let Err(e) = leave_room(self.services, user_id, &room_id, None).await {
+        if let Err(e) = leave_room(self.services, user_id, &room_id, None)
+            .boxed()
+            .await
+        {
             warn!("Failed to leave room: {e}");
         }
@@ -9,6 +9,7 @@ use conduwuit::{
 };
 use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
 use futures::StreamExt;
+use futures::FutureExt;
 use ruma::{
     OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
     events::{
|
@ -655,7 +656,9 @@ pub(super) async fn force_leave_room(
|
||||||
return Err!("{user_id} is not joined in the room");
|
return Err!("{user_id} is not joined in the room");
|
||||||
}
|
}
|
||||||
|
|
||||||
leave_room(self.services, &user_id, &room_id, None).await?;
|
leave_room(self.services, &user_id, &room_id, None)
|
||||||
|
.boxed()
|
||||||
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!("{user_id} has left {room_id}.",))
|
self.write_str(&format!("{user_id} has left {room_id}.",))
|
||||||
.await
|
.await
|
||||||
|
|
|
@@ -763,7 +763,9 @@ pub(crate) async fn deactivate_route(
     super::update_displayname(&services, sender_user, None, &all_joined_rooms).await;
     super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await;

-    full_user_deactivate(&services, sender_user, &all_joined_rooms).await?;
+    full_user_deactivate(&services, sender_user, &all_joined_rooms)
+        .boxed()
+        .await?;

     info!("User {sender_user} deactivated their account.");
@@ -915,7 +917,9 @@ pub async fn full_user_deactivate(
         }
     }

-    super::leave_all_rooms(services, user_id).await;
+    super::leave_all_rooms(services, user_id)
+        .boxed()
+        .await;

     Ok(())
 }
@@ -114,7 +114,9 @@ async fn banned_room_check(
                 .collect()
                 .await;

-            full_user_deactivate(services, user_id, &all_joined_rooms).await?;
+            full_user_deactivate(services, user_id, &all_joined_rooms)
+                .boxed()
+                .await?;
         }

         return Err!(Request(Forbidden("This room is banned on this homeserver.")));
@@ -153,7 +155,9 @@ async fn banned_room_check(
                 .collect()
                 .await;

-            full_user_deactivate(services, user_id, &all_joined_rooms).await?;
+            full_user_deactivate(services, user_id, &all_joined_rooms)
+                .boxed()
+                .await?;
         }

         return Err!(Request(Forbidden("This remote server is banned on this homeserver.")));
@@ -259,6 +263,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
             room_id.server_name(),
             client,
         )
+        .boxed()
         .await?;

         let mut servers = body.via.clone();
@@ -478,6 +483,7 @@ pub(crate) async fn leave_room_route(
     body: Ruma<leave_room::v3::Request>,
 ) -> Result<leave_room::v3::Response> {
     leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone())
+        .boxed()
         .await
         .map(|()| leave_room::v3::Response::new())
 }
@@ -1792,7 +1798,10 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) {

     for room_id in all_rooms {
         // ignore errors
-        if let Err(e) = leave_room(services, user_id, &room_id, None).await {
+        if let Err(e) = leave_room(services, user_id, &room_id, None)
+            .boxed()
+            .await
+        {
             warn!(%user_id, "Failed to leave {room_id} remotely: {e}");
         }
@@ -2162,6 +2171,109 @@ async fn knock_room_by_id_helper(
         }
     }

+    // For knock_restricted rooms, check if the user meets the restricted conditions
+    // If they do, attempt to join instead of knock
+    // This is not mentioned in the spec, but should be allowable (we're allowed to
+    // auto-join invites to knocked rooms)
+    let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
+    if let JoinRule::KnockRestricted(restricted) = &join_rule {
+        let restriction_rooms: Vec<_> = restricted
+            .allow
+            .iter()
+            .filter_map(|a| match a {
+                | AllowRule::RoomMembership(r) => Some(&r.room_id),
+                | _ => None,
+            })
+            .collect();
+
+        // Check if the user is in any of the allowed rooms
+        let mut user_meets_restrictions = false;
+        for restriction_room_id in &restriction_rooms {
+            if services
+                .rooms
+                .state_cache
+                .is_joined(sender_user, restriction_room_id)
+                .await
+            {
+                user_meets_restrictions = true;
+                break;
+            }
+        }
+
+        // If the user meets the restrictions, try joining instead
+        if user_meets_restrictions {
+            debug_info!(
+                "{sender_user} meets the restricted criteria in knock_restricted room \
+                 {room_id}, attempting to join instead of knock"
+            );
+            // For this case, we need to drop the state lock and get a new one in
+            // join_room_by_id_helper We need to release the lock here and let
+            // join_room_by_id_helper acquire it again
+            drop(state_lock);
+            match join_room_by_id_helper(
+                services,
+                sender_user,
+                room_id,
+                reason.clone(),
+                servers,
+                None,
+                &None,
+            )
+            .await
+            {
+                | Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
+                | Err(e) => {
+                    debug_warn!(
+                        "Failed to convert knock to join for {sender_user} in {room_id}: {e:?}"
+                    );
+                    // Get a new state lock for the remaining knock logic
+                    let new_state_lock = services.rooms.state.mutex.lock(room_id).await;
+
+                    let server_in_room = services
+                        .rooms
+                        .state_cache
+                        .server_in_room(services.globals.server_name(), room_id)
+                        .await;
+
+                    let local_knock = server_in_room
+                        || servers.is_empty()
+                        || (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
+
+                    if local_knock {
+                        knock_room_helper_local(
+                            services,
+                            sender_user,
+                            room_id,
+                            reason,
+                            servers,
+                            new_state_lock,
+                        )
+                        .boxed()
+                        .await?;
+                    } else {
+                        knock_room_helper_remote(
+                            services,
+                            sender_user,
+                            room_id,
+                            reason,
+                            servers,
+                            new_state_lock,
+                        )
+                        .boxed()
+                        .await?;
+                    }
+
+                    return Ok(knock_room::v3::Response::new(room_id.to_owned()));
+                },
+            }
+        }
+    } else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
+        debug_warn!(
+            "{sender_user} attempted to knock on room {room_id} but its join rule is \
+             {join_rule:?}, not knock or knock_restricted"
+        );
+    }
+
     let server_in_room = services
         .rooms
         .state_cache
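The new branch converts an eligible knock into a join: it collects the rooms named by `AllowRule::RoomMembership` entries, checks whether the knocking user is joined to any of them, and only then drops the state lock and retries as a join (re-locking if the join fails). A condensed sketch of the allow-rule filtering, with stand-in types for the ruma equivalents:

    // Stand-ins for ruma's join-rules types.
    enum AllowRule {
        RoomMembership { room_id: String },
        Other,
    }

    /// Rooms whose members satisfy a knock_restricted allow rule.
    fn restriction_rooms(allow: &[AllowRule]) -> Vec<&str> {
        allow
            .iter()
            .filter_map(|rule| match rule {
                AllowRule::RoomMembership { room_id } => Some(room_id.as_str()),
                AllowRule::Other => None,
            })
            .collect()
    }

    fn main() {
        let allow = vec![
            AllowRule::RoomMembership { room_id: "!space:example.org".into() },
            AllowRule::Other,
        ];
        assert_eq!(restriction_rooms(&allow), vec!["!space:example.org"]);
    }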
@@ -2209,6 +2321,12 @@ async fn knock_room_helper_local(
         return Err!(Request(Forbidden("This room does not support knocking.")));
     }

+    // Verify that this room has a valid knock or knock_restricted join rule
+    let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
+    if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
+        return Err!(Request(Forbidden("This room's join rule does not allow knocking.")));
+    }
+
     let content = RoomMemberEventContent {
         displayname: services.users.displayname(sender_user).await.ok(),
         avatar_url: services.users.avatar_url(sender_user).await.ok(),
@@ -121,7 +121,9 @@ where
         .map(|(key, val)| (key, val.collect()))
         .collect();

-    if !populate {
+    if populate {
+        rooms.push(summary_to_chunk(summary.clone()));
+    } else {
         children = children
             .iter()
             .rev()
@@ -144,10 +146,8 @@ where
             .collect();
     }

-    if populate {
-        rooms.push(summary_to_chunk(summary.clone()));
-    } else if queue.is_empty() && children.is_empty() {
-        return Err!(Request(InvalidParam("Room IDs in token were not found.")));
+    if !populate && queue.is_empty() && children.is_empty() {
+        break;
     }

     parents.insert(current_room.clone());
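These two hunks change the space-hierarchy walker's pagination: the current room is now emitted as soon as `populate` is set, and running out of work while still replaying a pagination token breaks out of the loop instead of returning `InvalidParam`. A condensed sketch of the resulting control flow (names and types are simplified stand-ins, not the real walker):

    // `rooms_in_token` holds the position saved in the pagination token;
    // `populate` flips true once the walk reaches that position.
    fn walk(rooms_in_token: &[u32], mut queue: Vec<u32>) -> Vec<u32> {
        let mut out = Vec::new();
        let mut populate = rooms_in_token.is_empty();
        while let Some(current) = queue.pop() {
            let children: Vec<u32> = Vec::new(); // stand-in for fetched children
            populate |= rooms_in_token.contains(&current);
            if populate {
                out.push(current);
            }
            if !populate && queue.is_empty() && children.is_empty() {
                break; // previously: return an InvalidParam error
            }
            queue.extend(children);
        }
        out
    }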
@@ -6,6 +6,7 @@ use conduwuit::{
 };
 use conduwuit_service::Services;
 use futures::TryStreamExt;
+use futures::FutureExt;
 use ruma::{
     OwnedEventId, RoomId, UserId,
     api::client::state::{get_state_events, get_state_events_for_key, send_state_event},
@@ -59,6 +60,7 @@ pub(crate) async fn send_state_event_for_empty_key_route(
     body: Ruma<send_state_event::v3::Request>,
 ) -> Result<RumaResponse<send_state_event::v3::Response>> {
     send_state_event_for_key_route(State(services), body)
+        .boxed()
         .await
         .map(RumaResponse)
 }
@@ -1009,8 +1009,6 @@ async fn calculate_state_incremental<'a>(
 ) -> Result<StateChanges> {
     let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash);

-    let state_changed = since_shortstatehash != current_shortstatehash;
-
     let encrypted_room = services
         .rooms
         .state_accessor
@@ -1042,7 +1040,7 @@ async fn calculate_state_incremental<'a>(
         })
         .into();

-    let state_diff_ids: OptionFuture<_> = (!full_state && state_changed)
+    let state_diff_ids: OptionFuture<_> = (!full_state)
        .then(|| {
             StreamExt::into_future(
                 services
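`state_diff_ids` is a `futures::future::OptionFuture`: a future that is only constructed when a condition holds, and whose `.await` yields an `Option`. With `state_changed` removed from the condition, the diff future is now built whenever the client did not request full state. A minimal sketch of the pattern (the body is a stand-in for the real state-diff stream):

    use futures::future::OptionFuture;

    async fn maybe_fetch(full_state: bool) -> Option<u64> {
        // `bool::then` yields Option<Future>; `.into()` wraps it in an
        // OptionFuture, and awaiting that yields Option<Output>.
        let fut: OptionFuture<_> = (!full_state)
            .then(|| async { 42u64 })
            .into();
        fut.await
    }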
@@ -79,12 +79,12 @@ fn main() {

     // --- Rerun Triggers ---
     // TODO: The git rerun triggers seem to always run
-    // Rerun if the git HEAD changes
-    println!("cargo:rerun-if-changed=.git/HEAD");
-    // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
-    if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"]) {
-        println!("cargo:rerun-if-changed=.git/{ref_path}");
-    }
+    // // Rerun if the git HEAD changes
+    // println!("cargo:rerun-if-changed=.git/HEAD");
+    // // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
+    // if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"])
+    // { println!("cargo:rerun-if-changed=.git/{ref_path}");
+    // }

     println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH");
     println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH_SHORT");
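Context on why these triggers were commented out: emitting any `cargo:rerun-if-*` directive replaces Cargo's default rerun-on-any-source-change behaviour, and a `rerun-if-changed` path that is missing (e.g. `.git/HEAD` in a tarball build) or constantly touched makes the script rerun on every build, which matches the TODO above. A minimal sketch of what remains (assumed semantics from the Cargo book, not this repo's full build.rs):

    // build.rs
    fn main() {
        // Re-run the build script only when these env vars change.
        println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH");
        println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH_SHORT");
    }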
@@ -2059,41 +2059,41 @@ fn default_database_backups_to_keep() -> i16 { 1 }

 fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) }

-fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) }
+fn default_db_cache_capacity_mb() -> f64 { 256.0 + parallelism_scaled_f64(256.0) }

-fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
+fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(25_000).saturating_add(200_000) }

 fn default_cache_capacity_modifier() -> f64 { 1.0 }

 fn default_auth_chain_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
 }

 fn default_shorteventid_cache_capacity() -> u32 {
-    parallelism_scaled_u32(50_000).saturating_add(100_000)
+    parallelism_scaled_u32(50_000).saturating_add(200_000)
 }

 fn default_eventidshort_cache_capacity() -> u32 {
-    parallelism_scaled_u32(25_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
 }

 fn default_eventid_pdu_cache_capacity() -> u32 {
-    parallelism_scaled_u32(25_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
 }

 fn default_shortstatekey_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
 }

 fn default_statekeyshort_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
+    parallelism_scaled_u32(25_000).saturating_add(200_000)
 }

 fn default_servernameevent_data_cache_capacity() -> u32 {
-    parallelism_scaled_u32(100_000).saturating_add(500_000)
+    parallelism_scaled_u32(200_000).saturating_add(500_000)
 }

-fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
+fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(2000) }

 fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
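These cache defaults scale with available CPU parallelism, so the raised bases and offsets grow further on larger machines. A sketch of how a `parallelism_scaled_u32` helper presumably works (the name comes from the diff; the body is an assumption):

    use std::thread::available_parallelism;

    // Scale a base capacity by the number of CPU threads.
    fn parallelism_scaled_u32(base: u32) -> u32 {
        let threads = available_parallelism().map_or(1, |n| n.get()) as u32;
        base.saturating_mul(threads)
    }

    fn default_pdu_cache_capacity() -> u32 {
        // e.g. on a 16-thread host: 25_000 * 16 + 200_000 = 600_000 entries
        parallelism_scaled_u32(25_000).saturating_add(200_000)
    }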
@@ -638,7 +638,7 @@ fn valid_membership_change(
                 warn!(?target_user_membership_event_id, "Banned user can't join");
                 false
             } else if (join_rules == JoinRule::Invite
-                || room_version.allow_knocking && join_rules == JoinRule::Knock)
+                || room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_))))
                 // If the join_rule is invite then allow if membership state is invite or join
                 && (target_user_current_membership == MembershipState::Join
                     || target_user_current_membership == MembershipState::Invite)
@@ -21,7 +21,10 @@ pub use ::toml;
 pub use ::tracing;
 pub use config::Config;
 pub use error::Error;
-pub use info::{rustc_flags_capture, version, version::version};
+pub use info::{
+    rustc_flags_capture, version,
+    version::{name, version},
+};
 pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res};
 pub use server::Server;
 pub use utils::{ctor, dtor, implement, result, result::Result};
@@ -29,7 +29,7 @@ fn descriptor_cf_options(
     set_table_options(&mut opts, &desc, cache)?;

     opts.set_min_write_buffer_number(1);
-    opts.set_max_write_buffer_number(2);
+    opts.set_max_write_buffer_number(3);
     opts.set_write_buffer_size(desc.write_size);

     opts.set_target_file_size_base(desc.file_size);
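Allowing a third memtable lets writes continue into a fresh buffer while two full ones are still flushing, trading extra memory for fewer write stalls. A sketch of the tuning using the rust-rocksdb-style `Options` API seen above (a standalone illustration, not this repo's `descriptor_cf_options`):

    use rocksdb::Options;

    fn tuned_cf_options(write_buffer_size: usize) -> Options {
        let mut opts = Options::default();
        opts.set_min_write_buffer_number(1);
        opts.set_max_write_buffer_number(3); // was 2; reduces write stalls
        opts.set_write_buffer_size(write_buffer_size);
        opts
    }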
@@ -15,7 +15,7 @@ use conduwuit_core::{
 #[clap(
     about,
     long_about = None,
-    name = "conduwuit",
+    name = conduwuit_core::name(),
     version = conduwuit_core::version(),
 )]
 pub(crate) struct Args {
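clap's derive attributes accept arbitrary expressions, not just literals, which is what lets `name` move from a hard-coded string to `conduwuit_core::name()` (re-exported in the lib.rs hunk above). A minimal sketch with a hypothetical `app_name()` standing in for the core-crate function:

    use clap::Parser;

    fn app_name() -> &'static str { "conduwuit" } // stand-in for conduwuit_core::name()

    #[derive(Parser)]
    #[clap(name = app_name(), version = "0.0.0")]
    struct Args {}

    fn main() {
        let _args = Args::parse();
    }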
@@ -4,7 +4,6 @@ mod execute;
 mod grant;

 use std::{
-    future::Future,
     pin::Pin,
     sync::{Arc, RwLock as StdRwLock, Weak},
 };
@@ -14,7 +13,7 @@ use conduwuit::{
     Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder,
 };
 pub use create::create_admin_room;
-use futures::{FutureExt, TryFutureExt};
+use futures::{Future, FutureExt, TryFutureExt};
 use loole::{Receiver, Sender};
 use ruma::{
     OwnedEventId, OwnedRoomId, RoomId, UserId,
@@ -306,14 +306,12 @@ impl super::Service {

     #[tracing::instrument(name = "srv", level = "debug", skip(self))]
     async fn query_srv_record(&self, hostname: &'_ str) -> Result<Option<FedDest>> {
-        let hostnames =
-            [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
-
-        for hostname in hostnames {
         self.services.server.check_running()?;

         debug!("querying SRV for {hostname:?}");
-        let hostname = hostname.trim_end_matches('.');
+        let hostname_suffix = format!("_matrix-fed._tcp.{hostname}.");
+        let hostname = hostname_suffix.trim_end_matches('.');
         match self.resolver.resolver.srv_lookup(hostname).await {
             | Err(e) => Self::handle_resolve_error(&e, hostname)?,
             | Ok(result) => {
@@ -328,7 +326,6 @@ impl super::Service {
                 }));
             },
         }
-    }

     Ok(None)
 }
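The rewritten resolver queries only the `_matrix-fed._tcp` SRV name (the current one, added in Matrix 1.8); the loop over the deprecated `_matrix._tcp` fallback is gone. A sketch of the lookup under the assumption of a hickory-resolver backend (conduwuit wraps its own resolver type, so names here are illustrative):

    use hickory_resolver::TokioAsyncResolver;

    async fn lookup_matrix_srv(
        hostname: &str,
    ) -> Result<Option<(String, u16)>, Box<dyn std::error::Error>> {
        let resolver = TokioAsyncResolver::tokio_from_system_conf()?;
        let query = format!("_matrix-fed._tcp.{hostname}.");
        let srv = resolver.srv_lookup(query.trim_end_matches('.')).await?;
        // Take the first record; a full implementation would sort by
        // priority and weight per RFC 2782.
        Ok(srv.iter().next().map(|r| (r.target().to_string(), r.port())))
    }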