Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git

Compare commits: 7b519ccabb...782db347f9 (23 commits)
Commits (SHA1):

- 782db347f9
- 7fc0de3643
- c99307834e
- b08b35668f
- 58f81b610f
- f25e9a98a7
- a5ce7f0991
- a91aa29cff
- 6010e550c2
- 33b0bf0576
- f1708b09cc
- 689024b29c
- d886a0738a
- f63a5b0a7e
- a8da08a210
- 956bdeb021
- e4a43b1a5b
- 5775e0ad9d
- 238cc627e3
- b1516209c4
- 0589884109
- 4a83df5b57
- aa08edc55f
17 changed files with 146 additions and 125 deletions
.github/FUNDING.yml (vendored, 5 changes)

```diff
@@ -1,5 +1,4 @@
-github: [JadedBlueEyes]
-# Doesn't support an array, so we can only list nex
-ko_fi: nexy7574
+github: [JadedBlueEyes, nexy7574]
 custom:
   - https://ko-fi.com/nexy7574
+  - https://ko-fi.com/JadedBlueEyes
```
```diff
@@ -57,7 +57,7 @@ Continuwuity aims to:

 ### Can I try it out?

-Check out the [documentation](introduction) for installation instructions.
+Check out the [documentation](https://continuwuity.org) for installation instructions.

 There are currently no open registration Continuwuity instances available.
```
```diff
@@ -1066,7 +1066,7 @@
 # 3 to 5 = Statistics with possible performance impact.
 # 6 = All statistics.
 #
-#rocksdb_stats_level = 1
+#rocksdb_stats_level = 3

 # This is a password that can be configured that will let you login to the
 # server bot account (currently `@conduit`) for emergency troubleshooting
@@ -1680,11 +1680,9 @@
 #stream_amplification = 1024

 # Number of sender task workers; determines sender parallelism. Default is
-# '0' which means the value is determined internally, likely matching the
-# number of tokio worker-threads or number of cores, etc. Override by
-# setting a non-zero value.
+# number of CPU cores. Override by setting a different value.
 #
-#sender_workers = 0
+#sender_workers = 4

 # Enables listener sockets; can be set to false to disable listening. This
 # option is intended for developer/diagnostic purposes only.
```
```diff
@@ -12,6 +12,15 @@ services:
       #- ./continuwuity.toml:/etc/continuwuity.toml
     networks:
       - proxy
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
+      - "traefik.http.routers.continuwuity.entrypoints=websecure" # your HTTPS entry point
+      - "traefik.http.routers.continuwuity.tls=true"
+      - "traefik.http.routers.continuwuity.service=continuwuity"
+      - "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
+      # possibly, depending on your config:
+      # - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
     environment:
       CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
       CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
```
```diff
@@ -12,6 +12,14 @@ services:
       #- ./continuwuity.toml:/etc/continuwuity.toml
     networks:
       - proxy
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
+      - "traefik.http.routers.continuwuity.entrypoints=websecure"
+      - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
+      - "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
+      # Uncomment and adjust the following if you want to use middleware
+      # - "traefik.http.routers.continuwuity.middlewares=secureHeaders@file"
     environment:
       CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
       CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
```
docs/static/announcements.json (vendored, 4 changes)

```diff
@@ -6,8 +6,8 @@
       "message": "Welcome to Continuwuity! Important announcements about the project will appear here."
     },
     {
-      "id": 2,
-      "message": "🎉 Continuwuity v0.5.0-rc.6 is now available! This release includes improved knock-restricted room handling, automatic support contact configuration, and a new HTML landing page. Check [the release notes for full details](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.6) and upgrade instructions."
+      "id": 3,
+      "message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
     }
   ]
 }
```
```diff
@@ -1235,7 +1235,7 @@ pub struct Config {
 	/// 3 to 5 = Statistics with possible performance impact.
 	/// 6 = All statistics.
 	///
-	/// default: 1
+	/// default: 3
 	#[serde(default = "default_rocksdb_stats_level")]
 	pub rocksdb_stats_level: u8,

@@ -1917,12 +1917,10 @@ pub struct Config {
 	pub stream_amplification: usize,

 	/// Number of sender task workers; determines sender parallelism. Default is
-	/// '0' which means the value is determined internally, likely matching the
-	/// number of tokio worker-threads or number of cores, etc. Override by
-	/// setting a non-zero value.
+	/// core count. Override by setting a different value.
 	///
-	/// default: 0
-	#[serde(default)]
+	/// default: core count
+	#[serde(default = "default_sender_workers")]
 	pub sender_workers: usize,

 	/// Enables listener sockets; can be set to false to disable listening. This

@@ -2153,45 +2151,48 @@ fn default_database_backups_to_keep() -> i16 { 1 }

 fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) }

-fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) }
+fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) }

-fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
+fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) }

 fn default_cache_capacity_modifier() -> f64 { 1.0 }

 fn default_auth_chain_cache_capacity() -> u32 {
 	parallelism_scaled_u32(10_000).saturating_add(100_000)
 }

-fn default_shorteventid_cache_capacity() -> u32 {
-	parallelism_scaled_u32(50_000).saturating_add(100_000)
-}
+fn default_shorteventid_cache_capacity() -> u32 {
+	parallelism_scaled_u32(100_000).saturating_add(100_000)
+}

 fn default_eventidshort_cache_capacity() -> u32 {
-	parallelism_scaled_u32(25_000).saturating_add(100_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_eventid_pdu_cache_capacity() -> u32 {
-	parallelism_scaled_u32(25_000).saturating_add(100_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_shortstatekey_cache_capacity() -> u32 {
-	parallelism_scaled_u32(10_000).saturating_add(100_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_statekeyshort_cache_capacity() -> u32 {
-	parallelism_scaled_u32(10_000).saturating_add(100_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_servernameevent_data_cache_capacity() -> u32 {
-	parallelism_scaled_u32(100_000).saturating_add(500_000)
+	parallelism_scaled_u32(100_000).saturating_add(100_000)
 }

-fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
+fn default_stateinfo_cache_capacity() -> u32 {
+	parallelism_scaled_u32(500).clamp(100, 12000)
+}

-fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
+fn default_roomid_spacehierarchy_cache_capacity() -> u32 {
+	parallelism_scaled_u32(500).clamp(100, 12000) }

-fn default_dns_cache_entries() -> u32 { 32768 }
+fn default_dns_cache_entries() -> u32 { 327680 }

 fn default_dns_min_ttl() -> u64 { 60 * 180 }
```
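To make the scale of the cache-default change concrete, here is a minimal sketch (not part of the diff) that evaluates the old and new formulas for an assumed 8-way `available_parallelism()`; the `ASSUMED_PARALLELISM` constant stands in for the real `sys::available_parallelism()` call:

```rust
// Hypothetical worked example of the cache-default change above,
// assuming an 8-core host.
fn parallelism_scaled_u32(val: u32) -> u32 {
	const ASSUMED_PARALLELISM: u32 = 8; // stand-in for sys::available_parallelism()
	val.saturating_mul(ASSUMED_PARALLELISM)
}

fn main() {
	// db cache: old 128 + 8*64 = 640 MB, new 512 + 8*512 = 4608 MB
	let old_db_cache = 128.0 + 8.0 * 64.0;
	let new_db_cache = 512.0 + 8.0 * 512.0;
	println!("db cache: {old_db_cache} MB -> {new_db_cache} MB");

	// pdu cache: old 8*10_000 + 100_000 = 180_000 entries,
	// new 8*50_000 + 100_000 = 500_000 entries
	let old_pdu = parallelism_scaled_u32(10_000).saturating_add(100_000);
	let new_pdu = parallelism_scaled_u32(50_000).saturating_add(100_000);
	println!("pdu cache entries: {old_pdu} -> {new_pdu}");
}
```

On such a host the database cache default grows roughly sevenfold, so the new defaults trade memory for cache hit rate.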
```diff
@@ -2297,7 +2298,7 @@ fn default_typing_client_timeout_max_s() -> u64 { 45 }

 fn default_rocksdb_recovery_mode() -> u8 { 1 }

-fn default_rocksdb_log_level() -> String { "error".to_owned() }
+fn default_rocksdb_log_level() -> String { "info".to_owned() }

 fn default_rocksdb_log_time_to_roll() -> usize { 0 }

@@ -2329,7 +2330,7 @@ fn default_rocksdb_compression_level() -> i32 { 32767 }
 #[allow(clippy::doc_markdown)]
 fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 }

-fn default_rocksdb_stats_level() -> u8 { 1 }
+fn default_rocksdb_stats_level() -> u8 { 3 }

 // I know, it's a great name
 #[must_use]

@@ -2384,14 +2385,13 @@ fn default_admin_log_capture() -> String {
 fn default_admin_room_tag() -> String { "m.server_notice".to_owned() }

 #[allow(clippy::as_conversions, clippy::cast_precision_loss)]
-fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
+pub fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }

-fn parallelism_scaled_u32(val: u32) -> u32 {
-	let val = val.try_into().expect("failed to cast u32 to usize");
-	parallelism_scaled(val).try_into().unwrap_or(u32::MAX)
-}
+pub fn parallelism_scaled_u32(val: u32) -> u32 { val.saturating_mul(sys::available_parallelism() as u32) }

-fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }
+pub fn parallelism_scaled_i32(val: i32) -> i32 { val.saturating_mul(sys::available_parallelism() as i32) }
+
+pub fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }

 fn default_trusted_server_batch_size() -> usize { 256 }
```
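The `u32` scaler changes shape here: the old version widened to `usize`, scaled, and narrowed back with `unwrap_or(u32::MAX)`, while the new one saturating-multiplies directly in `u32`. A standalone sketch of both shapes, with a `CORES` constant as an assumed stand-in for `sys::available_parallelism()`:

```rust
// Sketch of the before/after u32 scaling; both clamp at u32::MAX on
// overflow rather than panicking or wrapping.
const CORES: u32 = 8; // stand-in for sys::available_parallelism()

// Old shape: scale in usize, then narrow back to u32, clamping on overflow.
fn parallelism_scaled_u32_old(val: u32) -> u32 {
	let val: usize = val.try_into().expect("failed to cast u32 to usize");
	val.saturating_mul(CORES as usize).try_into().unwrap_or(u32::MAX)
}

// New shape: saturating multiply directly in u32.
fn parallelism_scaled_u32_new(val: u32) -> u32 { val.saturating_mul(CORES) }

fn main() {
	assert_eq!(parallelism_scaled_u32_old(50_000), 400_000);
	assert_eq!(parallelism_scaled_u32_new(50_000), 400_000);
	// Saturation instead of wrap-around:
	assert_eq!(parallelism_scaled_u32_new(u32::MAX), u32::MAX);
}
```

For in-range values the two agree; the difference is only where the clamp happens (at the `usize`-to-`u32` narrowing before, at the multiply itself now).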
```diff
@@ -2411,6 +2411,8 @@ fn default_stream_width_scale() -> f32 { 1.0 }

 fn default_stream_amplification() -> usize { 1024 }

+fn default_sender_workers() -> usize { parallelism_scaled(1) }
+
 fn default_client_receive_timeout() -> u64 { 75 }

 fn default_client_request_timeout() -> u64 { 180 }
```
```diff
@@ -13,6 +13,7 @@ use ruma::{
 		power_levels::RoomPowerLevelsEventContent,
 		third_party_invite::RoomThirdPartyInviteEventContent,
 	},
+	EventId,
 	int,
 	serde::{Base64, Raw},
 };
@@ -21,7 +22,6 @@ use serde::{
 	de::{Error as _, IgnoredAny},
 };
 use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue};
-
 use super::{
 	Error, Event, Result, StateEventType, StateKey, TimelineEventType,
 	power_levels::{
```
```diff
@@ -149,8 +149,8 @@ where
 	for<'a> &'a E: Event + Send,
 {
 	debug!(
-		event_id = %incoming_event.event_id(),
-		event_type = ?incoming_event.event_type(),
+		event_id = format!("{}", incoming_event.event_id()),
+		event_type = format!("{}", incoming_event.event_type()),
 		"auth_check beginning"
 	);
```
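A side note on the field-capture switch, sketched with the `tracing` crate (the `event_id` local below is a made-up value for illustration): the `%` sigil records the value through `Display` and lets the subscriber format it, with no intermediate allocation, whereas `format!` builds a `String` up front whenever the event is enabled, which records the field as a plain string.

```rust
// Sketch (assumes the `tracing` crate as a dependency).
use tracing::debug;

fn main() {
	let event_id = "$abc:example.com";
	// `%`: captured via Display; the subscriber formats it, no String built here.
	debug!(event_id = %event_id, "lazy Display capture");
	// `format!`: a String is allocated eagerly and recorded as a string value.
	debug!(event_id = format!("{}", event_id), "eager String capture");
}
```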
```diff
@@ -242,20 +242,44 @@ where
 	}
 	*/

-	let (room_create_event, power_levels_event, sender_member_event) = join3(
-		fetch_state(&StateEventType::RoomCreate, ""),
-		fetch_state(&StateEventType::RoomPowerLevels, ""),
-		fetch_state(&StateEventType::RoomMember, sender.as_str()),
-	)
-	.await;
+	// let (room_create_event, power_levels_event, sender_member_event) = join3(
+	// 	fetch_state(&StateEventType::RoomCreate, ""),
+	// 	fetch_state(&StateEventType::RoomPowerLevels, ""),
+	// 	fetch_state(&StateEventType::RoomMember, sender.as_str()),
+	// )
+	// .await;
+
+	let room_create_event = fetch_state(&StateEventType::RoomCreate, "").await;
+	let power_levels_event = fetch_state(&StateEventType::RoomPowerLevels, "").await;
+	let sender_member_event = fetch_state(&StateEventType::RoomMember, sender.as_str()).await;

 	let room_create_event = match room_create_event {
 		| None => {
-			warn!("no m.room.create event in auth chain");
+			error!(
+				create_event = room_create_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				power_levels = power_levels_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				member_event = sender_member_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				"no m.room.create event found for {} ({})!",
+				incoming_event.event_id().as_str(),
+				incoming_event.room_id().as_str()
+			);
 			return Ok(false);
 		},
 		| Some(e) => e,
 	};
+	// just re-check 1.2 to work around a bug
+	let Some(room_id_server_name) = incoming_event.room_id().server_name() else {
+		warn!("room ID has no servername");
+		return Ok(false);
+	};
+
+	if room_id_server_name != room_create_event.sender().server_name() {
+		warn!(
+			"servername of room ID origin ({}) does not match servername of m.room.create sender ({})",
+			room_id_server_name,
+			room_create_event.sender().server_name());
+		return Ok(false);
+	}

 	if incoming_event.room_id() != room_create_event.room_id() {
 		warn!("room_id of incoming event does not match room_id of m.room.create event");
```
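The state-fetch change above swaps one concurrent `join3` await for three sequential awaits. A minimal sketch of the control-flow difference, using a stand-in `fetch` function in place of `fetch_state`:

```rust
// Sketch: `join3` polls all three futures concurrently on one task,
// while the sequential form runs each lookup to completion in turn.
use futures::future::join3;

async fn fetch(name: &str) -> Option<String> { Some(name.to_owned()) }

async fn concurrent() -> (Option<String>, Option<String>, Option<String>) {
	join3(
		fetch("m.room.create"),
		fetch("m.room.power_levels"),
		fetch("m.room.member"),
	)
	.await
}

async fn sequential() -> (Option<String>, Option<String>, Option<String>) {
	let create = fetch("m.room.create").await;
	let power = fetch("m.room.power_levels").await;
	let member = fetch("m.room.member").await;
	(create, power, member)
}

fn main() {
	let (a, _, _) = futures::executor::block_on(concurrent());
	assert_eq!(a.as_deref(), Some("m.room.create"));
	futures::executor::block_on(sequential());
}
```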
```diff
@@ -733,8 +733,12 @@ where
 	Fut: Future<Output = Option<E>> + Send,
 	E: Event + Send + Sync,
 {
+	let mut room_id = None;
 	while let Some(sort_ev) = event {
 		debug!(event_id = sort_ev.event_id().as_str(), "mainline");
+		if room_id.is_none() {
+			room_id = Some(sort_ev.room_id().to_owned());
+		}

 		let id = sort_ev.event_id();
 		if let Some(depth) = mainline_map.get(id) {
@@ -753,7 +757,7 @@ where
 			}
 		}
 	}
-	// Did not find a power level event so we default to zero
+	warn!("could not find a power event in the mainline map for {room_id:?}, defaulting to zero depth");
 	Ok(0)
 }
```
```diff
@@ -29,7 +29,7 @@ fn descriptor_cf_options(
 	set_table_options(&mut opts, &desc, cache)?;

 	opts.set_min_write_buffer_number(1);
-	opts.set_max_write_buffer_number(2);
+	opts.set_max_write_buffer_number(3);
 	opts.set_write_buffer_size(desc.write_size);

 	opts.set_target_file_size_base(desc.file_size);
```
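In RocksDB, the memtable memory a column family can hold is roughly `write_buffer_size * max_write_buffer_number`, so raising the buffer count from 2 to 3 raises that ceiling by one buffer. A hypothetical arithmetic sketch, assuming a 64 MiB `desc.write_size` (the actual per-CF sizes come from the descriptors and vary):

```rust
// Hypothetical memtable-bound arithmetic for the change above.
fn main() {
	let write_buffer_size: u64 = 64 * 1024 * 1024; // assumed desc.write_size
	let old_bound = write_buffer_size * 2;
	let new_bound = write_buffer_size * 3;
	println!(
		"memtable bound per CF: {} MiB -> {} MiB",
		old_bound / (1024 * 1024),
		new_bound / (1024 * 1024)
	);
}
```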
```diff
@@ -1,8 +1,6 @@
-use std::{cmp, convert::TryFrom};
-
-use conduwuit::{Config, Result, utils};
+use conduwuit::{Config, Result};
 use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel};

+use conduwuit::config::{parallelism_scaled_i32, parallelism_scaled_u32};
 use super::{cf_opts::cache_size_f64, logger::handle as handle_log};

 /// Create database-wide options suitable for opening the database. This also
@@ -23,8 +21,8 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul
 	set_logging_defaults(&mut opts, config);

 	// Processing
-	opts.set_max_background_jobs(num_threads::<i32>(config)?);
-	opts.set_max_subcompactions(num_threads::<u32>(config)?);
+	opts.set_max_background_jobs(parallelism_scaled_i32(1));
+	opts.set_max_subcompactions(parallelism_scaled_u32(1));
 	opts.set_avoid_unnecessary_blocking_io(true);
 	opts.set_max_file_opening_threads(0);
@@ -126,15 +124,3 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) {
 		opts.set_callback_logger(rocksdb_log_level, &handle_log);
 	}
 }
-
-fn num_threads<T: TryFrom<usize>>(config: &Config) -> Result<T> {
-	const MIN_PARALLELISM: usize = 2;
-
-	let requested = if config.rocksdb_parallelism_threads != 0 {
-		config.rocksdb_parallelism_threads
-	} else {
-		utils::available_parallelism()
-	};
-
-	utils::math::try_into::<T, usize>(cmp::max(MIN_PARALLELISM, requested))
-}
```
```diff
@@ -306,14 +306,12 @@ impl super::Service {

 	#[tracing::instrument(name = "srv", level = "debug", skip(self))]
 	async fn query_srv_record(&self, hostname: &'_ str) -> Result<Option<FedDest>> {
-		let hostnames =
-			[format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
-
-		for hostname in hostnames {
 		self.services.server.check_running()?;

 		debug!("querying SRV for {hostname:?}");
-		let hostname = hostname.trim_end_matches('.');
+
+		let hostname_suffix = format!("_matrix-fed._tcp.{hostname}.");
+		let hostname = hostname_suffix.trim_end_matches('.');
 		match self.resolver.resolver.srv_lookup(hostname).await {
 			| Err(e) => Self::handle_resolve_error(&e, hostname)?,
 			| Ok(result) => {
@@ -328,7 +326,6 @@ impl super::Service {
 					}));
 				},
 			}
-		}

 		Ok(None)
 	}
```
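Behaviorally, this rewrite drops the loop over candidate names and with it the legacy `_matrix._tcp` fallback; only the modern `_matrix-fed._tcp` SRV name is queried now. A standalone sketch of the before/after name construction (the helper names here are illustrative, not from the code):

```rust
// Old behavior: two candidate SRV names were tried in order.
fn srv_candidates_old(hostname: &str) -> Vec<String> {
	vec![
		format!("_matrix-fed._tcp.{hostname}."),
		format!("_matrix._tcp.{hostname}."),
	]
}

// New behavior: a single lookup against the modern name.
fn srv_candidate_new(hostname: &str) -> String { format!("_matrix-fed._tcp.{hostname}.") }

fn main() {
	assert_eq!(srv_candidates_old("example.com").len(), 2);
	assert_eq!(srv_candidate_new("example.com"), "_matrix-fed._tcp.example.com.");
}
```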
```diff
@@ -122,10 +122,7 @@ where
 	}

 	// The original create event must be in the auth events
-	if !matches!(
-		auth_events.get(&(StateEventType::RoomCreate, String::new().into())),
-		Some(_) | None
-	) {
+	if !auth_events.contains_key(&(StateEventType::RoomCreate, String::new().into())) {
 		return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
 	}
```
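The replaced guard was inert: `Some(_) | None` matches every `Option`, so its negation was always false and the error branch could never fire. The `contains_key` form actually rejects a missing create event. A standalone sketch, with a plain `HashMap` keyed by strings standing in for `auth_events`:

```rust
// Demonstration of why the old check never fired.
use std::collections::HashMap;

fn main() {
	let auth_events: HashMap<String, ()> = HashMap::new();

	// Old: `Some(_) | None` covers every Option value, so this is always false,
	// even when the create event is absent.
	let old_check = !matches!(auth_events.get("m.room.create"), Some(_) | None);
	assert!(!old_check);

	// New: correctly true when the create event is missing.
	let new_check = !auth_events.contains_key("m.room.create");
	assert!(new_check);
}
```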
```diff
@@ -5,7 +5,7 @@ use conduwuit::{
 	matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res},
 	trace,
 	utils::stream::{BroadbandExt, ReadyExt},
-	warn,
+	warn
 };
 use futures::{FutureExt, StreamExt, future::ready};
 use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};
@@ -175,7 +175,7 @@ where
 	let extremities: Vec<_> = self
 		.services
 		.state
-		.get_forward_extremities(room_id)
+		.get_forward_extremities(room_id, &state_lock)
 		.map(ToOwned::to_owned)
 		.ready_filter(|event_id| {
 			// Remove any that are referenced by this incoming event's prev_events
@@ -193,6 +193,8 @@ where
 		.collect()
 		.await;

+	if extremities.len() == 0 { info!("Retained zero extremities when upgrading outlier PDU to timeline PDU with {} previous events, event id: {}", incoming_pdu.prev_events.len(), incoming_pdu.event_id) }
+
 	debug!(
 		"Retained {} extremities checked against {} prev_events",
 		extremities.len(),
```
```diff
@@ -388,6 +388,7 @@ impl Service {
 	pub fn get_forward_extremities<'a>(
 		&'a self,
 		room_id: &'a RoomId,
+		_state_lock: &'a RoomMutexGuard,
 	) -> impl Stream<Item = &EventId> + Send + '_ {
 		let prefix = (room_id, Interfix);
```
```diff
@@ -42,7 +42,7 @@ pub async fn create_hash_and_sign_event(
 	let prev_events: Vec<OwnedEventId> = self
 		.services
 		.state
-		.get_forward_extremities(room_id)
+		.get_forward_extremities(room_id, _mutex_lock)
 		.take(20)
 		.map(Into::into)
 		.collect()
@@ -165,25 +165,6 @@ pub async fn create_hash_and_sign_event(
 		return Err!(Request(Forbidden("Event is not authorized.")));
 	}

-	// Check with the policy server
-	match self
-		.services
-		.event_handler
-		.ask_policy_server(&pdu, room_id)
-		.await
-	{
-		| Ok(true) => {},
-		| Ok(false) => {
-			return Err!(Request(Forbidden(debug_warn!(
-				"Policy server marked this event as spam"
-			))));
-		},
-		| Err(e) => {
-			// fail open
-			warn!("Failed to check event with policy server: {e}");
-		},
-	}
-
 	// Hash and sign
 	let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| {
 		err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}"))))
@@ -222,6 +203,25 @@ pub async fn create_hash_and_sign_event(

 	pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into()));

+	// Check with the policy server
+	match self
+		.services
+		.event_handler
+		.ask_policy_server(&pdu, room_id)
+		.await
+	{
+		| Ok(true) => {},
+		| Ok(false) => {
+			return Err!(Request(Forbidden(debug_warn!(
+				"Policy server marked this event as spam"
+			))));
+		},
+		| Err(e) => {
+			// fail open
+			warn!("Failed to check event with policy server: {e}");
+		},
+	}
+
 	// Generate short event id
 	let _shorteventid = self
 		.services
```

The policy-server block itself is unchanged by these two hunks; it moves from before the hash-and-sign step to after the `event_id` is inserted into `pdu_json`, so the check now runs once the event's ID has been computed.
```diff
@@ -401,16 +401,10 @@ impl Service {

 	fn num_senders(args: &crate::Args<'_>) -> usize {
 		const MIN_SENDERS: usize = 1;
-		// Limit the number of senders to the number of workers threads or number of
-		// cores, conservatively.
-		let max_senders = args
-			.server
-			.metrics
-			.num_workers()
-			.min(available_parallelism());
+		// Limit the maximum number of senders to the number of cores.
+		let max_senders = available_parallelism();

-		// If the user doesn't override the default 0, this is intended to then default
-		// to 1 for now as multiple senders is experimental.
+		// default is 4 senders. clamp between 1 and core count.
 		args.server
 			.config
 			.sender_workers
```
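The hunk cuts off at `.sender_workers`, but the new comments describe the resulting policy: the configured worker count is clamped between one sender and the core count. A sketch of that clamp under those stated assumptions (the `effective_senders` helper is hypothetical, not from the code):

```rust
// Hypothetical sketch of the sender-count policy described above.
fn effective_senders(configured_sender_workers: usize, available_parallelism: usize) -> usize {
	const MIN_SENDERS: usize = 1;
	let max_senders = available_parallelism;
	configured_sender_workers.clamp(MIN_SENDERS, max_senders)
}

fn main() {
	assert_eq!(effective_senders(4, 16), 4); // example-config default on a 16-core host
	assert_eq!(effective_senders(64, 8), 8); // capped at core count
	assert_eq!(effective_senders(0, 8), 1);  // floor of one sender
}
```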