Merge branch 'main' into main_u

This commit is contained in:
Matthew Scheffel 2024-07-16 21:21:08 -03:00
commit f8d341002e
202 changed files with 5648 additions and 3887 deletions

View file

@ -38,8 +38,11 @@ env:
# Custom nix binary cache if fork is being used
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }}
# Get error output from nix that we can actually use
NIX_CONFIG: show-trace = true
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
NIX_CONFIG: |
show-trace = true
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
permissions:
packages: write
@ -57,7 +60,7 @@ jobs:
uses: actions/checkout@v4
- name: Tag comparison check
if: startsWith(github.ref, 'refs/tags/v')
if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }}
run: |
# Tag mismatch with latest repo tag check to prevent potential downgrades
LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)
@ -115,7 +118,7 @@ jobs:
- name: Prepare build environment
run: |
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
direnv allow
nix develop .#all-features --command true
@ -129,15 +132,10 @@ jobs:
run: |
direnv exec . engage > >(tee -a test_output.log)
- name: Sync Complement repository
uses: actions/checkout@v4
with:
repository: 'matrix-org/complement'
path: complement_src
- name: Run Complement tests
run: |
direnv exec . bin/complement 'complement_src' 'complement_test_logs.jsonl' 'complement_test_results.jsonl'
# the nix devshell sets $COMPLEMENT_SRC, so "/dev/null" is no-op
direnv exec . bin/complement "/dev/null" complement_test_logs.jsonl complement_test_results.jsonl > >(tee -a test_output.log)
cp -v -f result complement_oci_image.tar.gz
- name: Upload Complement OCI image
@ -163,11 +161,7 @@ jobs:
- name: Diff Complement results with checked-in repo results
run: |
diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_test_output.log)
echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY
echo '```diff' >> $GITHUB_STEP_SUMMARY
tail -n 100 complement_test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log)
- name: Update Job Summary
if: success() || failure()
@ -175,9 +169,15 @@ jobs:
if [ ${{ job.status }} == 'success' ]; then
echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY
else
echo '# CI failure' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY
echo '```diff' >> $GITHUB_STEP_SUMMARY
tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
fi
build:
@ -240,7 +240,7 @@ jobs:
- name: Prepare build environment
run: |
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
direnv allow
nix develop .#all-features --command true
@ -249,7 +249,7 @@ jobs:
CARGO_DEB_TARGET_TUPLE=$(echo ${{ matrix.target }} | grep -o -E '^([^-]*-){3}[^-]*')
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
bin/nix-build-and-cache just .#static-${{ matrix.target }}
bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features
mkdir -v -p target/release/
mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
cp -v -f result/bin/conduit target/release/conduwuit
@ -276,7 +276,7 @@ jobs:
- name: Build OCI image ${{ matrix.target }}
run: |
bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}
bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features
cp -v -f result oci-image-${{ matrix.target }}.tar.gz
- name: Upload OCI image ${{ matrix.target }}
@ -296,15 +296,15 @@ jobs:
DOCKER_ARM64: docker.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8
DOCKER_AMD64: docker.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64
DOCKER_TAG: docker.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
DOCKER_BRANCH: docker.io/${{ github.repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
DOCKER_BRANCH: docker.io/${{ github.repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
GHCR_ARM64: ghcr.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8
GHCR_AMD64: ghcr.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64
GHCR_TAG: ghcr.io/${{ github.repository }}:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
GHCR_BRANCH: ghcr.io/${{ github.repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
GHCR_BRANCH: ghcr.io/${{ github.repository }}:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
GLCR_ARM64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-arm64v8
GLCR_AMD64: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}-amd64
GLCR_TAG: registry.gitlab.com/conduwuit/conduwuit:${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
GLCR_BRANCH: registry.gitlab.com/conduwuit/conduwuit:${{ (startsWith(github.ref, 'refs/tags/v') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
GLCR_BRANCH: registry.gitlab.com/conduwuit/conduwuit:${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}

View file

@ -21,8 +21,11 @@ env:
# Custom nix binary cache if fork is being used
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }}
# Get error output from nix that we can actually use
NIX_CONFIG: show-trace = true
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
NIX_CONFIG: |
show-trace = true
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
@ -98,7 +101,7 @@ jobs:
- name: Prepare build environment
run: |
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
direnv allow
nix develop --command true

View file

@ -26,7 +26,7 @@ jobs:
uses: actions/checkout@v4
- name: Run Trivy code and vulnerability scanner on repo
uses: aquasecurity/trivy-action@0.23.0
uses: aquasecurity/trivy-action@0.24.0
with:
scan-type: repo
format: sarif
@ -34,7 +34,7 @@ jobs:
severity: CRITICAL,HIGH,MEDIUM,LOW
- name: Run Trivy code and vulnerability scanner on filesystem
uses: aquasecurity/trivy-action@0.23.0
uses: aquasecurity/trivy-action@0.24.0
with:
scan-type: fs
format: sarif

View file

@ -58,7 +58,7 @@ before_script:
ci:
stage: ci
image: nixos/nix:2.23.1
image: nixos/nix:2.23.3
script:
# Cache CI dependencies
- ./bin/nix-build-and-cache ci
@ -83,7 +83,7 @@ ci:
artifacts:
stage: artifacts
image: nixos/nix:2.23.1
image: nixos/nix:2.23.3
script:
- ./bin/nix-build-and-cache just .#static-x86_64-unknown-linux-musl
- cp result/bin/conduit x86_64-unknown-linux-musl

337
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -20,11 +20,14 @@ license = "Apache-2.0"
readme = "README.md"
repository = "https://github.com/girlbossceo/conduwuit"
rust-version = "1.77.0"
version = "0.4.4"
version = "0.4.5"
[workspace.metadata.crane]
name = "conduit"
[workspace.dependencies.const-str]
version = "0.5.7"
[workspace.dependencies.sanitize-filename]
version = "0.5.0"
@ -50,7 +53,7 @@ version = "0.8.5"
# Used for the http request / response body type for Ruma endpoints used with reqwest
[workspace.dependencies.bytes]
version = "1.6.0"
version = "1.6.1"
[workspace.dependencies.http-body-util]
version = "0.1.1"
@ -113,7 +116,7 @@ features = [
]
[workspace.dependencies.serde]
version = "1.0.203"
version = "1.0.204"
features = ["rc"]
[workspace.dependencies.serde_json]
@ -169,7 +172,7 @@ default-features = false
# used for conduit's CLI and admin room command parsing
[workspace.dependencies.clap]
version = "4.5.4"
version = "4.5.9"
default-features = false
features = [
"std",
@ -197,6 +200,9 @@ features = [
"io-util",
]
[workspace.dependencies.tokio-metrics]
version = "0.3.1"
[workspace.dependencies.libloading]
version = "0.8.3"
@ -208,7 +214,7 @@ features = ["serde"]
# standard date and time tools
[workspace.dependencies.chrono]
version = "0.4.38"
features = ["alloc"]
features = ["alloc", "std"]
default-features = false
[workspace.dependencies.hyper]
@ -245,7 +251,7 @@ default-features = false
# Used for conduit::Error type
[workspace.dependencies.thiserror]
version = "1.0.61"
version = "1.0.62"
# Used when hashing the state
[workspace.dependencies.ring]
@ -265,7 +271,7 @@ version = "2.1.1"
version = "0.3.1"
[workspace.dependencies.async-trait]
version = "0.1.80"
version = "0.1.81"
[workspace.dependencies.lru-cache]
version = "0.1.2"
@ -303,7 +309,7 @@ features = [
[workspace.dependencies.ruma-identifiers-validation]
git = "https://github.com/girlbossceo/ruwuma"
rev = "fd686e77950680462377c9105dfb4136dd49c7a0"
rev = "c51ccb2c68d2e3557eb12b1a49036531711ec0e5"
[workspace.dependencies.rust-rocksdb]
path = "deps/rust-rocksdb"
@ -311,7 +317,6 @@ package = "rust-rocksdb-uwu"
features = [
"multi-threaded-cf",
"mt_static",
"snappy",
"lz4",
"zstd",
"zlib",
@ -380,10 +385,6 @@ version = "0.5.4"
default-features = false
features = ["use_std"]
[workspace.dependencies.tokio-metrics]
version = "0.3.1"
default-features = false
[workspace.dependencies.console-subscriber]
version = "0.3"
@ -405,11 +406,15 @@ features = [
[workspace.dependencies.rustyline-async]
version = "0.4.2"
default-features = false
[workspace.dependencies.termimad]
version = "0.29.4"
default-features = false
[workspace.dependencies.checked_ops]
version = "0.1"
#
# Patches
@ -420,16 +425,16 @@ default-features = false
# https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
[patch.crates-io.tracing-subscriber]
git = "https://github.com/girlbossceo/tracing"
rev = "b348dca742af641c47bc390261f60711c2af573c"
rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91"
[patch.crates-io.tracing]
git = "https://github.com/girlbossceo/tracing"
rev = "b348dca742af641c47bc390261f60711c2af573c"
rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91"
[patch.crates-io.tracing-core]
git = "https://github.com/girlbossceo/tracing"
rev = "b348dca742af641c47bc390261f60711c2af573c"
rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91"
[patch.crates-io.tracing-log]
git = "https://github.com/girlbossceo/tracing"
rev = "b348dca742af641c47bc390261f60711c2af573c"
rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91"
# fixes hyper graceful shutdowns [https://github.com/programatik29/axum-server/issues/114]
# https://github.com/girlbossceo/axum-server/commit/8e3368d899079818934e61cc9c839abcbbcada8a
@ -437,6 +442,12 @@ rev = "b348dca742af641c47bc390261f60711c2af573c"
git = "https://github.com/girlbossceo/axum-server"
rev = "8e3368d899079818934e61cc9c839abcbbcada8a"
# adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50
# adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b
[patch.crates-io.rustyline-async]
git = "https://github.com/girlbossceo/rustyline-async"
rev = "de26100b0db03e419a3d8e1dd26895d170d1fe50"
#
# Our crates
#
@ -726,7 +737,6 @@ nursery = "warn"
## some sadness
missing_const_for_fn = { level = "allow", priority = 1 } # TODO
needless_collect = { level = "allow", priority = 1 } # TODO
option_if_let_else = { level = "allow", priority = 1 } # TODO
redundant_pub_crate = { level = "allow", priority = 1 } # TODO
significant_drop_in_scrutinee = { level = "allow", priority = 1 } # TODO
@ -736,21 +746,14 @@ significant_drop_tightening = { level = "allow", priority = 1 } # TODO
pedantic = "warn"
## some sadness
cast_possible_truncation = { level = "allow", priority = 1 }
cast_precision_loss = { level = "allow", priority = 1 }
cast_sign_loss = { level = "allow", priority = 1 }
doc_markdown = { level = "allow", priority = 1 }
error_impl_error = { level = "allow", priority = 1 }
expect_used = { level = "allow", priority = 1 }
enum_glob_use = { level = "allow", priority = 1 }
if_not_else = { level = "allow", priority = 1 }
if_then_some_else_none = { level = "allow", priority = 1 }
implicit_return = { level = "allow", priority = 1 }
inline_always = { level = "allow", priority = 1 }
map_err_ignore = { level = "allow", priority = 1 }
missing_docs_in_private_items = { level = "allow", priority = 1 }
missing_errors_doc = { level = "allow", priority = 1 }
missing_panics_doc = { level = "allow", priority = 1 }
mod_module_files = { level = "allow", priority = 1 }
module_name_repetitions = { level = "allow", priority = 1 }
no_effect_underscore_binding = { level = "allow", priority = 1 }
similar_names = { level = "allow", priority = 1 }
@ -764,8 +767,10 @@ perf = "warn"
###################
#restriction = "warn"
#arithmetic_side_effects = "warn" # TODO
#as_conversions = "warn" # TODO
allow_attributes = "warn"
arithmetic_side_effects = "warn"
as_conversions = "warn"
as_underscore = "warn"
assertions_on_result_states = "warn"
dbg_macro = "warn"
default_union_representation = "warn"
@ -779,7 +784,6 @@ fn_to_numeric_cast_any = "warn"
format_push_string = "warn"
get_unwrap = "warn"
impl_trait_in_params = "warn"
let_underscore_must_use = "warn"
let_underscore_untyped = "warn"
lossy_float_literal = "warn"
mem_forget = "warn"
@ -793,6 +797,7 @@ rest_pat_in_fully_bound_structs = "warn"
semicolon_outside_block = "warn"
str_to_string = "warn"
string_lit_chars_any = "warn"
string_slice = "warn"
string_to_string = "warn"
suspicious_xor_used_as_pow = "warn"
tests_outside_test_module = "warn"
@ -803,6 +808,7 @@ unnecessary_safety_doc = "warn"
unnecessary_self_imports = "warn"
unneeded_field_pattern = "warn"
unseparated_literal_suffix = "warn"
#unwrap_used = "warn" # TODO
verbose_file_reads = "warn"
###################

View file

@ -7,7 +7,7 @@ set -euo pipefail
# The `COMPLEMENT_SRC` environment variable is set in the Nix dev shell, which
# points to a store path containing the Complement source code. It's likely you
# want to just pass that as the first argument to use it here.
COMPLEMENT_SRC="$1"
COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}"
# A `.jsonl` file to write test logs to
LOG_FILE="$2"
@ -17,12 +17,19 @@ RESULTS_FILE="$3"
OCI_IMAGE="complement-conduit:main"
# Complement tests that are skipped due to flakiness/reliability issues (likely
# Complement itself induced based on various open issues)
#
# According to Go docs, these are separated by forward slashes and not pipes (why)
# Complement tests that are skipped due to flakiness/reliability issues
SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*'
# $COMPLEMENT_SRC needs to be a directory to Complement source code
if [ -f "$COMPLEMENT_SRC" ]; then
echo "\$COMPLEMENT_SRC must be a directory/path to Complement source code"
exit 1
fi
# quick test to make sure we can actually write to $LOG_FILE and $RESULTS_FILE
touch $LOG_FILE && rm -v $LOG_FILE
touch $RESULTS_FILE && rm -v $RESULTS_FILE
toplevel="$(git rev-parse --show-toplevel)"
pushd "$toplevel" > /dev/null

View file

@ -57,6 +57,16 @@
# Defaults to 0.15
#sentry_traces_sample_rate = 0.15
# Whether to attach a stacktrace to Sentry reports.
#sentry_attach_stacktrace = false
# Send panics to sentry. This is true by default, but sentry has to be enabled.
#sentry_send_panic = true
# Send errors to sentry. This is true by default, but sentry has to be enabled. This option is
# only effective in release-mode; forced to false in debug-mode.
#sentry_send_error = true
### Database configuration
@ -411,8 +421,11 @@ allow_profile_lookup_federation_requests = true
# Set this to any float value to multiply conduwuit's in-memory LRU caches with.
# May be useful if you have significant memory to spare to increase performance.
#
# This was previously called `conduit_cache_capacity_modifier`
#
# Defaults to 1.0.
#conduit_cache_capacity_modifier = 1.0
#cache_capacity_modifier = 1.0
# Set this to any float value in megabytes for conduwuit to tell the database engine that this much memory is available for database-related caches.
# May be useful if you have significant memory to spare to increase performance.

View file

@ -10,7 +10,7 @@ repository.workspace = true
version = "0.0.1"
[features]
default = ["snappy", "lz4", "zstd", "zlib", "bzip2"]
default = ["lz4", "zstd", "zlib", "bzip2"]
jemalloc = ["rust-rocksdb/jemalloc"]
io-uring = ["rust-rocksdb/io-uring"]
valgrind = ["rust-rocksdb/valgrind"]
@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"]
[dependencies.rust-rocksdb]
git = "https://github.com/zaidoon1/rust-rocksdb"
rev = "b4887edfb84771336930855727390edec07d63fa"
rev = "4056a3b0f823013fec49f6d0b3e5698856e6476a"
#branch = "master"
default-features = false

View file

@ -1,5 +1,4 @@
# conduwuit - Behind Traefik Reverse Proxy
version: '2.4' # uses '2.4' for cpuset
services:
homeserver:
@ -24,7 +23,7 @@ services:
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
#CONDUWUIT_LOG: warn,state_res=warn
CONDUWUIT_ADDRESS: 0.0.0.0
#CONDUWUIT_CONFIG: './conduwuit.toml' # Uncomment if you mapped config toml above
#CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
#cpuset: "0-4" # Uncomment to limit to specific CPU cores
# We need some way to server the client and server .well-known json. The simplest way is to use a nginx container

View file

@ -1,5 +1,4 @@
# conduwuit - Traefik Reverse Proxy Labels
version: '2.4' # uses '2.4' for cpuset
services:
homeserver:

View file

@ -0,0 +1,54 @@
services:
caddy:
# This compose file uses caddy-docker-proxy as the reverse proxy for conduwuit!
# For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy
image: lucaslorentz/caddy-docker-proxy:ci-alpine
ports:
- 80:80
- 443:443
environment:
- CADDY_INGRESS_NETWORKS=caddy
networks:
- caddy
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./data:/data
restart: unless-stopped
labels:
caddy: example.com
caddy.0_respond: /.well-known/matrix/server {"m.server":"matrix.example.com:443"}
caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}}
homeserver:
### If you already built the conduwuit image with 'docker build' or want to use a registry image,
### then you are ready to go.
image: girlbossceo/conduwuit:latest
restart: unless-stopped
volumes:
- db:/var/lib/conduwuit
#- ./conduwuit.toml:/etc/conduwuit.toml
environment:
CONDUWUIT_SERVER_NAME: example.com # EDIT THIS
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
CONDUWUIT_DATABASE_BACKEND: rocksdb
CONDUWUIT_PORT: 6167
CONDUWUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
CONDUWUIT_ALLOW_REGISTRATION: 'true'
CONDUWUIT_ALLOW_FEDERATION: 'true'
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
#CONDUWUIT_LOG: warn,state_res=warn
CONDUWUIT_ADDRESS: 0.0.0.0
#CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
networks:
- caddy
labels:
caddy: matrix.example.com
caddy.reverse_proxy: "{{upstreams 6167}}"
volumes:
db:
networks:
caddy:
external: true

View file

@ -1,5 +1,4 @@
# conduwuit - Behind Traefik Reverse Proxy
version: '2.4' # uses '2.4' for cpuset
services:
homeserver:
@ -16,7 +15,7 @@ services:
CONDUWUIT_SERVER_NAME: your.server.name # EDIT THIS
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
CONDUWUIT_ALLOW_REGISTRATION : 'true'
#CONDUWUIT_CONFIG: './conduwuit.toml' # Uncomment if you mapped config toml above
#CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
### Uncomment and change values as desired
# CONDUWUIT_ADDRESS: 0.0.0.0
# CONDUWUIT_PORT: 6167
@ -28,7 +27,6 @@ services:
# CONDUWUIT_DATABASE_PATH: /srv/conduwuit/.local/share/conduwuit
# CONDUWUIT_WORKERS: 10
# CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
#cpuset: "0-4" # Uncomment to limit to specific CPU cores
# We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
# to serve those two as static files. If you want to use a different way, delete or comment the below service, here

View file

@ -1,5 +1,4 @@
# conduwuit
version: '2.4' # uses '2.4' for cpuset
services:
homeserver:
@ -24,8 +23,7 @@ services:
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
#CONDUWUIT_LOG: warn,state_res=warn
CONDUWUIT_ADDRESS: 0.0.0.0
#CONDUWUIT_CONFIG: './conduwuit.toml' # Uncomment if you mapped config toml above
#cpuset: "0-4" # Uncomment to limit to specific CPU cores
#CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
#
### Uncomment if you want to use your own Element-Web App.
### Note: You need to provide a config.json for Element and you also need a second

View file

@ -59,13 +59,22 @@ If the `docker run` command is not for you or your setup, you can also use one o
Depending on your proxy setup, you can use one of the following files;
- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)
- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)
- If you don't have a `traefik` instance set up and would like to use it, use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)
- If you want a setup that works out of the box with `caddy-docker-proxy`, use [`docker-compose.with-caddy.yml`](docker-compose.with-caddy.yml) and replace all `example.com` placeholders with your own domain
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)
When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
for your server.
When picking the `caddy-docker-proxy` compose file, it's important to first create the `caddy` network before spinning up the containers:
```bash
docker network create caddy
```
After that, you can rename it so it matches `docker-compose.yml` and spin up the containers!
Additional info about deploying conduwuit can be found [here](generic.md).
### Build

View file

@ -23,7 +23,7 @@ Otherwise, follow standard Rust project build guides (installing git and cloning
While conduwuit can run as any user it is better to use dedicated users for different services. This also allows
you to make sure that the file permissions are correctly set up.
In Debian or RHEL, you can use this command to create a conduwuit user:
In Debian or Fedora/RHEL, you can use this command to create a conduwuit user:
```bash
sudo adduser --system conduwuit --group --disabled-login --no-create-home
@ -53,13 +53,11 @@ RocksDB is the only supported database backend.
## Setting the correct file permissions
If you are using a dedicated user for conduwuit, you will need to allow it to read the config. To do that you can run this command on
Debian or RHEL:
If you are using a dedicated user for conduwuit, you will need to allow it to read the config. To do that you can run this:
```bash
sudo chown -R root:root /etc/conduwuit
sudo chmod 755 /etc/conduwuit
sudo chmod -R 755 /etc/conduwuit
```
If you use the default database path you also need to run this:

View file

@ -184,5 +184,10 @@ cargo test \
name = "nix-default"
group = "tests"
script = """
nix run .#default -- --help
env DIRENV_DEVSHELL=dynamic \
direnv exec . \
bin/nix-build-and-cache just .#default
env DIRENV_DEVSHELL=dynamic \
direnv exec . \
nix run -L .#default -- --help
"""

50
flake.lock generated
View file

@ -9,11 +9,11 @@
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1717279440,
"narHash": "sha256-kH04ReTjxOpQumgWnqy40vvQLSnLGxWP6RF3nq5Esrk=",
"lastModified": 1720542474,
"narHash": "sha256-aKjJ/4l2I9+wNGTaOGRsuS3M1+IoTibqgEMPDikXm04=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "717cc95983cdc357bc347d70be20ced21f935843",
"rev": "6139576a3ce6bb992e0f6c3022528ec233e45f00",
"type": "github"
},
"original": {
@ -81,11 +81,11 @@
"complement": {
"flake": false,
"locked": {
"lastModified": 1719903368,
"narHash": "sha256-PPzgxM4Bir+Zh9FUV/v+RBxEYeJxYVmi/BYo3uqt268=",
"lastModified": 1720637557,
"narHash": "sha256-oZz6nCmFmdJZpC+K1iOG2KkzTI6rlAmndxANPDVU7X0=",
"owner": "matrix-org",
"repo": "complement",
"rev": "bc97f1ddc1cd7485faf80c8935ee2641f3e1b57c",
"rev": "0d14432e010482ea9e13a6f7c47c1533c0c9d62f",
"type": "github"
},
"original": {
@ -123,11 +123,11 @@
]
},
"locked": {
"lastModified": 1716569590,
"narHash": "sha256-5eDbq8TuXFGGO3mqJFzhUbt5zHVTf5zilQoyW5jnJwo=",
"lastModified": 1720546058,
"narHash": "sha256-iU2yVaPIZm5vMGdlT0+57vdB/aPq/V5oZFBRwYw+HBM=",
"owner": "ipetkov",
"repo": "crane",
"rev": "109987da061a1bf452f435f1653c47511587d919",
"rev": "2d83156f23c43598cf44e152c33a59d3892f8b29",
"type": "github"
},
"original": {
@ -209,11 +209,11 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1716359173,
"narHash": "sha256-pYcjP6Gy7i6jPWrjiWAVV0BCQp+DdmGaI/k65lBb/kM=",
"lastModified": 1720852044,
"narHash": "sha256-3NBYz8VuXuKU+8ONd9NFafCNjPEGHIZQ2Mdoam1a4mY=",
"owner": "nix-community",
"repo": "fenix",
"rev": "b6fc5035b28e36a98370d0eac44f4ef3fd323df6",
"rev": "5087b12a595ee73131a944d922f24d81dae05725",
"type": "github"
},
"original": {
@ -381,11 +381,11 @@
"liburing": {
"flake": false,
"locked": {
"lastModified": 1719025212,
"narHash": "sha256-kD0yhjNStqC6uFqC1AxBwUpc/HlSFtiKrV+gwDyroDc=",
"lastModified": 1720798442,
"narHash": "sha256-gtPppAoksMLW4GuruQ36nf4EAqIA1Bs6V9Xcx8dBxrQ=",
"owner": "axboe",
"repo": "liburing",
"rev": "7b3245583069bd481190c9da18f22e9fc8c3a805",
"rev": "1d674f83b7d0f07553ac44d99a401b05853d9dbe",
"type": "github"
},
"original": {
@ -606,11 +606,11 @@
},
"nixpkgs_4": {
"locked": {
"lastModified": 1716330097,
"narHash": "sha256-8BO3B7e3BiyIDsaKA0tY8O88rClYRTjvAp66y+VBUeU=",
"lastModified": 1720768451,
"narHash": "sha256-EYekUHJE2gxeo2pM/zM9Wlqw1Uw2XTJXOSAO79ksc4Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5710852ba686cc1fd0d3b8e22b3117d43ba374c2",
"rev": "7e7c39ea35c5cdd002cd4588b03a3fb9ece6fad9",
"type": "github"
},
"original": {
@ -673,16 +673,16 @@
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1719949653,
"narHash": "sha256-DYx7XHH2GEh17GukKhXs6laM6l+eugCmRkF0adpi9wk=",
"lastModified": 1720900786,
"narHash": "sha256-Vta9Um/RRuWwZ46BjXftV06iWLm/j/9MX39emXUvSAY=",
"owner": "girlbossceo",
"repo": "rocksdb",
"rev": "a935c0273e1ba44eacf88ce3685a9b9831486155",
"rev": "911f4243e69c2e320a7a209bf1f5f3ff5f825495",
"type": "github"
},
"original": {
"owner": "girlbossceo",
"ref": "v9.3.1",
"ref": "v9.4.0",
"repo": "rocksdb",
"type": "github"
}
@ -705,11 +705,11 @@
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1716107283,
"narHash": "sha256-NJgrwLiLGHDrCia5AeIvZUHUY7xYGVryee0/9D3Ir1I=",
"lastModified": 1720717809,
"narHash": "sha256-6I+fm+nTLF/iaj7ffiFGlSY7POmubwUaPA/Wq0Bm53M=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "21ec8f523812b88418b2bfc64240c62b3dd967bd",
"rev": "ffbc5ad993d5cd2f3b8bcf9a511165470944ab91",
"type": "github"
},
"original": {

104
flake.nix
View file

@ -9,8 +9,7 @@
flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
# https://github.com/girlbossceo/rocksdb/commit/db6df0b185774778457dabfcbd822cb81760cade
rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.3.1"; flake = false; };
rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.4.0"; flake = false; };
liburing = { url = "github:axboe/liburing?ref=master"; flake = false; };
};
@ -42,6 +41,37 @@
"v"
(builtins.fromJSON (builtins.readFile ./flake.lock))
.nodes.rocksdb.original.ref;
# we have this already at https://github.com/girlbossceo/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# unsetting this so i don't have to revert it and make this nix exclusive
patches = [];
cmakeFlags = pkgs.lib.subtractLists
[
# no real reason to have snappy, no one uses this
"-DWITH_SNAPPY=1"
# we dont need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=1"
# we dont need to build rocksdb tests
"-DWITH_TESTS=1"
# we use rust-rocksdb via C interface and dont need C++ RTTI
"-DUSE_RTTI=1"
]
old.cmakeFlags
++ [
# we dont need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=0"
# we dont need trace tools
"-DWITH_TRACE_TOOLS=0"
# we dont need to build rocksdb tests
"-DWITH_TESTS=0"
# we use rust-rocksdb via C interface and dont need C++ RTTI
"-DUSE_RTTI=0"
];
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
# TODO: remove once https://github.com/NixOS/nixpkgs/pull/314945 is available
liburing = pkgs.liburing.overrideAttrs (old: {
@ -50,16 +80,6 @@
configureFlags = pkgs.lib.subtractLists
[ "--enable-static" "--disable-shared" ]
old.configureFlags;
postInstall = old.postInstall + ''
# we remove the extra outputs
#
# we need to do this to prevent rocksdb from trying to link the
# static library in a dynamic stdenv
rm $out/lib/liburing*${
if pkgs.stdenv.hostPlatform.isStatic then ".so*" else ".a"
}
'';
});
});
@ -124,9 +144,29 @@
{
packages = {
default = scopeHost.main;
all-features = scopeHost.main.override {
all_features = true;
disable_features = [
# this is non-functional on nix for some reason
"hardened_malloc"
# dont include experimental features
"experimental"
];
};
hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; };
oci-image = scopeHost.oci-image;
oci-image-all-features = scopeHost.oci-image.override {
main = scopeHost.main.override {
all_features = true;
disable_features = [
# this is non-functional on nix for some reason
"hardened_malloc"
# dont include experimental features
"experimental"
];
};
};
oci-image-hmalloc = scopeHost.oci-image.override {
main = scopeHost.main.override {
features = ["hardened_malloc"];
@ -161,6 +201,20 @@
value = scopeCrossStatic.main;
}
# An output for a statically-linked binary with `--all-features`
{
name = "${binaryName}-all-features";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# this is non-functional on nix for some reason
"hardened_malloc"
# dont include experimental features
"experimental"
];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
@ -175,6 +229,22 @@
value = scopeCrossStatic.oci-image;
}
# An output for an OCI image based on that binary with `--all-features`
{
name = "oci-image-${crossSystem}-all-features";
value = scopeCrossStatic.oci-image.override {
main = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# this is non-functional on nix for some reason
"hardened_malloc"
# dont include experimental features
"experimental"
];
};
};
}
# An output for an OCI image based on that binary with hardened_malloc
{
name = "oci-image-${crossSystem}-hmalloc";
@ -196,7 +266,15 @@
devShells.default = mkDevShell scopeHostStatic;
devShells.all-features = mkDevShell
(scopeHostStatic.overrideScope (final: prev: {
main = prev.main.override { all_features = true; };
main = prev.main.override {
all_features = true;
disable_features = [
# this is non-functional on nix for some reason
"hardened_malloc"
# dont include experimental features
"experimental"
];
};
}));
devShells.no-features = mkDevShell
(scopeHostStatic.overrideScope (final: prev: {

View file

@ -5,13 +5,17 @@ allow_guest_registration = true
allow_public_room_directory_over_federation = true
allow_public_room_directory_without_auth = true
allow_registration = true
allow_unstable_room_versions = true
database_backend = "rocksdb"
database_path = "/database"
log = "trace"
log = "trace,h2=warn,hyper=warn"
port = [8008, 8448]
trusted_servers = []
query_trusted_key_servers_first = false
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true
ip_range_denylist = []
url_preview_domain_contains_allowlist = ["*"]
media_compat_file_link = false
media_statup_check = false
rocksdb_direct_io = false
[global.tls]
certs = "/certificate.crt"

View file

@ -13,6 +13,12 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
lib.concatStringsSep
" "
([]
++ lib.optionals
stdenv.targetPlatform.isx86_64
[ "-C" "target-cpu=x86-64-v2" ]
++ lib.optionals
stdenv.targetPlatform.isAarch64
[ "-C" "target-cpu=cortex-a55" ] # cortex-a55 == ARMv8.2-a
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from

View file

@ -25,11 +25,7 @@ let
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features) ++
lib.attrNames
(lib.filterAttrs
(_: dependency: dependency.optional or false)
manifest.dependencies);
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
@ -43,7 +39,7 @@ features'' = lib.subtractLists disable_features' features';
featureEnabled = feature : builtins.elem feature features'';
enableLiburing = featureEnabled "io_uring" && stdenv.isLinux;
enableLiburing = featureEnabled "io_uring" && !stdenv.isDarwin;
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building it's
@ -70,12 +66,34 @@ buildDepsOnlyEnv =
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
enableLiburing = enableLiburing;
}).overrideAttrs (old: {
# TODO: static rocksdb fails to build on darwin
# TODO: static rocksdb fails to build on darwin, also see <https://github.com/NixOS/nixpkgs/issues/320448>
# build log at <https://girlboss.ceo/~strawberry/pb/JjGH>
meta.broken = stdenv.hostPlatform.isStatic && stdenv.isDarwin;
# TODO: switch to enableUring option once https://github.com/NixOS/nixpkgs/pull/314945 is available
buildInputs = old.buildInputs ++ lib.optional enableLiburing liburing;
enableLiburing = enableLiburing;
sse42Support = stdenv.targetPlatform.isx86_64;
cmakeFlags = if stdenv.targetPlatform.isx86_64
then lib.subtractLists [ "-DPORTABLE=1" ] old.cmakeFlags
++ lib.optionals stdenv.targetPlatform.isx86_64 [
"-DPORTABLE=x86-64-v2"
"-DUSE_SSE=1"
"-DHAVE_SSE=1"
"-DHAVE_SSE42=1"
]
else if stdenv.targetPlatform.isAarch64
then lib.subtractLists [ "-DPORTABLE=1" ] old.cmakeFlags
++ lib.optionals stdenv.targetPlatform.isAarch64 [
# cortex-a55 == ARMv8.2-a
"-DPORTABLE=armv8.2-a"
]
else old.cmakeFlags;
});
in
{
@ -102,7 +120,11 @@ buildPackageEnv = {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring";
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString stdenv.targetPlatform.isx86_64
" -Ctarget-cpu=x86-64-v2"
+ lib.optionalString stdenv.targetPlatform.isAarch64
" -Ctarget-cpu=cortex-a55"; # cortex-a55 == ARMv8.2-a
};
@ -127,6 +149,8 @@ commonAttrs = {
];
};
dontStrip = profile == "dev";
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
nativeBuildInputs = [

View file

@ -29,15 +29,12 @@ release_max_log_level = [
clap.workspace = true
conduit-api.workspace = true
conduit-core.workspace = true
conduit-database.workspace = true
conduit-service.workspace = true
const-str.workspace = true
futures-util.workspace = true
log.workspace = true
loole.workspace = true
regex.workspace = true
ruma.workspace = true
serde_json.workspace = true
serde.workspace = true
serde_yaml.workspace = true
tokio.workspace = true
tracing-subscriber.workspace = true

View file

@ -15,7 +15,7 @@ use ruma::{
events::room::message::RoomMessageEventContent,
CanonicalJsonObject, EventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName,
};
use service::{rooms::event_handler::parse_incoming_pdu, sending::resolve::resolve_actual_dest, services, PduEvent};
use service::{rooms::event_handler::parse_incoming_pdu, sending::resolve_actual_dest, services, PduEvent};
use tokio::sync::RwLock;
use tracing_subscriber::EnvFilter;
@ -58,7 +58,7 @@ pub(super) async fn parse_pdu(body: Vec<&str>) -> Result<RoomMessageEventContent
));
}
let string = body[1..body.len() - 1].join("\n");
let string = body[1..body.len().saturating_sub(1)].join("\n");
match serde_json::from_str(&string) {
Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
Ok(hash) => {
@ -314,6 +314,8 @@ pub(super) async fn force_device_list_updates(_body: Vec<&str>) -> Result<RoomMe
pub(super) async fn change_log_level(
_body: Vec<&str>, filter: Option<String>, reset: bool,
) -> Result<RoomMessageEventContent> {
let handles = &["console"];
if reset {
let old_filter_layer = match EnvFilter::try_new(&services().globals.config.log) {
Ok(s) => s,
@ -324,7 +326,12 @@ pub(super) async fn change_log_level(
},
};
match services().server.log.reload.reload(&old_filter_layer) {
match services()
.server
.log
.reload
.reload(&old_filter_layer, Some(handles))
{
Ok(()) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Successfully changed log level back to config value {}",
@ -349,7 +356,12 @@ pub(super) async fn change_log_level(
},
};
match services().server.log.reload.reload(&new_filter_layer) {
match services()
.server
.log
.reload
.reload(&new_filter_layer, Some(handles))
{
Ok(()) => {
return Ok(RoomMessageEventContent::text_plain("Successfully changed log level"));
},
@ -570,7 +582,7 @@ pub(super) async fn force_set_room_state_from_server(
.state_compressor
.save_state(room_id.clone().as_ref(), new_room_state)?;
let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
let state_lock = services().rooms.state.mutex.lock(&room_id).await;
services()
.rooms
.state
@ -614,15 +626,16 @@ pub(super) async fn resolve_true_destination(
let state = &services().server.log.capture;
let logs = Arc::new(Mutex::new(String::new()));
let capture = Capture::new(state, Some(filter), capture::fmt_markdown(logs.clone()));
let (actual_dest, hostname_uri);
{
let _capture_scope = capture.start();
(actual_dest, hostname_uri) = resolve_actual_dest(&server_name, !no_cache).await?;
};
let capture_scope = capture.start();
let actual = resolve_actual_dest(&server_name, !no_cache).await?;
drop(capture_scope);
let msg = format!(
"{}\nDestination: {actual_dest}\nHostname URI: {hostname_uri}",
logs.lock().expect("locked")
"{}\nDestination: {}\nHostname URI: {}",
logs.lock().expect("locked"),
actual.dest,
actual.host,
);
Ok(RoomMessageEventContent::text_markdown(msg))
}
@ -631,12 +644,46 @@ pub(super) async fn resolve_true_destination(
pub(super) fn memory_stats() -> RoomMessageEventContent {
let html_body = conduit::alloc::memory_stats();
if html_body.is_empty() {
if html_body.is_none() {
return RoomMessageEventContent::text_plain("malloc stats are not supported on your compiled malloc.");
}
RoomMessageEventContent::text_html(
"This command's output can only be viewed by clients that render HTML.".to_owned(),
html_body,
html_body.expect("string result"),
)
}
#[cfg(tokio_unstable)]
pub(super) async fn runtime_metrics(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let out = services().server.metrics.runtime_metrics().map_or_else(
|| "Runtime metrics are not available.".to_owned(),
|metrics| format!("```rs\n{metrics:#?}\n```"),
);
Ok(RoomMessageEventContent::text_markdown(out))
}
#[cfg(not(tokio_unstable))]
pub(super) async fn runtime_metrics(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(RoomMessageEventContent::text_markdown(
"Runtime metrics require building with `tokio_unstable`.",
))
}
#[cfg(tokio_unstable)]
pub(super) async fn runtime_interval(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let out = services().server.metrics.runtime_interval().map_or_else(
|| "Runtime metrics are not available.".to_owned(),
|metrics| format!("```rs\n{metrics:#?}\n```"),
);
Ok(RoomMessageEventContent::text_markdown(out))
}
#[cfg(not(tokio_unstable))]
pub(super) async fn runtime_interval(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(RoomMessageEventContent::text_markdown(
"Runtime metrics require building with `tokio_unstable`.",
))
}

View file

@ -160,6 +160,13 @@ pub(super) enum DebugCommand {
/// - Print extended memory usage
MemoryStats,
/// - Print general tokio runtime metric totals.
RuntimeMetrics,
/// - Print detailed tokio runtime metrics accumulated since last command
/// invocation.
RuntimeInterval,
/// - Developer test stubs
#[command(subcommand)]
Tester(TesterCommand),
@ -213,6 +220,8 @@ pub(super) async fn process(command: DebugCommand, body: Vec<&str>) -> Result<Ro
no_cache,
} => resolve_true_destination(body, server_name, no_cache).await?,
DebugCommand::MemoryStats => memory_stats(),
DebugCommand::RuntimeMetrics => runtime_metrics(body).await?,
DebugCommand::RuntimeInterval => runtime_interval(body).await?,
DebugCommand::Tester(command) => tester::process(command, body).await?,
})
}

View file

@ -15,14 +15,19 @@ pub(super) async fn enable_room(_body: Vec<&str>, room_id: Box<RoomId>) -> Resul
}
pub(super) async fn incoming_federation(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let map = services().globals.roomid_federationhandletime.read().await;
let map = services()
.rooms
.event_handler
.federation_handletime
.read()
.expect("locked");
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
for (r, (e, i)) in map.iter() {
let elapsed = i.elapsed();
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60,)
.expect("should be able to write to string buffer");
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
}
Ok(RoomMessageEventContent::text_plain(&msg))
}

View file

@ -1,15 +1,19 @@
use std::time::Instant;
use std::{panic::AssertUnwindSafe, time::Instant};
use clap::Parser;
use conduit::trace;
use ruma::events::{
use clap::{CommandFactory, Parser};
use conduit::{error, trace, Error};
use futures_util::future::FutureExt;
use ruma::{
events::{
relation::InReplyTo,
room::message::{Relation::Reply, RoomMessageEventContent},
},
OwnedEventId,
};
extern crate conduit_service as service;
use conduit::Result;
use conduit::{utils::string::common_prefix, Result};
pub(crate) use service::admin::{Command, Service};
use service::admin::{CommandOutput, CommandResult, HandlerResult};
@ -20,7 +24,6 @@ use crate::{
};
pub(crate) const PAGE_SIZE: usize = 100;
#[cfg_attr(test, derive(Debug))]
#[derive(Parser)]
#[command(name = "admin", version = env!("CARGO_PKG_VERSION"))]
pub(crate) enum AdminCommand {
@ -62,25 +65,46 @@ pub(crate) enum AdminCommand {
}
#[must_use]
pub fn handle(command: Command) -> HandlerResult { Box::pin(handle_command(command)) }
pub(crate) fn handle(command: Command) -> HandlerResult { Box::pin(handle_command(command)) }
#[must_use]
pub(crate) fn complete(line: &str) -> String { complete_admin_command(AdminCommand::command(), line) }
#[tracing::instrument(skip_all, name = "admin")]
async fn handle_command(command: Command) -> CommandResult {
let Some(mut content) = process_admin_message(command.command).await else {
return Ok(None);
};
AssertUnwindSafe(process_command(&command))
.catch_unwind()
.await
.map_err(Error::from_panic)
.or_else(|error| handle_panic(&error, command))
}
content.relates_to = command.reply_id.map(|event_id| Reply {
async fn process_command(command: &Command) -> CommandOutput {
process_admin_message(&command.command)
.await
.and_then(|content| reply(content, command.reply_id.clone()))
}
fn handle_panic(error: &Error, command: Command) -> CommandResult {
let link = "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺";
let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}");
let content = RoomMessageEventContent::notice_markdown(msg);
error!("Panic while processing command: {error:?}");
Ok(reply(content, command.reply_id))
}
fn reply(mut content: RoomMessageEventContent, reply_id: Option<OwnedEventId>) -> Option<RoomMessageEventContent> {
content.relates_to = reply_id.map(|event_id| Reply {
in_reply_to: InReplyTo {
event_id,
},
});
Ok(Some(content))
Some(content)
}
// Parse and process a message from the admin room
async fn process_admin_message(msg: String) -> CommandOutput {
async fn process_admin_message(msg: &str) -> CommandOutput {
let mut lines = msg.lines().filter(|l| !l.trim().is_empty());
let command = lines.next().expect("each string has at least one line");
let body = lines.collect::<Vec<_>>();
@ -100,59 +124,11 @@ async fn process_admin_message(msg: String) -> CommandOutput {
match result {
Ok(reply) => Some(reply),
Err(error) => Some(RoomMessageEventContent::notice_markdown(format!(
"Encountered an error while handling the command:\n```\n{error}\n```"
"Encountered an error while handling the command:\n```\n{error:#?}\n```"
))),
}
}
// Parse chat messages from the admin room into an AdminCommand object
fn parse_admin_command(command_line: &str) -> Result<AdminCommand, String> {
let mut argv = command_line.split_whitespace().collect::<Vec<_>>();
// Remove any escapes that came with a server-side escape command
if !argv.is_empty() && argv[0].ends_with("admin") {
argv[0] = argv[0].trim_start_matches('\\');
}
// First indice has to be "admin" but for console convenience we add it here
let server_user = services().globals.server_user.as_str();
if !argv.is_empty() && !argv[0].ends_with("admin") && !argv[0].starts_with(server_user) {
argv.insert(0, "admin");
}
// Replace `help command` with `command --help`
// Clap has a help subcommand, but it omits the long help description.
if argv.len() > 1 && argv[1] == "help" {
argv.remove(1);
argv.push("--help");
}
// Backwards compatibility with `register_appservice`-style commands
let command_with_dashes_argv1;
if argv.len() > 1 && argv[1].contains('_') {
command_with_dashes_argv1 = argv[1].replace('_', "-");
argv[1] = &command_with_dashes_argv1;
}
// Backwards compatibility with `register_appservice`-style commands
let command_with_dashes_argv2;
if argv.len() > 2 && argv[2].contains('_') {
command_with_dashes_argv2 = argv[2].replace('_', "-");
argv[2] = &command_with_dashes_argv2;
}
// if the user is using the `query` command (argv[1]), replace the database
// function/table calls with underscores to match the codebase
let command_with_dashes_argv3;
if argv.len() > 3 && argv[1].eq("query") {
command_with_dashes_argv3 = argv[3].replace('_', "-");
argv[3] = &command_with_dashes_argv3;
}
trace!(?command_line, ?argv, "parse");
AdminCommand::try_parse_from(argv).map_err(|error| error.to_string())
}
#[tracing::instrument(skip_all, name = "command")]
async fn process_admin_command(command: AdminCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
let reply_message_content = match command {
@ -169,3 +145,97 @@ async fn process_admin_command(command: AdminCommand, body: Vec<&str>) -> Result
Ok(reply_message_content)
}
// Parse chat messages from the admin room into an AdminCommand object
fn parse_admin_command(command_line: &str) -> Result<AdminCommand, String> {
let argv = parse_command_line(command_line);
AdminCommand::try_parse_from(argv).map_err(|error| error.to_string())
}
fn complete_admin_command(mut cmd: clap::Command, line: &str) -> String {
let argv = parse_command_line(line);
let mut ret = Vec::<String>::with_capacity(argv.len().saturating_add(1));
'token: for token in argv.into_iter().skip(1) {
let cmd_ = cmd.clone();
let mut choice = Vec::new();
for sub in cmd_.get_subcommands() {
let name = sub.get_name();
if *name == token {
// token already complete; recurse to subcommand
ret.push(token);
cmd.clone_from(sub);
continue 'token;
} else if name.starts_with(&token) {
// partial match; add to choices
choice.push(name);
}
}
if choice.len() == 1 {
// One choice. Add extra space because it's complete
let choice = *choice.first().expect("only choice");
ret.push(choice.to_owned());
ret.push(String::new());
} else if choice.is_empty() {
// Nothing found, return original string
ret.push(token);
} else {
// Find the common prefix
ret.push(common_prefix(&choice).into());
}
// Return from completion
return ret.join(" ");
}
// Return from no completion. Needs a space though.
ret.push(String::new());
ret.join(" ")
}
// Parse chat messages from the admin room into an AdminCommand object
fn parse_command_line(command_line: &str) -> Vec<String> {
let mut argv = command_line
.split_whitespace()
.map(str::to_owned)
.collect::<Vec<String>>();
// Remove any escapes that came with a server-side escape command
if !argv.is_empty() && argv[0].ends_with("admin") {
argv[0] = argv[0].trim_start_matches('\\').into();
}
// First indice has to be "admin" but for console convenience we add it here
let server_user = services().globals.server_user.as_str();
if !argv.is_empty() && !argv[0].ends_with("admin") && !argv[0].starts_with(server_user) {
argv.insert(0, "admin".to_owned());
}
// Replace `help command` with `command --help`
// Clap has a help subcommand, but it omits the long help description.
if argv.len() > 1 && argv[1] == "help" {
argv.remove(1);
argv.push("--help".to_owned());
}
// Backwards compatibility with `register_appservice`-style commands
if argv.len() > 1 && argv[1].contains('_') {
argv[1] = argv[1].replace('_', "-");
}
// Backwards compatibility with `register_appservice`-style commands
if argv.len() > 2 && argv[2].contains('_') {
argv[2] = argv[2].replace('_', "-");
}
// if the user is using the `query` command (argv[1]), replace the database
// function/table calls with underscores to match the codebase
if argv.len() > 3 && argv[1].eq("query") {
argv[3] = argv[3].replace('_', "-");
}
trace!(?command_line, ?argv, "parse");
argv
}

View file

@ -9,6 +9,7 @@ pub(crate) mod media;
pub(crate) mod query;
pub(crate) mod room;
pub(crate) mod server;
mod tests;
pub(crate) mod user;
pub(crate) mod utils;
@ -17,7 +18,6 @@ extern crate conduit_core as conduit;
extern crate conduit_service as service;
pub(crate) use conduit::{mod_ctor, mod_dtor, Result};
pub use handler::handle;
pub(crate) use service::{services, user_is_local};
pub(crate) use crate::{
@ -28,29 +28,29 @@ pub(crate) use crate::{
mod_ctor! {}
mod_dtor! {}
#[cfg(test)]
mod test {
use clap::Parser;
use crate::handler::AdminCommand;
#[test]
fn get_help_short() { get_help_inner("-h"); }
#[test]
fn get_help_long() { get_help_inner("--help"); }
#[test]
fn get_help_subcommand() { get_help_inner("help"); }
fn get_help_inner(input: &str) {
let error = AdminCommand::try_parse_from(["argv[0] doesn't matter", input])
.unwrap_err()
.to_string();
// Search for a handful of keywords that suggest the help printed properly
assert!(error.contains("Usage:"));
assert!(error.contains("Commands:"));
assert!(error.contains("Options:"));
}
/// Install the admin command handler
pub async fn init() {
_ = services()
.admin
.complete
.write()
.expect("locked for writing")
.insert(handler::complete);
_ = services()
.admin
.handle
.write()
.await
.insert(handler::handle);
}
/// Uninstall the admin command handler
pub async fn fini() {
_ = services().admin.handle.write().await.take();
_ = services()
.admin
.complete
.write()
.expect("locked for writing")
.take();
}

View file

@ -26,7 +26,7 @@ pub(super) async fn globals(subcommand: Globals) -> Result<RoomMessageEventConte
},
Globals::LastCheckForUpdatesId => {
let timer = tokio::time::Instant::now();
let results = services().globals.db.last_check_for_updates_id();
let results = services().updates.last_check_for_updates_id();
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(

View file

@ -2,6 +2,7 @@ mod account_data;
mod appservice;
mod globals;
mod presence;
mod resolver;
mod room_alias;
mod room_state_cache;
mod sending;
@ -12,12 +13,12 @@ use conduit::Result;
use room_state_cache::room_state_cache;
use ruma::{
events::{room::message::RoomMessageEventContent, RoomAccountDataEventType},
RoomAliasId, RoomId, ServerName, UserId,
OwnedServerName, RoomAliasId, RoomId, ServerName, UserId,
};
use self::{
account_data::account_data, appservice::appservice, globals::globals, presence::presence, room_alias::room_alias,
sending::sending, users::users,
account_data::account_data, appservice::appservice, globals::globals, presence::presence, resolver::resolver,
room_alias::room_alias, sending::sending, users::users,
};
#[cfg_attr(test, derive(Debug))]
@ -55,6 +56,10 @@ pub(super) enum QueryCommand {
/// - users.rs iterators and getters
#[command(subcommand)]
Users(Users),
/// - resolver service
#[command(subcommand)]
Resolver(Resolver),
}
#[cfg_attr(test, derive(Debug))]
@ -287,6 +292,21 @@ pub(super) enum Users {
Iter,
}
#[cfg_attr(test, derive(Debug))]
#[derive(Subcommand)]
/// Resolver service and caches
pub(super) enum Resolver {
/// Query the destinations cache
DestinationsCache {
server_name: Option<OwnedServerName>,
},
/// Query the overrides cache
OverridesCache {
name: Option<String>,
},
}
/// Processes admin query commands
pub(super) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result<RoomMessageEventContent> {
Ok(match command {
@ -298,5 +318,6 @@ pub(super) async fn process(command: QueryCommand, _body: Vec<&str>) -> Result<R
QueryCommand::Globals(command) => globals(command).await?,
QueryCommand::Sending(command) => sending(command).await?,
QueryCommand::Users(command) => users(command).await?,
QueryCommand::Resolver(command) => resolver(command).await?,
})
}

View file

@ -0,0 +1,87 @@
use std::fmt::Write;
use conduit::{utils::time, Result};
use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName};
use super::Resolver;
use crate::services;
/// All the getters and iterators in key_value/users.rs
pub(super) async fn resolver(subcommand: Resolver) -> Result<RoomMessageEventContent> {
match subcommand {
Resolver::DestinationsCache {
server_name,
} => destinations_cache(server_name).await,
Resolver::OverridesCache {
name,
} => overrides_cache(name).await,
}
}
async fn destinations_cache(server_name: Option<OwnedServerName>) -> Result<RoomMessageEventContent> {
use service::sending::CachedDest;
let mut out = String::new();
writeln!(out, "| Server Name | Destination | Hostname | Expires |")?;
writeln!(out, "| ----------- | ----------- | -------- | ------- |")?;
let row = |(
name,
&CachedDest {
ref dest,
ref host,
expire,
},
)| {
let expire = time::format(expire, "%+");
writeln!(out, "| {name} | {dest} | {host} | {expire} |").expect("wrote line");
};
let map = services()
.globals
.resolver
.destinations
.read()
.expect("locked");
if let Some(server_name) = server_name.as_ref() {
map.get_key_value(server_name).map(row);
} else {
map.iter().for_each(row);
}
Ok(RoomMessageEventContent::notice_markdown(out))
}
async fn overrides_cache(server_name: Option<String>) -> Result<RoomMessageEventContent> {
use service::sending::CachedOverride;
let mut out = String::new();
writeln!(out, "| Server Name | IP | Port | Expires |")?;
writeln!(out, "| ----------- | --- | ----:| ------- |")?;
let row = |(
name,
&CachedOverride {
ref ips,
port,
expire,
},
)| {
let expire = time::format(expire, "%+");
writeln!(out, "| {name} | {ips:?} | {port} | {expire} |").expect("wrote line");
};
let map = services()
.globals
.resolver
.overrides
.read()
.expect("locked");
if let Some(server_name) = server_name.as_ref() {
map.get_key_value(server_name).map(row);
} else {
map.iter().for_each(row);
}
Ok(RoomMessageEventContent::notice_markdown(out))
}

View file

@ -16,6 +16,14 @@ pub(super) enum RoomCommand {
/// - List all rooms the server knows about
List {
page: Option<usize>,
/// Excludes rooms that we have federation disabled with
#[arg(long)]
exclude_disabled: bool,
/// Excludes rooms that we have banned
#[arg(long)]
exclude_banned: bool,
},
#[command(subcommand)]
@ -179,6 +187,8 @@ pub(super) async fn process(command: RoomCommand, body: Vec<&str>) -> Result<Roo
RoomCommand::List {
page,
} => list(body, page).await?,
exclude_disabled,
exclude_banned,
} => list(body, page, exclude_disabled, exclude_banned).await?,
})
}

View file

@ -1,18 +1,46 @@
use std::fmt::Write;
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId};
use ruma::events::room::message::RoomMessageEventContent;
use crate::{escape_html, get_room_info, handler::PAGE_SIZE, services, Result};
pub(super) async fn list(_body: Vec<&str>, page: Option<usize>) -> Result<RoomMessageEventContent> {
pub(super) async fn list(
_body: Vec<&str>, page: Option<usize>, exclude_disabled: bool, exclude_banned: bool,
) -> Result<RoomMessageEventContent> {
// TODO: i know there's a way to do this with clap, but i can't seem to find it
let page = page.unwrap_or(1);
let mut rooms = services()
.rooms
.metadata
.iter_ids()
.filter_map(Result::ok)
.map(|id: OwnedRoomId| get_room_info(&id))
.filter_map(|room_id| {
room_id
.ok()
.filter(|room_id| {
if exclude_disabled
&& services()
.rooms
.metadata
.is_disabled(room_id)
.unwrap_or(false)
{
return false;
}
if exclude_banned
&& services()
.rooms
.metadata
.is_banned(room_id)
.unwrap_or(false)
{
return false;
}
true
})
.map(|room_id| get_room_info(&room_id))
})
.collect::<Vec<_>>();
rooms.sort_by_key(|r| r.1);
rooms.reverse();

View file

@ -1,7 +1,5 @@
use api::client::leave_room;
use ruma::{
events::room::message::RoomMessageEventContent, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId,
};
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId};
use tracing::{debug, error, info, warn};
use super::{super::Service, RoomModerationCommand};
@ -124,9 +122,7 @@ async fn ban_room(
.is_admin(local_user)
.unwrap_or(true))
})
})
.collect::<Vec<OwnedUserId>>()
{
}) {
debug!(
"Attempting leave for user {} in room {} (forced, ignoring all errors, evicting admins too)",
&local_user, &room_id
@ -153,9 +149,7 @@ async fn ban_room(
.is_admin(local_user)
.unwrap_or(false))
})
})
.collect::<Vec<OwnedUserId>>()
{
}) {
debug!("Attempting leave for user {} in room {}", &local_user, &room_id);
if let Err(e) = leave_room(&local_user, &room_id, None).await {
error!(
@ -191,7 +185,10 @@ async fn ban_list_of_rooms(body: Vec<&str>, force: bool, disable_federation: boo
));
}
let rooms_s = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let rooms_s = body
.clone()
.drain(1..body.len().saturating_sub(1))
.collect::<Vec<_>>();
let admin_room_alias = &services().globals.admin_alias;
@ -332,9 +329,7 @@ async fn ban_list_of_rooms(body: Vec<&str>, force: bool, disable_federation: boo
.is_admin(local_user)
.unwrap_or(true))
})
})
.collect::<Vec<OwnedUserId>>()
{
}) {
debug!(
"Attempting leave for user {} in room {} (forced, ignoring all errors, evicting admins too)",
&local_user, room_id
@ -361,9 +356,7 @@ async fn ban_list_of_rooms(body: Vec<&str>, force: bool, disable_federation: boo
.is_admin(local_user)
.unwrap_or(false))
})
})
.collect::<Vec<OwnedUserId>>()
{
}) {
debug!("Attempting leave for user {} in room {}", &local_user, &room_id);
if let Err(e) = leave_room(&local_user, &room_id, None).await {
error!(

View file

@ -1,24 +1,17 @@
use conduit::{warn, Error, Result};
use conduit::{utils::time, warn, Err, Result};
use ruma::events::room::message::RoomMessageEventContent;
use crate::services;
pub(super) async fn uptime(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let seconds = services()
let elapsed = services()
.server
.started
.elapsed()
.expect("standard duration")
.as_secs();
let result = format!(
"up {} days, {} hours, {} minutes, {} seconds.",
seconds / 86400,
(seconds % 86400) / 60 / 60,
(seconds % 3600) / 60,
seconds % 60,
);
.expect("standard duration");
Ok(RoomMessageEventContent::notice_plain(result))
let result = time::pretty(elapsed);
Ok(RoomMessageEventContent::notice_plain(format!("{result}.")))
}
pub(super) async fn show_config(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
@ -27,28 +20,17 @@ pub(super) async fn show_config(_body: Vec<&str>) -> Result<RoomMessageEventCont
}
pub(super) async fn memory_usage(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
let response0 = services().memory_usage().await;
let response1 = services().globals.db.memory_usage();
let response2 = conduit::alloc::memory_usage();
let services_usage = services().memory_usage().await?;
let database_usage = services().db.db.memory_usage()?;
let allocator_usage = conduit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}"));
Ok(RoomMessageEventContent::text_plain(format!(
"Services:\n{response0}\n\nDatabase:\n{response1}\n{}",
if !response2.is_empty() {
format!("Allocator:\n {response2}")
} else {
String::new()
}
"Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}",
)))
}
pub(super) async fn clear_database_caches(_body: Vec<&str>, amount: u32) -> Result<RoomMessageEventContent> {
services().globals.db.clear_caches(amount);
Ok(RoomMessageEventContent::text_plain("Done."))
}
pub(super) async fn clear_service_caches(_body: Vec<&str>, amount: u32) -> Result<RoomMessageEventContent> {
services().clear_caches(amount).await;
pub(super) async fn clear_caches(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
services().clear_cache().await;
Ok(RoomMessageEventContent::text_plain("Done."))
}
@ -106,11 +88,10 @@ pub(super) async fn restart(_body: Vec<&str>, force: bool) -> Result<RoomMessage
use conduit::utils::sys::current_exe_deleted;
if !force && current_exe_deleted() {
return Err(Error::Err(
"The server cannot be restarted because the executable was tampered with. If this is expected use --force \
to override."
.to_owned(),
));
return Err!(
"The server cannot be restarted because the executable changed. If this is expected use --force to \
override."
);
}
services().server.restart()?;

View file

@ -18,17 +18,8 @@ pub(super) enum ServerCommand {
/// - Print database memory usage statistics
MemoryUsage,
/// - Clears all of Conduit's database caches with index smaller than the
/// amount
ClearDatabaseCaches {
amount: u32,
},
/// - Clears all of Conduit's service caches with index smaller than the
/// amount
ClearServiceCaches {
amount: u32,
},
/// - Clears all of Conduwuit's caches
ClearCaches,
/// - Performs an online backup of the database (only available for RocksDB
/// at the moment)
@ -65,12 +56,7 @@ pub(super) async fn process(command: ServerCommand, body: Vec<&str>) -> Result<R
ServerCommand::Uptime => uptime(body).await?,
ServerCommand::ShowConfig => show_config(body).await?,
ServerCommand::MemoryUsage => memory_usage(body).await?,
ServerCommand::ClearDatabaseCaches {
amount,
} => clear_database_caches(body, amount).await?,
ServerCommand::ClearServiceCaches {
amount,
} => clear_service_caches(body, amount).await?,
ServerCommand::ClearCaches => clear_caches(body).await?,
ServerCommand::ListBackups => list_backups(body).await?,
ServerCommand::BackupDatabase => backup_database(body).await?,
ServerCommand::ListDatabaseFiles => list_database_files(body).await?,

26
src/admin/tests.rs Normal file
View file

@ -0,0 +1,26 @@
#![cfg(test)]
// Help must render when the short `-h` flag is given.
#[test]
fn get_help_short() { get_help_inner("-h"); }
// Help must render when the long `--help` flag is given.
#[test]
fn get_help_long() { get_help_inner("--help"); }
// Help must render when the `help` subcommand is given.
#[test]
fn get_help_subcommand() { get_help_inner("help"); }
/// Shared driver for the help-output tests: parsing `input` must fail
/// (clap reports help via `Err`) and the rendered message must contain the
/// standard help sections.
fn get_help_inner(input: &str) {
	use clap::Parser;

	use crate::handler::AdminCommand;

	let argv = ["argv[0] doesn't matter", input];
	let error = match AdminCommand::try_parse_from(argv) {
		Err(e) => e.to_string(),
		Ok(_) => panic!("no error!"),
	};

	// Search for a handful of keywords that suggest the help printed properly
	for keyword in ["Usage:", "Commands:", "Options:"] {
		assert!(error.contains(keyword));
	}
}

View file

@ -8,7 +8,7 @@ use ruma::{
tag::{TagEvent, TagEventContent, TagInfo},
RoomAccountDataEventType,
},
OwnedRoomId, OwnedUserId, RoomId,
OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId,
};
use tracing::{error, info, warn};
@ -23,7 +23,7 @@ pub(super) async fn list(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
match services().users.list_local_users() {
Ok(users) => {
let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len());
plain_msg += &users.join("\n");
plain_msg += users.join("\n").as_str();
plain_msg += "\n```";
Ok(RoomMessageEventContent::notice_markdown(plain_msg))
@ -95,7 +95,7 @@ pub(super) async fn create(
if let Some(room_id_server_name) = room.server_name() {
match join_room_by_id_helper(
Some(&user_id),
&user_id,
room,
Some("Automatically joining this room upon registration".to_owned()),
&[room_id_server_name.to_owned(), services().globals.server_name().to_owned()],
@ -195,7 +195,10 @@ pub(super) async fn deactivate_all(
));
}
let usernames = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let usernames = body
.clone()
.drain(1..body.len().saturating_sub(1))
.collect::<Vec<_>>();
let mut user_ids: Vec<OwnedUserId> = Vec::with_capacity(usernames.len());
let mut admins = Vec::new();
@ -331,6 +334,35 @@ pub(super) async fn list_joined_rooms(_body: Vec<&str>, user_id: String) -> Resu
Ok(RoomMessageEventContent::text_html(output_plain, output_html))
}
/// Admin command handler: forcibly join a local user to a room.
///
/// `room_id` may be a room ID or an alias; it is resolved through the alias
/// service before the join. The join is performed with no reason, no extra
/// server list, and no third-party-signed data.
pub(super) async fn force_join_room(
	_body: Vec<&str>, user_id: String, room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
	// parse_local_user_id() rejects non-local users; the assert below
	// re-checks that invariant defensively before joining.
	let user_id = parse_local_user_id(&user_id)?;
	let room_id = services().rooms.alias.resolve(&room_id).await?;
	assert!(service::user_is_local(&user_id), "Parsed user_id must be a local user");
	join_room_by_id_helper(&user_id, &room_id, None, &[], None).await?;
	Ok(RoomMessageEventContent::notice_markdown(format!(
		"{user_id} has been joined to {room_id}.",
	)))
}
/// Admin command handler: grant server-admin privileges to a local user.
///
/// The user's current display name (falling back to the bare user ID when
/// none is set) is passed along to the admin-promotion service call.
pub(super) async fn make_user_admin(_body: Vec<&str>, user_id: String) -> Result<RoomMessageEventContent> {
	// parse_local_user_id() rejects non-local users; the assert below
	// re-checks that invariant defensively before promoting.
	let user_id = parse_local_user_id(&user_id)?;
	let displayname = services()
		.users
		.displayname(&user_id)?
		.unwrap_or_else(|| user_id.to_string());
	assert!(service::user_is_local(&user_id), "Parsed user_id must be a local user");
	service::admin::make_user_admin(&user_id, displayname).await?;
	Ok(RoomMessageEventContent::notice_markdown(format!(
		"{user_id} has been granted admin privileges.",
	)))
}
pub(super) async fn put_room_tag(
_body: Vec<&str>, user_id: String, room_id: Box<RoomId>, tag: String,
) -> Result<RoomMessageEventContent> {

View file

@ -2,7 +2,7 @@ mod commands;
use clap::Subcommand;
use conduit::Result;
use ruma::{events::room::message::RoomMessageEventContent, RoomId};
use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomOrAliasId, RoomId};
use self::commands::*;
@ -49,7 +49,7 @@ pub(super) enum UserCommand {
/// Markdown code block below the command.
DeactivateAll {
#[arg(short, long)]
/// Remove users from their joined rooms
/// Does not leave any rooms the user is in on deactivation
no_leave_rooms: bool,
#[arg(short, long)]
/// Also deactivate admin accounts and will assume leave all rooms too
@ -65,6 +65,17 @@ pub(super) enum UserCommand {
user_id: String,
},
/// - Manually join a local user to a room.
ForceJoinRoom {
user_id: String,
room_id: OwnedRoomOrAliasId,
},
/// - Grant server-admin privileges to a user.
MakeUserAdmin {
user_id: String,
},
/// - Puts a room tag for the specified user and room ID.
///
/// This is primarily useful if you'd like to set your admin room
@ -113,6 +124,13 @@ pub(super) async fn process(command: UserCommand, body: Vec<&str>) -> Result<Roo
UserCommand::ListJoinedRooms {
user_id,
} => list_joined_rooms(body, user_id).await?,
UserCommand::ForceJoinRoom {
user_id,
room_id,
} => force_join_room(body, user_id, room_id).await?,
UserCommand::MakeUserAdmin {
user_id,
} => make_user_admin(body, user_id).await?,
UserCommand::PutRoomTag {
user_id,
room_id,

View file

@ -1,4 +1,4 @@
use conduit_core::Error;
use conduit_core::{err, Err};
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
use service::user_is_local;
@ -33,7 +33,7 @@ pub(crate) fn get_room_info(id: &RoomId) -> (OwnedRoomId, u64, String) {
/// Parses user ID
pub(crate) fn parse_user_id(user_id: &str) -> Result<OwnedUserId> {
UserId::parse_with_server_name(user_id.to_lowercase(), services().globals.server_name())
.map_err(|e| Error::Err(format!("The supplied username is not a valid username: {e}")))
.map_err(|e| err!("The supplied username is not a valid username: {e}"))
}
/// Parses user ID as our local user
@ -41,7 +41,7 @@ pub(crate) fn parse_local_user_id(user_id: &str) -> Result<OwnedUserId> {
let user_id = parse_user_id(user_id)?;
if !user_is_local(&user_id) {
return Err(Error::Err(String::from("User does not belong to our server.")));
return Err!("User {user_id:?} does not belong to our server.");
}
Ok(user_id)
@ -52,11 +52,11 @@ pub(crate) fn parse_active_local_user_id(user_id: &str) -> Result<OwnedUserId> {
let user_id = parse_local_user_id(user_id)?;
if !services().users.exists(&user_id)? {
return Err(Error::Err(String::from("User does not exist on this server.")));
return Err!("User {user_id:?} does not exist on this server.");
}
if services().users.is_deactivated(&user_id)? {
return Err(Error::Err(String::from("User is deactivated.")));
return Err!("User {user_id:?} is deactivated.");
}
Ok(user_id)

View file

@ -41,9 +41,11 @@ bytes.workspace = true
conduit-core.workspace = true
conduit-database.workspace = true
conduit-service.workspace = true
const-str.workspace = true
futures-util.workspace = true
hmac.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper.workspace = true
image.workspace = true
ipaddress.workspace = true
@ -56,7 +58,6 @@ serde_html_form.workspace = true
serde_json.workspace = true
serde.workspace = true
sha-1.workspace = true
thiserror.workspace = true
tokio.workspace = true
tracing.workspace = true
webpage.workspace = true

View file

@ -309,7 +309,7 @@ pub(crate) async fn register_route(
// log in conduit admin channel if a guest registered
if body.appservice_info.is_none() && is_guest && services().globals.log_guest_registrations() {
info!("New guest user \"{user_id}\" registered on this server from IP.");
info!("New guest user \"{user_id}\" registered on this server.");
if let Some(device_display_name) = &body.initial_device_display_name {
if body
@ -376,7 +376,7 @@ pub(crate) async fn register_route(
if let Some(room_id_server_name) = room.server_name() {
if let Err(e) = join_room_by_id_helper(
Some(&user_id),
&user_id,
room,
Some("Automatically joining this room upon registration".to_owned()),
&[room_id_server_name.to_owned(), services().globals.server_name().to_owned()],
@ -423,7 +423,12 @@ pub(crate) async fn register_route(
pub(crate) async fn change_password_route(
InsecureClientIp(client): InsecureClientIp, body: Ruma<change_password::v3::Request>,
) -> Result<change_password::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
// Authentication for this endpoint was made optional, but we need
// authentication currently
let sender_user = body
.sender_user
.as_ref()
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut uiaainfo = UiaaInfo {
@ -512,7 +517,12 @@ pub(crate) async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoa
pub(crate) async fn deactivate_route(
InsecureClientIp(client): InsecureClientIp, body: Ruma<deactivate::v3::Request>,
) -> Result<deactivate::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
// Authentication for this endpoint was made optional, but we need
// authentication currently
let sender_user = body
.sender_user
.as_ref()
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut uiaainfo = UiaaInfo {

View file

@ -1,4 +1,5 @@
use axum_client_ip::InsecureClientIp;
use conduit::{err, info, warn, Error, Result};
use ruma::{
api::{
client::{
@ -10,14 +11,16 @@ use ruma::{
},
directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork},
events::{
room::join_rules::{JoinRule, RoomJoinRulesEventContent},
room::{
join_rules::{JoinRule, RoomJoinRulesEventContent},
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
},
StateEventType,
},
uint, ServerName, UInt,
uint, RoomId, ServerName, UInt, UserId,
};
use tracing::{error, info, warn};
use crate::{service::server_is_ours, services, Error, Result, Ruma};
use crate::{service::server_is_ours, services, Ruma};
/// # `POST /_matrix/client/v3/publicRooms`
///
@ -103,8 +106,6 @@ pub(crate) async fn get_public_rooms_route(
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
///
/// Sets the visibility of a given room in the room directory.
///
/// - TODO: Access control checks
#[tracing::instrument(skip_all, fields(%client), name = "room_directory")]
pub(crate) async fn set_room_visibility_route(
InsecureClientIp(client): InsecureClientIp, body: Ruma<set_room_visibility::v3::Request>,
@ -116,6 +117,13 @@ pub(crate) async fn set_room_visibility_route(
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
}
if !user_can_publish_room(sender_user, &body.room_id)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"User is not allowed to publish this room",
));
}
match &body.visibility {
room::Visibility::Public => {
if services().globals.config.lockdown_public_room_directory && !services().users.is_admin(sender_user)? {
@ -268,8 +276,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
_ => None,
})
.map_err(|e| {
error!("Invalid room join rule event in database: {}", e);
Error::BadDatabase("Invalid room join rule event in database.")
err!(Database(error!("Invalid room join rule event in database: {e}")))
})
})
.transpose()?
@ -351,3 +358,32 @@ pub(crate) async fn get_public_rooms_filtered_helper(
total_room_count_estimate: Some(total_room_count_estimate),
})
}
/// Check whether the user can publish to the room directory via power levels of
/// room history visibility event or room creator
fn user_can_publish_room(user_id: &UserId, room_id: &RoomId) -> Result<bool> {
if let Some(event) =
services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
{
serde_json::from_str(event.content.get())
.map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
.map(|content: RoomPowerLevelsEventContent| {
RoomPowerLevels::from(content).user_can_send_state(user_id, StateEventType::RoomHistoryVisibility)
})
} else if let Some(event) =
services()
.rooms
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")?
{
Ok(event.sender == user_id)
} else {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"User is not allowed to publish this room",
));
}
}

View file

@ -1,9 +1,9 @@
use std::{
cmp,
collections::{hash_map, BTreeMap, HashMap, HashSet},
time::{Duration, Instant},
time::Instant,
};
use conduit::{utils, utils::math::continue_exponential_backoff_secs, Error, Result};
use futures_util::{stream::FuturesUnordered, StreamExt};
use ruma::{
api::{
@ -18,15 +18,11 @@ use ruma::{
DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
};
use serde_json::json;
use service::user_is_local;
use tracing::debug;
use super::SESSION_ID_LENGTH;
use crate::{
service::user_is_local,
services,
utils::{self},
Error, Result, Ruma,
};
use crate::{services, Ruma};
/// # `POST /_matrix/client/r0/keys/upload`
///
@ -334,7 +330,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool + Send>(
.globals
.bad_query_ratelimiter
.write()
.await
.expect("locked")
.entry(id)
{
hash_map::Entry::Vacant(e) => {
@ -353,15 +349,14 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool + Send>(
.globals
.bad_query_ratelimiter
.read()
.await
.expect("locked")
.get(server)
{
// Exponential backoff
const MAX_DURATION: Duration = Duration::from_secs(60 * 60 * 24);
let min_elapsed_duration = cmp::min(MAX_DURATION, Duration::from_secs(5 * 60) * (*tries) * (*tries));
if time.elapsed() < min_elapsed_duration {
debug!("Backing off query from {:?}", server);
const MIN: u64 = 5 * 60;
const MAX: u64 = 60 * 60 * 24;
if continue_exponential_backoff_secs(MIN, MAX, time.elapsed(), *tries) {
debug!("Backing off query from {server:?}");
return (server, Err(Error::BadServerResponse("bad query, still backing off")));
}
}

View file

@ -2,6 +2,8 @@
use std::{io::Cursor, sync::Arc, time::Duration};
use axum_client_ip::InsecureClientIp;
use conduit::{debug, error, utils::math::ruma_from_usize, warn};
use image::io::Reader as ImgReader;
use ipaddress::IPAddress;
use reqwest::Url;
@ -12,7 +14,6 @@ use ruma::api::client::{
get_media_preview,
},
};
use tracing::{debug, error, warn};
use webpage::HTML;
use crate::{
@ -44,7 +45,7 @@ pub(crate) async fn get_media_config_route(
_body: Ruma<get_media_config::v3::Request>,
) -> Result<get_media_config::v3::Response> {
Ok(get_media_config::v3::Response {
upload_size: services().globals.max_request_size().into(),
upload_size: ruma_from_usize(services().globals.config.max_request_size),
})
}
@ -64,18 +65,22 @@ pub(crate) async fn get_media_config_v1_route(
/// # `GET /_matrix/media/v3/preview_url`
///
/// Returns URL preview.
#[tracing::instrument(skip_all, fields(%client), name = "url_preview")]
pub(crate) async fn get_media_preview_route(
body: Ruma<get_media_preview::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<get_media_preview::v3::Request>,
) -> Result<get_media_preview::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let url = &body.url;
if !url_preview_allowed(url) {
warn!(%sender_user, "URL is not allowed to be previewed: {url}");
return Err(Error::BadRequest(ErrorKind::forbidden(), "URL is not allowed to be previewed"));
}
match get_url_preview(url).await {
Ok(preview) => {
let res = serde_json::value::to_raw_value(&preview).map_err(|e| {
error!("Failed to convert UrlPreviewData into a serde json value: {}", e);
error!(%sender_user, "Failed to convert UrlPreviewData into a serde json value: {e}");
Error::BadRequest(
ErrorKind::LimitExceeded {
retry_after: Some(RetryAfter::Delay(Duration::from_secs(5))),
@ -87,7 +92,7 @@ pub(crate) async fn get_media_preview_route(
Ok(get_media_preview::v3::Response::from_raw_value(res))
},
Err(e) => {
warn!("Failed to generate a URL preview: {e}");
warn!(%sender_user, "Failed to generate a URL preview: {e}");
// there doesn't seem to be an agreed-upon error code in the spec.
// the only response codes in the preview_url spec page are 200 and 429.
@ -108,10 +113,13 @@ pub(crate) async fn get_media_preview_route(
/// See <https://spec.matrix.org/legacy/legacy/#id27>
///
/// Returns URL preview.
#[tracing::instrument(skip_all, fields(%client), name = "url_preview")]
pub(crate) async fn get_media_preview_v1_route(
body: Ruma<get_media_preview::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<get_media_preview::v3::Request>,
) -> Result<RumaResponse<get_media_preview::v3::Response>> {
get_media_preview_route(body).await.map(RumaResponse)
get_media_preview_route(InsecureClientIp(client), body)
.await
.map(RumaResponse)
}
/// # `POST /_matrix/media/v3/upload`
@ -120,8 +128,9 @@ pub(crate) async fn get_media_preview_v1_route(
///
/// - Some metadata will be saved in the database
/// - Media will be saved in the media/ directory
#[tracing::instrument(skip_all, fields(%client), name = "media_upload")]
pub(crate) async fn create_content_route(
body: Ruma<create_content::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<create_content::v3::Request>,
) -> Result<create_content::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
@ -167,10 +176,13 @@ pub(crate) async fn create_content_route(
///
/// - Some metadata will be saved in the database
/// - Media will be saved in the media/ directory
#[tracing::instrument(skip_all, fields(%client), name = "media_upload")]
pub(crate) async fn create_content_v1_route(
body: Ruma<create_content::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<create_content::v3::Request>,
) -> Result<RumaResponse<create_content::v3::Response>> {
create_content_route(body).await.map(RumaResponse)
create_content_route(InsecureClientIp(client), body)
.await
.map(RumaResponse)
}
/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}`
@ -181,16 +193,20 @@ pub(crate) async fn create_content_v1_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
/// seconds
pub(crate) async fn get_content_route(body: Ruma<get_content::v3::Request>) -> Result<get_content::v3::Response> {
#[tracing::instrument(skip_all, fields(%client), name = "media_get")]
pub(crate) async fn get_content_route(
InsecureClientIp(client): InsecureClientIp, body: Ruma<get_content::v3::Request>,
) -> Result<get_content::v3::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
if let Some(FileMeta {
content,
content_type,
file,
content_disposition,
}) = services().media.get(&mxc).await?
{
let content_disposition = Some(make_content_disposition(&content_type, content_disposition, None));
let file = content.expect("content");
Ok(get_content::v3::Response {
file,
@ -243,10 +259,13 @@ pub(crate) async fn get_content_route(body: Ruma<get_content::v3::Request>) -> R
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
/// seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_get")]
pub(crate) async fn get_content_v1_route(
body: Ruma<get_content::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<get_content::v3::Request>,
) -> Result<RumaResponse<get_content::v3::Response>> {
get_content_route(body).await.map(RumaResponse)
get_content_route(InsecureClientIp(client), body)
.await
.map(RumaResponse)
}
/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}/{fileName}`
@ -257,14 +276,15 @@ pub(crate) async fn get_content_v1_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
/// seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_get")]
pub(crate) async fn get_content_as_filename_route(
body: Ruma<get_content_as_filename::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<get_content_as_filename::v3::Request>,
) -> Result<get_content_as_filename::v3::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
if let Some(FileMeta {
content,
content_type,
file,
content_disposition,
}) = services().media.get(&mxc).await?
{
@ -274,6 +294,7 @@ pub(crate) async fn get_content_as_filename_route(
Some(body.filename.clone()),
));
let file = content.expect("content");
Ok(get_content_as_filename::v3::Response {
file,
content_type,
@ -328,10 +349,13 @@ pub(crate) async fn get_content_as_filename_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
/// seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_get")]
pub(crate) async fn get_content_as_filename_v1_route(
body: Ruma<get_content_as_filename::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<get_content_as_filename::v3::Request>,
) -> Result<RumaResponse<get_content_as_filename::v3::Response>> {
get_content_as_filename_route(body).await.map(RumaResponse)
get_content_as_filename_route(InsecureClientIp(client), body)
.await
.map(RumaResponse)
}
/// # `GET /_matrix/media/v3/thumbnail/{serverName}/{mediaId}`
@ -342,14 +366,15 @@ pub(crate) async fn get_content_as_filename_v1_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
/// seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")]
pub(crate) async fn get_content_thumbnail_route(
body: Ruma<get_content_thumbnail::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<get_content_thumbnail::v3::Request>,
) -> Result<get_content_thumbnail::v3::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
if let Some(FileMeta {
content,
content_type,
file,
content_disposition,
}) = services()
.media
@ -365,6 +390,7 @@ pub(crate) async fn get_content_thumbnail_route(
.await?
{
let content_disposition = Some(make_content_disposition(&content_type, content_disposition, None));
let file = content.expect("content");
Ok(get_content_thumbnail::v3::Response {
file,
@ -453,10 +479,13 @@ pub(crate) async fn get_content_thumbnail_route(
/// - Only redirects if `allow_redirect` is true
/// - Uses client-provided `timeout_ms` if available, else defaults to 20
/// seconds
#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")]
pub(crate) async fn get_content_thumbnail_v1_route(
body: Ruma<get_content_thumbnail::v3::Request>,
InsecureClientIp(client): InsecureClientIp, body: Ruma<get_content_thumbnail::v3::Request>,
) -> Result<RumaResponse<get_content_thumbnail::v3::Response>> {
get_content_thumbnail_route(body).await.map(RumaResponse)
get_content_thumbnail_route(InsecureClientIp(client), body)
.await
.map(RumaResponse)
}
async fn get_remote_content(

View file

@ -1,13 +1,15 @@
use std::{
cmp,
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
net::IpAddr,
sync::Arc,
time::{Duration, Instant},
time::Instant,
};
use axum_client_ip::InsecureClientIp;
use conduit::utils::mutex_map;
use conduit::{
debug, debug_warn, error, info, trace, utils, utils::math::continue_exponential_backoff_secs, warn, Error,
PduEvent, Result,
};
use ruma::{
api::{
client::{
@ -34,15 +36,16 @@ use ruma::{
};
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use tokio::sync::RwLock;
use tracing::{debug, error, info, trace, warn};
use crate::{
client::{update_avatar_url, update_displayname},
service::{
pdu::{gen_event_id_canonical_json, PduBuilder},
rooms::state::RoomMutexGuard,
sending::convert_to_outgoing_federation_event,
server_is_ours, user_is_local,
},
services, utils, Error, PduEvent, Result, Ruma,
services, Ruma,
};
/// Checks if the room is banned in any way possible and the sender user is not
@ -199,7 +202,7 @@ pub(crate) async fn join_room_by_id_route(
}
join_room_by_id_helper(
body.sender_user.as_deref(),
sender_user,
&body.room_id,
body.reason.clone(),
&servers,
@ -298,7 +301,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
};
let join_room_response = join_room_by_id_helper(
Some(sender_user),
sender_user,
&room_id,
body.reason.clone(),
&servers,
@ -363,6 +366,8 @@ pub(crate) async fn invite_user_route(
pub(crate) async fn kick_user_route(body: Ruma<kick_user::v3::Request>) -> Result<kick_user::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
let mut event: RoomMemberEventContent = serde_json::from_str(
services()
.rooms
@ -380,12 +385,6 @@ pub(crate) async fn kick_user_route(body: Ruma<kick_user::v3::Request>) -> Resul
event.membership = MembershipState::Leave;
event.reason.clone_from(&body.reason);
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&body.room_id)
.await;
services()
.rooms
.timeline
@ -414,6 +413,8 @@ pub(crate) async fn kick_user_route(body: Ruma<kick_user::v3::Request>) -> Resul
pub(crate) async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_user::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
let event = services()
.rooms
.state_accessor
@ -444,12 +445,6 @@ pub(crate) async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<
},
)?;
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&body.room_id)
.await;
services()
.rooms
.timeline
@ -478,6 +473,8 @@ pub(crate) async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<
pub(crate) async fn unban_user_route(body: Ruma<unban_user::v3::Request>) -> Result<unban_user::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
let mut event: RoomMemberEventContent = serde_json::from_str(
services()
.rooms
@ -493,12 +490,6 @@ pub(crate) async fn unban_user_route(body: Ruma<unban_user::v3::Request>) -> Res
event.reason.clone_from(&body.reason);
event.join_authorized_via_users_server = None;
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&body.room_id)
.await;
services()
.rooms
.timeline
@ -650,35 +641,36 @@ pub(crate) async fn joined_members_route(
}
pub async fn join_room_by_id_helper(
sender_user: Option<&UserId>, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
third_party_signed: Option<&ThirdPartySigned>,
) -> Result<join_room_by_id::v3::Response> {
let sender_user = sender_user.expect("user is authenticated");
let state_lock = services().rooms.state.mutex.lock(room_id).await;
if matches!(services().rooms.state_cache.is_joined(sender_user, room_id), Ok(true)) {
info!("{sender_user} is already joined in {room_id}");
debug_warn!("{sender_user} is already joined in {room_id}");
return Ok(join_room_by_id::v3::Response {
room_id: room_id.into(),
});
}
let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
// Ask a remote server if we are not participating in this room
if !services()
if services()
.rooms
.state_cache
.server_in_room(services().globals.server_name(), room_id)?
|| servers.is_empty()
|| (servers.len() == 1 && server_is_ours(&servers[0]))
{
join_room_by_id_helper_remote(sender_user, room_id, reason, servers, third_party_signed, state_lock).await
} else {
join_room_by_id_helper_local(sender_user, room_id, reason, servers, third_party_signed, state_lock).await
} else {
// Ask a remote server if we are not participating in this room
join_room_by_id_helper_remote(sender_user, room_id, reason, servers, third_party_signed, state_lock).await
}
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")]
async fn join_room_by_id_helper_remote(
sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
_third_party_signed: Option<&ThirdPartySigned>, state_lock: mutex_map::Guard<()>,
_third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard,
) -> Result<join_room_by_id::v3::Response> {
info!("Joining {room_id} over federation.");
@ -779,7 +771,7 @@ async fn join_room_by_id_helper_remote(
federation::membership::create_join_event::v2::Request {
room_id: room_id.to_owned(),
event_id: event_id.to_owned(),
pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()),
pdu: convert_to_outgoing_federation_event(join_event.clone()),
omit_members: false,
},
)
@ -788,14 +780,9 @@ async fn join_room_by_id_helper_remote(
info!("send_join finished");
if join_authorized_via_users_server.is_some() {
use RoomVersionId::*;
match &room_version_id {
RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V3
| RoomVersionId::V4
| RoomVersionId::V5
| RoomVersionId::V6
| RoomVersionId::V7 => {
V1 | V2 | V3 | V4 | V5 | V6 | V7 => {
warn!(
"Found `join_authorised_via_users_server` but room {} is version {}. Ignoring.",
room_id, &room_version_id
@ -803,7 +790,7 @@ async fn join_room_by_id_helper_remote(
},
// only room versions 8 and above using `join_authorized_via_users_server` (restricted joins) need to
// validate and send signatures
RoomVersionId::V8 | RoomVersionId::V9 | RoomVersionId::V10 | RoomVersionId::V11 => {
V8 | V9 | V10 | V11 => {
if let Some(signed_raw) = &send_join_response.room_state.event {
info!(
"There is a signed event. This room is probably using restricted joins. Adding signature to \
@ -1011,11 +998,12 @@ async fn join_room_by_id_helper_remote(
Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")]
async fn join_room_by_id_helper_local(
sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
_third_party_signed: Option<&ThirdPartySigned>, state_lock: mutex_map::Guard<()>,
_third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard,
) -> Result<join_room_by_id::v3::Response> {
info!("We can join locally");
debug!("We can join locally");
let join_rules_event =
services()
@ -1115,7 +1103,7 @@ async fn join_room_by_id_helper_local(
.iter()
.any(|server_name| !server_is_ours(server_name))
{
info!("We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements");
warn!("We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements");
let (make_join_response, remote_server) = make_join_request(sender_user, room_id, servers).await?;
let room_version_id = match make_join_response.room_version {
@ -1207,7 +1195,7 @@ async fn join_room_by_id_helper_local(
federation::membership::create_join_event::v2::Request {
room_id: room_id.to_owned(),
event_id: event_id.to_owned(),
pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()),
pdu: convert_to_outgoing_federation_event(join_event.clone()),
omit_members: false,
},
)
@ -1280,16 +1268,12 @@ async fn make_join_request(
make_join_counter = make_join_counter.saturating_add(1);
if let Err(ref e) = make_join_response {
trace!("make_join ErrorKind string: {:?}", e.error_code().to_string());
trace!("make_join ErrorKind string: {:?}", e.kind().to_string());
// converting to a string is necessary (i think) because ruma is forcing us to
// fill in the struct for M_INCOMPATIBLE_ROOM_VERSION
if e.error_code()
.to_string()
.contains("M_INCOMPATIBLE_ROOM_VERSION")
|| e.error_code()
.to_string()
.contains("M_UNSUPPORTED_ROOM_VERSION")
if e.kind().to_string().contains("M_INCOMPATIBLE_ROOM_VERSION")
|| e.kind().to_string().contains("M_UNSUPPORTED_ROOM_VERSION")
{
incompatible_room_version_count = incompatible_room_version_count.saturating_add(1);
}
@ -1342,7 +1326,7 @@ pub async fn validate_and_add_event_id(
.globals
.bad_event_ratelimiter
.write()
.await
.expect("locked")
.entry(id)
{
Entry::Vacant(e) => {
@ -1358,15 +1342,14 @@ pub async fn validate_and_add_event_id(
.globals
.bad_event_ratelimiter
.read()
.await
.expect("locked")
.get(&event_id)
{
// Exponential backoff
const MAX_DURATION: Duration = Duration::from_secs(60 * 60 * 24);
let min_elapsed_duration = cmp::min(MAX_DURATION, Duration::from_secs(5 * 60) * (*tries) * (*tries));
if time.elapsed() < min_elapsed_duration {
debug!("Backing off from {}", event_id);
const MIN: u64 = 60 * 5;
const MAX: u64 = 60 * 60 * 24;
if continue_exponential_backoff_secs(MIN, MAX, time.elapsed(), *tries) {
debug!("Backing off from {event_id}");
return Err(Error::BadServerResponse("bad event, still backing off"));
}
}
@ -1395,7 +1378,7 @@ pub(crate) async fn invite_helper(
if !user_is_local(user_id) {
let (pdu, pdu_json, invite_room_state) = {
let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
let state_lock = services().rooms.state.mutex.lock(room_id).await;
let content = to_raw_value(&RoomMemberEventContent {
avatar_url: services().users.avatar_url(user_id)?,
displayname: None,
@ -1438,7 +1421,7 @@ pub(crate) async fn invite_helper(
room_id: room_id.to_owned(),
event_id: (*pdu.event_id).to_owned(),
room_version: room_version_id.clone(),
event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()),
event: convert_to_outgoing_federation_event(pdu_json.clone()),
invite_room_state,
via: services().rooms.state_cache.servers_route_via(room_id).ok(),
},
@ -1507,7 +1490,7 @@ pub(crate) async fn invite_helper(
));
}
let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
let state_lock = services().rooms.state.mutex.lock(room_id).await;
services()
.rooms
@ -1601,7 +1584,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
true,
)?;
} else {
let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
let state_lock = services().rooms.state.mutex.lock(room_id).await;
let member_event =
services()
@ -1680,8 +1663,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
.filter_map(|sender| sender.as_str().map(ToOwned::to_owned))
.filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned())
.collect::<HashSet<OwnedServerName>>(),
.map(|user| user.server_name().to_owned()),
);
debug!("servers in remote_leave_room: {servers:?}");
@ -1775,7 +1757,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
federation::membership::create_leave_event::v2::Request {
room_id: room_id.to_owned(),
event_id,
pdu: PduEvent::convert_to_outgoing_federation_event(leave_event.clone()),
pdu: convert_to_outgoing_federation_event(leave_event.clone()),
},
)
.await?;

View file

@ -29,11 +29,7 @@ pub(crate) async fn send_message_event_route(
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref();
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&body.room_id)
.await;
let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
// Forbid m.room.encrypted if encryption is disabled
if MessageLikeEventType::RoomEncrypted == body.event_type && !services().globals.allow_encryption() {

View file

@ -353,7 +353,7 @@ pub async fn update_avatar_url(
pub async fn update_all_rooms(all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>, user_id: OwnedUserId) {
for (pdu_builder, room_id) in all_joined_rooms {
let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
let state_lock = services().rooms.state.mutex.lock(room_id).await;
if let Err(e) = services()
.rooms
.timeline

View file

@ -15,11 +15,7 @@ pub(crate) async fn redact_event_route(body: Ruma<redact_event::v3::Request>) ->
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&body.room_id)
.await;
let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
let event_id = services()
.rooms

View file

@ -90,7 +90,7 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
}
let _short_id = services().rooms.short.get_or_create_shortroomid(&room_id)?;
let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
let state_lock = services().rooms.state.mutex.lock(&room_id).await;
let alias: Option<OwnedRoomAliasId> = if let Some(alias) = &body.room_alias_name {
Some(room_alias_check(alias, &body.appservice_info).await?)
@ -118,6 +118,8 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
let content = match &body.creation_content {
Some(content) => {
use RoomVersionId::*;
let mut content = content
.deserialize_as::<CanonicalJsonObject>()
.map_err(|e| {
@ -125,16 +127,7 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
Error::bad_database("Failed to deserialise content as canonical JSON.")
})?;
match room_version {
RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V3
| RoomVersionId::V4
| RoomVersionId::V5
| RoomVersionId::V6
| RoomVersionId::V7
| RoomVersionId::V8
| RoomVersionId::V9
| RoomVersionId::V10 => {
V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
content.insert(
"creator".into(),
json!(&sender_user).try_into().map_err(|e| {
@ -143,7 +136,7 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
})?,
);
},
RoomVersionId::V11 => {}, // V11 removed the "creator" key
V11 => {}, // V11 removed the "creator" key
_ => {
warn!("Unexpected or unsupported room version {room_version}");
return Err(Error::BadRequest(
@ -152,7 +145,6 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
));
},
}
content.insert(
"room_version".into(),
json!(room_version.as_str())
@ -162,18 +154,11 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
content
},
None => {
use RoomVersionId::*;
let content = match room_version {
RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V3
| RoomVersionId::V4
| RoomVersionId::V5
| RoomVersionId::V6
| RoomVersionId::V7
| RoomVersionId::V8
| RoomVersionId::V9
| RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()),
RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => RoomCreateEventContent::new_v1(sender_user.clone()),
V11 => RoomCreateEventContent::new_v11(),
_ => {
warn!("Unexpected or unsupported room version {room_version}");
return Err(Error::BadRequest(
@ -573,11 +558,7 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
.short
.get_or_create_shortroomid(&replacement_room)?;
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&body.room_id)
.await;
let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
// Send a m.room.tombstone event to the old room to indicate that it is not
// intended to be used any further Fail if the sender does not have the required
@ -605,11 +586,7 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
// Change lock to replacement room
drop(state_lock);
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&replacement_room)
.await;
let state_lock = services().rooms.state.mutex.lock(&replacement_room).await;
// Get the old room creation event
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
@ -631,17 +608,10 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
// Send a m.room.create event containing a predecessor field and the applicable
// room_version
{
use RoomVersionId::*;
match body.new_version {
RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V3
| RoomVersionId::V4
| RoomVersionId::V5
| RoomVersionId::V6
| RoomVersionId::V7
| RoomVersionId::V8
| RoomVersionId::V9
| RoomVersionId::V10 => {
V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
create_event_content.insert(
"creator".into(),
json!(&sender_user).try_into().map_err(|e| {
@ -650,7 +620,7 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
})?,
);
},
RoomVersionId::V11 => {
V11 => {
// "creator" key no longer exists in V11 rooms
create_event_content.remove("creator");
},
@ -662,6 +632,7 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
));
},
}
}
create_event_content.insert(
"room_version".into(),

View file

@ -47,7 +47,7 @@ pub(crate) async fn get_hierarchy_route(body: Ruma<get_hierarchy::v1::Request>)
&body.room_id,
limit.try_into().unwrap_or(10),
key.map_or(vec![], |token| token.short_room_ids),
max_depth.try_into().unwrap_or(3),
max_depth.into(),
body.suggested_only,
)
.await

View file

@ -1,6 +1,6 @@
use std::sync::Arc;
use conduit::{error, warn};
use conduit::{debug_info, error};
use ruma::{
api::client::{
error::ErrorKind,
@ -36,18 +36,16 @@ pub(crate) async fn send_state_event_for_key_route(
) -> Result<send_state_event::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event_id = send_state_event_for_key_helper(
Ok(send_state_event::v3::Response {
event_id: send_state_event_for_key_helper(
sender_user,
&body.room_id,
&body.event_type,
&body.body.body,
body.state_key.clone(),
)
.await?;
let event_id = (*event_id).to_owned();
Ok(send_state_event::v3::Response {
event_id,
.await?
.into(),
})
}
@ -128,7 +126,7 @@ pub(crate) async fn get_state_events_for_key_route(
.state_accessor
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
.ok_or_else(|| {
warn!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id);
debug_info!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id);
Error::BadRequest(ErrorKind::NotFound, "State event not found.")
})?;
if body
@ -172,7 +170,7 @@ async fn send_state_event_for_key_helper(
sender: &UserId, room_id: &RoomId, event_type: &StateEventType, json: &Raw<AnyStateEventContent>, state_key: String,
) -> Result<Arc<EventId>> {
allowed_to_send_state_event(room_id, event_type, json).await?;
let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
let state_lock = services().rooms.state.mutex.lock(room_id).await;
let event_id = services()
.rooms
.timeline

View file

@ -1,10 +1,15 @@
use std::{
cmp,
cmp::Ordering,
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
time::Duration,
};
use conduit::PduCount;
use conduit::{
error,
utils::math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
Err, PduCount,
};
use ruma::{
api::client::{
filter::{FilterDefinition, LazyLoadOptions},
@ -27,7 +32,7 @@ use ruma::{
serde::Raw,
uint, DeviceId, EventId, OwnedUserId, RoomId, UInt, UserId,
};
use tracing::{error, Instrument as _, Span};
use tracing::{Instrument as _, Span};
use crate::{service::pdu::EventHash, services, utils, Error, PduEvent, Result, Ruma, RumaResponse};
@ -194,7 +199,7 @@ pub(crate) async fn sync_events_route(
let (room_id, invite_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let insert_lock = services().globals.roomid_mutex_insert.lock(&room_id).await;
let insert_lock = services().rooms.timeline.mutex_insert.lock(&room_id).await;
drop(insert_lock);
let invite_count = services()
@ -298,16 +303,10 @@ pub(crate) async fn sync_events_route(
{
// Hang a few seconds so requests are not spammed
// Stop hanging if new info arrives
let mut duration = body.timeout.unwrap_or_default();
if duration.as_secs() > 30 {
duration = Duration::from_secs(30);
}
#[allow(clippy::let_underscore_must_use)]
{
let default = Duration::from_secs(30);
let duration = cmp::min(body.timeout.unwrap_or(default), default);
_ = tokio::time::timeout(duration, watcher).await;
}
}
Ok(response)
}
@ -318,7 +317,7 @@ async fn handle_left_room(
next_batch_string: &str, full_state: bool, lazy_load_enabled: bool,
) -> Result<()> {
// Get and drop the lock to wait for remaining operations to finish
let insert_lock = services().globals.roomid_mutex_insert.lock(room_id).await;
let insert_lock = services().rooms.timeline.mutex_insert.lock(room_id).await;
drop(insert_lock);
let left_count = services()
@ -520,7 +519,7 @@ async fn load_joined_room(
) -> Result<JoinedRoom> {
// Get and drop the lock to wait for remaining operations to finish
// This will make sure the we have all events until next_batch
let insert_lock = services().globals.roomid_mutex_insert.lock(room_id).await;
let insert_lock = services().rooms.timeline.mutex_insert.lock(room_id).await;
drop(insert_lock);
let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?;
@ -546,8 +545,7 @@ async fn load_joined_room(
// Database queries:
let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? else {
error!("Room {} has no state", room_id);
return Err(Error::BadDatabase("Room has no state"));
return Err!(Database(error!("Room {room_id} has no state")));
};
let since_shortstatehash = services()
@ -975,8 +973,8 @@ async fn load_joined_room(
},
summary: RoomSummary {
heroes,
joined_member_count: joined_member_count.map(|n| (n as u32).into()),
invited_member_count: invited_member_count.map(|n| (n as u32).into()),
joined_member_count: joined_member_count.map(ruma_from_u64),
invited_member_count: invited_member_count.map(ruma_from_u64),
},
unread_notifications: UnreadNotificationsCount {
highlight_count,
@ -1026,7 +1024,7 @@ fn load_timeline(
// Take the last events for the timeline
timeline_pdus = non_timeline_pdus
.by_ref()
.take(limit as usize)
.take(usize_from_u64_truncated(limit))
.collect::<Vec<_>>()
.into_iter()
.rev()
@ -1300,7 +1298,7 @@ pub(crate) async fn sync_events_v4_route(
r.0,
UInt::try_from(all_joined_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
);
let room_ids = all_joined_rooms[(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)].to_vec();
let room_ids = all_joined_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec();
new_known_rooms.extend(room_ids.iter().cloned());
for room_id in &room_ids {
let todo_room = todo_rooms
@ -1333,7 +1331,7 @@ pub(crate) async fn sync_events_v4_route(
}
})
.collect(),
count: UInt::from(all_joined_rooms.len() as u32),
count: ruma_from_usize(all_joined_rooms.len()),
},
);
@ -1529,20 +1527,22 @@ pub(crate) async fn sync_events_v4_route(
prev_batch,
limited,
joined_count: Some(
(services()
services()
.rooms
.state_cache
.room_joined_count(room_id)?
.unwrap_or(0) as u32)
.into(),
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
invited_count: Some(
(services()
services()
.rooms
.state_cache
.room_invited_count(room_id)?
.unwrap_or(0) as u32)
.into(),
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
num_live: None, // Count events in timeline greater than global sync counter
timestamp: None,
@ -1557,15 +1557,10 @@ pub(crate) async fn sync_events_v4_route(
{
// Hang a few seconds so requests are not spammed
// Stop hanging if new info arrives
let mut duration = body.timeout.unwrap_or(Duration::from_secs(30));
if duration.as_secs() > 30 {
duration = Duration::from_secs(30);
}
#[allow(clippy::let_underscore_must_use)]
{
let default = Duration::from_secs(30);
let duration = cmp::min(body.timeout.unwrap_or(default), default);
_ = tokio::time::timeout(duration, watcher).await;
}
}
Ok(sync_events::v4::Response {
initial: globalsince == 0,

View file

@ -2,7 +2,7 @@ use std::collections::BTreeMap;
use ruma::api::client::thirdparty::get_protocols;
use crate::{Result, Ruma};
use crate::{Result, Ruma, RumaResponse};
/// # `GET /_matrix/client/r0/thirdparty/protocols`
///
@ -15,3 +15,13 @@ pub(crate) async fn get_protocols_route(
protocols: BTreeMap::new(),
})
}
/// # `GET /_matrix/client/unstable/thirdparty/protocols`
///
/// Same as `get_protocols_route`, except for some reason Element Android legacy
/// calls this
pub(crate) async fn get_protocols_route_unstable(
body: Ruma<get_protocols::v3::Request>,
) -> Result<RumaResponse<get_protocols::v3::Response>> {
get_protocols_route(body).await.map(RumaResponse)
}

View file

@ -1,12 +1,12 @@
use axum_client_ip::InsecureClientIp;
use conduit::{warn, RumaResponse};
use conduit::warn;
use ruma::{
api::client::{error::ErrorKind, membership::mutual_rooms, room::get_summary},
events::room::member::MembershipState,
OwnedRoomId,
};
use crate::{services, Error, Result, Ruma};
use crate::{services, Error, Result, Ruma, RumaResponse};
/// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`
///

View file

@ -1,14 +1,14 @@
pub mod client;
mod router;
pub mod routes;
pub mod router;
pub mod server;
extern crate conduit_core as conduit;
extern crate conduit_service as service;
pub(crate) use conduit::{debug_info, debug_warn, utils, Error, Result};
pub(crate) use service::{pdu::PduEvent, services, user_is_local};
pub(crate) use conduit::{debug_info, debug_warn, pdu::PduEvent, utils, Error, Result};
pub(crate) use service::{services, user_is_local};
pub use crate::router::State;
pub(crate) use crate::router::{Ruma, RumaResponse};
conduit::mod_ctor! {}

View file

@ -1,15 +1,24 @@
mod args;
mod auth;
mod handler;
mod request;
mod response;
use axum::{
response::IntoResponse,
routing::{any, get, post},
Router,
};
use conduit::{Error, Server};
use conduit::{err, Server};
use http::Uri;
use ruma::api::client::error::ErrorKind;
use crate::{client, router::RouterExt, server};
use self::handler::RouterExt;
pub(super) use self::{args::Args as Ruma, response::RumaResponse};
use crate::{client, server};
pub fn build(router: Router, server: &Server) -> Router {
pub type State = &'static service::Services;
pub fn build(router: Router<State>, server: &Server) -> Router<State> {
let config = &server.config;
let router = router
.ruma_route(client::get_supported_versions_route)
@ -94,6 +103,8 @@ pub fn build(router: Router, server: &Server) -> Router {
.ruma_route(client::search_users_route)
.ruma_route(client::get_member_events_route)
.ruma_route(client::get_protocols_route)
.route("/_matrix/client/unstable/thirdparty/protocols",
get(client::get_protocols_route_unstable))
.ruma_route(client::send_message_event_route)
.ruma_route(client::send_state_event_for_key_route)
.ruma_route(client::get_state_events_route)
@ -231,7 +242,7 @@ pub fn build(router: Router, server: &Server) -> Router {
}
async fn initial_sync(_uri: Uri) -> impl IntoResponse {
Error::BadRequest(ErrorKind::GuestAccessForbidden, "Guest access not implemented")
err!(Request(GuestAccessForbidden("Guest access not implemented")))
}
async fn federation_disabled() -> impl IntoResponse { Error::bad_config("Federation is disabled.") }
async fn federation_disabled() -> impl IntoResponse { err!(Config("allow_federation", "Federation is disabled.")) }

View file

@ -1,24 +1,15 @@
mod auth;
mod handler;
mod request;
use std::{mem, ops::Deref};
use axum::{async_trait, body::Body, extract::FromRequest};
use bytes::{BufMut, BytesMut};
pub(super) use conduit::error::RumaResponse;
use conduit::{debug, debug_warn, trace, warn};
use ruma::{
api::{client::error::ErrorKind, IncomingRequest},
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
};
use conduit::{debug, err, trace, Error, Result};
use ruma::{api::IncomingRequest, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId};
pub(super) use self::handler::RouterExt;
use self::{auth::Auth, request::Request};
use crate::{service::appservice::RegistrationInfo, services, Error, Result};
use super::{auth, auth::Auth, request, request::Request};
use crate::{service::appservice::RegistrationInfo, services};
/// Extractor for Ruma request structs
pub(crate) struct Ruma<T> {
pub(crate) struct Args<T> {
/// Request struct body
pub(crate) body: T,
@ -44,7 +35,7 @@ pub(crate) struct Ruma<T> {
}
#[async_trait]
impl<T, S> FromRequest<S, Body> for Ruma<T>
impl<T, S> FromRequest<S, Body> for Args<T>
where
T: IncomingRequest,
{
@ -65,7 +56,7 @@ where
}
}
impl<T> Deref for Ruma<T> {
impl<T> Deref for Args<T> {
type Target = T;
fn deref(&self) -> &Self::Target { &self.body }
@ -109,21 +100,14 @@ where
let mut http_request = hyper::Request::builder()
.uri(request.parts.uri.clone())
.method(request.parts.method.clone());
*http_request.headers_mut().unwrap() = request.parts.headers.clone();
let http_request = http_request.body(body).unwrap();
debug!(
"{:?} {:?} {:?}",
http_request.method(),
http_request.uri(),
http_request.headers()
);
*http_request.headers_mut().expect("mutable http headers") = request.parts.headers.clone();
let http_request = http_request.body(body).expect("http request body");
trace!("{:?} {:?} {:?}", http_request.method(), http_request.uri(), json_body);
let body = T::try_from_http_request(http_request, &request.path).map_err(|e| {
warn!("try_from_http_request failed: {e:?}",);
debug_warn!("JSON body: {:?}", json_body);
Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
})?;
let headers = http_request.headers();
let method = http_request.method();
let uri = http_request.uri();
debug!("{method:?} {uri:?} {headers:?}");
trace!("{method:?} {uri:?} {json_body:?}");
Ok(body)
T::try_from_http_request(http_request, &request.path).map_err(|e| err!(Request(BadJson(debug_warn!("{e}")))))
}

View file

@ -6,6 +6,7 @@ use axum_extra::{
typed_header::TypedHeaderRejectionReason,
TypedHeader,
};
use conduit::Err;
use http::uri::PathAndQuery;
use ruma::{
api::{client::error::ErrorKind, AuthScheme, Metadata},
@ -183,7 +184,7 @@ fn auth_appservice(request: &Request, info: Box<RegistrationInfo>) -> Result<Aut
async fn auth_server(request: &mut Request, json_body: &Option<CanonicalJsonValue>) -> Result<Auth> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
return Err!(Config("allow_federation", "Federation is disabled."));
}
let TypedHeader(Authorization(x_matrix)) = request

View file

@ -10,7 +10,7 @@ use conduit::Result;
use http::Method;
use ruma::api::IncomingRequest;
use super::{Ruma, RumaResponse};
use super::{Ruma, RumaResponse, State};
pub(in super::super) trait RouterExt {
fn ruma_route<H, T>(self, handler: H) -> Self
@ -18,7 +18,7 @@ pub(in super::super) trait RouterExt {
H: RumaHandler<T>;
}
impl RouterExt for Router {
impl RouterExt for Router<State> {
fn ruma_route<H, T>(self, handler: H) -> Self
where
H: RumaHandler<T>,
@ -28,9 +28,9 @@ impl RouterExt for Router {
}
pub(in super::super) trait RumaHandler<T> {
fn add_routes(&self, router: Router) -> Router;
fn add_routes(&self, router: Router<State>) -> Router<State>;
fn add_route(&self, router: Router, path: &str) -> Router;
fn add_route(&self, router: Router<State>, path: &str) -> Router<State>;
}
macro_rules! ruma_handler {
@ -41,17 +41,17 @@ macro_rules! ruma_handler {
Req: IncomingRequest + Send + 'static,
Ret: IntoResponse,
Fut: Future<Output = Result<Req::OutgoingResponse, Ret>> + Send,
Fun: FnOnce($($tx,)* Ruma<Req>) -> Fut + Clone + Send + Sync + 'static,
$( $tx: FromRequestParts<()> + Send + 'static, )*
Fun: FnOnce($($tx,)* Ruma<Req>,) -> Fut + Clone + Send + Sync + 'static,
$( $tx: FromRequestParts<State> + Send + 'static, )*
{
fn add_routes(&self, router: Router) -> Router {
fn add_routes(&self, router: Router<State>) -> Router<State> {
Req::METADATA
.history
.all_paths()
.fold(router, |router, path| self.add_route(router, path))
}
fn add_route(&self, router: Router, path: &str) -> Router {
fn add_route(&self, router: Router<State>, path: &str) -> Router<State> {
let handle = self.clone();
let method = method_to_filter(&Req::METADATA.method);
let action = |$($tx,)* req| async { handle($($tx,)* req).await.map(RumaResponse) };

View file

@ -2,11 +2,11 @@ use std::str;
use axum::{extract::Path, RequestExt, RequestPartsExt};
use bytes::Bytes;
use conduit::err;
use http::request::Parts;
use ruma::api::client::error::ErrorKind;
use serde::Deserialize;
use crate::{services, Error, Result};
use crate::{services, Result};
#[derive(Deserialize)]
pub(super) struct QueryParams {
@ -26,19 +26,15 @@ pub(super) async fn from(request: hyper::Request<axum::body::Body>) -> Result<Re
let (mut parts, body) = limited.into_parts();
let path: Path<Vec<String>> = parts.extract().await?;
let query = serde_html_form::from_str(parts.uri.query().unwrap_or_default())
.map_err(|_| Error::BadRequest(ErrorKind::Unknown, "Failed to read query parameters"))?;
let query = parts.uri.query().unwrap_or_default();
let query =
serde_html_form::from_str(query).map_err(|e| err!(Request(Unknown("Failed to read query parameters: {e}"))))?;
let max_body_size = services()
.globals
.config
.max_request_size
.try_into()
.expect("failed to convert max request size");
let max_body_size = services().globals.config.max_request_size;
let body = axum::body::to_bytes(body, max_body_size)
.await
.map_err(|_| Error::BadRequest(ErrorKind::TooLarge, "Request body too large"))?;
.map_err(|e| err!(Request(TooLarge("Request body too large: {e}"))))?;
Ok(Request {
path,

View file

@ -0,0 +1,24 @@
use axum::response::{IntoResponse, Response};
use bytes::BytesMut;
use conduit::{error, Error};
use http::StatusCode;
use http_body_util::Full;
use ruma::api::{client::uiaa::UiaaResponse, OutgoingResponse};
pub(crate) struct RumaResponse<T>(pub(crate) T);
impl From<Error> for RumaResponse<UiaaResponse> {
fn from(t: Error) -> Self { Self(t.into()) }
}
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
fn into_response(self) -> Response {
self.0
.try_into_http_response::<BytesMut>()
.inspect_err(|e| error!("response error: {e}"))
.map_or_else(
|_| StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|r| r.map(BytesMut::freeze).map(Full::new).into_response(),
)
}
}

View file

@ -1,9 +1,11 @@
use conduit::{Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation::backfill::get_backfill},
uint, user_id, MilliSecondsSinceUnixEpoch,
};
use service::{sending::convert_to_outgoing_federation_event, services};
use crate::{services, Error, PduEvent, Result, Ruma};
use crate::Ruma;
/// # `GET /_matrix/federation/v1/backfill/<room_id>`
///
@ -62,7 +64,7 @@ pub(crate) async fn get_backfill_route(body: Ruma<get_backfill::v1::Request>) ->
})
.map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
.filter_map(|r| r.ok().flatten())
.map(PduEvent::convert_to_outgoing_federation_event)
.map(convert_to_outgoing_federation_event)
.collect();
Ok(get_backfill::v1::Response {

View file

@ -1,9 +1,11 @@
use conduit::{Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation::event::get_event},
MilliSecondsSinceUnixEpoch, RoomId,
};
use service::{sending::convert_to_outgoing_federation_event, services};
use crate::{services, Error, PduEvent, Result, Ruma};
use crate::Ruma;
/// # `GET /_matrix/federation/v1/event/{eventId}`
///
@ -48,6 +50,6 @@ pub(crate) async fn get_event_route(body: Ruma<get_event::v1::Request>) -> Resul
Ok(get_event::v1::Response {
origin: services().globals.server_name().to_owned(),
origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
pdu: PduEvent::convert_to_outgoing_federation_event(event),
pdu: convert_to_outgoing_federation_event(event),
})
}

View file

@ -1,11 +1,13 @@
use std::sync::Arc;
use conduit::{Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation::authorization::get_event_authorization},
RoomId,
};
use service::{sending::convert_to_outgoing_federation_event, services};
use crate::{services, Error, PduEvent, Result, Ruma};
use crate::Ruma;
/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}`
///
@ -57,7 +59,7 @@ pub(crate) async fn get_event_authorization_route(
Ok(get_event_authorization::v1::Response {
auth_chain: auth_chain_ids
.filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok()?)
.map(PduEvent::convert_to_outgoing_federation_event)
.map(convert_to_outgoing_federation_event)
.collect(),
})
}

View file

@ -1,9 +1,11 @@
use conduit::{Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation::event::get_missing_events},
OwnedEventId, RoomId,
};
use service::{sending::convert_to_outgoing_federation_event, services};
use crate::{services, Error, PduEvent, Result, Ruma};
use crate::Ruma;
/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}`
///
@ -79,7 +81,7 @@ pub(crate) async fn get_missing_events_route(
)
.map_err(|_| Error::bad_database("Invalid prev_events in event in database."))?,
);
events.push(PduEvent::convert_to_outgoing_federation_event(pdu));
events.push(convert_to_outgoing_federation_event(pdu));
}
i = i.saturating_add(1);
}

View file

@ -1,18 +1,14 @@
use axum_client_ip::InsecureClientIp;
use conduit::{utils, warn, Error, PduEvent, Result};
use ruma::{
api::{client::error::ErrorKind, federation::membership::create_invite},
events::room::member::{MembershipState, RoomMemberEventContent},
serde::JsonObject,
CanonicalJsonValue, EventId, OwnedUserId,
};
use tracing::warn;
use service::{sending::convert_to_outgoing_federation_event, server_is_ours, services};
use crate::{
service::server_is_ours,
services,
utils::{self},
Error, PduEvent, Result, Ruma,
};
use crate::Ruma;
/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`
///
@ -176,6 +172,6 @@ pub(crate) async fn create_invite_route(
}
Ok(create_invite::v2::Response {
event: PduEvent::convert_to_outgoing_federation_event(signed_event),
event: convert_to_outgoing_federation_event(signed_event),
})
}

View file

@ -7,7 +7,7 @@ use ruma::{
},
StateEventType, TimelineEventType,
},
RoomId, RoomVersionId, UserId,
CanonicalJsonObject, RoomId, RoomVersionId, UserId,
};
use serde_json::value::to_raw_value;
use tracing::warn;
@ -71,11 +71,7 @@ pub(crate) async fn create_join_event_template_route(
let room_version_id = services().rooms.state.get_room_version(&body.room_id)?;
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&body.room_id)
.await;
let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
let join_authorized_via_users_server = if (services()
.rooms
@ -148,27 +144,7 @@ pub(crate) async fn create_join_event_template_route(
drop(state_lock);
// room v3 and above removed the "event_id" field from remote PDU format
match room_version_id {
RoomVersionId::V1 | RoomVersionId::V2 => {},
RoomVersionId::V3
| RoomVersionId::V4
| RoomVersionId::V5
| RoomVersionId::V6
| RoomVersionId::V7
| RoomVersionId::V8
| RoomVersionId::V9
| RoomVersionId::V10
| RoomVersionId::V11 => {
pdu_json.remove("event_id");
},
_ => {
warn!("Unexpected or unsupported room version {room_version_id}");
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Unexpected or unsupported room version found",
));
},
};
maybe_strip_event_id(&mut pdu_json, &room_version_id)?;
Ok(prepare_join_event::v1::Response {
room_version: Some(room_version_id),
@ -183,6 +159,8 @@ pub(crate) async fn create_join_event_template_route(
pub(crate) fn user_can_perform_restricted_join(
user_id: &UserId, room_id: &RoomId, room_version_id: &RoomVersionId,
) -> Result<bool> {
use RoomVersionId::*;
let join_rules_event =
services()
.rooms
@ -202,16 +180,7 @@ pub(crate) fn user_can_perform_restricted_join(
return Ok(false);
};
if matches!(
room_version_id,
RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V3
| RoomVersionId::V4
| RoomVersionId::V5
| RoomVersionId::V6
| RoomVersionId::V7
) {
if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
return Ok(false);
}
@ -243,3 +212,23 @@ pub(crate) fn user_can_perform_restricted_join(
))
}
}
pub(crate) fn maybe_strip_event_id(pdu_json: &mut CanonicalJsonObject, room_version_id: &RoomVersionId) -> Result<()> {
use RoomVersionId::*;
match room_version_id {
V1 | V2 => {},
V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 | V11 => {
pdu_json.remove("event_id");
},
_ => {
warn!("Unexpected or unsupported room version {room_version_id}");
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Unexpected or unsupported room version found",
));
},
};
Ok(())
}

View file

@ -1,14 +1,15 @@
use conduit::{Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation::membership::prepare_leave_event},
events::{
room::member::{MembershipState, RoomMemberEventContent},
TimelineEventType,
},
RoomVersionId,
};
use serde_json::value::to_raw_value;
use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma};
use super::make_join::maybe_strip_event_id;
use crate::{service::pdu::PduBuilder, services, Ruma};
/// # `PUT /_matrix/federation/v1/make_leave/{roomId}/{eventId}`
///
@ -35,11 +36,7 @@ pub(crate) async fn create_leave_event_template_route(
.acl_check(origin, &body.room_id)?;
let room_version_id = services().rooms.state.get_room_version(&body.room_id)?;
let state_lock = services()
.globals
.roomid_mutex_state
.lock(&body.room_id)
.await;
let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
let content = to_raw_value(&RoomMemberEventContent {
avatar_url: None,
blurhash: None,
@ -68,26 +65,7 @@ pub(crate) async fn create_leave_event_template_route(
drop(state_lock);
// room v3 and above removed the "event_id" field from remote PDU format
match room_version_id {
RoomVersionId::V1 | RoomVersionId::V2 => {},
RoomVersionId::V3
| RoomVersionId::V4
| RoomVersionId::V5
| RoomVersionId::V6
| RoomVersionId::V7
| RoomVersionId::V8
| RoomVersionId::V9
| RoomVersionId::V10
| RoomVersionId::V11 => {
pdu_json.remove("event_id");
},
_ => {
return Err(Error::BadRequest(
ErrorKind::BadJson,
"Unexpected or unsupported room version found",
));
},
};
maybe_strip_event_id(&mut pdu_json, &room_version_id)?;
Ok(prepare_leave_event::v1::Response {
room_version: Some(room_version_id),

View file

@ -1,7 +1,8 @@
use std::{collections::BTreeMap, net::IpAddr, time::Instant};
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduit::debug_warn;
use conduit::{debug, debug_warn, err, trace, warn, Err};
use ruma::{
api::{
client::error::ErrorKind,
@ -18,11 +19,10 @@ use ruma::{
OwnedEventId, ServerName,
};
use tokio::sync::RwLock;
use tracing::{debug, error, trace, warn};
use crate::{
service::rooms::event_handler::parse_incoming_pdu,
services,
services::Services,
utils::{self},
Error, Result, Ruma,
};
@ -34,29 +34,23 @@ type ResolvedMap = BTreeMap<OwnedEventId, Result<(), Error>>;
/// Push EDUs and PDUs to this server.
#[tracing::instrument(skip_all, fields(%client), name = "send")]
pub(crate) async fn send_transaction_message_route(
InsecureClientIp(client): InsecureClientIp, body: Ruma<send_transaction_message::v1::Request>,
State(services): State<&Services>, InsecureClientIp(client): InsecureClientIp,
body: Ruma<send_transaction_message::v1::Request>,
) -> Result<send_transaction_message::v1::Response> {
let origin = body.origin.as_ref().expect("server is authenticated");
if *origin != body.body.origin {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Not allowed to send transactions on behalf of other servers",
));
return Err!(Request(Forbidden(
"Not allowed to send transactions on behalf of other servers"
)));
}
if body.pdus.len() > 50_usize {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Not allowed to send more than 50 PDUs in one transaction",
));
return Err!(Request(Forbidden("Not allowed to send more than 50 PDUs in one transaction")));
}
if body.edus.len() > 100_usize {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Not allowed to send more than 100 EDUs in one transaction",
));
return Err!(Request(Forbidden("Not allowed to send more than 100 EDUs in one transaction")));
}
let txn_start_time = Instant::now();
@ -69,8 +63,8 @@ pub(crate) async fn send_transaction_message_route(
"Starting txn",
);
let resolved_map = handle_pdus(&client, &body, origin, &txn_start_time).await?;
handle_edus(&client, &body, origin).await?;
let resolved_map = handle_pdus(services, &client, &body, origin, &txn_start_time).await?;
handle_edus(services, &client, &body, origin).await?;
debug!(
pdus = ?body.pdus.len(),
@ -84,13 +78,14 @@ pub(crate) async fn send_transaction_message_route(
Ok(send_transaction_message::v1::Response {
pdus: resolved_map
.into_iter()
.map(|(e, r)| (e, r.map_err(|e| e.sanitized_error())))
.map(|(e, r)| (e, r.map_err(|e| e.sanitized_string())))
.collect(),
})
}
async fn handle_pdus(
_client: &IpAddr, body: &Ruma<send_transaction_message::v1::Request>, origin: &ServerName, txn_start_time: &Instant,
services: &Services, _client: &IpAddr, body: &Ruma<send_transaction_message::v1::Request>, origin: &ServerName,
txn_start_time: &Instant,
) -> Result<ResolvedMap> {
let mut parsed_pdus = Vec::with_capacity(body.pdus.len());
for pdu in &body.pdus {
@ -110,7 +105,7 @@ async fn handle_pdus(
// corresponding signing keys
let pub_key_map = RwLock::new(BTreeMap::new());
if !parsed_pdus.is_empty() {
services()
services
.rooms
.event_handler
.fetch_required_signing_keys(parsed_pdus.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map)
@ -126,14 +121,15 @@ async fn handle_pdus(
let mut resolved_map = BTreeMap::new();
for (event_id, value, room_id) in parsed_pdus {
let pdu_start_time = Instant::now();
let mutex_lock = services()
.globals
.roomid_mutex_federation
let mutex_lock = services
.rooms
.event_handler
.mutex_federation
.lock(&room_id)
.await;
resolved_map.insert(
event_id.clone(),
services()
services
.rooms
.event_handler
.handle_incoming_pdu(origin, &room_id, &event_id, value, true, &pub_key_map)
@ -161,7 +157,7 @@ async fn handle_pdus(
}
async fn handle_edus(
client: &IpAddr, body: &Ruma<send_transaction_message::v1::Request>, origin: &ServerName,
services: &Services, client: &IpAddr, body: &Ruma<send_transaction_message::v1::Request>, origin: &ServerName,
) -> Result<()> {
for edu in body
.edus
@ -169,12 +165,12 @@ async fn handle_edus(
.filter_map(|edu| serde_json::from_str::<Edu>(edu.json().get()).ok())
{
match edu {
Edu::Presence(presence) => handle_edu_presence(client, origin, presence).await?,
Edu::Receipt(receipt) => handle_edu_receipt(client, origin, receipt).await?,
Edu::Typing(typing) => handle_edu_typing(client, origin, typing).await?,
Edu::DeviceListUpdate(content) => handle_edu_device_list_update(client, origin, content).await?,
Edu::DirectToDevice(content) => handle_edu_direct_to_device(client, origin, content).await?,
Edu::SigningKeyUpdate(content) => handle_edu_signing_key_update(client, origin, content).await?,
Edu::Presence(presence) => handle_edu_presence(services, client, origin, presence).await?,
Edu::Receipt(receipt) => handle_edu_receipt(services, client, origin, receipt).await?,
Edu::Typing(typing) => handle_edu_typing(services, client, origin, typing).await?,
Edu::DeviceListUpdate(content) => handle_edu_device_list_update(services, client, origin, content).await?,
Edu::DirectToDevice(content) => handle_edu_direct_to_device(services, client, origin, content).await?,
Edu::SigningKeyUpdate(content) => handle_edu_signing_key_update(services, client, origin, content).await?,
Edu::_Custom(ref _custom) => {
debug_warn!(?body.edus, "received custom/unknown EDU");
},
@ -184,8 +180,10 @@ async fn handle_edus(
Ok(())
}
async fn handle_edu_presence(_client: &IpAddr, origin: &ServerName, presence: PresenceContent) -> Result<()> {
if !services().globals.allow_incoming_presence() {
async fn handle_edu_presence(
services: &Services, _client: &IpAddr, origin: &ServerName, presence: PresenceContent,
) -> Result<()> {
if !services.globals.allow_incoming_presence() {
return Ok(());
}
@ -198,7 +196,7 @@ async fn handle_edu_presence(_client: &IpAddr, origin: &ServerName, presence: Pr
continue;
}
services().presence.set_presence(
services.presence.set_presence(
&update.user_id,
&update.presence,
Some(update.currently_active),
@ -210,13 +208,15 @@ async fn handle_edu_presence(_client: &IpAddr, origin: &ServerName, presence: Pr
Ok(())
}
async fn handle_edu_receipt(_client: &IpAddr, origin: &ServerName, receipt: ReceiptContent) -> Result<()> {
if !services().globals.allow_incoming_read_receipts() {
async fn handle_edu_receipt(
services: &Services, _client: &IpAddr, origin: &ServerName, receipt: ReceiptContent,
) -> Result<()> {
if !services.globals.allow_incoming_read_receipts() {
return Ok(());
}
for (room_id, room_updates) in receipt.receipts {
if services()
if services
.rooms
.event_handler
.acl_check(origin, &room_id)
@ -238,7 +238,7 @@ async fn handle_edu_receipt(_client: &IpAddr, origin: &ServerName, receipt: Rece
continue;
}
if services()
if services
.rooms
.state_cache
.room_members(&room_id)
@ -254,7 +254,7 @@ async fn handle_edu_receipt(_client: &IpAddr, origin: &ServerName, receipt: Rece
room_id: room_id.clone(),
};
services()
services
.rooms
.read_receipt
.readreceipt_update(&user_id, &room_id, &event)?;
@ -272,8 +272,10 @@ async fn handle_edu_receipt(_client: &IpAddr, origin: &ServerName, receipt: Rece
Ok(())
}
async fn handle_edu_typing(_client: &IpAddr, origin: &ServerName, typing: TypingContent) -> Result<()> {
if !services().globals.config.allow_incoming_typing {
async fn handle_edu_typing(
services: &Services, _client: &IpAddr, origin: &ServerName, typing: TypingContent,
) -> Result<()> {
if !services.globals.config.allow_incoming_typing {
return Ok(());
}
@ -285,7 +287,7 @@ async fn handle_edu_typing(_client: &IpAddr, origin: &ServerName, typing: Typing
return Ok(());
}
if services()
if services
.rooms
.event_handler
.acl_check(typing.user_id.server_name(), &typing.room_id)
@ -298,26 +300,26 @@ async fn handle_edu_typing(_client: &IpAddr, origin: &ServerName, typing: Typing
return Ok(());
}
if services()
if services
.rooms
.state_cache
.is_joined(&typing.user_id, &typing.room_id)?
{
if typing.typing {
let timeout = utils::millis_since_unix_epoch().saturating_add(
services()
services
.globals
.config
.typing_federation_timeout_s
.saturating_mul(1000),
);
services()
services
.rooms
.typing
.typing_add(&typing.user_id, &typing.room_id, timeout)
.await?;
} else {
services()
services
.rooms
.typing
.typing_remove(&typing.user_id, &typing.room_id)
@ -335,7 +337,7 @@ async fn handle_edu_typing(_client: &IpAddr, origin: &ServerName, typing: Typing
}
async fn handle_edu_device_list_update(
_client: &IpAddr, origin: &ServerName, content: DeviceListUpdateContent,
services: &Services, _client: &IpAddr, origin: &ServerName, content: DeviceListUpdateContent,
) -> Result<()> {
let DeviceListUpdateContent {
user_id,
@ -350,13 +352,13 @@ async fn handle_edu_device_list_update(
return Ok(());
}
services().users.mark_device_key_update(&user_id)?;
services.users.mark_device_key_update(&user_id)?;
Ok(())
}
async fn handle_edu_direct_to_device(
_client: &IpAddr, origin: &ServerName, content: DirectDeviceContent,
services: &Services, _client: &IpAddr, origin: &ServerName, content: DirectDeviceContent,
) -> Result<()> {
let DirectDeviceContent {
sender,
@ -374,7 +376,7 @@ async fn handle_edu_direct_to_device(
}
// Check if this is a new transaction id
if services()
if services
.transaction_ids
.existing_txnid(&sender, None, &message_id)?
.is_some()
@ -386,28 +388,27 @@ async fn handle_edu_direct_to_device(
for (target_device_id_maybe, event) in map {
match target_device_id_maybe {
DeviceIdOrAllDevices::DeviceId(target_device_id) => {
services().users.add_to_device_event(
services.users.add_to_device_event(
&sender,
target_user_id,
target_device_id,
&ev_type.to_string(),
event.deserialize_as().map_err(|e| {
error!("To-Device event is invalid: {event:?} {e}");
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
})?,
event
.deserialize_as()
.map_err(|e| err!(Request(InvalidParam(error!("To-Device event is invalid: {e}")))))?,
)?;
},
DeviceIdOrAllDevices::AllDevices => {
for target_device_id in services().users.all_device_ids(target_user_id) {
services().users.add_to_device_event(
for target_device_id in services.users.all_device_ids(target_user_id) {
services.users.add_to_device_event(
&sender,
target_user_id,
&target_device_id?,
&ev_type.to_string(),
event
.deserialize_as()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?,
.map_err(|e| err!(Request(InvalidParam("Event is invalid: {e}"))))?,
)?;
}
},
@ -416,7 +417,7 @@ async fn handle_edu_direct_to_device(
}
// Save transaction id with empty data
services()
services
.transaction_ids
.add_txnid(&sender, None, &message_id, &[])?;
@ -424,7 +425,7 @@ async fn handle_edu_direct_to_device(
}
async fn handle_edu_signing_key_update(
_client: &IpAddr, origin: &ServerName, content: SigningKeyUpdateContent,
services: &Services, _client: &IpAddr, origin: &ServerName, content: SigningKeyUpdateContent,
) -> Result<()> {
let SigningKeyUpdateContent {
user_id,
@ -441,7 +442,7 @@ async fn handle_edu_signing_key_update(
}
if let Some(master_key) = master_key {
services()
services
.users
.add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true)?;
}

View file

@ -2,6 +2,7 @@
use std::collections::BTreeMap;
use conduit::{Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation::membership::create_join_event},
events::{
@ -11,11 +12,13 @@ use ruma::{
CanonicalJsonValue, OwnedServerName, OwnedUserId, RoomId, ServerName,
};
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use service::user_is_local;
use service::{
pdu::gen_event_id_canonical_json, sending::convert_to_outgoing_federation_event, services, user_is_local,
};
use tokio::sync::RwLock;
use tracing::warn;
use crate::{service::pdu::gen_event_id_canonical_json, services, Error, PduEvent, Result, Ruma};
use crate::Ruma;
/// helper method for /send_join v1 and v2
async fn create_join_event(
@ -153,8 +156,9 @@ async fn create_join_event(
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?;
let mutex_lock = services()
.globals
.roomid_mutex_federation
.rooms
.event_handler
.mutex_federation
.lock(room_id)
.await;
let pdu_id: Vec<u8> = services()
@ -181,12 +185,12 @@ async fn create_join_event(
Ok(create_join_event::v1::RoomState {
auth_chain: auth_chain_ids
.filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok().flatten())
.map(PduEvent::convert_to_outgoing_federation_event)
.map(convert_to_outgoing_federation_event)
.collect(),
state: state_ids
.iter()
.filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten())
.map(PduEvent::convert_to_outgoing_federation_event)
.map(convert_to_outgoing_federation_event)
.collect(),
// Event field is required if the room version supports restricted join rules.
event: Some(

View file

@ -152,8 +152,9 @@ async fn create_leave_event(origin: &ServerName, room_id: &RoomId, pdu: &RawJson
.await?;
let mutex_lock = services()
.globals
.roomid_mutex_federation
.rooms
.event_handler
.mutex_federation
.lock(room_id)
.await;
let pdu_id: Vec<u8> = services()

View file

@ -1,8 +1,10 @@
use std::sync::Arc;
use conduit::{Error, Result};
use ruma::api::{client::error::ErrorKind, federation::event::get_room_state};
use service::{sending::convert_to_outgoing_federation_event, services};
use crate::{services, Error, PduEvent, Result, Ruma};
use crate::Ruma;
/// # `GET /_matrix/federation/v1/state/{roomId}`
///
@ -42,7 +44,7 @@ pub(crate) async fn get_room_state_route(
.await?
.into_values()
.map(|id| {
PduEvent::convert_to_outgoing_federation_event(
convert_to_outgoing_federation_event(
services()
.rooms
.timeline
@ -67,7 +69,7 @@ pub(crate) async fn get_room_state_route(
.timeline
.get_pdu_json(&id)
.ok()?
.map(PduEvent::convert_to_outgoing_federation_event)
.map(convert_to_outgoing_federation_event)
})
.collect(),
pdus,

View file

@ -53,7 +53,9 @@ sha256_media = []
argon2.workspace = true
axum.workspace = true
bytes.workspace = true
checked_ops.workspace = true
chrono.workspace = true
const-str.workspace = true
either.workspace = true
figment.workspace = true
http-body-util.workspace = true
@ -80,6 +82,7 @@ tikv-jemalloc-ctl.workspace = true
tikv-jemalloc-sys.optional = true
tikv-jemalloc-sys.workspace = true
tokio.workspace = true
tokio-metrics.workspace = true
tracing-core.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true

View file

@ -1,9 +1,9 @@
//! Default allocator with no special features
/// Always returns the empty string
/// Always returns None
#[must_use]
pub fn memory_stats() -> String { String::default() }
pub fn memory_stats() -> Option<String> { None }
/// Always returns the empty string
/// Always returns None
#[must_use]
pub fn memory_usage() -> String { String::default() }
pub fn memory_usage() -> Option<String> { None }

View file

@ -4,9 +4,10 @@
static HMALLOC: hardened_malloc_rs::HardenedMalloc = hardened_malloc_rs::HardenedMalloc;
#[must_use]
pub fn memory_usage() -> String {
String::default() //TODO: get usage
}
//TODO: get usage
pub fn memory_usage() -> Option<string> { None }
#[must_use]
pub fn memory_stats() -> String { "Extended statistics are not available from hardened_malloc.".to_owned() }
pub fn memory_stats() -> Option<String> {
Some("Extended statistics are not available from hardened_malloc.".to_owned())
}

View file

@ -10,22 +10,31 @@ use tikv_jemallocator as jemalloc;
static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc;
#[must_use]
pub fn memory_usage() -> String {
pub fn memory_usage() -> Option<String> {
use mallctl::stats;
let allocated = stats::allocated::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
let active = stats::active::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
let mapped = stats::mapped::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
let metadata = stats::metadata::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
let resident = stats::resident::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
let retained = stats::retained::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
format!(
" allocated: {allocated:.2} MiB\n active: {active:.2} MiB\n mapped: {mapped:.2} MiB\n metadata: {metadata:.2} \
MiB\n resident: {resident:.2} MiB\n retained: {retained:.2} MiB\n "
)
let mibs = |input: Result<usize, mallctl::Error>| {
let input = input.unwrap_or_default();
let kibs = input / 1024;
let kibs = u32::try_from(kibs).unwrap_or_default();
let kibs = f64::from(kibs);
kibs / 1024.0
};
let allocated = mibs(stats::allocated::read());
let active = mibs(stats::active::read());
let mapped = mibs(stats::mapped::read());
let metadata = mibs(stats::metadata::read());
let resident = mibs(stats::resident::read());
let retained = mibs(stats::retained::read());
Some(format!(
"allocated: {allocated:.2} MiB\nactive: {active:.2} MiB\nmapped: {mapped:.2} MiB\nmetadata: {metadata:.2} \
MiB\nresident: {resident:.2} MiB\nretained: {retained:.2} MiB\n"
))
}
#[must_use]
pub fn memory_stats() -> String {
pub fn memory_stats() -> Option<String> {
const MAX_LENGTH: usize = 65536 - 4096;
let opts_s = "d";
@ -42,7 +51,7 @@ pub fn memory_stats() -> String {
unsafe { ffi::malloc_stats_print(Some(malloc_stats_cb), opaque, opts_p) };
str.truncate(MAX_LENGTH);
format!("<pre><code>{str}</code></pre>")
Some(format!("<pre><code>{str}</code></pre>"))
}
extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) {

View file

@ -1,110 +1,126 @@
#[cfg(unix)]
use std::path::Path; // not unix specific, just only for UNIX sockets stuff and *nix container checks
use figment::Figment;
use tracing::{debug, error, info, warn};
use crate::{error::Error, Config};
pub fn check(config: &Config) -> Result<(), Error> {
#[cfg(feature = "rocksdb")]
warn!(
"Note the rocksdb feature was deleted from conduwuit, sqlite was deleted and RocksDB is the only supported \
backend now. Please update your build script to remove this feature."
);
#[cfg(feature = "sha256_media")]
warn!(
"Note the sha256_media feature was deleted from conduwuit, it is now fully integrated in a \
forwards-compatible way. Please update your build script to remove this feature."
);
config.warn_deprecated();
config.warn_unknown_key();
if config.sentry && config.sentry_endpoint.is_none() {
return Err(Error::bad_config("Sentry cannot be enabled without an endpoint set"));
}
if cfg!(feature = "hardened_malloc") && cfg!(feature = "jemalloc") {
warn!("hardened_malloc and jemalloc are both enabled, this causes jemalloc to be used.");
}
if config.unix_socket_path.is_some() && !cfg!(unix) {
return Err(Error::bad_config(
"UNIX socket support is only available on *nix platforms. Please remove \"unix_socket_path\" from your \
config.",
));
}
config.get_bind_addrs().iter().for_each(|addr| {
if addr.ip().is_loopback() && cfg!(unix) {
debug!("Found loopback listening address {addr}, running checks if we're in a container.",);
#[cfg(unix)]
if Path::new("/proc/vz").exists() /* Guest */ && !Path::new("/proc/bz").exists()
/* Host */
{
error!(
"You are detected using OpenVZ with a loopback/localhost listening address of {addr}. If you are \
using OpenVZ for containers and you use NAT-based networking to communicate with the host and \
guest, this will NOT work. Please change this to \"0.0.0.0\". If this is expected, you can \
ignore.",
);
}
#[cfg(unix)]
if Path::new("/.dockerenv").exists() {
error!(
"You are detected using Docker with a loopback/localhost listening address of {addr}. If you are \
using a reverse proxy on the host and require communication to conduwuit in the Docker container \
via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". If this is \
expected, you can ignore.",
);
}
#[cfg(unix)]
if Path::new("/run/.containerenv").exists() {
error!(
"You are detected using Podman with a loopback/localhost listening address of {addr}. If you are \
using a reverse proxy on the host and require communication to conduwuit in the Podman container \
via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". If this is \
expected, you can ignore.",
);
}
}
});
// rocksdb does not allow max_log_files to be 0
if config.rocksdb_max_log_files == 0 {
return Err(Error::bad_config(
"When using RocksDB, rocksdb_max_log_files cannot be 0. Please set a value at least 1.",
));
}
// yeah, unless the user built a debug build hopefully for local testing only
if config.server_name == "your.server.name" && !cfg!(debug_assertions) {
return Err(Error::bad_config(
"You must specify a valid server name for production usage of conduwuit.",
));
}
use super::DEPRECATED_KEYS;
use crate::{debug, debug_info, error, info, warn, Config, Err, Result};
#[allow(clippy::cognitive_complexity)]
pub fn check(config: &Config) -> Result<()> {
if cfg!(debug_assertions) {
info!("Note: conduwuit was built without optimisations (i.e. debug build)");
}
// prevents catching this in `--all-features`
if cfg!(all(feature = "rocksdb", not(feature = "sha256_media"))) {
warn!(
"Note the rocksdb feature was deleted from conduwuit. SQLite support was removed and RocksDB is the only \
supported backend now. Please update your build script to remove this feature."
);
}
// prevents catching this in `--all-features`
if cfg!(all(feature = "sha256_media", not(feature = "rocksdb"))) {
warn!(
"Note the sha256_media feature was deleted from conduwuit, it is now fully integrated in a \
forwards-compatible way. Please update your build script to remove this feature."
);
}
warn_deprecated(config);
warn_unknown_key(config);
if config.sentry && config.sentry_endpoint.is_none() {
return Err!(Config("sentry_endpoint", "Sentry cannot be enabled without an endpoint set"));
}
if cfg!(all(feature = "hardened_malloc", feature = "jemalloc")) {
warn!(
"hardened_malloc and jemalloc are both enabled, this causes jemalloc to be used. If using --all-features, \
this is harmless."
);
}
if cfg!(not(unix)) && config.unix_socket_path.is_some() {
return Err!(Config(
"unix_socket_path",
"UNIX socket support is only available on *nix platforms. Please remove 'unix_socket_path' from your \
config."
));
}
if cfg!(unix) && config.unix_socket_path.is_none() {
config.get_bind_addrs().iter().for_each(|addr| {
use std::path::Path;
if addr.ip().is_loopback() {
debug_info!("Found loopback listening address {addr}, running checks if we're in a container.");
if Path::new("/proc/vz").exists() /* Guest */ && !Path::new("/proc/bz").exists()
/* Host */
{
error!(
"You are detected using OpenVZ with a loopback/localhost listening address of {addr}. If you \
are using OpenVZ for containers and you use NAT-based networking to communicate with the \
host and guest, this will NOT work. Please change this to \"0.0.0.0\". If this is expected, \
you can ignore.",
);
}
if Path::new("/.dockerenv").exists() {
error!(
"You are detected using Docker with a loopback/localhost listening address of {addr}. If you \
are using a reverse proxy on the host and require communication to conduwuit in the Docker \
container via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". \
If this is expected, you can ignore.",
);
}
if Path::new("/run/.containerenv").exists() {
error!(
"You are detected using Podman with a loopback/localhost listening address of {addr}. If you \
are using a reverse proxy on the host and require communication to conduwuit in the Podman \
container via NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". \
If this is expected, you can ignore.",
);
}
}
});
}
// rocksdb does not allow max_log_files to be 0
if config.rocksdb_max_log_files == 0 {
return Err!(Config(
"max_log_files",
"rocksdb_max_log_files cannot be 0. Please set a value at least 1."
));
}
// yeah, unless the user built a debug build hopefully for local testing only
if cfg!(not(debug_assertions)) && config.server_name == "your.server.name" {
return Err!(Config(
"server_name",
"You must specify a valid server name for production usage of conduwuit."
));
}
// check if the user specified a registration token as `""`
if config.registration_token == Some(String::new()) {
return Err(Error::bad_config("Registration token was specified but is empty (\"\")"));
return Err!(Config(
"registration_token",
"Registration token was specified but is empty (\"\")"
));
}
if config.max_request_size < 5_120_000 {
return Err(Error::bad_config("Max request size is less than 5MB. Please increase it."));
return Err!(Config(
"max_request_size",
"Max request size is less than 5MB. Please increase it."
));
}
// check if user specified valid IP CIDR ranges on startup
for cidr in &config.ip_range_denylist {
if let Err(e) = ipaddress::IPAddress::parse(cidr) {
error!("Error parsing specified IP CIDR range from string: {e}");
return Err(Error::bad_config("Error parsing specified IP CIDR ranges from strings"));
return Err!(Config("ip_range_denylist", "Parsing specified IP CIDR range from string: {e}."));
}
}
@ -112,13 +128,14 @@ pub fn check(config: &Config) -> Result<(), Error> {
&& !config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse
&& config.registration_token.is_none()
{
return Err(Error::bad_config(
return Err!(Config(
"registration_token",
"!! You have `allow_registration` enabled without a token configured in your config which means you are \
allowing ANYONE to register on your conduwuit instance without any 2nd-step (e.g. registration token).\n
If this is not the intended behaviour, please set a registration token with the `registration_token` config option.\n
For security and safety reasons, conduwuit will shut down. If you are extra sure this is the desired behaviour you \
want, please set the following config option to true:
`yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`",
`yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`"
));
}
@ -135,8 +152,9 @@ For security and safety reasons, conduwuit will shut down. If you are extra sure
}
if config.allow_outgoing_presence && !config.allow_local_presence {
return Err(Error::bad_config(
"Outgoing presence requires allowing local presence. Please enable \"allow_local_presence\".",
return Err!(Config(
"allow_local_presence",
"Outgoing presence requires allowing local presence. Please enable 'allow_local_presence'."
));
}
@ -173,3 +191,52 @@ For security and safety reasons, conduwuit will shut down. If you are extra sure
Ok(())
}
/// Iterates over all the keys in the config file and warns if there is a
/// deprecated key specified
fn warn_deprecated(config: &Config) {
debug!("Checking for deprecated config keys");
let mut was_deprecated = false;
for key in config
.catchall
.keys()
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
{
warn!("Config parameter \"{}\" is deprecated, ignoring.", key);
was_deprecated = true;
}
if was_deprecated {
warn!(
"Read conduwuit config documentation at https://conduwuit.puppyirl.gay/configuration.html and check your \
configuration if any new configuration parameters should be adjusted"
);
}
}
/// iterates over all the catchall keys (unknown config options) and warns
/// if there are any.
fn warn_unknown_key(config: &Config) {
debug!("Checking for unknown config keys");
for key in config
.catchall
.keys()
.filter(|key| "config".to_owned().ne(key.to_owned()) /* "config" is expected */)
{
warn!("Config parameter \"{}\" is unknown to conduwuit, ignoring.", key);
}
}
/// Checks the presence of the `address` and `unix_socket_path` keys in the
/// raw_config, exiting the process if both keys were detected.
pub(super) fn is_dual_listening(raw_config: &Figment) -> Result<()> {
let contains_address = raw_config.contains("address");
let contains_unix_socket = raw_config.contains("unix_socket_path");
if contains_address && contains_unix_socket {
return Err!(
"TOML keys \"address\" and \"unix_socket_path\" were both defined. Please specify only one option."
);
}
Ok(())
}

View file

@ -1,6 +1,6 @@
use std::{
collections::BTreeMap,
fmt::{self, Write as _},
fmt,
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
path::PathBuf,
};
@ -19,30 +19,15 @@ use ruma::{
api::client::discovery::discover_support::ContactRole, OwnedRoomId, OwnedServerName, OwnedUserId, RoomVersionId,
};
use serde::{de::IgnoredAny, Deserialize};
use tracing::{debug, error, warn};
use url::Url;
pub use self::check::check;
use self::proxy::ProxyConfig;
use crate::error::Error;
use crate::{error::Error, Err, Result};
pub mod check;
pub mod proxy;
#[derive(Deserialize, Clone, Debug)]
#[serde(transparent)]
struct ListeningPort {
#[serde(with = "either::serde_untagged")]
ports: Either<u16, Vec<u16>>,
}
#[derive(Deserialize, Clone, Debug)]
#[serde(transparent)]
struct ListeningAddr {
#[serde(with = "either::serde_untagged")]
addrs: Either<IpAddr, Vec<IpAddr>>,
}
/// all the config options for conduwuit
#[derive(Clone, Debug, Deserialize)]
#[allow(clippy::struct_excessive_bools)]
@ -73,8 +58,8 @@ pub struct Config {
#[serde(default = "default_pdu_cache_capacity")]
pub pdu_cache_capacity: u32,
#[serde(default = "default_conduit_cache_capacity_modifier")]
pub conduit_cache_capacity_modifier: f64,
#[serde(default = "default_cache_capacity_modifier", alias = "conduit_cache_capacity_modifier")]
pub cache_capacity_modifier: f64,
#[serde(default = "default_auth_chain_cache_capacity")]
pub auth_chain_cache_capacity: u32,
#[serde(default = "default_shorteventid_cache_capacity")]
@ -114,7 +99,7 @@ pub struct Config {
pub ip_lookup_strategy: u8,
#[serde(default = "default_max_request_size")]
pub max_request_size: u32,
pub max_request_size: usize,
#[serde(default = "default_max_fetch_prev_events")]
pub max_fetch_prev_events: u16,
@ -181,16 +166,14 @@ pub struct Config {
#[serde(default)]
pub well_known: WellKnownConfig,
#[serde(default)]
#[cfg(feature = "perf_measurements")]
pub allow_jaeger: bool,
#[serde(default = "default_jaeger_filter")]
pub jaeger_filter: String,
#[serde(default)]
#[cfg(feature = "perf_measurements")]
pub tracing_flame: bool,
#[serde(default = "default_tracing_flame_filter")]
#[cfg(feature = "perf_measurements")]
pub tracing_flame_filter: String,
#[serde(default = "default_tracing_flame_output_path")]
#[cfg(feature = "perf_measurements")]
pub tracing_flame_output_path: String,
#[serde(default)]
pub proxy: ProxyConfig,
@ -356,6 +339,14 @@ pub struct Config {
pub sentry_send_server_name: bool,
#[serde(default = "default_sentry_traces_sample_rate")]
pub sentry_traces_sample_rate: f32,
#[serde(default)]
pub sentry_attach_stacktrace: bool,
#[serde(default = "true_fn")]
pub sentry_send_panic: bool,
#[serde(default = "true_fn")]
pub sentry_send_error: bool,
#[serde(default = "default_sentry_filter")]
pub sentry_filter: String,
#[serde(default)]
pub tokio_console: bool,
@ -386,8 +377,23 @@ pub struct WellKnownConfig {
pub support_mxid: Option<OwnedUserId>,
}
const DEPRECATED_KEYS: &[&str] = &[
#[derive(Deserialize, Clone, Debug)]
#[serde(transparent)]
struct ListeningPort {
#[serde(with = "either::serde_untagged")]
ports: Either<u16, Vec<u16>>,
}
#[derive(Deserialize, Clone, Debug)]
#[serde(transparent)]
struct ListeningAddr {
#[serde(with = "either::serde_untagged")]
addrs: Either<IpAddr, Vec<IpAddr>>,
}
const DEPRECATED_KEYS: &[&str; 9] = &[
"cache_capacity",
"conduit_cache_capacity_modifier",
"max_concurrent_requests",
"well_known_client",
"well_known_server",
@ -399,7 +405,7 @@ const DEPRECATED_KEYS: &[&str] = &[
impl Config {
/// Initialize config
pub fn new(path: Option<PathBuf>) -> Result<Self, Error> {
pub fn new(path: Option<PathBuf>) -> Result<Self> {
let raw_config = if let Some(config_file_env) = Env::var("CONDUIT_CONFIG") {
Figment::new()
.merge(Toml::file(config_file_env).nested())
@ -422,69 +428,16 @@ impl Config {
};
let config = match raw_config.extract::<Self>() {
Err(e) => return Err(Error::BadConfig(format!("{e}"))),
Err(e) => return Err!("There was a problem with your configuration file: {e}"),
Ok(config) => config,
};
// don't start if we're listening on both UNIX sockets and TCP at same time
if Self::is_dual_listening(&raw_config) {
return Err(Error::bad_config("dual listening on UNIX and TCP sockets not allowed."));
};
check::is_dual_listening(&raw_config)?;
Ok(config)
}
/// Iterates over all the keys in the config file and warns if there is a
/// deprecated key specified
pub(crate) fn warn_deprecated(&self) {
debug!("Checking for deprecated config keys");
let mut was_deprecated = false;
for key in self
.catchall
.keys()
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
{
warn!("Config parameter \"{}\" is deprecated, ignoring.", key);
was_deprecated = true;
}
if was_deprecated {
warn!(
"Read conduwuit config documentation at https://conduwuit.puppyirl.gay/configuration.html and check \
your configuration if any new configuration parameters should be adjusted"
);
}
}
/// iterates over all the catchall keys (unknown config options) and warns
/// if there are any.
pub(crate) fn warn_unknown_key(&self) {
debug!("Checking for unknown config keys");
for key in self
.catchall
.keys()
.filter(|key| "config".to_owned().ne(key.to_owned()) /* "config" is expected */)
{
warn!("Config parameter \"{}\" is unknown to conduwuit, ignoring.", key);
}
}
/// Checks the presence of the `address` and `unix_socket_path` keys in the
/// raw_config, exiting the process if both keys were detected.
fn is_dual_listening(raw_config: &Figment) -> bool {
let check_address = raw_config.find_value("address");
let check_unix_socket = raw_config.find_value("unix_socket_path");
// are the check_address and check_unix_socket keys both Ok (specified) at the
// same time?
if check_address.is_ok() && check_unix_socket.is_ok() {
error!("TOML keys \"address\" and \"unix_socket_path\" were both defined. Please specify only one option.");
return true;
}
false
}
#[must_use]
pub fn get_bind_addrs(&self) -> Vec<SocketAddr> {
let mut addrs = Vec::new();
@ -516,361 +469,358 @@ impl Config {
impl fmt::Display for Config {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Prepare a list of config values to show
let lines = [
("Server name", self.server_name.host()),
("Database backend", &self.database_backend),
("Database path", &self.database_path.to_string_lossy()),
(
writeln!(f, "Active config values:\n\n").expect("wrote line to formatter stream");
let mut line = |key: &str, val: &str| {
writeln!(f, "{key}: {val}").expect("wrote line to formatter stream");
};
line("Server name", self.server_name.host());
line("Database backend", &self.database_backend);
line("Database path", &self.database_path.to_string_lossy());
line(
"Database backup path",
self.database_backup_path
.as_ref()
.map_or("", |path| path.to_str().unwrap_or("")),
),
("Database backups to keep", &self.database_backups_to_keep.to_string()),
("Database cache capacity (MB)", &self.db_cache_capacity_mb.to_string()),
("Cache capacity modifier", &self.conduit_cache_capacity_modifier.to_string()),
("PDU cache capacity", &self.pdu_cache_capacity.to_string()),
("Auth chain cache capacity", &self.auth_chain_cache_capacity.to_string()),
("Short eventid cache capacity", &self.shorteventid_cache_capacity.to_string()),
("Eventid short cache capacity", &self.eventidshort_cache_capacity.to_string()),
("Short statekey cache capacity", &self.shortstatekey_cache_capacity.to_string()),
("Statekey short cache capacity", &self.statekeyshort_cache_capacity.to_string()),
(
);
line("Database backups to keep", &self.database_backups_to_keep.to_string());
line("Database cache capacity (MB)", &self.db_cache_capacity_mb.to_string());
line("Cache capacity modifier", &self.cache_capacity_modifier.to_string());
line("PDU cache capacity", &self.pdu_cache_capacity.to_string());
line("Auth chain cache capacity", &self.auth_chain_cache_capacity.to_string());
line("Short eventid cache capacity", &self.shorteventid_cache_capacity.to_string());
line("Eventid short cache capacity", &self.eventidshort_cache_capacity.to_string());
line("Short statekey cache capacity", &self.shortstatekey_cache_capacity.to_string());
line("Statekey short cache capacity", &self.statekeyshort_cache_capacity.to_string());
line(
"Server visibility cache capacity",
&self.server_visibility_cache_capacity.to_string(),
),
(
);
line(
"User visibility cache capacity",
&self.user_visibility_cache_capacity.to_string(),
),
("Stateinfo cache capacity", &self.stateinfo_cache_capacity.to_string()),
(
);
line("Stateinfo cache capacity", &self.stateinfo_cache_capacity.to_string());
line(
"Roomid space hierarchy cache capacity",
&self.roomid_spacehierarchy_cache_capacity.to_string(),
),
("DNS cache entry limit", &self.dns_cache_entries.to_string()),
("DNS minimum TTL", &self.dns_min_ttl.to_string()),
("DNS minimum NXDOMAIN TTL", &self.dns_min_ttl_nxdomain.to_string()),
("DNS attempts", &self.dns_attempts.to_string()),
("DNS timeout", &self.dns_timeout.to_string()),
("DNS fallback to TCP", &self.dns_tcp_fallback.to_string()),
("DNS query over TCP only", &self.query_over_tcp_only.to_string()),
("Query all nameservers", &self.query_all_nameservers.to_string()),
("Maximum request size (bytes)", &self.max_request_size.to_string()),
("Sender retry backoff limit", &self.sender_retry_backoff_limit.to_string()),
("Request connect timeout", &self.request_conn_timeout.to_string()),
("Request timeout", &self.request_timeout.to_string()),
("Request total timeout", &self.request_total_timeout.to_string()),
("Idle connections per host", &self.request_idle_per_host.to_string()),
("Request pool idle timeout", &self.request_idle_timeout.to_string()),
("Well_known connect timeout", &self.well_known_conn_timeout.to_string()),
("Well_known timeout", &self.well_known_timeout.to_string()),
("Federation timeout", &self.federation_timeout.to_string()),
("Federation pool idle per host", &self.federation_idle_per_host.to_string()),
("Federation pool idle timeout", &self.federation_idle_timeout.to_string()),
("Sender timeout", &self.sender_timeout.to_string()),
("Sender pool idle timeout", &self.sender_idle_timeout.to_string()),
("Appservice timeout", &self.appservice_timeout.to_string()),
("Appservice pool idle timeout", &self.appservice_idle_timeout.to_string()),
("Pusher pool idle timeout", &self.pusher_idle_timeout.to_string()),
("Allow registration", &self.allow_registration.to_string()),
(
);
line("DNS cache entry limit", &self.dns_cache_entries.to_string());
line("DNS minimum TTL", &self.dns_min_ttl.to_string());
line("DNS minimum NXDOMAIN TTL", &self.dns_min_ttl_nxdomain.to_string());
line("DNS attempts", &self.dns_attempts.to_string());
line("DNS timeout", &self.dns_timeout.to_string());
line("DNS fallback to TCP", &self.dns_tcp_fallback.to_string());
line("DNS query over TCP only", &self.query_over_tcp_only.to_string());
line("Query all nameservers", &self.query_all_nameservers.to_string());
line("Maximum request size (bytes)", &self.max_request_size.to_string());
line("Sender retry backoff limit", &self.sender_retry_backoff_limit.to_string());
line("Request connect timeout", &self.request_conn_timeout.to_string());
line("Request timeout", &self.request_timeout.to_string());
line("Request total timeout", &self.request_total_timeout.to_string());
line("Idle connections per host", &self.request_idle_per_host.to_string());
line("Request pool idle timeout", &self.request_idle_timeout.to_string());
line("Well_known connect timeout", &self.well_known_conn_timeout.to_string());
line("Well_known timeout", &self.well_known_timeout.to_string());
line("Federation timeout", &self.federation_timeout.to_string());
line("Federation pool idle per host", &self.federation_idle_per_host.to_string());
line("Federation pool idle timeout", &self.federation_idle_timeout.to_string());
line("Sender timeout", &self.sender_timeout.to_string());
line("Sender pool idle timeout", &self.sender_idle_timeout.to_string());
line("Appservice timeout", &self.appservice_timeout.to_string());
line("Appservice pool idle timeout", &self.appservice_idle_timeout.to_string());
line("Pusher pool idle timeout", &self.pusher_idle_timeout.to_string());
line("Allow registration", &self.allow_registration.to_string());
line(
"Registration token",
if self.registration_token.is_some() {
"set"
} else {
"not set (open registration!)"
},
),
(
);
line(
"Allow guest registration (inherently false if allow registration is false)",
&self.allow_guest_registration.to_string(),
),
(
);
line(
"Log guest registrations in admin room",
&self.log_guest_registrations.to_string(),
),
(
);
line(
"Allow guests to auto join rooms",
&self.allow_guests_auto_join_rooms.to_string(),
),
("New user display name suffix", &self.new_user_displayname_suffix),
("Allow encryption", &self.allow_encryption.to_string()),
("Allow federation", &self.allow_federation.to_string()),
(
);
line("New user display name suffix", &self.new_user_displayname_suffix);
line("Allow encryption", &self.allow_encryption.to_string());
line("Allow federation", &self.allow_federation.to_string());
line(
"Allow incoming federated presence requests (updates)",
&self.allow_incoming_presence.to_string(),
),
(
);
line(
"Allow outgoing federated presence requests (updates)",
&self.allow_outgoing_presence.to_string(),
),
(
);
line(
"Allow local presence requests (updates)",
&self.allow_local_presence.to_string(),
),
(
);
line(
"Allow incoming remote read receipts",
&self.allow_incoming_read_receipts.to_string(),
),
(
);
line(
"Allow outgoing remote read receipts",
&self.allow_outgoing_read_receipts.to_string(),
),
(
);
line(
"Block non-admin room invites (local and remote, admins can still send and receive invites)",
&self.block_non_admin_invites.to_string(),
),
("Enable admin escape commands", &self.admin_escape_commands.to_string()),
("Allow outgoing federated typing", &self.allow_outgoing_typing.to_string()),
("Allow incoming federated typing", &self.allow_incoming_typing.to_string()),
(
);
line("Enable admin escape commands", &self.admin_escape_commands.to_string());
line("Allow outgoing federated typing", &self.allow_outgoing_typing.to_string());
line("Allow incoming federated typing", &self.allow_incoming_typing.to_string());
line(
"Incoming federated typing timeout",
&self.typing_federation_timeout_s.to_string(),
),
("Client typing timeout minimum", &self.typing_client_timeout_min_s.to_string()),
("Client typing timeout maxmimum", &self.typing_client_timeout_max_s.to_string()),
("Allow device name federation", &self.allow_device_name_federation.to_string()),
(
);
line("Client typing timeout minimum", &self.typing_client_timeout_min_s.to_string());
line("Client typing timeout maxmimum", &self.typing_client_timeout_max_s.to_string());
line("Allow device name federation", &self.allow_device_name_federation.to_string());
line(
"Allow incoming profile lookup federation requests",
&self.allow_profile_lookup_federation_requests.to_string(),
),
(
);
line(
"Auto deactivate banned room join attempts",
&self.auto_deactivate_banned_room_attempts.to_string(),
),
("Notification push path", &self.notification_push_path),
("Allow room creation", &self.allow_room_creation.to_string()),
(
);
line("Notification push path", &self.notification_push_path);
line("Allow room creation", &self.allow_room_creation.to_string());
line(
"Allow public room directory over federation",
&self.allow_public_room_directory_over_federation.to_string(),
),
(
);
line(
"Allow public room directory without authentication",
&self.allow_public_room_directory_without_auth.to_string(),
),
(
);
line(
"Lockdown public room directory (only allow admins to publish)",
&self.lockdown_public_room_directory.to_string(),
),
(
);
line(
"JWT secret",
match self.jwt_secret {
Some(_) => "set",
None => "not set",
},
),
(
);
line(
"Trusted key servers",
&self
.trusted_servers
.iter()
.map(|server| server.host())
.join(", "),
),
(
);
line(
"Query Trusted Key Servers First",
&self.query_trusted_key_servers_first.to_string(),
),
("OpenID Token TTL", &self.openid_token_ttl.to_string()),
(
);
line("OpenID Token TTL", &self.openid_token_ttl.to_string());
line(
"TURN username",
if self.turn_username.is_empty() {
"not set"
} else {
&self.turn_username
},
),
("TURN password", {
);
line("TURN password", {
if self.turn_password.is_empty() {
"not set"
} else {
"set"
}
}),
("TURN secret", {
});
line("TURN secret", {
if self.turn_secret.is_empty() {
"not set"
} else {
"set"
}
}),
("Turn TTL", &self.turn_ttl.to_string()),
("Turn URIs", {
});
line("Turn TTL", &self.turn_ttl.to_string());
line("Turn URIs", {
let mut lst = vec![];
for item in self.turn_uris.iter().cloned().enumerate() {
let (_, uri): (usize, String) = item;
lst.push(uri);
}
&lst.join(", ")
}),
("Auto Join Rooms", {
});
line("Auto Join Rooms", {
let mut lst = vec![];
for room in &self.auto_join_rooms {
lst.push(room);
}
&lst.into_iter().join(", ")
}),
#[cfg(feature = "zstd_compression")]
("Zstd HTTP Compression", &self.zstd_compression.to_string()),
#[cfg(feature = "gzip_compression")]
("Gzip HTTP Compression", &self.gzip_compression.to_string()),
#[cfg(feature = "brotli_compression")]
("Brotli HTTP Compression", &self.brotli_compression.to_string()),
("RocksDB database LOG level", &self.rocksdb_log_level),
("RocksDB database LOG to stderr", &self.rocksdb_log_stderr.to_string()),
("RocksDB database LOG time-to-roll", &self.rocksdb_log_time_to_roll.to_string()),
("RocksDB Max LOG Files", &self.rocksdb_max_log_files.to_string()),
(
});
line("Zstd HTTP Compression", &self.zstd_compression.to_string());
line("Gzip HTTP Compression", &self.gzip_compression.to_string());
line("Brotli HTTP Compression", &self.brotli_compression.to_string());
line("RocksDB database LOG level", &self.rocksdb_log_level);
line("RocksDB database LOG to stderr", &self.rocksdb_log_stderr.to_string());
line("RocksDB database LOG time-to-roll", &self.rocksdb_log_time_to_roll.to_string());
line("RocksDB Max LOG Files", &self.rocksdb_max_log_files.to_string());
line(
"RocksDB database max LOG file size",
&self.rocksdb_max_log_file_size.to_string(),
),
(
);
line(
"RocksDB database optimize for spinning disks",
&self.rocksdb_optimize_for_spinning_disks.to_string(),
),
("RocksDB Direct-IO", &self.rocksdb_direct_io.to_string()),
("RocksDB Parallelism Threads", &self.rocksdb_parallelism_threads.to_string()),
("RocksDB Compression Algorithm", &self.rocksdb_compression_algo),
("RocksDB Compression Level", &self.rocksdb_compression_level.to_string()),
(
);
line("RocksDB Direct-IO", &self.rocksdb_direct_io.to_string());
line("RocksDB Parallelism Threads", &self.rocksdb_parallelism_threads.to_string());
line("RocksDB Compression Algorithm", &self.rocksdb_compression_algo);
line("RocksDB Compression Level", &self.rocksdb_compression_level.to_string());
line(
"RocksDB Bottommost Compression Level",
&self.rocksdb_bottommost_compression_level.to_string(),
),
(
);
line(
"RocksDB Bottommost Level Compression",
&self.rocksdb_bottommost_compression.to_string(),
),
("RocksDB Recovery Mode", &self.rocksdb_recovery_mode.to_string()),
("RocksDB Repair Mode", &self.rocksdb_repair.to_string()),
("RocksDB Read-only Mode", &self.rocksdb_read_only.to_string()),
(
);
line("RocksDB Recovery Mode", &self.rocksdb_recovery_mode.to_string());
line("RocksDB Repair Mode", &self.rocksdb_repair.to_string());
line("RocksDB Read-only Mode", &self.rocksdb_read_only.to_string());
line(
"RocksDB Compaction Idle Priority",
&self.rocksdb_compaction_prio_idle.to_string(),
),
(
);
line(
"RocksDB Compaction Idle IOPriority",
&self.rocksdb_compaction_ioprio_idle.to_string(),
),
("Media integrity checks on startup", &self.media_startup_check.to_string()),
("Media compatibility filesystem links", &self.media_compat_file_link.to_string()),
("Prevent Media Downloads From", {
);
line("Media integrity checks on startup", &self.media_startup_check.to_string());
line("Media compatibility filesystem links", &self.media_compat_file_link.to_string());
line("Prevent Media Downloads From", {
let mut lst = vec![];
for domain in &self.prevent_media_downloads_from {
lst.push(domain.host());
}
&lst.join(", ")
}),
("Forbidden Remote Server Names (\"Global\" ACLs)", {
});
line("Forbidden Remote Server Names (\"Global\" ACLs)", {
let mut lst = vec![];
for domain in &self.forbidden_remote_server_names {
lst.push(domain.host());
}
&lst.join(", ")
}),
("Forbidden Remote Room Directory Server Names", {
});
line("Forbidden Remote Room Directory Server Names", {
let mut lst = vec![];
for domain in &self.forbidden_remote_room_directory_server_names {
lst.push(domain.host());
}
&lst.join(", ")
}),
("Outbound Request IP Range Denylist", {
});
line("Outbound Request IP Range Denylist", {
let mut lst = vec![];
for item in self.ip_range_denylist.iter().cloned().enumerate() {
let (_, ip): (usize, String) = item;
lst.push(ip);
}
&lst.join(", ")
}),
("Forbidden usernames", {
});
line("Forbidden usernames", {
&self.forbidden_usernames.patterns().iter().join(", ")
}),
("Forbidden room aliases", {
});
line("Forbidden room aliases", {
&self.forbidden_alias_names.patterns().iter().join(", ")
}),
(
});
line(
"URL preview domain contains allowlist",
&self.url_preview_domain_contains_allowlist.join(", "),
),
(
);
line(
"URL preview domain explicit allowlist",
&self.url_preview_domain_explicit_allowlist.join(", "),
),
(
);
line(
"URL preview domain explicit denylist",
&self.url_preview_domain_explicit_denylist.join(", "),
),
(
);
line(
"URL preview URL contains allowlist",
&self.url_preview_url_contains_allowlist.join(", "),
),
("URL preview maximum spider size", &self.url_preview_max_spider_size.to_string()),
("URL preview check root domain", &self.url_preview_check_root_domain.to_string()),
(
);
line("URL preview maximum spider size", &self.url_preview_max_spider_size.to_string());
line("URL preview check root domain", &self.url_preview_check_root_domain.to_string());
line(
"Allow check for updates / announcements check",
&self.allow_check_for_updates.to_string(),
),
("Enable netburst on startup", &self.startup_netburst.to_string()),
);
line("Enable netburst on startup", &self.startup_netburst.to_string());
#[cfg(feature = "sentry_telemetry")]
("Sentry.io reporting and tracing", &self.sentry.to_string()),
line("Sentry.io reporting and tracing", &self.sentry.to_string());
#[cfg(feature = "sentry_telemetry")]
("Sentry.io send server_name in logs", &self.sentry_send_server_name.to_string()),
line("Sentry.io send server_name in logs", &self.sentry_send_server_name.to_string());
#[cfg(feature = "sentry_telemetry")]
("Sentry.io tracing sample rate", &self.sentry_traces_sample_rate.to_string()),
(
line("Sentry.io tracing sample rate", &self.sentry_traces_sample_rate.to_string());
line("Sentry.io attach stacktrace", &self.sentry_attach_stacktrace.to_string());
line("Sentry.io send panics", &self.sentry_send_panic.to_string());
line("Sentry.io send errors", &self.sentry_send_error.to_string());
line("Sentry.io tracing filter", &self.sentry_filter);
line(
"Well-known server name",
self.well_known
.server
.as_ref()
.map_or("", |server| server.as_str()),
),
(
);
line(
"Well-known client URL",
self.well_known
.client
.as_ref()
.map_or("", |url| url.as_str()),
),
(
);
line(
"Well-known support email",
self.well_known
.support_email
.as_ref()
.map_or("", |str| str.as_ref()),
),
(
);
line(
"Well-known support Matrix ID",
self.well_known
.support_mxid
.as_ref()
.map_or("", |mxid| mxid.as_str()),
),
(
);
line(
"Well-known support role",
self.well_known
.support_role
.as_ref()
.map_or("", |role| role.as_str()),
),
(
);
line(
"Well-known support page/URL",
self.well_known
.support_page
.as_ref()
.map_or("", |url| url.as_str()),
),
("Enable the tokio-console", &self.tokio_console.to_string()),
];
);
line("Enable the tokio-console", &self.tokio_console.to_string());
let mut msg: String = "Active config values:\n\n".to_owned();
for line in lines.into_iter().enumerate() {
writeln!(msg, "{}: {}", line.1 .0, line.1 .1).expect("should be able to write to string buffer");
}
write!(f, "{msg}")
Ok(())
}
}
@ -898,7 +848,7 @@ fn default_db_cache_capacity_mb() -> f64 { 256.0 }
fn default_pdu_cache_capacity() -> u32 { 150_000 }
fn default_conduit_cache_capacity_modifier() -> f64 { 1.0 }
fn default_cache_capacity_modifier() -> f64 { 1.0 }
fn default_auth_chain_cache_capacity() -> u32 { 100_000 }
@ -930,7 +880,7 @@ fn default_dns_timeout() -> u64 { 10 }
fn default_ip_lookup_strategy() -> u8 { 5 }
fn default_max_request_size() -> u32 {
fn default_max_request_size() -> usize {
20 * 1024 * 1024 // Default to 20 MB
}
@ -968,10 +918,20 @@ fn default_pusher_idle_timeout() -> u64 { 15 }
fn default_max_fetch_prev_events() -> u16 { 100_u16 }
#[cfg(feature = "perf_measurements")]
fn default_tracing_flame_filter() -> String { "trace,h2=off".to_owned() }
fn default_tracing_flame_filter() -> String {
cfg!(debug_assertions)
.then_some("trace,h2=off")
.unwrap_or("info")
.to_owned()
}
fn default_jaeger_filter() -> String {
cfg!(debug_assertions)
.then_some("trace,h2=off")
.unwrap_or("info")
.to_owned()
}
#[cfg(feature = "perf_measurements")]
fn default_tracing_flame_output_path() -> String { "./tracing.folded".to_owned() }
fn default_trusted_servers() -> Vec<OwnedServerName> { vec![OwnedServerName::try_from("matrix.org").unwrap()] }
@ -1070,4 +1030,6 @@ fn default_sentry_endpoint() -> Option<Url> {
fn default_sentry_traces_sample_rate() -> f32 { 0.15 }
fn default_sentry_filter() -> String { "info".to_owned() }
fn default_startup_netburst_keep() -> i64 { 50 }

View file

@ -127,6 +127,7 @@ impl WildCardedDomain {
impl std::str::FromStr for WildCardedDomain {
type Err = std::convert::Infallible;
#[allow(clippy::string_slice)]
fn from_str(s: &str) -> Result<Self, Self::Err> {
// maybe do some domain validation?
Ok(if s.starts_with("*.") {

View file

@ -1,6 +1,4 @@
#![allow(dead_code)] // this is a developer's toolbox
use std::panic;
use std::{any::Any, panic};
/// Export all of the ancillary tools from here as well.
pub use crate::utils::debug::*;
@ -14,9 +12,9 @@ pub use crate::utils::debug::*;
macro_rules! debug_event {
( $level:expr, $($x:tt)+ ) => {
if cfg!(debug_assertions) && cfg!(not(feature = "dev_release_log_level")) {
::tracing::event!( $level, $($x)+ );
::tracing::event!( $level, $($x)+ )
} else {
::tracing::debug!( $($x)+ );
::tracing::debug!( $($x)+ )
}
}
}
@ -27,7 +25,7 @@ macro_rules! debug_event {
#[macro_export]
macro_rules! debug_error {
( $($x:tt)+ ) => {
$crate::debug_event!(::tracing::Level::ERROR, $($x)+ );
$crate::debug_event!(::tracing::Level::ERROR, $($x)+ )
}
}
@ -37,7 +35,7 @@ macro_rules! debug_error {
#[macro_export]
macro_rules! debug_warn {
( $($x:tt)+ ) => {
$crate::debug_event!(::tracing::Level::WARN, $($x)+ );
$crate::debug_event!(::tracing::Level::WARN, $($x)+ )
}
}
@ -47,7 +45,7 @@ macro_rules! debug_warn {
#[macro_export]
macro_rules! debug_info {
( $($x:tt)+ ) => {
$crate::debug_event!(::tracing::Level::INFO, $($x)+ );
$crate::debug_event!(::tracing::Level::INFO, $($x)+ )
}
}
@ -79,3 +77,6 @@ pub fn trap() {
std::arch::asm!("int3");
}
}
#[must_use]
pub fn panic_str(p: &Box<dyn Any + Send>) -> &'static str { p.downcast_ref::<&str>().copied().unwrap_or_default() }

View file

@ -1,209 +0,0 @@
use std::{convert::Infallible, fmt};
use axum::response::{IntoResponse, Response};
use bytes::BytesMut;
use http::StatusCode;
use http_body_util::Full;
use ruma::{
api::{
client::{
error::ErrorKind::{
Forbidden, GuestAccessForbidden, LimitExceeded, MissingToken, NotFound, ThreepidAuthFailed,
ThreepidDenied, TooLarge, Unauthorized, Unknown, UnknownToken, Unrecognized, UserDeactivated,
WrongRoomKeysVersion,
},
uiaa::{UiaaInfo, UiaaResponse},
},
OutgoingResponse,
},
OwnedServerName,
};
use thiserror::Error;
use tracing::error;
#[derive(Error)]
pub enum Error {
// std
#[error("{0}")]
Fmt(#[from] fmt::Error),
#[error("I/O error: {0}")]
Io(#[from] std::io::Error),
#[error("{0}")]
Utf8Error(#[from] std::str::Utf8Error),
#[error("{0}")]
FromUtf8Error(#[from] std::string::FromUtf8Error),
#[error("{0}")]
TryFromSliceError(#[from] std::array::TryFromSliceError),
// third-party
#[error("Regex error: {0}")]
Regex(#[from] regex::Error),
#[error("Tracing filter error: {0}")]
TracingFilter(#[from] tracing_subscriber::filter::ParseError),
#[error("Image error: {0}")]
Image(#[from] image::error::ImageError),
#[error("Request error: {0}")]
Reqwest(#[from] reqwest::Error),
#[error("{0}")]
Extension(#[from] axum::extract::rejection::ExtensionRejection),
#[error("{0}")]
Path(#[from] axum::extract::rejection::PathRejection),
// ruma
#[error("{0}")]
Mxid(#[from] ruma::IdParseError),
#[error("{0}: {1}")]
BadRequest(ruma::api::client::error::ErrorKind, &'static str),
#[error("from {0}: {1}")]
Redaction(OwnedServerName, ruma::canonical_json::RedactionError),
#[error("Remote server {0} responded with: {1}")]
Federation(OwnedServerName, ruma::api::client::error::Error),
#[error("{0} in {1}")]
InconsistentRoomState(&'static str, ruma::OwnedRoomId),
// conduwuit
#[error("There was a problem with your configuration: {0}")]
BadConfig(String),
#[error("{0}")]
BadDatabase(&'static str),
#[error("{0}")]
Database(String),
#[error("{0}")]
BadServerResponse(&'static str),
#[error("{0}")]
Conflict(&'static str), // This is only needed for when a room alias already exists
#[error("uiaa")]
Uiaa(UiaaInfo),
// unique / untyped
#[error("{0}")]
Err(String),
}
impl Error {
pub fn bad_database(message: &'static str) -> Self {
error!("BadDatabase: {}", message);
Self::BadDatabase(message)
}
pub fn bad_config(message: &str) -> Self {
error!("BadConfig: {}", message);
Self::BadConfig(message.to_owned())
}
/// Returns the Matrix error code / error kind
pub fn error_code(&self) -> ruma::api::client::error::ErrorKind {
if let Self::Federation(_, error) = self {
return error.error_kind().unwrap_or_else(|| &Unknown).clone();
}
match self {
Self::BadRequest(kind, _) => kind.clone(),
_ => Unknown,
}
}
/// Sanitizes public-facing errors that can leak sensitive information.
pub fn sanitized_error(&self) -> String {
match self {
Self::Database {
..
} => String::from("Database error occurred."),
Self::Io {
..
} => String::from("I/O error occurred."),
_ => self.to_string(),
}
}
}
impl From<Infallible> for Error {
fn from(i: Infallible) -> Self { match i {} }
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{self}") }
}
#[derive(Clone)]
pub struct RumaResponse<T>(pub T);
impl<T> From<T> for RumaResponse<T> {
fn from(t: T) -> Self { Self(t) }
}
impl From<Error> for RumaResponse<UiaaResponse> {
fn from(t: Error) -> Self { t.to_response() }
}
impl Error {
pub fn to_response(&self) -> RumaResponse<UiaaResponse> {
use ruma::api::client::error::{Error as RumaError, ErrorBody};
if let Self::Uiaa(uiaainfo) = self {
return RumaResponse(UiaaResponse::AuthResponse(uiaainfo.clone()));
}
if let Self::Federation(origin, error) = self {
let mut error = error.clone();
error.body = ErrorBody::Standard {
kind: error.error_kind().unwrap_or_else(|| &Unknown).clone(),
message: format!("Answer from {origin}: {error}"),
};
return RumaResponse(UiaaResponse::MatrixError(error));
}
let message = format!("{self}");
let (kind, status_code) = match self {
Self::BadRequest(kind, _) => (
kind.clone(),
match kind {
WrongRoomKeysVersion {
..
}
| Forbidden {
..
}
| GuestAccessForbidden
| ThreepidAuthFailed
| UserDeactivated
| ThreepidDenied => StatusCode::FORBIDDEN,
Unauthorized
| UnknownToken {
..
}
| MissingToken => StatusCode::UNAUTHORIZED,
NotFound | Unrecognized => StatusCode::NOT_FOUND,
LimitExceeded {
..
} => StatusCode::TOO_MANY_REQUESTS,
TooLarge => StatusCode::PAYLOAD_TOO_LARGE,
_ => StatusCode::BAD_REQUEST,
},
),
Self::Conflict(_) => (Unknown, StatusCode::CONFLICT),
_ => (Unknown, StatusCode::INTERNAL_SERVER_ERROR),
};
RumaResponse(UiaaResponse::MatrixError(RumaError {
body: ErrorBody::Standard {
kind,
message,
},
status_code,
}))
}
}
impl ::axum::response::IntoResponse for Error {
fn into_response(self) -> ::axum::response::Response { self.to_response().into_response() }
}
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
fn into_response(self) -> Response {
match self.0.try_into_http_response::<BytesMut>() {
Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
}
}
}

100
src/core/error/err.rs Normal file
View file

@ -0,0 +1,100 @@
//! Error construction macros
//!
//! These are specialized macros specific to this project's patterns for
//! throwing Errors; they make Error construction succinct and reduce clutter.
//! They are developed from folding existing patterns into the macro while
//! fixing several anti-patterns in the codebase.
//!
//! - The primary macros `Err!` and `err!` are provided. `Err!` simply wraps
//! `err!` in the Result variant to reduce `Err(err!(...))` boilerplate, thus
//! `err!` can be used in any case.
//!
//! 1. The macro makes the general Error construction easy: `return
//! Err!("something went wrong")` replaces the prior `return
//! Err(Error::Err("something went wrong".to_owned()))`.
//!
//! 2. The macro integrates format strings automatically: `return
//! Err!("something bad: {msg}")` replaces the prior `return
//! Err(Error::Err(format!("something bad: {msg}")))`.
//!
//! 3. The macro scopes variants of Error: `return Err!(Database("problem with
//! bad database."))` replaces the prior `return Err(Error::Database("problem
//! with bad database."))`.
//!
//! 4. The macro matches and scopes some special-case sub-variants, for example
//! with ruma ErrorKind: `return Err!(Request(MissingToken("you must provide
//! an access token")))`.
//!
//! 5. The macro fixes the anti-pattern of repeating messages in an error! log
//! and then again in an Error construction, often slightly different due to
//! the Error variant not supporting a format string. Instead `return
//! Err(Database(error!("problem with db: {msg}")))` logs the error at the
//! callsite and then returns the error with the same string. Caller has the
//! option of replacing `error!` with `debug_error!`.
/// `Err!` wraps [`err!`] in the `Err(..)` variant to cut the `Err(err!(...))`
/// boilerplate; it accepts exactly the same argument grammar as `err!`.
#[macro_export]
macro_rules! Err {
	($($args:tt)*) => {
		Err($crate::err!($($args)*))
	};
}
/// Construct an [`Error`], optionally logging the message at the callsite.
/// Rules are matched top-down from most to least specific; see the module
/// docs above for the grammar.
#[macro_export]
macro_rules! err {
	// Config("item", "fmt", args..) -> Error::Config; always logged at error
	// level with the config item attached as a structured field.
	(Config($item:literal, $($args:expr),*)) => {{
		$crate::error!(config = %$item, $($args),*);
		$crate::error::Error::Config($item, $crate::format_maybe!($($args),*))
	}};

	// Request(Forbidden(level!("..."))) -> forbidden kind, logged first.
	// NOTE(review): Forbidden is special-cased via the `forbidden()`
	// constructor rather than the `$variant` path used below — presumably
	// because it is a struct variant in ruma; confirm against the ruma API.
	(Request(Forbidden($level:ident!($($args:expr),*)))) => {{
		$crate::$level!($($args),*);
		$crate::error::Error::Request(
			::ruma::api::client::error::ErrorKind::forbidden(),
			$crate::format_maybe!($($args),*),
			::http::StatusCode::BAD_REQUEST
		)
	}};

	// Request(Forbidden("...")) -> forbidden kind, no logging.
	(Request(Forbidden($($args:expr),*))) => {
		$crate::error::Error::Request(
			::ruma::api::client::error::ErrorKind::forbidden(),
			$crate::format_maybe!($($args),*),
			::http::StatusCode::BAD_REQUEST
		)
	};

	// Request(Variant(level!("..."))) -> any unit ErrorKind variant, logged.
	(Request($variant:ident($level:ident!($($args:expr),*)))) => {{
		$crate::$level!($($args),*);
		$crate::error::Error::Request(
			::ruma::api::client::error::ErrorKind::$variant,
			$crate::format_maybe!($($args),*),
			::http::StatusCode::BAD_REQUEST
		)
	}};

	// Request(Variant("...")) -> any unit ErrorKind variant, no logging.
	(Request($variant:ident($($args:expr),*))) => {
		$crate::error::Error::Request(
			::ruma::api::client::error::ErrorKind::$variant,
			$crate::format_maybe!($($args),*),
			::http::StatusCode::BAD_REQUEST
		)
	};

	// Variant(level!("...")) -> any single-payload Error variant, logged.
	($variant:ident($level:ident!($($args:expr),*))) => {{
		$crate::$level!($($args),*);
		$crate::error::Error::$variant($crate::format_maybe!($($args),*))
	}};

	// Variant("...") -> any single-payload Error variant, no logging.
	($variant:ident($($args:expr),*)) => {
		$crate::error::Error::$variant($crate::format_maybe!($($args),*))
	};

	// level!("...") -> generic Error::Err, logged at the given level.
	($level:ident!($($args:expr),*)) => {{
		$crate::$level!($($args),*);
		$crate::error::Error::Err($crate::format_maybe!($($args),*))
	}};

	// "..." -> generic Error::Err, no logging.
	($($args:expr),*) => {
		$crate::error::Error::Err($crate::format_maybe!($($args),*))
	};
}

74
src/core/error/log.rs Normal file
View file

@ -0,0 +1,74 @@
use std::{convert::Infallible, fmt};
use super::Error;
use crate::{debug_error, error};
#[inline]
pub fn else_log<T, E>(error: E) -> Result<T, Infallible>
where
T: Default,
Error: From<E>,
{
Ok(default_log(error))
}
#[inline]
pub fn else_debug_log<T, E>(error: E) -> Result<T, Infallible>
where
T: Default,
Error: From<E>,
{
Ok(default_debug_log(error))
}
#[inline]
pub fn default_log<T, E>(error: E) -> T
where
T: Default,
Error: From<E>,
{
let error = Error::from(error);
inspect_log(&error);
T::default()
}
#[inline]
pub fn default_debug_log<T, E>(error: E) -> T
where
T: Default,
Error: From<E>,
{
let error = Error::from(error);
inspect_debug_log(&error);
T::default()
}
#[inline]
pub fn map_log<E>(error: E) -> Error
where
Error: From<E>,
{
let error = Error::from(error);
inspect_log(&error);
error
}
#[inline]
pub fn map_debug_log<E>(error: E) -> Error
where
Error: From<E>,
{
let error = Error::from(error);
inspect_debug_log(&error);
error
}
#[inline]
pub fn inspect_log<E: fmt::Display>(error: &E) {
error!("{error}");
}
#[inline]
pub fn inspect_debug_log<E: fmt::Debug>(error: &E) {
debug_error!("{error:?}");
}

156
src/core/error/mod.rs Normal file
View file

@ -0,0 +1,156 @@
mod err;
mod log;
mod panic;
mod response;
use std::{any::Any, borrow::Cow, convert::Infallible, fmt};
pub use log::*;
use crate::error;
/// Aggregate error type for the server, wrapping std, third-party, ruma, and
/// internal error sources. `thiserror` derives `Display` from the `#[error]`
/// attributes; `Debug` is implemented manually to mirror `Display`.
#[derive(thiserror::Error)]
pub enum Error {
	// Panic payloads trafficked across catch_unwind boundaries.
	#[error("PANIC!")]
	PanicAny(Box<dyn Any + Send>),
	#[error("PANIC! {0}")]
	Panic(&'static str, Box<dyn Any + Send + 'static>),

	// std
	#[error("{0}")]
	Fmt(#[from] fmt::Error),
	#[error("I/O error: {0}")]
	Io(#[from] std::io::Error),
	#[error("{0}")]
	Utf8Error(#[from] std::str::Utf8Error),
	#[error("{0}")]
	FromUtf8Error(#[from] std::string::FromUtf8Error),
	#[error("{0}")]
	TryFromSliceError(#[from] std::array::TryFromSliceError),
	#[error("{0}")]
	TryFromIntError(#[from] std::num::TryFromIntError),
	#[error("{0}")]
	ParseIntError(#[from] std::num::ParseIntError),
	#[error("{0}")]
	ParseFloatError(#[from] std::num::ParseFloatError),

	// third-party
	#[error("Join error: {0}")]
	JoinError(#[from] tokio::task::JoinError),
	#[error("Regex error: {0}")]
	Regex(#[from] regex::Error),
	#[error("Tracing filter error: {0}")]
	TracingFilter(#[from] tracing_subscriber::filter::ParseError),
	#[error("Tracing reload error: {0}")]
	TracingReload(#[from] tracing_subscriber::reload::Error),
	#[error("Image error: {0}")]
	Image(#[from] image::error::ImageError),
	#[error("Request error: {0}")]
	Reqwest(#[from] reqwest::Error),
	#[error("{0}")]
	Extension(#[from] axum::extract::rejection::ExtensionRejection),
	#[error("{0}")]
	Path(#[from] axum::extract::rejection::PathRejection),
	#[error("{0}")]
	Http(#[from] http::Error),
	#[error("{0}")]
	HttpHeader(#[from] http::header::InvalidHeaderValue),

	// ruma
	#[error("{0}")]
	IntoHttpError(#[from] ruma::api::error::IntoHttpError),
	#[error("{0}")]
	RumaError(#[from] ruma::api::client::error::Error),
	// No #[from]: user-interactive auth info is attached explicitly.
	#[error("uiaa")]
	Uiaa(ruma::api::client::uiaa::UiaaInfo),
	#[error("{0}")]
	Mxid(#[from] ruma::IdParseError),
	#[error("{0}: {1}")]
	BadRequest(ruma::api::client::error::ErrorKind, &'static str), //TODO: remove
	// Preferred over BadRequest: carries a formatted message and status code.
	#[error("{0}: {1}")]
	Request(ruma::api::client::error::ErrorKind, Cow<'static, str>, http::StatusCode),
	#[error("from {0}: {1}")]
	Redaction(ruma::OwnedServerName, ruma::canonical_json::RedactionError),
	#[error("Remote server {0} responded with: {1}")]
	Federation(ruma::OwnedServerName, ruma::api::client::error::Error),
	#[error("{0} in {1}")]
	InconsistentRoomState(&'static str, ruma::OwnedRoomId),

	// conduwuit
	#[error("Arithmetic operation failed: {0}")]
	Arithmetic(Cow<'static, str>),
	#[error("There was a problem with the '{0}' directive in your configuration: {1}")]
	Config(&'static str, Cow<'static, str>),
	#[error("{0}")]
	Database(Cow<'static, str>),
	#[error("{0}")]
	BadServerResponse(&'static str),
	#[error("{0}")]
	Conflict(&'static str), // This is only needed for when a room alias already exists

	// unique / untyped
	#[error("{0}")]
	Err(Cow<'static, str>),
}
impl Error {
pub fn bad_database(message: &'static str) -> Self { crate::err!(Database(error!("{message}"))) }
/// Sanitizes public-facing errors that can leak sensitive information.
pub fn sanitized_string(&self) -> String {
match self {
Self::Database(..) => String::from("Database error occurred."),
Self::Io(..) => String::from("I/O error occurred."),
_ => self.to_string(),
}
}
pub fn message(&self) -> String {
match self {
Self::Federation(ref origin, ref error) => format!("Answer from {origin}: {error}"),
Self::RumaError(ref error) => response::ruma_error_message(error),
_ => format!("{self}"),
}
}
/// Returns the Matrix error code / error kind
#[inline]
pub fn kind(&self) -> ruma::api::client::error::ErrorKind {
use ruma::api::client::error::ErrorKind::Unknown;
match self {
Self::Federation(_, error) => response::ruma_error_kind(error).clone(),
Self::BadRequest(kind, ..) | Self::Request(kind, ..) => kind.clone(),
_ => Unknown,
}
}
pub fn status_code(&self) -> http::StatusCode {
match self {
Self::Federation(_, ref error) | Self::RumaError(ref error) => error.status_code,
Self::Request(ref kind, _, code) => response::status_code(kind, *code),
Self::BadRequest(ref kind, ..) => response::bad_request_code(kind),
Self::Conflict(_) => http::StatusCode::CONFLICT,
_ => http::StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
impl fmt::Debug for Error {
	// Debug output intentionally mirrors Display.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f) }
}
impl From<Infallible> for Error {
	/// `Infallible` has no values, so this conversion can never execute; the
	/// empty match proves that to the compiler statically instead of relying
	/// on a runtime `panic!` (which also obsoletes the previous
	/// `clippy::fallible_impl_from` allowance).
	#[cold]
	#[inline(never)]
	fn from(e: Infallible) -> Self { match e {} }
}
/// Diagnostic sink for a `&Infallible`: a value of this type cannot exist at
/// runtime, so reaching this function indicates a logic error and aborts.
#[cold]
#[inline(never)]
pub fn infallible(_e: &Infallible) {
	panic!("infallible error should never exist");
}

41
src/core/error/panic.rs Normal file
View file

@ -0,0 +1,41 @@
use std::{
any::Any,
panic::{panic_any, RefUnwindSafe, UnwindSafe},
};
use super::Error;
use crate::debug;
// Manual unwind-safety assertions: the boxed `dyn Any` panic payloads inside
// Error are not automatically unwind-safe, but we need Error values to cross
// catch_unwind boundaries, so we promise the property explicitly here.
impl UnwindSafe for Error {}
impl RefUnwindSafe for Error {}
impl Error {
	/// Resume panicking with the panic payload carried by this error.
	pub fn panic(self) -> ! { panic_any(self.into_panic()) }

	/// Wrap a caught panic payload in an Error, capturing its message string.
	#[must_use]
	pub fn from_panic(e: Box<dyn Any + Send>) -> Self { Self::Panic(debug::panic_str(&e), e) }

	/// Extract the panic payload; a non-panic error is itself boxed up as the
	/// payload, and a JoinError yields the panic of the joined task.
	pub fn into_panic(self) -> Box<dyn Any + Send + 'static> {
		match self {
			Self::Panic(_, e) | Self::PanicAny(e) => e,
			Self::JoinError(e) => e.into_panic(),
			_ => Box::new(self),
		}
	}

	/// Get the panic message string.
	pub fn panic_str(self) -> Option<&'static str> {
		// `then` (not `then_some`): `then_some` evaluates its argument
		// eagerly, so the previous code boxed and inspected the error via
		// into_panic() even when this was not a panic at all. The lazy
		// closure only runs the extraction when is_panic() holds.
		self.is_panic()
			.then(|| debug::panic_str(&self.into_panic()))
	}

	/// Check if the Error is trafficking a panic object.
	#[inline]
	pub fn is_panic(&self) -> bool {
		match &self {
			Self::Panic(..) | Self::PanicAny(..) => true,
			Self::JoinError(e) => e.is_panic(),
			_ => false,
		}
	}
}

110
src/core/error/response.rs Normal file
View file

@ -0,0 +1,110 @@
use bytes::BytesMut;
use http::StatusCode;
use http_body_util::Full;
use ruma::api::{
client::{
error::{ErrorBody, ErrorKind},
uiaa::UiaaResponse,
},
OutgoingResponse,
};
use super::Error;
use crate::error;
impl axum::response::IntoResponse for Error {
fn into_response(self) -> axum::response::Response {
let response: UiaaResponse = self.into();
response
.try_into_http_response::<BytesMut>()
.inspect_err(|e| error!("error response error: {e}"))
.map_or_else(
|_| StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|r| r.map(BytesMut::freeze).map(Full::new).into_response(),
)
}
}
impl From<Error> for UiaaResponse {
fn from(error: Error) -> Self {
if let Error::Uiaa(uiaainfo) = error {
return Self::AuthResponse(uiaainfo);
}
let body = ErrorBody::Standard {
kind: error.kind(),
message: error.message(),
};
Self::MatrixError(ruma::api::client::error::Error {
status_code: error.status_code(),
body,
})
}
}
/// Choose the response status: any hint other than 400 is authoritative,
/// while a BAD_REQUEST hint is refined per error kind.
pub(super) fn status_code(kind: &ErrorKind, hint: StatusCode) -> StatusCode {
	if hint != StatusCode::BAD_REQUEST {
		return hint;
	}

	bad_request_code(kind)
}
/// Map a ruma `ErrorKind` onto the most specific HTTP status when the caller
/// supplied no better hint than 400; unknown kinds stay 400.
pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode {
	use ErrorKind::*;

	match kind {
		// 429
		LimitExceeded {
			..
		} => StatusCode::TOO_MANY_REQUESTS,

		// 413
		TooLarge => StatusCode::PAYLOAD_TOO_LARGE,

		// 405
		Unrecognized => StatusCode::METHOD_NOT_ALLOWED,

		// 404
		NotFound => StatusCode::NOT_FOUND,

		// 403
		GuestAccessForbidden
		| ThreepidAuthFailed
		| UserDeactivated
		| ThreepidDenied
		| WrongRoomKeysVersion {
			..
		}
		| Forbidden {
			..
		} => StatusCode::FORBIDDEN,

		// 401
		UnknownToken {
			..
		}
		| MissingToken
		| Unauthorized => StatusCode::UNAUTHORIZED,

		// 400
		_ => StatusCode::BAD_REQUEST,
	}
}
/// Prefer the standard body's message text; otherwise render the whole error.
pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> String {
	match &error.body {
		ErrorBody::Standard {
			message,
			..
		} => message.to_string(),
		_ => format!("{error}"),
	}
}
/// The error's kind, defaulting to `Unknown` when the body carries none.
pub(super) fn ruma_error_kind(e: &ruma::api::client::error::Error) -> &ErrorKind {
	match e.error_kind() {
		Some(kind) => kind,
		None => &ErrorKind::Unknown,
	}
}

View file

@ -8,5 +8,6 @@ pub struct Guard {
}
impl Drop for Guard {
#[inline]
fn drop(&mut self) { self.capture.stop(); }
}

View file

@ -17,6 +17,7 @@ struct Visitor {
}
impl Layer {
#[inline]
pub fn new(state: &Arc<State>) -> Self {
Self {
state: state.clone(),
@ -25,6 +26,7 @@ impl Layer {
}
impl fmt::Debug for Layer {
#[inline]
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.debug_struct("capture::Layer").finish()
}

View file

@ -29,25 +29,25 @@ pub struct Log {
#[macro_export]
macro_rules! error {
( $($x:tt)+ ) => { ::tracing::error!( $($x)+ ); }
( $($x:tt)+ ) => { ::tracing::error!( $($x)+ ) }
}
#[macro_export]
macro_rules! warn {
( $($x:tt)+ ) => { ::tracing::warn!( $($x)+ ); }
( $($x:tt)+ ) => { ::tracing::warn!( $($x)+ ) }
}
#[macro_export]
macro_rules! info {
( $($x:tt)+ ) => { ::tracing::info!( $($x)+ ); }
( $($x:tt)+ ) => { ::tracing::info!( $($x)+ ) }
}
#[macro_export]
macro_rules! debug {
( $($x:tt)+ ) => { ::tracing::debug!( $($x)+ ); }
( $($x:tt)+ ) => { ::tracing::debug!( $($x)+ ) }
}
#[macro_export]
macro_rules! trace {
( $($x:tt)+ ) => { ::tracing::trace!( $($x)+ ); }
( $($x:tt)+ ) => { ::tracing::trace!( $($x)+ ) }
}

View file

@ -1,7 +1,12 @@
use std::sync::Arc;
use std::{
collections::HashMap,
sync::{Arc, Mutex},
};
use tracing_subscriber::{reload, EnvFilter};
use crate::{error, Result};
/// We need to store a reload::Handle value, but can't name its type explicitly
/// because the S type parameter depends on the subscriber's previous layers. In
/// our case, this includes unnameable 'impl Trait' types.
@ -17,39 +22,60 @@ use tracing_subscriber::{reload, EnvFilter};
///
/// [1]: <https://github.com/tokio-rs/tracing/pull/1035/commits/8a87ea52425098d3ef8f56d92358c2f6c144a28f>
pub trait ReloadHandle<L> {
fn current(&self) -> Option<L>;
fn reload(&self, new_value: L) -> Result<(), reload::Error>;
}
impl<L, S> ReloadHandle<L> for reload::Handle<L, S> {
impl<L: Clone, S> ReloadHandle<L> for reload::Handle<L, S> {
fn current(&self) -> Option<L> { Self::clone_current(self) }
fn reload(&self, new_value: L) -> Result<(), reload::Error> { Self::reload(self, new_value) }
}
struct LogLevelReloadHandlesInner {
handles: Vec<Box<dyn ReloadHandle<EnvFilter> + Send + Sync>>,
}
/// Wrapper to allow reloading the filter on several
/// [`tracing_subscriber::reload::Handle`]s at once, with the same value.
#[derive(Clone)]
pub struct LogLevelReloadHandles {
inner: Arc<LogLevelReloadHandlesInner>,
handles: Arc<Mutex<HandleMap>>,
}
type HandleMap = HashMap<String, Handle>;
type Handle = Box<dyn ReloadHandle<EnvFilter> + Send + Sync>;
impl LogLevelReloadHandles {
#[must_use]
pub fn new(handles: Vec<Box<dyn ReloadHandle<EnvFilter> + Send + Sync>>) -> Self {
Self {
inner: Arc::new(LogLevelReloadHandlesInner {
handles,
}),
}
pub fn add(&self, name: &str, handle: Handle) {
self.handles
.lock()
.expect("locked")
.insert(name.into(), handle);
}
pub fn reload(&self, new_value: &EnvFilter) -> Result<(), reload::Error> {
for handle in &self.inner.handles {
handle.reload(new_value.clone())?;
}
pub fn reload(&self, new_value: &EnvFilter, names: Option<&[&str]>) -> Result<()> {
self.handles
.lock()
.expect("locked")
.iter()
.filter(|(name, _)| names.map_or(false, |names| names.contains(&name.as_str())))
.for_each(|(_, handle)| {
_ = handle.reload(new_value.clone()).or_else(error::else_log);
});
Ok(())
}
#[must_use]
pub fn current(&self, name: &str) -> Option<EnvFilter> {
self.handles
.lock()
.expect("locked")
.get(name)
.map(|handle| handle.current())?
}
}
impl Default for LogLevelReloadHandles {
fn default() -> Self {
Self {
handles: Arc::new(HandleMap::new().into()),
}
}
}

View file

@ -10,16 +10,21 @@ pub struct Suppress {
impl Suppress {
pub fn new(server: &Arc<Server>) -> Self {
let handle = "console";
let config = &server.config.log;
Self::from_filters(server, EnvFilter::try_new(config).unwrap_or_default(), &EnvFilter::default())
}
let suppress = EnvFilter::default();
let restore = server
.log
.reload
.current(handle)
.unwrap_or_else(|| EnvFilter::try_new(config).unwrap_or_default());
fn from_filters(server: &Arc<Server>, restore: EnvFilter, suppress: &EnvFilter) -> Self {
server
.log
.reload
.reload(suppress)
.reload(&suppress, Some(&[handle]))
.expect("log filter reloaded");
Self {
server: server.clone(),
restore,
@ -32,7 +37,7 @@ impl Drop for Suppress {
self.server
.log
.reload
.reload(&self.restore)
.reload(&self.restore, Some(&["console"]))
.expect("log filter reloaded");
}
}

72
src/core/metrics/mod.rs Normal file
View file

@ -0,0 +1,72 @@
use std::sync::atomic::AtomicU32;
use tokio::runtime;
use tokio_metrics::TaskMonitor;
#[cfg(tokio_unstable)]
use tokio_metrics::{RuntimeIntervals, RuntimeMonitor};
/// Runtime and request metrics state. Tokio runtime introspection fields are
/// only present when built with `tokio_unstable`.
pub struct Metrics {
	// Held to keep the runtime handle alive alongside the derived monitors.
	_runtime: Option<runtime::Handle>,

	runtime_metrics: Option<runtime::RuntimeMetrics>,

	task_monitor: Option<TaskMonitor>,

	#[cfg(tokio_unstable)]
	_runtime_monitor: Option<RuntimeMonitor>,

	// Interval iterator advanced under a mutex by runtime_interval().
	#[cfg(tokio_unstable)]
	runtime_intervals: std::sync::Mutex<Option<RuntimeIntervals>>,

	// TODO: move stats
	pub requests_spawn_active: AtomicU32,
	pub requests_spawn_finished: AtomicU32,
	pub requests_handle_active: AtomicU32,
	pub requests_handle_finished: AtomicU32,
	pub requests_panic: AtomicU32,
}
impl Metrics {
	/// Construct metrics state. All runtime-derived monitors are `None` when
	/// no runtime handle is supplied.
	#[must_use]
	pub fn new(runtime: Option<runtime::Handle>) -> Self {
		#[cfg(tokio_unstable)]
		let runtime_monitor = runtime.as_ref().map(RuntimeMonitor::new);

		#[cfg(tokio_unstable)]
		let runtime_intervals = runtime_monitor.as_ref().map(RuntimeMonitor::intervals);

		Self {
			_runtime: runtime.clone(),
			runtime_metrics: runtime.as_ref().map(runtime::Handle::metrics),
			task_monitor: runtime.map(|_| TaskMonitor::new()),
			#[cfg(tokio_unstable)]
			_runtime_monitor: runtime_monitor,
			#[cfg(tokio_unstable)]
			runtime_intervals: std::sync::Mutex::new(runtime_intervals),
			requests_spawn_active: AtomicU32::new(0),
			requests_spawn_finished: AtomicU32::new(0),
			requests_handle_active: AtomicU32::new(0),
			requests_handle_finished: AtomicU32::new(0),
			requests_panic: AtomicU32::new(0),
		}
	}

	/// Advance the runtime-metrics interval iterator and return the next
	/// sample, or `None` when no runtime was attached.
	#[cfg(tokio_unstable)]
	pub fn runtime_interval(&self) -> Option<tokio_metrics::RuntimeMetrics> {
		// and_then (was: .map(Iterator::next).expect("next interval")):
		// when constructed without a runtime the iterator is absent, which
		// the Option return type already models as None — the previous code
		// panicked in that case instead.
		self.runtime_intervals
			.lock()
			.expect("locked")
			.as_mut()
			.and_then(Iterator::next)
	}

	pub fn task_root(&self) -> Option<&TaskMonitor> { self.task_monitor.as_ref() }

	pub fn runtime_metrics(&self) -> Option<&runtime::RuntimeMetrics> { self.runtime_metrics.as_ref() }
}

View file

@ -3,15 +3,16 @@ pub mod config;
pub mod debug;
pub mod error;
pub mod log;
pub mod metrics;
pub mod mods;
pub mod pducount;
pub mod pdu;
pub mod server;
pub mod utils;
pub mod version;
pub use config::Config;
pub use error::{Error, RumaResponse};
pub use pducount::PduCount;
pub use error::Error;
pub use pdu::{PduBuilder, PduCount, PduEvent};
pub use server::Server;
pub use version::version;

16
src/core/pdu/builder.rs Normal file
View file

@ -0,0 +1,16 @@
use std::{collections::BTreeMap, sync::Arc};
use ruma::{events::TimelineEventType, EventId};
use serde::Deserialize;
use serde_json::value::RawValue as RawJsonValue;
/// Build the start of a PDU in order to add it to the Database.
#[derive(Debug, Deserialize)]
pub struct PduBuilder {
	// Wire field is "type" — a reserved word in Rust, hence the rename.
	#[serde(rename = "type")]
	pub event_type: TimelineEventType,
	pub content: Box<RawJsonValue>,
	pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
	// NOTE(review): presumably Some(..) for state events only — confirm
	// against callers.
	pub state_key: Option<String>,
	pub redacts: Option<Arc<EventId>>,
}

View file

@ -1,6 +1,10 @@
mod builder;
mod count;
use std::{cmp::Ordering, collections::BTreeMap, sync::Arc};
use conduit::{warn, Error};
pub use builder::PduBuilder;
pub use count::PduCount;
use ruma::{
canonical_json::redact_content_in_place,
events::{
@ -19,7 +23,7 @@ use serde_json::{
value::{to_raw_value, RawValue as RawJsonValue},
};
use crate::services;
use crate::{warn, Error};
#[derive(Deserialize)]
struct ExtractRedactedBecause {
@ -60,7 +64,7 @@ pub struct PduEvent {
}
impl PduEvent {
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &Self) -> crate::Result<()> {
self.unsigned = None;
@ -112,7 +116,11 @@ impl PduEvent {
.map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get()))
.map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?;
unsigned.insert("age".to_owned(), to_raw_value(&1).unwrap());
let now: u64 = MilliSecondsSinceUnixEpoch::now().get().into();
let then: u64 = self.origin_server_ts.into();
let this_age: u64 = now - then;
unsigned.insert("age".to_owned(), to_raw_value(&this_age).unwrap());
self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid"));
Ok(())
@ -154,7 +162,7 @@ impl PduEvent {
(self.redacts.clone(), self.content.clone())
}
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_sync_room_event(&self) -> Raw<AnySyncTimelineEvent> {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
@ -179,7 +187,7 @@ impl PduEvent {
}
/// This only works for events that are also AnyRoomEvents.
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_any_event(&self) -> Raw<AnyEphemeralRoomEvent> {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
@ -204,7 +212,7 @@ impl PduEvent {
serde_json::from_value(json).expect("Raw::from_value always works")
}
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_room_event(&self) -> Raw<AnyTimelineEvent> {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
@ -229,7 +237,7 @@ impl PduEvent {
serde_json::from_value(json).expect("Raw::from_value always works")
}
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_message_like_event(&self) -> Raw<AnyMessageLikeEvent> {
let (redacts, content) = self.copy_redacts();
let mut json = json!({
@ -254,7 +262,7 @@ impl PduEvent {
serde_json::from_value(json).expect("Raw::from_value always works")
}
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_state_event(&self) -> Raw<AnyStateEvent> {
let mut json = json!({
"content": self.content,
@ -273,7 +281,7 @@ impl PduEvent {
serde_json::from_value(json).expect("Raw::from_value always works")
}
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_sync_state_event(&self) -> Raw<AnySyncStateEvent> {
let mut json = json!({
"content": self.content,
@ -291,7 +299,7 @@ impl PduEvent {
serde_json::from_value(json).expect("Raw::from_value always works")
}
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_stripped_state_event(&self) -> Raw<AnyStrippedStateEvent> {
let json = json!({
"content": self.content,
@ -303,7 +311,7 @@ impl PduEvent {
serde_json::from_value(json).expect("Raw::from_value always works")
}
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_stripped_spacechild_state_event(&self) -> Raw<HierarchySpaceChildEvent> {
let json = json!({
"content": self.content,
@ -316,7 +324,7 @@ impl PduEvent {
serde_json::from_value(json).expect("Raw::from_value always works")
}
#[tracing::instrument(skip(self))]
#[tracing::instrument(skip(self), level = "debug")]
pub fn to_member_event(&self) -> Raw<StateEvent<RoomMemberEventContent>> {
let mut json = json!({
"content": self.content,
@ -336,42 +344,6 @@ impl PduEvent {
serde_json::from_value(json).expect("Raw::from_value always works")
}
/// This does not return a full `Pdu` it is only to satisfy ruma's types.
#[tracing::instrument]
pub fn convert_to_outgoing_federation_event(mut pdu_json: CanonicalJsonObject) -> Box<RawJsonValue> {
if let Some(unsigned) = pdu_json
.get_mut("unsigned")
.and_then(|val| val.as_object_mut())
{
unsigned.remove("transaction_id");
}
// room v3 and above removed the "event_id" field from remote PDU format
if let Some(room_id) = pdu_json
.get("room_id")
.and_then(|val| RoomId::parse(val.as_str()?).ok())
{
match services().rooms.state.get_room_version(&room_id) {
Ok(room_version_id) => match room_version_id {
RoomVersionId::V1 | RoomVersionId::V2 => {},
_ => _ = pdu_json.remove("event_id"),
},
Err(_) => _ = pdu_json.remove("event_id"),
}
} else {
pdu_json.remove("event_id");
}
// TODO: another option would be to convert it to a canonical string to validate
// size and return a Result<Raw<...>>
// serde_json::from_str::<Raw<_>>(
// ruma::serde::to_canonical_json_string(pdu_json).expect("CanonicalJson is
// valid serde_json::Value"), )
// .expect("Raw::from_value always works")
to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value")
}
pub fn from_id_val(event_id: &EventId, mut json: CanonicalJsonObject) -> Result<Self, serde_json::Error> {
json.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()));
@ -438,14 +410,3 @@ pub fn gen_event_id_canonical_json(
Ok((event_id, value))
}
/// Build the start of a PDU in order to add it to the Database.
#[derive(Debug, Deserialize)]
pub struct PduBuilder {
#[serde(rename = "type")]
pub event_type: TimelineEventType,
pub content: Box<RawJsonValue>,
pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
pub state_key: Option<String>,
pub redacts: Option<Arc<EventId>>,
}

View file

@ -1,11 +1,11 @@
use std::{
sync::atomic::{AtomicBool, AtomicU32, Ordering},
sync::atomic::{AtomicBool, Ordering},
time::SystemTime,
};
use tokio::{runtime, sync::broadcast};
use crate::{config::Config, log, Error, Result};
use crate::{config::Config, log::Log, metrics::Metrics, Err, Result};
/// Server runtime state; public portion
pub struct Server {
@ -33,71 +33,68 @@ pub struct Server {
pub signal: broadcast::Sender<&'static str>,
/// Logging subsystem state
pub log: log::Log,
pub log: Log,
/// TODO: move stats
pub requests_spawn_active: AtomicU32,
pub requests_spawn_finished: AtomicU32,
pub requests_handle_active: AtomicU32,
pub requests_handle_finished: AtomicU32,
pub requests_panic: AtomicU32,
/// Metrics subsystem state
pub metrics: Metrics,
}
impl Server {
#[must_use]
pub fn new(config: Config, runtime: Option<runtime::Handle>, log: log::Log) -> Self {
pub fn new(config: Config, runtime: Option<runtime::Handle>, log: Log) -> Self {
Self {
config,
started: SystemTime::now(),
stopping: AtomicBool::new(false),
reloading: AtomicBool::new(false),
restarting: AtomicBool::new(false),
runtime,
runtime: runtime.clone(),
signal: broadcast::channel::<&'static str>(1).0,
log,
requests_spawn_active: AtomicU32::new(0),
requests_spawn_finished: AtomicU32::new(0),
requests_handle_active: AtomicU32::new(0),
requests_handle_finished: AtomicU32::new(0),
requests_panic: AtomicU32::new(0),
metrics: Metrics::new(runtime),
}
}
pub fn reload(&self) -> Result<()> {
if cfg!(not(conduit_mods)) {
return Err(Error::Err("Reloading not enabled".into()));
return Err!("Reloading not enabled");
}
if self.reloading.swap(true, Ordering::AcqRel) {
return Err(Error::Err("Reloading already in progress".into()));
return Err!("Reloading already in progress");
}
if self.stopping.swap(true, Ordering::AcqRel) {
return Err(Error::Err("Shutdown already in progress".into()));
return Err!("Shutdown already in progress");
}
self.signal("SIGINT")
self.signal("SIGINT").inspect_err(|_| {
self.stopping.store(false, Ordering::Release);
self.reloading.store(false, Ordering::Release);
})
}
pub fn restart(&self) -> Result<()> {
if self.restarting.swap(true, Ordering::AcqRel) {
return Err(Error::Err("Restart already in progress".into()));
return Err!("Restart already in progress");
}
self.shutdown()
.inspect_err(|_| self.restarting.store(false, Ordering::Release))
}
pub fn shutdown(&self) -> Result<()> {
if self.stopping.swap(true, Ordering::AcqRel) {
return Err(Error::Err("Shutdown already in progress".into()));
return Err!("Shutdown already in progress");
}
self.signal("SIGTERM")
.inspect_err(|_| self.stopping.store(false, Ordering::Release))
}
pub fn signal(&self, sig: &'static str) -> Result<()> {
if let Err(e) = self.signal.send(sig) {
return Err(Error::Err(format!("Failed to send signal: {e}")));
return Err!("Failed to send signal: {e}");
}
Ok(())

View file

@ -3,27 +3,28 @@ use crate::Result;
#[inline]
#[must_use]
pub fn increment(old: Option<&[u8]>) -> [u8; 8] {
old.map(TryInto::try_into)
.map_or(0_u64, |val| val.map_or(0_u64, u64::from_be_bytes))
old.map_or(0_u64, u64_from_bytes_or_zero)
.wrapping_add(1)
.to_be_bytes()
}
/// Parses the big-endian bytes into an u64.
#[inline]
pub fn u64_from_bytes(bytes: &[u8]) -> Result<u64> {
let array: [u8; 8] = bytes.try_into()?;
Ok(u64_from_u8x8(array))
}
/// Parses the 8 big-endian bytes into an u64.
/// Parses 8 big-endian bytes into an u64; panic on invalid argument
#[inline]
#[must_use]
pub fn u64_from_u8(bytes: &[u8]) -> u64 {
let bytes: &[u8; 8] = bytes.try_into().expect("must slice at least 8 bytes");
u64_from_u8x8(*bytes)
}
pub fn u64_from_u8(bytes: &[u8]) -> u64 { u64_from_bytes(bytes).expect("must slice at least 8 bytes") }
/// Parses the big-endian bytes into an u64.
#[inline]
#[must_use]
pub fn u64_from_bytes_or_zero(bytes: &[u8]) -> u64 { u64_from_bytes(bytes).unwrap_or(0) }
/// Parses the big-endian bytes into an u64.
#[inline]
pub fn u64_from_bytes(bytes: &[u8]) -> Result<u64> { Ok(u64_from_u8x8(*u8x8_from_bytes(bytes)?)) }
#[inline]
#[must_use]
pub fn u64_from_u8x8(bytes: [u8; 8]) -> u64 { u64::from_be_bytes(bytes) }
#[inline]
pub fn u8x8_from_bytes(bytes: &[u8]) -> Result<&[u8; 8]> { Ok(bytes.try_into()?) }

View file

@ -66,7 +66,7 @@ pub fn content_disposition_type(content_type: &Option<String>) -> &'static str {
/// sanitises the file name for the Content-Disposition using
/// `sanitize_filename` crate
#[tracing::instrument]
#[tracing::instrument(level = "debug")]
pub fn sanitise_filename(filename: String) -> String {
let options = sanitize_filename::Options {
truncate: false,

Some files were not shown because too many files have changed in this diff Show more